From dab0dc52059df826c73141c4bad0955bb50bfd64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AD=A6=E5=AE=AE=E8=AA=A0?= Date: Tue, 28 Apr 2026 22:45:40 +0400 Subject: [PATCH] update docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 武宮誠 --- .vitepress/config.mts | 124 +- .../components/CompatibilityMatrixTable.vue | 66 +- .../theme/components/ToriiApiConsole.vue | 525 ++++++++ .vitepress/theme/index.ts | 4 + .vitepress/theme/rapidoc.d.ts | 1 + .vitepress/theme/style/index.scss | 30 +- CONTRIBUTING.md | 10 +- README.md | 13 +- etc/meta.ts | 4 +- etc/snippet-sources.ts | 14 +- package.json | 1 + pnpm-lock.yaml | 1134 ++++++++++++++++- src/blockchain/accounts.md | 35 + src/blockchain/assets.md | 79 +- src/blockchain/consensus.md | 159 ++- src/blockchain/data-model.md | 93 +- src/blockchain/domains.md | 42 + src/blockchain/events.md | 26 + src/blockchain/fastpq.md | 488 ++++++- src/blockchain/instructions.md | 76 +- src/blockchain/metadata.md | 46 +- src/blockchain/nfts.md | 195 +++ src/blockchain/queries.md | 22 + src/blockchain/rwas.md | 506 ++++++++ src/blockchain/sora-nexus-services.md | 688 +++++++--- src/blockchain/transactions.md | 115 ++ src/documenting/snippets.md | 21 +- src/get-started/index.md | 8 +- src/get-started/install-iroha-2.md | 2 +- src/get-started/operate-iroha-2-via-cli.md | 75 +- .../private-dataspace-fee-sponsor.md | 17 + src/get-started/sora-nexus-dataspaces.md | 144 +++ src/guide/advanced/chaos-testing.md | 216 ++++ src/guide/advanced/metrics.md | 20 + .../best-practices/application-development.md | 79 ++ src/guide/best-practices/data-modeling.md | 75 ++ src/guide/best-practices/index.md | 43 + .../best-practices/network-deployment.md | 74 ++ src/guide/best-practices/operations.md | 67 + src/guide/best-practices/release-readiness.md | 67 + .../best-practices/security-and-access.md | 74 ++ .../configure/metadata-and-store-assets.md | 40 +- src/guide/index.md | 34 + 
src/guide/security/fraud-monitoring.md | 134 ++ src/guide/security/index.md | 8 + src/guide/security/security-principles.md | 2 +- src/guide/security/vpn.md | 120 ++ src/guide/tutorials/index.md | 10 +- src/guide/tutorials/javascript.md | 49 +- src/guide/tutorials/kaigi.md | 5 +- src/guide/tutorials/kotlin-java.md | 42 +- src/guide/tutorials/musubi.md | 212 +++ src/guide/tutorials/python.md | 244 +++- src/guide/tutorials/rust.md | 34 +- src/guide/tutorials/sample-apps.md | 50 +- src/guide/tutorials/swift.md | 201 ++- src/help/integration-issues.md | 14 + src/index.md | 42 +- src/public/compat-matrix.json | 69 +- src/reference/binaries.md | 6 +- src/reference/compatibility-matrix.md | 4 +- src/reference/data-model-schema.md | 11 +- src/reference/genesis.md | 4 +- src/reference/index.md | 5 +- src/reference/naming.md | 30 +- src/reference/norito.md | 6 +- src/reference/torii-api-console.md | 53 + src/reference/torii-endpoints.md | 104 +- 68 files changed, 6470 insertions(+), 541 deletions(-) create mode 100644 .vitepress/theme/components/ToriiApiConsole.vue create mode 100644 .vitepress/theme/rapidoc.d.ts create mode 100644 src/blockchain/nfts.md create mode 100644 src/blockchain/rwas.md create mode 100644 src/guide/advanced/chaos-testing.md create mode 100644 src/guide/best-practices/application-development.md create mode 100644 src/guide/best-practices/data-modeling.md create mode 100644 src/guide/best-practices/index.md create mode 100644 src/guide/best-practices/network-deployment.md create mode 100644 src/guide/best-practices/operations.md create mode 100644 src/guide/best-practices/release-readiness.md create mode 100644 src/guide/best-practices/security-and-access.md create mode 100644 src/guide/index.md create mode 100644 src/guide/security/fraud-monitoring.md create mode 100644 src/guide/security/vpn.md create mode 100644 src/guide/tutorials/musubi.md create mode 100644 src/reference/torii-api-console.md diff --git a/.vitepress/config.mts 
b/.vitepress/config.mts index 9685e5848..218df1705 100644 --- a/.vitepress/config.mts +++ b/.vitepress/config.mts @@ -16,8 +16,8 @@ function nav(): DefaultTheme.NavItem[] { activeMatch: '/get-started/', }, { - text: 'SDKs', - link: '/guide/tutorials/', + text: 'Guide', + link: '/guide/', activeMatch: '/guide/', }, { @@ -75,6 +75,17 @@ function sidebarStart(): DefaultTheme.SidebarItem[] { function sidebarGuide(): DefaultTheme.SidebarItem[] { return [ + { + text: 'Guide', + link: '/guide/', + collapsed: false, + items: [ + { + text: 'Overview', + link: '/guide/', + }, + ], + }, { text: 'SDK Tutorials', link: '/guide/tutorials/', @@ -108,12 +119,51 @@ function sidebarGuide(): DefaultTheme.SidebarItem[] { text: 'Embed Kaigi', link: '/guide/tutorials/kaigi', }, + { + text: 'Musubi Packages', + link: '/guide/tutorials/musubi', + }, { text: 'Compatibility Matrix', link: '/reference/compatibility-matrix', }, ], }, + { + text: 'Best Practices', + link: '/guide/best-practices/', + collapsed: false, + items: [ + { + text: 'Overview', + link: '/guide/best-practices/', + }, + { + text: 'Application Development', + link: '/guide/best-practices/application-development.md', + }, + { + text: 'Data Modeling', + link: '/guide/best-practices/data-modeling.md', + }, + { + text: 'Network Deployment', + link: '/guide/best-practices/network-deployment.md', + }, + { + text: 'Operations', + link: '/guide/best-practices/operations.md', + }, + { + text: 'Security and Access', + link: '/guide/best-practices/security-and-access.md', + }, + { + text: 'Release Readiness', + link: '/guide/best-practices/release-readiness.md', + }, + ], + }, { text: 'Operator Quick Links', collapsed: false, @@ -146,16 +196,67 @@ function sidebarGuide(): DefaultTheme.SidebarItem[] { text: 'Torii Endpoints', link: '/reference/torii-endpoints.md', }, + { + text: 'Torii API Console', + link: '/reference/torii-api-console.md', + }, { text: 'Performance and Metrics', link: '/guide/advanced/metrics.md', }, + { + text: 
'Chaos Testing', + link: '/guide/advanced/chaos-testing.md', + }, { text: 'Binaries', link: '/reference/binaries.md', }, ], }, + { + text: 'Security', + link: '/guide/security/', + collapsed: false, + items: [ + { + text: 'Overview', + link: '/guide/security/', + }, + { + text: 'Security Principles', + link: '/guide/security/security-principles.md', + }, + { + text: 'Virtual Private Networks', + link: '/guide/security/vpn.md', + }, + { + text: 'Operational Security', + link: '/guide/security/operational-security.md', + }, + { + text: 'Fraud Monitoring', + link: '/guide/security/fraud-monitoring.md', + }, + { + text: 'Password Security', + link: '/guide/security/password-security.md', + }, + { + text: 'Public Key Cryptography', + link: '/guide/security/public-key-cryptography.md', + }, + { + text: 'Generating Cryptographic Keys', + link: '/guide/security/generating-cryptographic-keys.md', + }, + { + text: 'Storing Cryptographic Keys', + link: '/guide/security/storing-cryptographic-keys.md', + }, + ], + }, ] } @@ -198,6 +299,14 @@ function sidebarChain(): DefaultTheme.SidebarItem[] { text: 'Assets', link: '/blockchain/assets', }, + { + text: 'NFTs', + link: '/blockchain/nfts', + }, + { + text: 'Real-World Assets', + link: '/blockchain/rwas', + }, { text: 'Metadata', link: '/blockchain/metadata', @@ -286,6 +395,10 @@ function sidebarReference(): DefaultTheme.SidebarItem[] { text: 'Torii API', link: '/reference/torii-endpoints.md', }, + { + text: 'Torii API Console', + link: '/reference/torii-api-console.md', + }, { text: 'Norito', link: '/reference/norito.md', @@ -351,6 +464,13 @@ export default defineConfig({ plugins: [ViteUnoCSS('../uno.config.ts'), ViteSvgLoader()], envDir: resolve(__dirname, '../'), }, + vue: { + template: { + compilerOptions: { + isCustomElement: (tag: string) => tag === 'rapi-doc', + }, + }, + }, lastUpdated: true, head: [ diff --git a/.vitepress/theme/components/CompatibilityMatrixTable.vue 
b/.vitepress/theme/components/CompatibilityMatrixTable.vue index 529c67958..284fdc2a2 100644 --- a/.vitepress/theme/components/CompatibilityMatrixTable.vue +++ b/.vitepress/theme/components/CompatibilityMatrixTable.vue @@ -15,7 +15,9 @@ interface Matrix { interface MatrixSource { repo?: string + repo_url?: string branch?: string + branch_url?: string commit?: string dirty?: boolean generated_at?: string @@ -126,20 +128,32 @@ const table = computed(() => { return { headers, rows } }) -const sourceSummary = computed(() => { +function toHttpUrl(value: unknown): string | undefined { + if (typeof value !== 'string') return undefined + + try { + const url = new URL(value) + return url.protocol === 'http:' || url.protocol === 'https:' ? url.href : undefined + } catch { + return undefined + } +} + +const matrixSource = computed(() => { if (!task.state.fulfilled) return null const { source } = task.state.fulfilled.value if (!source) return null - const parts: string[] = [] - if (source.repo) parts.push(`Source: ${source.repo}`) - if (source.branch) parts.push(source.branch) - if (source.commit) parts.push(source.commit) - if (source.dirty) parts.push('dirty worktree') - if (source.generated_at) parts.push(`generated ${source.generated_at}`) - - return parts.join(' | ') + return { + repo: source.repo, + repoUrl: toHttpUrl(source.repo_url), + branch: source.branch, + branchUrl: toHttpUrl(source.branch_url), + commit: source.commit, + dirty: source.dirty, + generatedAt: source.generated_at, + } }) const rejectionReason = computed(() => { @@ -180,10 +194,40 @@ const rejectionReason = computed(() => {

- {{ sourceSummary }} + Source: + + {{ matrixSource.repo }} + + {{ matrixSource.repo }} + + + +

diff --git a/.vitepress/theme/components/ToriiApiConsole.vue b/.vitepress/theme/components/ToriiApiConsole.vue new file mode 100644 index 000000000..2b2a409bd --- /dev/null +++ b/.vitepress/theme/components/ToriiApiConsole.vue @@ -0,0 +1,525 @@ + + + + + diff --git a/.vitepress/theme/index.ts b/.vitepress/theme/index.ts index 3b8bb4e56..71464a23e 100644 --- a/.vitepress/theme/index.ts +++ b/.vitepress/theme/index.ts @@ -22,5 +22,9 @@ export default { 'CompatibilityMatrixTableIcon', defineAsyncComponent(async () => import('./components/CompatibilityMatrixTableIcon.vue')), ) + app.component( + 'ToriiApiConsole', + defineAsyncComponent(() => import('./components/ToriiApiConsole.vue')), + ) }, } satisfies import('vitepress').Theme diff --git a/.vitepress/theme/rapidoc.d.ts b/.vitepress/theme/rapidoc.d.ts new file mode 100644 index 000000000..b290d127e --- /dev/null +++ b/.vitepress/theme/rapidoc.d.ts @@ -0,0 +1 @@ +declare module 'rapidoc' diff --git a/.vitepress/theme/style/index.scss b/.vitepress/theme/style/index.scss index be42647c2..ee3c90f39 100644 --- a/.vitepress/theme/style/index.scss +++ b/.vitepress/theme/style/index.scss @@ -1,12 +1,10 @@ -@import url("https://fonts.googleapis.com/css2?family=Sora:wght@100..800&display=swap"); +@import url('https://fonts.googleapis.com/css2?family=Sora:wght@100..800&display=swap'); :root { - --vp-font-family-mono: - "JetBrains Mono", "Fira Code", Menlo, Monaco, Consolas, "Courier New", - monospace; + --vp-font-family-mono: 'JetBrains Mono', 'Fira Code', Menlo, Monaco, Consolas, 'Courier New', monospace; --vp-font-family-base: - "Sora", "Inter", ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", - "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; + 'Sora', 'Inter', ui-sans-serif, system-ui, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', + 'Noto Color Emoji'; --vp-c-brand-1: var(--vp-c-red-1); --vp-c-brand-2: var(--vp-c-red-2); @@ -210,7 +208,7 @@ } .vp-doc { - div[class*="language-"] { + 
div[class*='language-'] { margin-right: 0; margin-left: 0; @@ -244,7 +242,7 @@ overflow-x: auto; } - tr.transposed-table:nth-child(2n+1) { + tr.transposed-table:nth-child(2n + 1) { background: var(--vp-c-bg-soft); pre { @@ -262,6 +260,20 @@ } } +.torii-api-console-page { + .VPDoc .container { + max-width: 1440px; + } + + .VPDoc .content { + max-width: 1120px; + } + + .VPDoc .content-container { + max-width: none; + } +} + @media (max-width: 640px) { .VPHome .VPHero { padding-bottom: 28px; @@ -272,7 +284,7 @@ } .vp-doc { - div[class*="language-"] { + div[class*='language-'] { border-radius: 0; pre { diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bf4f91751..aea35ce61 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,8 +5,7 @@ repository contains the VitePress source for the public documentation site. Use this guide for documentation changes, site changes, and repository tooling changes. For implementation details, verify behavior against the -[Hyperledger Iroha](https://github.com/hyperledger-iroha/iroha) source -repository. +[Hyperledger Iroha `i23-features` branch](https://github.com/hyperledger-iroha/iroha/tree/i23-features). ## Before You Start @@ -34,9 +33,10 @@ corepack enable pnpm install ``` -The install step runs `pnpm get-snippets` after dependencies are installed. By -default, snippet tooling expects the Iroha source checkout at `../iroha`. If your -checkout is elsewhere, set `IROHA_SOURCE_DIR`: +The install step runs `pnpm get-snippets` after dependencies are installed. +Snippet tooling reads from a local checkout of the +[`hyperledger-iroha/iroha` `i23-features` branch](https://github.com/hyperledger-iroha/iroha/tree/i23-features). 
+Set `IROHA_SOURCE_DIR` when you need to point it at a specific checkout: ```bash IROHA_SOURCE_DIR=/path/to/iroha pnpm get-snippets diff --git a/README.md b/README.md index 0fb67927a..4897136c9 100644 --- a/README.md +++ b/README.md @@ -3,11 +3,8 @@ This repository contains the VitePress source for the public Hyperledger Iroha documentation site, updated for the Iroha 3 / SORA Nexus track. -The implementation source of truth lives in the main -[hyperledger-iroha/iroha](https://github.com/hyperledger-iroha/iroha/) -repository. In this workspace the sibling `../iroha` checkout is used as the -authoritative reference for binaries, configs, CLI help, genesis layout, and -SDK surfaces. +The implementation source of truth lives in the +[`hyperledger-iroha/iroha` `i23-features` branch](https://github.com/hyperledger-iroha/iroha/tree/i23-features). The site focuses on: @@ -91,9 +88,9 @@ VITE_COMPAT_MATRIX_URL=https://example.com/compat-matrix ``` The compatibility matrix uses the bundled -`src/public/compat-matrix.json` snapshot by default. That snapshot is generated -from the local sibling `../iroha` checkout and should be refreshed when SDK -coverage changes there. +`src/public/compat-matrix.json` snapshot by default. That snapshot tracks the +[`hyperledger-iroha/iroha` `i23-features` branch](https://github.com/hyperledger-iroha/iroha/tree/i23-features) +and should be refreshed when SDK coverage changes there. Set `VITE_COMPAT_MATRIX_URL` only when you want to override the bundled snapshot with a live endpoint. The endpoint must return the compatibility diff --git a/etc/meta.ts b/etc/meta.ts index 98e3345ec..90ae7e962 100644 --- a/etc/meta.ts +++ b/etc/meta.ts @@ -1,6 +1,6 @@ import path from 'path' -export const IROHA_REV = process.env.IROHA_REV ?? '11d3d92d74d278583467461e52f3d390ec18ba64' +export const IROHA_REV = process.env.IROHA_REV ?? 'i23-features' export const IROHA_RAW_BASE = process.env.IROHA_RAW_BASE ?? 
`https://raw.githubusercontent.com/hyperledger-iroha/iroha/${IROHA_REV}` -export const IROHA_SOURCE_DIR = path.resolve(process.env.IROHA_SOURCE_DIR ?? path.resolve(__dirname, '../../iroha')) +export const IROHA_SOURCE_DIR = process.env.IROHA_SOURCE_DIR ? path.resolve(process.env.IROHA_SOURCE_DIR) : undefined diff --git a/etc/snippet-sources.ts b/etc/snippet-sources.ts index 41d76f9cd..9912e4fde 100644 --- a/etc/snippet-sources.ts +++ b/etc/snippet-sources.ts @@ -11,6 +11,10 @@ function irohaRawSource(...segments: string[]): string { } function generateDataModelSchema(): string { + if (!IROHA_SOURCE_DIR) { + throw new Error('IROHA_SOURCE_DIR is not configured.') + } + const command = spawnSync('cargo', ['run', '-p', 'iroha_kagami', '--', 'advanced', 'schema'], { cwd: IROHA_SOURCE_DIR, encoding: 'utf8', @@ -18,7 +22,11 @@ function generateDataModelSchema(): string { if (command.status !== 0 || command.error) { throw new Error( - [`Failed to generate data-model schema from ${IROHA_SOURCE_DIR}.`, command.error?.message, command.stderr] + [ + 'Failed to generate data-model schema from the configured Iroha source checkout.', + command.error?.message, + command.stderr, + ] .filter(Boolean) .join('\n'), ) @@ -38,9 +46,9 @@ function renderCurrentDataModelSchema(source: string): string { return [ '> [!WARNING]', '> The Iroha data-model schema snapshot is currently unavailable.', - `> \`${irohaRawSource(IROHA_SCHEMA_PATH)}\` is empty, and \`kagami advanced schema\` failed against \`${IROHA_SOURCE_DIR}\`.`, + `> \`${irohaRawSource(IROHA_SCHEMA_PATH)}\` is empty, and \`kagami advanced schema\` could not generate a replacement from the configured Iroha source checkout.`, '>', - '> Refresh this page with `pnpm get-snippets` after the upstream schema generator succeeds.', + '> Refresh this page with `pnpm get-snippets` after the upstream schema generator succeeds. 
Set `IROHA_SOURCE_DIR` if you need to generate the schema from a local checkout.', '', '```text', detail, diff --git a/package.json b/package.json index 9921b9a7d..4a8bc27a4 100644 --- a/package.json +++ b/package.json @@ -65,6 +65,7 @@ "@vueuse/core": "^12.8.2", "@vueuse/math": "^12.8.2", "mande": "^2.0.9", + "rapidoc": "^9.3.8", "tiny-invariant": "^1.3.3", "vue": "^3.5.22" } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index afe2dded0..1f7299a80 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -23,6 +23,9 @@ importers: mande: specifier: ^2.0.9 version: 2.0.9 + rapidoc: + specifier: ^9.3.8 + version: 9.3.8 tiny-invariant: specifier: ^1.3.3 version: 1.3.3 @@ -140,7 +143,7 @@ importers: version: 5.1.0(vue@3.5.22(typescript@5.6.3)) vitepress: specifier: 1.6.4 - version: 1.6.4(@algolia/client-search@5.41.0)(@types/node@20.6.0)(postcss@8.5.6)(sass@1.93.2)(search-insights@2.13.0)(typescript@5.6.3) + version: 1.6.4(@algolia/client-search@5.41.0)(@types/node@20.6.0)(axios@1.15.2)(postcss@8.5.6)(sass@1.93.2)(search-insights@2.13.0)(typescript@5.6.3) vitest: specifier: ^2.1.4 version: 2.1.4(@types/node@20.6.0)(sass@1.93.2) @@ -236,6 +239,9 @@ packages: '@antfu/utils@0.7.7': resolution: {integrity: sha512-gFPqTG7otEJ8uP6wrhDv6mqwGWYZKNvAcCq6u9hOj0c+IKCEsY4L1oC9trPq2SaWIzAfHvqfBDxF591JkMf+kg==} + '@apitools/openapi-parser@0.0.33': + resolution: {integrity: sha512-on8oZKkRPrPUvJmmQGpLtlcthNrREH5OjDUK2ZczKuFPOx8Tkn9mzyPc7DTQ7O0JQolaZIwymFmBaajglI6LHA==} + '@babel/code-frame@7.23.5': resolution: {integrity: sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==} engines: {node: '>=6.9.0'} @@ -373,6 +379,10 @@ packages: peerDependencies: '@babel/core': ^7.0.0-0 + '@babel/runtime-corejs3@7.29.2': + resolution: {integrity: sha512-Lc94FOD5+0aXhdb0Tdg3RUtqT6yWbI/BbFWvlaSJ3gAb9Ks+99nHRDKADVqC37er4eCB0fHyWT+y+K3QOvJKbw==} + engines: {node: '>=6.9.0'} + '@babel/template@7.23.9': resolution: {integrity: 
sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==} engines: {node: '>=6.9.0'} @@ -786,6 +796,12 @@ packages: '@jridgewell/trace-mapping@0.3.22': resolution: {integrity: sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==} + '@lit-labs/ssr-dom-shim@1.5.1': + resolution: {integrity: sha512-Aou5UdlSpr5whQe8AA/bZG0jMj96CoJIWbGfZ91qieWu5AWUMKw8VR/pAkQkJYvBNhmCcWnZlyyk5oze8JIqYA==} + + '@lit/reactive-element@2.1.2': + resolution: {integrity: sha512-pbCDiVMnne1lYUIaYNN5wrwQXDtHaYtg7YEFPeW+hws6U47WeFvISGUWekPGKWOP1ygrs0ef0o1VJMk1exos5A==} + '@mdit-vue/shared@0.12.1': resolution: {integrity: sha512-bXgd0KThe4jC2leCFDFsyrudXIckvTwV4WnQK/rRMrXq0/BAuVdSNdIv1LGCWZxD5+oDyPyEPd0lalTIFwqsmg==} @@ -1045,6 +1061,9 @@ packages: '@scale-codec/util@1.1.2': resolution: {integrity: sha512-Aali9gWoI1vOUUwk2H1FktstiInl6x5rjrPd3Am/b1WJV603NEI9QcJ7UGM7Eh42UjLD9e6H7E1ZF0yFnlRUUQ==} + '@scarf/scarf@1.4.0': + resolution: {integrity: sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==} + '@shikijs/core@2.5.0': resolution: {integrity: sha512-uu/8RExTKtavlpH7XqnVYBrfBkUc20ngXiX9NSrBhOVZYv/7XQRKUyhtkeflY5QsxC0GbJThCerruZfsUaSldg==} @@ -1072,6 +1091,122 @@ packages: '@sinclair/typebox@0.27.8': resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} + '@swagger-api/apidom-ast@1.11.0': + resolution: {integrity: sha512-poLd6eNipLCFCrxjZD+E9E0Z85CLfFzueNiVcYj86rwMp2OszYsTzZS2jz82yR/usNCjXCpkQ2xEXWSmDhefPg==} + + '@swagger-api/apidom-core@1.11.0': + resolution: {integrity: sha512-7TvbbC3dG3yM8cjqyrFXoTOpwgOC68+Z17Ro36drJwZ0k/c7QQc0dI/KvTSPHn9UfimEMdZ0q+yIIzqrAiEmww==} + + '@swagger-api/apidom-error@1.11.0': + resolution: {integrity: sha512-JPt37oOrf73CAZNQBPffnLzU5iEUs8cT9pFmc9vy2gHQp+vjSKxeJ9F6zagTp8VnLPUq0gVjIvCQvcX8NPW2jA==} + + '@swagger-api/apidom-json-pointer@1.11.0': + resolution: {integrity: 
sha512-11JWHr55FciYGTbcicNZrBsFEwNuLLZybi00YHJ3OBcuXcFJPKmKluLnVL7GhZYEqvLYOcVsCfInYW5MXoj00w==} + + '@swagger-api/apidom-ns-api-design-systems@1.11.0': + resolution: {integrity: sha512-IskDsUkUtNas4guoChRKKkw0wOst64nRA24WuIjLf8ztfBdcl/oqx/cgy8pwWCUqNYvL9L3+sD5HeuokqMrySw==} + + '@swagger-api/apidom-ns-arazzo-1@1.11.0': + resolution: {integrity: sha512-n+aGSlLHyrpmCaBa9DBZkIqnNVzYAYSa010MvAwhlwtW3EbFYNwYWinbTwLqCd3leN6XWTvQYCvk0/k7/9Cq4A==} + + '@swagger-api/apidom-ns-asyncapi-2@1.11.0': + resolution: {integrity: sha512-SHh3naFZlXFI0gG36tNYvJ/VO8aZsjnXIQAqJHfOE6rrpl5msJrdDatmNczh+57WPZxEZA+KTXWCqNKdeu3G3Q==} + + '@swagger-api/apidom-ns-asyncapi-3@1.11.0': + resolution: {integrity: sha512-4vrgNYDj68hgmgZj1eGBaBr5xqIETWn4jAioiRHek4jV1FLvmxCs3nC2nYs8CzQqqJ1bqirdiirrUpqhaQvTEA==} + + '@swagger-api/apidom-ns-json-schema-2019-09@1.11.0': + resolution: {integrity: sha512-5avPMY1YbQmJIqXlu7rm3yftf4xhT2REBxpEgw8Nc7Zlbn4Z5iGXBsHr60982MwqeE6W8wA+HHQMKHM5siuhdQ==} + + '@swagger-api/apidom-ns-json-schema-2020-12@1.11.0': + resolution: {integrity: sha512-zddyOxWKlQ9WPaZR0e8ykmy8AbGnDvqCqqy6BdYqKZ9Ts8ZK1XwOB2j9ruccZpoiy/rp2tow+CUf3XE5rricmQ==} + + '@swagger-api/apidom-ns-json-schema-draft-4@1.11.0': + resolution: {integrity: sha512-upc0xKb3nxsYPECRDf5UygnZHTSj78xHj5+SBIHNDXoaGDhvMCtWoDVGAKFtZ+jZlIkyJt7cGAeOX0w9IV3XkA==} + + '@swagger-api/apidom-ns-json-schema-draft-6@1.11.0': + resolution: {integrity: sha512-sd/U6Y34uRqdgd3Phz1oEhO7UBCn60+OfIasFFpHZcKe7O0jTmayiaJqbpwirhwt7Fv5Ev5m58+y1nVomLnhQw==} + + '@swagger-api/apidom-ns-json-schema-draft-7@1.11.0': + resolution: {integrity: sha512-7ptuxmuh2vN1hDr3cLkYm2rl+ak2J1byoGxswucKfSb+7IaFoA36/t7kcOsE/hIO4yI7T3ZPOuNSpeg1NBVjEw==} + + '@swagger-api/apidom-ns-openapi-2@1.11.0': + resolution: {integrity: sha512-cAIPJhLxm/nj1kzneNySeaTahY+hH5gkGNsgbmifGnLPsC5YOOfEVMKLj18IREdXqdnxJgRbsI9Azl4g09TPkg==} + + '@swagger-api/apidom-ns-openapi-3-0@1.11.0': + resolution: {integrity: 
sha512-IUEWVSuETE5DdgTJhIt6oZyRTYUV892/I9UdyTResR0Bypc7gy3YXwlzMlUZx73S2klaiFo1dL4iu/fqzA2fEg==} + + '@swagger-api/apidom-ns-openapi-3-1@1.11.0': + resolution: {integrity: sha512-WpUvFgOs4YMUmyeJRQEADps+U5o71YTtzKMPNr1cF1ZHKKkcRMUJL9QlJ4Y9cxAdjo6oXzXZa5922XOpwMYhxA==} + + '@swagger-api/apidom-ns-openapi-3-2@1.11.0': + resolution: {integrity: sha512-mUErHIq8rHVoOrkHnRj3mhoNYVIl8th474/m0+E8OB2wBAe0KgiczaJX9KkBQoAo5XIxoRfmI5T3bp+fRabwCA==} + + '@swagger-api/apidom-parser-adapter-api-design-systems-json@1.11.0': + resolution: {integrity: sha512-0OdwcnV/QF+Vs3Vj0dTmlRHEp9WQg9aBvWWl8Fq25OviyDhGGRpqgkEAOjtVYCH3XyZ1Xz+jhIDOdd5pxBajsA==} + + '@swagger-api/apidom-parser-adapter-api-design-systems-yaml@1.11.0': + resolution: {integrity: sha512-K714DT6nFW+ZM9LTo+c120zkUjsEcIFO2DU+0cnzReRyenb1x6RZe+uOqTt7iWohnnWp2FV/j0exd/mCsxW65Q==} + + '@swagger-api/apidom-parser-adapter-arazzo-json-1@1.11.0': + resolution: {integrity: sha512-z9K6XEr3AafV2EA+1pfW+8VoMCCSSpm2IU7oUTjSnhxRb5t/DZR4Qg8FEK8tRKdS2BO2kFFLb2xikrY3Qx8B+g==} + + '@swagger-api/apidom-parser-adapter-arazzo-yaml-1@1.11.0': + resolution: {integrity: sha512-HPb7Wzr+cj0IJkRRlqsK1tNCQXivuGRP4iB2yek16sQZXo2eqSUZ3j3Lz/WwWgnN/FWGAODm4bj9+EhGQ11TnA==} + + '@swagger-api/apidom-parser-adapter-asyncapi-json-2@1.11.0': + resolution: {integrity: sha512-sQenLXZRmTDQehe3JCSQpz6jpE3DhMQ0aoe2gpNqo23Gt/4oeW6nAP2h49q9Ne+CHPp0ApFUUyIXF7UTmbUWqA==} + + '@swagger-api/apidom-parser-adapter-asyncapi-json-3@1.11.0': + resolution: {integrity: sha512-aGnG3AYp4Qsimn1FOP0B9leYCJAQVockzHqyJj30xiNAXquBMXr6lq3L2/AEsmpDGv/x/++YJ4p2ggSxy12QNw==} + + '@swagger-api/apidom-parser-adapter-asyncapi-yaml-2@1.11.0': + resolution: {integrity: sha512-iIRlB8B46UPiu0EkKhq1TvwloBgObASJ5ROx8rhT5+Pj+BBegE+KIY02EUKwcz5FgXJrH3XcltLiI7ZA68347Q==} + + '@swagger-api/apidom-parser-adapter-asyncapi-yaml-3@1.11.0': + resolution: {integrity: sha512-BF2ZyQYMUNrjP1nMneX6ZD2IWBLycWpxg3yllXDCJtfdQT/IMzldIPKCNI9qoBE57lM6j2hpy+Jd86QJk20t2w==} + + 
'@swagger-api/apidom-parser-adapter-json@1.11.0': + resolution: {integrity: sha512-DObW0LxYwif0erzGoXiEAZ6ecc/18LIEKxjEAc5Bw2M5I0C/iGW4y/UxAywihGvhMEo1gOvdO6w9Jh6UnuPVmA==} + + '@swagger-api/apidom-parser-adapter-openapi-json-2@1.11.0': + resolution: {integrity: sha512-dREUHAEHVry9aSGjqDpYF9Wzm1lgUkV6EgoYDflyQ9HxgCwhucDPFmUgI7UaR0G6bplnJumMcZXh1I1TGn1v7Q==} + + '@swagger-api/apidom-parser-adapter-openapi-json-3-0@1.11.0': + resolution: {integrity: sha512-U/NZpvuj9IpUS48zF2tYbgW2AtTw6Yi6kXNiHUtgUEomxYdb6XQeKLDGvgeWjgAgfUROohakcH+wx713VCGxfQ==} + + '@swagger-api/apidom-parser-adapter-openapi-json-3-1@1.11.0': + resolution: {integrity: sha512-fYarNeaz39oKZ6VwqwON+IeJszidZGPvUYDfggLaar81NGimrz07y1U+DhAf96IX3qgUa2J6Fu3Bv1r57hs6Ng==} + + '@swagger-api/apidom-parser-adapter-openapi-json-3-2@1.11.0': + resolution: {integrity: sha512-jtMoAH3R73bQUc4D2cJTUUvO4iJz9CV1W4+zoU/gT2l6h8Ji5EhZH0/VyynUk4J6mW/GdwxUN/q5z2P/DtSmfA==} + + '@swagger-api/apidom-parser-adapter-openapi-yaml-2@1.11.0': + resolution: {integrity: sha512-e8L4kHahgkOIzCCSGs5jTahXLInERNr37teSLS4SuqYgSVWr9AVXuNvpHNYGeMECD8briGIGfAAtnZChCGYrEA==} + + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-0@1.11.0': + resolution: {integrity: sha512-s+AXnNzLeAk28jUAeXwTSR1AlX+TXIAt2GfFgWUAV+SFw2OhRpoKYLzItN3n2UsHselqHvfyUL9xNCJBZleQtQ==} + + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-1@1.11.0': + resolution: {integrity: sha512-xyUyehHhB+BSOAT7mYGqmcEozuLKxmx1Hug97O9SVgNU8QTClc95+VWrAHhJbn8juPR6y2vSwm/wrQDwb4yq7w==} + + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-2@1.11.0': + resolution: {integrity: sha512-u7Y98zdjEs+0Upa8TdxOsb7z8hYJmLz9lVleRiB7rqysVga6oSDI5NAFdLVqMB6uAUuFi/tyiuiFT4Qosfd6Vw==} + + '@swagger-api/apidom-parser-adapter-yaml-1-2@1.11.0': + resolution: {integrity: sha512-FZK9KfwiTnNc+imxg7Wu2ktKhXCYPeFQZ1uZJzJL/hk1n+zyPfRY/4Aue4HzDcG8+wbItd3dRjKClFanVZAXoA==} + + '@swagger-api/apidom-reference@1.11.0': + resolution: {integrity: 
sha512-ftqegYrxxl9UwQFbdVOtXIqNolVd25M5u53X8fP96Wx6lEVr5Ed7B6+dzch8ttCUmKeoLIeagvt76b6BoYtnLw==} + + '@swaggerexpert/cookie@2.0.2': + resolution: {integrity: sha512-DPI8YJ0Vznk4CT+ekn3rcFNq1uQwvUHZhH6WvTSPD0YKBIlMS9ur2RYKghXuxxOiqOam/i4lHJH4xTIiTgs3Mg==} + engines: {node: '>=12.20.0'} + + '@swaggerexpert/json-pointer@2.10.2': + resolution: {integrity: sha512-qMx1nOrzoB+PF+pzb26Q4Tc2sOlrx9Ba2UBNX9hB31Omrq+QoZ2Gly0KLrQWw4Of1AQ4J9lnD+XOdwOdcdXqqw==} + engines: {node: '>=12.20.0'} + '@tanstack/virtual-core@3.13.12': resolution: {integrity: sha512-1YBOJfRHV4sXUmWsFSf5rQor4Ss82G8dQWLRbnk3GA4jeP8hQt1hxXh0tmflpC0dz3VgEv/1+qwPyLeWkQuPFA==} @@ -1080,6 +1215,14 @@ packages: peerDependencies: vue: ^2.7.0 || ^3.0.0 + '@tree-sitter-grammars/tree-sitter-yaml@0.7.1': + resolution: {integrity: sha512-AynBwkIoQCTgjDR33bDUp9Mqq+YTco0is3n5hRApMqG9of/6A4eQsfC1/uSEeHSUyMQSYawcAWamsexnVpIP4Q==} + peerDependencies: + tree-sitter: ^0.22.4 + peerDependenciesMeta: + tree-sitter: + optional: true + '@trysound/sax@0.2.0': resolution: {integrity: sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==} engines: {node: '>=10.13.0'} @@ -1138,6 +1281,12 @@ packages: '@types/node@20.6.0': resolution: {integrity: sha512-najjVq5KN2vsH2U/xyh2opaSEz6cZMR2SetLIlxlj08nOcmPOemJmUK2o4kUzfLqfrWE0PIrNeE16XhYDd3nqg==} + '@types/ramda@0.30.2': + resolution: {integrity: sha512-PyzHvjCalm2BRYjAU6nIB3TprYwMNOUY/7P/N8bSzp9W/yM2YrtGtAnnVtaCNSeOZ8DzKyFDvaqQs7LnWwwmBA==} + + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + '@types/unist@2.0.8': resolution: {integrity: sha512-d0XxK3YTObnWVp6rZuev3c49+j4Lo8g4L1ZRm9z5L0xpoZycUPshHgczK5gsUMaZOstjVYYi09p5gYvUtfChYw==} @@ -1547,6 +1696,9 @@ packages: resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} engines: {node: '>= 8'} + apg-lite@1.0.5: + resolution: 
{integrity: sha512-SlI+nLMQDzCZfS39ihzjGp3JNBQfJXyMi6cg9tkLOCPVErgFsUIAEdO9IezR7kbP5Xd0ozcPNQBkf9TO5cHgWw==} + argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} @@ -1562,9 +1714,26 @@ packages: resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} engines: {node: '>=12'} + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + axios@1.15.2: + resolution: {integrity: sha512-wLrXxPtcrPTsNlJmKjkPnNPK2Ihe0hn0wGSaTEiHRPxwjvJwT3hKmXF4dpqxmPO9SoNb2FsYXj/xEo0gHN+D5A==} + balanced-match@1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + balanced-match@4.0.4: + resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==} + engines: {node: 18 || 20 || >=22} + + base64-arraybuffer@1.0.2: + resolution: {integrity: sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==} + engines: {node: '>= 0.6.0'} + + base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + binary-extensions@2.2.0: resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==} engines: {node: '>=8'} @@ -1584,6 +1753,10 @@ packages: brace-expansion@2.0.2: resolution: {integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==} + brace-expansion@5.0.5: + resolution: {integrity: sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==} + engines: {node: 18 || 20 || >=22} + braces@3.0.3: resolution: {integrity: 
sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} engines: {node: '>=8'} @@ -1593,10 +1766,17 @@ packages: engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} hasBin: true + buffer@6.0.3: + resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + cac@6.7.14: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + callsites@3.1.0: resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} engines: {node: '>=6'} @@ -1680,6 +1860,10 @@ packages: colorette@2.0.20: resolution: {integrity: sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==} + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + comma-separated-tokens@2.0.3: resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==} @@ -1709,6 +1893,9 @@ packages: resolution: {integrity: sha512-7Vv6asjS4gMOuILabD3l739tsaxFQmC+a7pLZm02zyvs8p977bL3zEgq3yDk5rn9B0PbYgIv++jmHcuUab4RhA==} engines: {node: '>=18'} + core-js-pure@3.49.0: + resolution: {integrity: sha512-XM4RFka59xATyJv/cS3O3Kml72hQXUeGRuuTmMYFxwzc9/7C8OYTaIR/Ji+Yt8DXzsFLNhat15cE/JP15HrCgw==} + core-js@3.35.1: resolution: {integrity: sha512-IgdsbxNyMskrTFxa9lWHyMwAJU5gXOPP+1yO+K59d50VLVAIDAbs7gIv705KzALModfK3ZrSZTPNpC0PQgIZuw==} @@ -1940,6 +2127,10 @@ packages: deep-is@0.1.4: resolution: {integrity: 
sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + defu@6.1.4: resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} @@ -1950,6 +2141,10 @@ packages: delaunator@5.0.0: resolution: {integrity: sha512-AyLvtyJdbv/U1GkiS6gUUzclRoAY4Gs75qkMygJJhU75LW4DNuSF2RMzpxs9jw9Oz1BobHjTdkG3zdP55VxAqw==} + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + dequal@2.0.3: resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} engines: {node: '>=6'} @@ -1999,6 +2194,14 @@ packages: domutils@3.1.0: resolution: {integrity: sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==} + drange@1.1.1: + resolution: {integrity: sha512-pYxfDYpued//QpnLIm4Avk7rsNtAtQkUES2cwAYSvD/wd2pKD71gN2Ebj3e7klzXwjocvE8c5vx/1fxwpqmSxA==} + engines: {node: '>=4'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + duplexer@0.1.2: resolution: {integrity: sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==} @@ -2028,6 +2231,22 @@ packages: resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: 
sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + esbuild@0.21.5: resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} engines: {node: '>=12'} @@ -2130,6 +2349,9 @@ packages: resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} engines: {node: '>=8.6.0'} + fast-json-patch@3.1.1: + resolution: {integrity: sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==} + fast-json-stable-stringify@2.1.0: resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} @@ -2165,10 +2387,23 @@ packages: focus-trap@7.6.6: resolution: {integrity: sha512-v/Z8bvMCajtx4mEXmOo7QEsIzlIOqRXTIwgUfsFOF9gEsespdbD0AkPIka1bSXZ8Y8oZ+2IVDQZePkTfEHZl7Q==} + follow-redirects@1.16.0: + resolution: {integrity: sha512-y5rN/uOsadFT/JfYwhxRS5R7Qce+g3zG97+JrtFZlC9klX/W5hD7iiLzScI4nZqUS7DNUdhPgw4xI8W2LuXlUw==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + foreground-child@3.1.1: resolution: {integrity: sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==} engines: {node: '>=14'} + form-data@4.0.5: + resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} + engines: {node: '>= 6'} + formdata-polyfill@4.0.10: resolution: {integrity: 
sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} engines: {node: '>=12.20.0'} @@ -2181,6 +2416,9 @@ packages: engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} os: [darwin] + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + gensync@1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -2189,6 +2427,14 @@ packages: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + get-stdin@8.0.0: resolution: {integrity: sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==} engines: {node: '>=10'} @@ -2232,6 +2478,10 @@ packages: resolution: {integrity: sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + graceful-fs@4.2.10: resolution: {integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==} @@ -2254,10 +2504,22 @@ packages: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} + has-symbols@1.1.0: + resolution: {integrity: 
sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + hasha@5.2.2: resolution: {integrity: sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ==} engines: {node: '>=8'} + hasown@2.0.3: + resolution: {integrity: sha512-ej4AhfhfL2Q2zpMmLo7U1Uv9+PyhIZpgQLGT1F9miIGmiCJIoCgSmczFdrc97mWT4kVY72KA+WnnhJ5pghSvSg==} + engines: {node: '>= 0.4'} + hast-util-to-html@9.0.5: resolution: {integrity: sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==} @@ -2284,6 +2546,9 @@ packages: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} + ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} + ignore@5.2.4: resolution: {integrity: sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==} engines: {node: '>= 4'} @@ -2449,6 +2714,15 @@ packages: linkify-it@4.0.1: resolution: {integrity: sha512-C7bfi1UZmoj8+PQx22XyeXCuBlokoyWQL5pWSP+EI6nzRylyThouddufc2c1NDIcP9k5agmN9fLpA7VNJfIiqw==} + lit-element@4.2.2: + resolution: {integrity: sha512-aFKhNToWxoyhkNDmWZwEva2SlQia+jfG0fjIWV//YeTaWrVnOxD89dPKfigCUspXFmjzOEUQpOkejH5Ly6sG0w==} + + lit-html@3.3.2: + resolution: {integrity: sha512-Qy9hU88zcmaxBXcc10ZpdK7cOLXvXpRoBxERdtqV9QOrfpMZZ6pSYP91LhpPtap3sFMUiL7Tw2RImbe0Al2/kw==} + + lit@3.3.2: + resolution: {integrity: sha512-NF9zbsP79l4ao2SNrH3NkfmFgN/hBYSQo90saIVI1o5GpjAdCPVstVzO1MrLOakHoEhYkrtRjPK6Ob521aoYWQ==} + local-pkg@0.5.0: resolution: {integrity: sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==} 
engines: {node: '>=14'} @@ -2520,6 +2794,15 @@ packages: resolution: {integrity: sha512-lTlxriVoy2criHP0JKRhO2VDG9c2ypWCsT237eDiLqi09rmbKoUetyGHq2uOIRoRS//kfoJckS0eUzzkDR+k2Q==} hasBin: true + marked@4.3.0: + resolution: {integrity: sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==} + engines: {node: '>= 12'} + hasBin: true + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + mdast-util-from-markdown@1.3.1: resolution: {integrity: sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==} @@ -2630,10 +2913,26 @@ packages: resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} engines: {node: '>=8.6'} + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + mimic-fn@2.1.0: resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} engines: {node: '>=6'} + minim@0.23.8: + resolution: {integrity: sha512-bjdr2xW1dBCMsMGGsUeqM4eFI60m94+szhxWys+B1ztIt6gWSfeGBdSVCIawezeHYLYn0j6zrsXdQS/JllBzww==} + engines: {node: '>=6'} + + minimatch@10.2.5: + resolution: {integrity: sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==} + engines: {node: 18 || 20 || >=22} + minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} @@ -2680,14 +2979,29 @@ packages: natural-compare@1.4.0: resolution: {integrity: 
sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + neotraverse@0.6.18: + resolution: {integrity: sha512-Z4SmBUweYa09+o6pG+eASabEpP6QkQ70yHj351pQoEXIs8uHbaU2DWVmzBANKgflPa47A50PtB2+NgRpQvr7vA==} + engines: {node: '>= 10'} + + node-abort-controller@3.1.1: + resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==} + node-addon-api@7.1.1: resolution: {integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==} + node-addon-api@8.7.0: + resolution: {integrity: sha512-9MdFxmkKaOYVTV+XVRG8ArDwwQ77XIgIPyKASB1k3JPq3M8fGQQQE3YpMOrKm6g//Ktx8ivZr8xo1Qmtqub+GA==} + engines: {node: ^18 || ^20 || >= 21} + node-domexception@1.0.0: resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} engines: {node: '>=10.5.0'} deprecated: Use your platform's native DOMException instead + node-fetch-commonjs@3.3.2: + resolution: {integrity: sha512-VBlAiynj3VMLrotgwOS3OyECFxas5y7ltLcK4t41lMUZeaK15Ym4QRkqN0EQKAFL42q9i21EPKjzLUPfltR72A==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + node-fetch-native@1.6.2: resolution: {integrity: sha512-69mtXOFZ6hSkYiXAVB5SqaRvrbITC/NPyqv7yuu/qw0nmgPyYbIMYYNIDhNtwPrzk0ptrimrLz/hhjvm4w5Z+w==} @@ -2695,6 +3009,10 @@ packages: resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + node-gyp-build@4.8.4: + resolution: {integrity: sha512-LA4ZjwlnUblHVgq0oBF3Jl/6h/Nvs5fzBLwdEF4nuxnFdsfajde4WfxtJr3CaiH+F6ewcIB/q4jQ4UzPyid+CQ==} + hasBin: true + node-releases@2.0.14: resolution: {integrity: sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==} @@ -2725,6 +3043,14 @@ packages: oniguruma-to-es@3.1.1: resolution: {integrity: 
sha512-bUH8SDvPkH3ho3dvwJwfonjlQ4R80vjyvrU8YpxuROddv55vAEJrTuCuCVUhhsHbtlD9tGGbaNApGQckXhS8iQ==} + openapi-path-templating@2.2.1: + resolution: {integrity: sha512-eN14VrDvl/YyGxxrkGOHkVkWEoPyhyeydOUrbvjoz8K5eIGgELASwN1eqFOJ2CTQMGCy2EntOK1KdtJ8ZMekcg==} + engines: {node: '>=12.20.0'} + + openapi-server-url-templating@1.3.0: + resolution: {integrity: sha512-DPlCms3KKEbjVQb0spV6Awfn6UWNheuG/+folQPzh/wUaKwuqvj8zt5gagD7qoyxtE03cIiKPgLFS3Q8Bz00uQ==} + engines: {node: '>=12.20.0'} + optionator@0.9.4: resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} engines: {node: '>= 0.8.0'} @@ -2834,9 +3160,17 @@ packages: resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + prismjs@1.30.0: + resolution: {integrity: sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==} + engines: {node: '>=6'} + property-information@7.1.0: resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + proxy-from-env@2.1.0: + resolution: {integrity: sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA==} + engines: {node: '>=10'} + punycode@2.3.1: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} @@ -2848,6 +3182,23 @@ packages: resolution: {integrity: sha512-AAFUA5O1d83pIHEhJwWCq/RQcRukCkn/NSm2QsTEMle5f2hP0ChI2+3Xb051PZCkLryI/Ir1MVKviT2FIloaTQ==} engines: {node: '>=12'} + ramda-adjunct@5.1.0: + resolution: {integrity: sha512-8qCpl2vZBXEJyNbi4zqcgdfHtcdsWjOGbiNSEnEBrM6Y0OKOT8UxJbIVGm1TIcjaSu2MxaWcgtsNlKlCk7o7qg==} + engines: {node: '>=0.10.3'} + peerDependencies: + ramda: '>= 0.30.0' + + ramda@0.30.1: + resolution: {integrity: 
sha512-tEF5I22zJnuclswcZMc8bDIrwRHRzf+NqVEmqg50ShAZMP7MWeR/RGDthfM/p+BlqvF2fXAzpn8i+SJcYD3alw==} + + randexp@0.5.3: + resolution: {integrity: sha512-U+5l2KrcMNOUPYvazA3h5ekF80FHTUG+87SEAmHZmolh1M+i/WyTCxVzmi+tidIa1tM4BSe8g2Y/D3loWDjj+w==} + engines: {node: '>=4'} + + rapidoc@9.3.8: + resolution: {integrity: sha512-eCYEbr1Xr8OJZvVCw8SXl9zBCRoLJbhNGuG5IZTHq/RWAOq/O4MafUCuFEyZHsrhLrlUcGZMa64pyhpib8fQKQ==} + engines: {node: '>=18.16.0'} + react-is@18.3.1: resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} @@ -2868,6 +3219,10 @@ packages: regex@6.0.1: resolution: {integrity: sha512-uorlqlzAKjKQZ5P+kTJr3eeJGSVroLKoHmquUj4zHWuR+hEyNqlXsSKlYYF5F4NI6nl7tWCs0apKJ0lmfsXAPA==} + repeat-string@1.6.1: + resolution: {integrity: sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==} + engines: {node: '>=0.10'} + require-directory@2.1.1: resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} engines: {node: '>=0.10.0'} @@ -2882,6 +3237,10 @@ packages: resolve-pkg-maps@1.0.0: resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} + ret@0.2.2: + resolution: {integrity: sha512-M0b3YWQs7R3Z917WRQy1HHA7Ba7D8hvZg6UE5mLykJxQVE2ju0IXbGlaHPPlkY+WN7wFP+wUMXmBFA0aV6vYGQ==} + engines: {node: '>=4'} + reusify@1.0.4: resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} @@ -2957,6 +3316,10 @@ packages: shiki@2.5.0: resolution: {integrity: sha512-mI//trrsaiCIPsja5CNfsyNOqgAZUb6VpJA+340toL42UpzQlXpwRV9nch69X6gaUxrr9kaOOa6e3y3uAkGFxQ==} + short-unique-id@5.3.2: + resolution: {integrity: sha512-KRT/hufMSxXKEDSQujfVE0Faa/kZ51ihUcZQAcmP04t00DvPj7Ox5anHke1sJYUtzSuiT/Y5uyzg/W7bBEGhCg==} + hasBin: true + siginfo@2.0.0: resolution: {integrity: 
sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} @@ -3058,6 +3421,9 @@ packages: engines: {node: '>=14.0.0'} hasBin: true + swagger-client@3.37.3: + resolution: {integrity: sha512-PZv5smQPnPwfP6mnkq96fOp/RNDKBqd8vfwE4UuwA229wsesj20yd7RadXx+9uLBC3c0H6cu/H+bnbMTWG6oUQ==} + tabbable@6.3.0: resolution: {integrity: sha512-EIHvdY5bPLuWForiR/AN2Bxngzpuwn1is4asboytXtpTgsArc+WmSJKVLlhdh71u7jFcryDqB2A8lQvj78MkyQ==} @@ -3096,6 +3462,20 @@ packages: resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==} engines: {node: '>=6'} + tree-sitter-json@0.24.8: + resolution: {integrity: sha512-Tc9ZZYwHyWZ3Tt1VEw7Pa2scu1YO7/d2BCBbKTx5hXwig3UfdQjsOPkPyLpDJOn/m1UBEWYAtSdGAwCSyagBqQ==} + peerDependencies: + tree-sitter: ^0.21.1 + peerDependenciesMeta: + tree-sitter: + optional: true + + tree-sitter@0.21.1: + resolution: {integrity: sha512-7dxoA6kYvtgWw80265MyqJlkRl4yawIjO7S5MigytjELkX43fV2WsAXzsNfO7sBpPPCF5Gp0+XzHk0DwLCq3xQ==} + + tree-sitter@0.22.4: + resolution: {integrity: sha512-usbHZP9/oxNsUY65MQUsduGRqDHQOou1cagUSwjhoSYAmSahjQDAVsh9s+SlZkn8X8+O1FULRGwHu7AFP3kjzg==} + trim-lines@3.0.1: resolution: {integrity: sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==} @@ -3115,12 +3495,18 @@ packages: resolution: {integrity: sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==} engines: {node: '>=6.10'} + ts-mixer@6.0.4: + resolution: {integrity: sha512-ufKpbmrugz5Aou4wcr5Wc1UUFWOLhq+Fm6qa6P0w0K5Qw2yhaUoiWszhCVuNQyNwrlGiscHOmqYoAox1PtvgjA==} + ts-pattern@4.3.0: resolution: {integrity: sha512-pefrkcd4lmIVR0LA49Imjf9DYLK8vtWhqBPA3Ya1ir8xCW0O2yjL9dsCVvI7pCodLC5q7smNpEtDR2yVulQxOg==} ts-pattern@5.9.0: resolution: {integrity: sha512-6s5V71mX8qBUmlgbrfL33xDUwO0fq48rxAu2LBE11WBeGdpCPOsXksQbZJHvHwhrd3QjUusd3mAOM5Gg0mFBLg==} + ts-toolbelt@9.6.0: + resolution: {integrity: 
sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==} + tslib@2.8.1: resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} @@ -3149,6 +3535,9 @@ packages: resolution: {integrity: sha512-anpAG63wSpdEbLwOqH8L84urkL6PiVIov3EMmgIhhThevh9aiMQov+6Btx0wldNcvm4wV+e2/Rt1QdDwKHFbHw==} engines: {node: '>=16'} + types-ramda@0.30.1: + resolution: {integrity: sha512-1HTsf5/QVRmLzcGfldPFvkVsAdi1db1BBKzi7iW3KBUlOICg/nKnFS+jGqDJS3YD8VsWbAh7JiHeBvbsw8RPxA==} + typescript@5.6.3: resolution: {integrity: sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==} engines: {node: '>=14.17'} @@ -3193,6 +3582,9 @@ packages: vite: optional: true + unraw@3.0.0: + resolution: {integrity: sha512-08/DA66UF65OlpUDIQtbJyrqTR0jTAlJ+jsnkQ4jxR7+K5g5YG1APZKQSMCE1vqqmD+2pv6+IdEjmopFatacvg==} + update-browserslist-db@1.0.13: resolution: {integrity: sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==} hasBin: true @@ -3327,6 +3719,9 @@ packages: resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} engines: {node: '>= 8'} + web-tree-sitter@0.24.5: + resolution: {integrity: sha512-+J/2VSHN8J47gQUAvF8KDadrfz6uFYVjxoxbKWDoXVsH2u7yLdarCnIURnrMA6uSRkgX3SdmqM5BOoQjPdSh5w==} + web-worker@1.2.0: resolution: {integrity: sha512-PgF341avzqyx60neE9DD+XS26MMNMoUQRz9NOZwW32nPQrF6p77f1htcnjBSEV8BGMKZ16choqUG4hyI0Hx7mA==} @@ -3355,6 +3750,9 @@ packages: wrappy@1.0.2: resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + xml-but-prettier@1.0.1: + resolution: {integrity: sha512-C2CJaadHrZTqESlH03WOyw0oZTtoy2uEg6dSDF6YRg+9GnYNub53RRemLpnvtbHDFelxMx4LajiFsYeR6XJHgQ==} + xml-name-validator@4.0.0: resolution: {integrity: 
sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} engines: {node: '>=12'} @@ -3511,6 +3909,12 @@ snapshots: '@antfu/utils@0.7.7': {} + '@apitools/openapi-parser@0.0.33': + dependencies: + swagger-client: 3.37.3 + transitivePeerDependencies: + - debug + '@babel/code-frame@7.23.5': dependencies: '@babel/highlight': 7.23.4 @@ -3681,6 +4085,10 @@ snapshots: '@babel/plugin-transform-modules-commonjs': 7.23.3(@babel/core@7.23.9) '@babel/plugin-transform-typescript': 7.23.6(@babel/core@7.23.9) + '@babel/runtime-corejs3@7.29.2': + dependencies: + core-js-pure: 3.49.0 + '@babel/template@7.23.9': dependencies: '@babel/code-frame': 7.23.5 @@ -3975,6 +4383,12 @@ snapshots: '@jridgewell/resolve-uri': 3.1.1 '@jridgewell/sourcemap-codec': 1.5.5 + '@lit-labs/ssr-dom-shim@1.5.1': {} + + '@lit/reactive-element@2.1.2': + dependencies: + '@lit-labs/ssr-dom-shim': 1.5.1 + '@mdit-vue/shared@0.12.1': dependencies: '@mdit-vue/types': 0.12.0 @@ -4182,6 +4596,8 @@ snapshots: '@scale-codec/util@1.1.2': {} + '@scarf/scarf@1.4.0': {} + '@shikijs/core@2.5.0': dependencies: '@shikijs/engine-javascript': 2.5.0 @@ -4224,6 +4640,438 @@ snapshots: '@sinclair/typebox@0.27.8': {} + '@swagger-api/apidom-ast@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-error': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + unraw: 3.0.0 + + '@swagger-api/apidom-core@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-ast': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@types/ramda': 0.30.2 + minim: 0.23.8 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + short-unique-id: 5.3.2 + ts-mixer: 6.0.4 + + '@swagger-api/apidom-error@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + + '@swagger-api/apidom-json-pointer@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + 
'@swaggerexpert/json-pointer': 2.10.2 + + '@swagger-api/apidom-ns-api-design-systems@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-1': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + optional: true + + '@swagger-api/apidom-ns-arazzo-1@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-json-schema-2020-12': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + optional: true + + '@swagger-api/apidom-ns-asyncapi-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-json-schema-draft-7': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + optional: true + + '@swagger-api/apidom-ns-asyncapi-3@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-asyncapi-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + optional: true + + '@swagger-api/apidom-ns-json-schema-2019-09@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@swagger-api/apidom-ns-json-schema-draft-7': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + + '@swagger-api/apidom-ns-json-schema-2020-12@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@swagger-api/apidom-ns-json-schema-2019-09': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + + 
'@swagger-api/apidom-ns-json-schema-draft-4@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-ast': 1.11.0 + '@swagger-api/apidom-core': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + + '@swagger-api/apidom-ns-json-schema-draft-6@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@swagger-api/apidom-ns-json-schema-draft-4': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + + '@swagger-api/apidom-ns-json-schema-draft-7@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@swagger-api/apidom-ns-json-schema-draft-6': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + + '@swagger-api/apidom-ns-openapi-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@swagger-api/apidom-ns-json-schema-draft-4': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + optional: true + + '@swagger-api/apidom-ns-openapi-3-0@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@swagger-api/apidom-ns-json-schema-draft-4': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + + '@swagger-api/apidom-ns-openapi-3-1@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-ast': 1.11.0 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-json-pointer': 1.11.0 + '@swagger-api/apidom-ns-json-schema-2020-12': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-0': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 
5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + + '@swagger-api/apidom-ns-openapi-3-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-ast': 1.11.0 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-json-pointer': 1.11.0 + '@swagger-api/apidom-ns-json-schema-2020-12': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-0': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-1': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + ts-mixer: 6.0.4 + + '@swagger-api/apidom-parser-adapter-api-design-systems-json@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-api-design-systems': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-api-design-systems-yaml@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-api-design-systems': 1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-arazzo-json-1@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-arazzo-1': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-arazzo-yaml-1@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-arazzo-1': 1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-asyncapi-json-2@1.11.0': + dependencies: + 
'@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-asyncapi-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-asyncapi-json-3@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-asyncapi-3': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-asyncapi-yaml-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-asyncapi-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-asyncapi-yaml-3@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-asyncapi-3': 1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-json@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-ast': 1.11.0 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + tree-sitter: 0.21.1 + tree-sitter-json: 0.24.8(tree-sitter@0.21.1) + web-tree-sitter: 0.24.5 + optional: true + + '@swagger-api/apidom-parser-adapter-openapi-json-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-openapi-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 
0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-openapi-json-3-0@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-0': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-openapi-json-3-1@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-1': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-openapi-json-3-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-openapi-yaml-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-openapi-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-0@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-0': 1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-1@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-1': 
1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optional: true + + '@swagger-api/apidom-parser-adapter-yaml-1-2@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-ast': 1.11.0 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@tree-sitter-grammars/tree-sitter-yaml': 0.7.1(tree-sitter@0.22.4) + '@types/ramda': 0.30.2 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + tree-sitter: 0.22.4 + web-tree-sitter: 0.24.5 + optional: true + + '@swagger-api/apidom-reference@1.11.0': + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@types/ramda': 0.30.2 + axios: 1.15.2 + minimatch: 10.2.5 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + optionalDependencies: + '@swagger-api/apidom-json-pointer': 1.11.0 + '@swagger-api/apidom-ns-arazzo-1': 1.11.0 + '@swagger-api/apidom-ns-asyncapi-2': 1.11.0 + '@swagger-api/apidom-ns-openapi-2': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-0': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-1': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-api-design-systems-json': 1.11.0 + '@swagger-api/apidom-parser-adapter-api-design-systems-yaml': 1.11.0 + '@swagger-api/apidom-parser-adapter-arazzo-json-1': 1.11.0 + '@swagger-api/apidom-parser-adapter-arazzo-yaml-1': 1.11.0 + '@swagger-api/apidom-parser-adapter-asyncapi-json-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-asyncapi-json-3': 1.11.0 + 
'@swagger-api/apidom-parser-adapter-asyncapi-yaml-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-asyncapi-yaml-3': 1.11.0 + '@swagger-api/apidom-parser-adapter-json': 1.11.0 + '@swagger-api/apidom-parser-adapter-openapi-json-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-openapi-json-3-0': 1.11.0 + '@swagger-api/apidom-parser-adapter-openapi-json-3-1': 1.11.0 + '@swagger-api/apidom-parser-adapter-openapi-json-3-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-openapi-yaml-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-0': 1.11.0 + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-1': 1.11.0 + '@swagger-api/apidom-parser-adapter-openapi-yaml-3-2': 1.11.0 + '@swagger-api/apidom-parser-adapter-yaml-1-2': 1.11.0 + transitivePeerDependencies: + - debug + + '@swaggerexpert/cookie@2.0.2': + dependencies: + apg-lite: 1.0.5 + + '@swaggerexpert/json-pointer@2.10.2': + dependencies: + apg-lite: 1.0.5 + '@tanstack/virtual-core@3.13.12': {} '@tanstack/vue-virtual@3.13.12(vue@3.5.22(typescript@5.6.3))': @@ -4231,6 +5079,14 @@ snapshots: '@tanstack/virtual-core': 3.13.12 vue: 3.5.22(typescript@5.6.3) + '@tree-sitter-grammars/tree-sitter-yaml@0.7.1(tree-sitter@0.22.4)': + dependencies: + node-addon-api: 8.7.0 + node-gyp-build: 4.8.4 + optionalDependencies: + tree-sitter: 0.22.4 + optional: true + '@trysound/sax@0.2.0': {} '@types/d3-scale-chromatic@3.0.0': {} @@ -4288,6 +5144,12 @@ snapshots: '@types/node@20.6.0': {} + '@types/ramda@0.30.2': + dependencies: + types-ramda: 0.30.1 + + '@types/trusted-types@2.0.7': {} + '@types/unist@2.0.8': {} '@types/unist@3.0.3': {} @@ -4734,12 +5596,13 @@ snapshots: optionalDependencies: vue: 3.5.22(typescript@5.6.3) - '@vueuse/integrations@12.8.2(focus-trap@7.6.6)(typescript@5.6.3)': + '@vueuse/integrations@12.8.2(axios@1.15.2)(focus-trap@7.6.6)(typescript@5.6.3)': dependencies: '@vueuse/core': 12.8.2(typescript@5.6.3) '@vueuse/shared': 12.8.2(typescript@5.6.3) vue: 3.5.22(typescript@5.6.3) optionalDependencies: + axios: 
1.15.2 focus-trap: 7.6.6 transitivePeerDependencies: - typescript @@ -4827,6 +5690,8 @@ snapshots: normalize-path: 3.0.0 picomatch: 2.3.1 + apg-lite@1.0.5: {} + argparse@2.0.1: {} array-union@2.1.0: {} @@ -4835,8 +5700,24 @@ snapshots: assertion-error@2.0.1: {} + asynckit@0.4.0: {} + + axios@1.15.2: + dependencies: + follow-redirects: 1.16.0 + form-data: 4.0.5 + proxy-from-env: 2.1.0 + transitivePeerDependencies: + - debug + balanced-match@1.0.2: {} + balanced-match@4.0.4: {} + + base64-arraybuffer@1.0.2: {} + + base64-js@1.5.1: {} + binary-extensions@2.2.0: {} birpc@2.6.1: {} @@ -4854,6 +5735,10 @@ snapshots: dependencies: balanced-match: 1.0.2 + brace-expansion@5.0.5: + dependencies: + balanced-match: 4.0.4 + braces@3.0.3: dependencies: fill-range: 7.1.1 @@ -4865,8 +5750,18 @@ snapshots: node-releases: 2.0.14 update-browserslist-db: 1.0.13(browserslist@4.22.3) + buffer@6.0.3: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + cac@6.7.14: {} + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + callsites@3.1.0: {} camelcase-keys@9.1.3: @@ -4959,6 +5854,10 @@ snapshots: colorette@2.0.20: {} + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + comma-separated-tokens@2.0.3: {} commander@7.2.0: {} @@ -4977,6 +5876,8 @@ snapshots: dependencies: is-what: 5.5.0 + core-js-pure@3.49.0: {} + core-js@3.35.1: {} cose-base@1.0.3: @@ -5228,6 +6129,8 @@ snapshots: deep-is@0.1.4: {} + deepmerge@4.3.1: {} + defu@6.1.4: {} del@7.0.0: @@ -5245,6 +6148,8 @@ snapshots: dependencies: robust-predicates: 3.0.1 + delayed-stream@1.0.0: {} + dequal@2.0.3: {} destr@2.0.2: {} @@ -5294,6 +6199,14 @@ snapshots: domelementtype: 2.3.0 domhandler: 5.0.3 + drange@1.1.1: {} + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + duplexer@0.1.2: {} eastasianwidth@0.2.0: {} @@ -5312,6 +6225,21 @@ snapshots: entities@4.5.0: {} + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + 
es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + has-tostringtag: 1.0.2 + hasown: 2.0.3 + esbuild@0.21.5: optionalDependencies: '@esbuild/aix-ppc64': 0.21.5 @@ -5501,6 +6429,8 @@ snapshots: merge2: 1.4.1 micromatch: 4.0.8 + fast-json-patch@3.1.1: {} + fast-json-stable-stringify@2.1.0: {} fast-levenshtein@2.0.6: {} @@ -5539,11 +6469,21 @@ snapshots: dependencies: tabbable: 6.3.0 + follow-redirects@1.16.0: {} + foreground-child@3.1.1: dependencies: cross-spawn: 7.0.6 signal-exit: 4.1.0 + form-data@4.0.5: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + hasown: 2.0.3 + mime-types: 2.1.35 + formdata-polyfill@4.0.10: dependencies: fetch-blob: 3.2.0 @@ -5553,10 +6493,30 @@ snapshots: fsevents@2.3.3: optional: true + function-bind@1.1.2: {} + gensync@1.0.0-beta.2: {} get-caller-file@2.0.5: {} + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.3 + math-intrinsics: 1.1.0 + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + get-stdin@8.0.0: {} get-stream@6.0.1: {} @@ -5613,6 +6573,8 @@ snapshots: merge2: 1.4.1 slash: 4.0.0 + gopd@1.2.0: {} + graceful-fs@4.2.10: {} graphemer@1.4.0: {} @@ -5629,11 +6591,21 @@ snapshots: has-flag@4.0.0: {} + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + hasha@5.2.2: dependencies: is-stream: 2.0.1 type-fest: 0.8.1 + hasown@2.0.3: + dependencies: + function-bind: 1.1.2 + hast-util-to-html@9.0.5: dependencies: '@types/hast': 3.0.4 @@ -5671,6 +6643,8 @@ snapshots: dependencies: safer-buffer: 2.1.2 + ieee754@1.2.1: {} + ignore@5.2.4: {} ignore@5.3.1: {} @@ -5786,6 +6760,22 @@ snapshots: dependencies: uc.micro: 1.0.6 + lit-element@4.2.2: + dependencies: + 
'@lit-labs/ssr-dom-shim': 1.5.1 + '@lit/reactive-element': 2.1.2 + lit-html: 3.3.2 + + lit-html@3.3.2: + dependencies: + '@types/trusted-types': 2.0.7 + + lit@3.3.2: + dependencies: + '@lit/reactive-element': 2.1.2 + lit-element: 4.2.2 + lit-html: 3.3.2 + local-pkg@0.5.0: dependencies: mlly: 1.5.0 @@ -5852,6 +6842,10 @@ snapshots: mdurl: 1.0.1 uc.micro: 1.0.6 + marked@4.3.0: {} + + math-intrinsics@1.1.0: {} + mdast-util-from-markdown@1.3.1: dependencies: '@types/mdast': 3.0.12 @@ -6075,8 +7069,22 @@ snapshots: braces: 3.0.3 picomatch: 2.3.1 + mime-db@1.52.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + mimic-fn@2.1.0: {} + minim@0.23.8: + dependencies: + lodash: 4.17.21 + + minimatch@10.2.5: + dependencies: + brace-expansion: 5.0.5 + minimatch@3.1.2: dependencies: brace-expansion: 1.1.12 @@ -6114,11 +7122,23 @@ snapshots: natural-compare@1.4.0: {} + neotraverse@0.6.18: {} + + node-abort-controller@3.1.1: {} + node-addon-api@7.1.1: optional: true + node-addon-api@8.7.0: + optional: true + node-domexception@1.0.0: {} + node-fetch-commonjs@3.3.2: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 3.3.3 + node-fetch-native@1.6.2: {} node-fetch@3.3.2: @@ -6127,6 +7147,9 @@ snapshots: fetch-blob: 3.2.0 formdata-polyfill: 4.0.10 + node-gyp-build@4.8.4: + optional: true + node-releases@2.0.14: {} non-layered-tidy-tree-layout@2.0.2: {} @@ -6161,6 +7184,14 @@ snapshots: regex: 6.0.1 regex-recursion: 6.0.2 + openapi-path-templating@2.2.1: + dependencies: + apg-lite: 1.0.5 + + openapi-server-url-templating@1.3.0: + dependencies: + apg-lite: 1.0.5 + optionator@0.9.4: dependencies: deep-is: 0.1.4 @@ -6286,14 +7317,42 @@ snapshots: ansi-styles: 5.2.0 react-is: 18.3.1 + prismjs@1.30.0: {} + property-information@7.1.0: {} + proxy-from-env@2.1.0: {} + punycode@2.3.1: {} queue-microtask@1.2.3: {} quick-lru@6.1.2: {} + ramda-adjunct@5.1.0(ramda@0.30.1): + dependencies: + ramda: 0.30.1 + + ramda@0.30.1: {} + + randexp@0.5.3: + dependencies: + drange: 
1.1.1 + ret: 0.2.2 + + rapidoc@9.3.8: + dependencies: + '@apitools/openapi-parser': 0.0.33 + base64-arraybuffer: 1.0.2 + buffer: 6.0.3 + lit: 3.3.2 + marked: 4.3.0 + prismjs: 1.30.0 + randexp: 0.5.3 + xml-but-prettier: 1.0.1 + transitivePeerDependencies: + - debug + react-is@18.3.1: {} readdirp@3.6.0: @@ -6312,6 +7371,8 @@ snapshots: dependencies: regex-utilities: 2.3.0 + repeat-string@1.6.1: {} + require-directory@2.1.1: {} require-relative@0.8.7: {} @@ -6320,6 +7381,8 @@ snapshots: resolve-pkg-maps@1.0.0: {} + ret@0.2.2: {} + reusify@1.0.4: {} rfdc@1.4.1: {} @@ -6411,6 +7474,8 @@ snapshots: '@shikijs/vscode-textmate': 10.0.2 '@types/hast': 3.0.4 + short-unique-id@5.3.2: {} + siginfo@2.0.0: {} signal-exit@3.0.7: {} @@ -6500,6 +7565,30 @@ snapshots: csso: 5.0.5 picocolors: 1.0.0 + swagger-client@3.37.3: + dependencies: + '@babel/runtime-corejs3': 7.29.2 + '@scarf/scarf': 1.4.0 + '@swagger-api/apidom-core': 1.11.0 + '@swagger-api/apidom-error': 1.11.0 + '@swagger-api/apidom-json-pointer': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-1': 1.11.0 + '@swagger-api/apidom-ns-openapi-3-2': 1.11.0 + '@swagger-api/apidom-reference': 1.11.0 + '@swaggerexpert/cookie': 2.0.2 + deepmerge: 4.3.1 + fast-json-patch: 3.1.1 + js-yaml: 4.1.0 + neotraverse: 0.6.18 + node-abort-controller: 3.1.1 + node-fetch-commonjs: 3.3.2 + openapi-path-templating: 2.2.1 + openapi-server-url-templating: 1.3.0 + ramda: 0.30.1 + ramda-adjunct: 5.1.0(ramda@0.30.1) + transitivePeerDependencies: + - debug + tabbable@6.3.0: {} tarjan-graph@3.0.0: {} @@ -6524,6 +7613,26 @@ snapshots: totalist@3.0.1: {} + tree-sitter-json@0.24.8(tree-sitter@0.21.1): + dependencies: + node-addon-api: 8.7.0 + node-gyp-build: 4.8.4 + optionalDependencies: + tree-sitter: 0.21.1 + optional: true + + tree-sitter@0.21.1: + dependencies: + node-addon-api: 8.7.0 + node-gyp-build: 4.8.4 + optional: true + + tree-sitter@0.22.4: + dependencies: + node-addon-api: 8.7.0 + node-gyp-build: 4.8.4 + optional: true + trim-lines@3.0.1: {} 
ts-api-utils@1.4.3(typescript@5.6.3): @@ -6536,10 +7645,14 @@ snapshots: ts-dedent@2.2.0: {} + ts-mixer@6.0.4: {} + ts-pattern@4.3.0: {} ts-pattern@5.9.0: {} + ts-toolbelt@9.6.0: {} + tslib@2.8.1: {} tsx@4.20.6: @@ -6561,6 +7674,10 @@ snapshots: type-fest@4.10.2: {} + types-ramda@0.30.1: + dependencies: + ts-toolbelt: 9.6.0 + typescript@5.6.3: {} uc.micro@1.0.6: {} @@ -6630,6 +7747,8 @@ snapshots: - rollup - supports-color + unraw@3.0.0: {} + update-browserslist-db@1.0.13(browserslist@4.22.3): dependencies: browserslist: 4.22.3 @@ -6693,7 +7812,7 @@ snapshots: fsevents: 2.3.3 sass: 1.93.2 - vitepress@1.6.4(@algolia/client-search@5.41.0)(@types/node@20.6.0)(postcss@8.5.6)(sass@1.93.2)(search-insights@2.13.0)(typescript@5.6.3): + vitepress@1.6.4(@algolia/client-search@5.41.0)(@types/node@20.6.0)(axios@1.15.2)(postcss@8.5.6)(sass@1.93.2)(search-insights@2.13.0)(typescript@5.6.3): dependencies: '@docsearch/css': 3.8.2 '@docsearch/js': 3.8.2(@algolia/client-search@5.41.0)(search-insights@2.13.0) @@ -6706,7 +7825,7 @@ snapshots: '@vue/devtools-api': 7.7.7 '@vue/shared': 3.5.22 '@vueuse/core': 12.8.2(typescript@5.6.3) - '@vueuse/integrations': 12.8.2(focus-trap@7.6.6)(typescript@5.6.3) + '@vueuse/integrations': 12.8.2(axios@1.15.2)(focus-trap@7.6.6)(typescript@5.6.3) focus-trap: 7.6.6 mark.js: 8.11.1 minisearch: 7.2.0 @@ -6806,6 +7925,9 @@ snapshots: web-streams-polyfill@3.3.3: {} + web-tree-sitter@0.24.5: + optional: true + web-worker@1.2.0: {} which@2.0.2: @@ -6833,6 +7955,10 @@ snapshots: wrappy@1.0.2: {} + xml-but-prettier@1.0.1: + dependencies: + repeat-string: 1.6.1 + xml-name-validator@4.0.0: {} y18n@5.0.8: {} diff --git a/src/blockchain/accounts.md b/src/blockchain/accounts.md index 8dda5e69e..66b32aa9e 100644 --- a/src/blockchain/accounts.md +++ b/src/blockchain/accounts.md @@ -47,6 +47,41 @@ See [client configuration](/guide/configure/client-configuration.md) and [key generation](/guide/security/generating-cryptographic-keys.md) for the current key formats. 
+## Try It on Taira + +List a few canonical account IDs from the public Taira testnet: + +```bash +curl -fsS 'https://taira.sora.org/v1/accounts?limit=5' \ + | jq -r '.items[] | [.id, (.primary_alias // "-")] | @tsv' +``` + +To inspect account assets, copy an account ID from the first call and URL-encode +it before placing it in the path. This Python snippet does that for the first +listed account: + +```bash +python3 - <<'PY' +import json +import urllib.parse +import urllib.request + +root = "https://taira.sora.org" +accounts = json.load(urllib.request.urlopen(f"{root}/v1/accounts?limit=1"))["items"] +account_id = accounts[0]["id"] +encoded = urllib.parse.quote(account_id, safe="") +assets = json.load( + urllib.request.urlopen(f"{root}/v1/accounts/{encoded}/assets?limit=5") +) + +print(json.dumps({"account_id": account_id, "assets": assets["items"]}, indent=2)) +PY +``` + +These are public reads. Creating or updating an account is a signed transaction +and requires the faucet-funded Taira setup described in +[Connect to SORA Nexus Dataspaces](/get-started/sora-nexus-dataspaces.md). + ## Registration and permissions Accounts are registered and unregistered with the generic diff --git a/src/blockchain/assets.md b/src/blockchain/assets.md index 565a1c124..6b4921150 100644 --- a/src/blockchain/assets.md +++ b/src/blockchain/assets.md @@ -1,8 +1,8 @@ # Assets An Iroha asset is a numeric balance held by an account. Every concrete -balance points to an `AssetDefinition`, and the definition describes how that -asset can be named, minted, displayed, and partitioned. +balance points to an `AssetDefinition`, and the definition describes how +that asset can be named, minted, displayed, and partitioned. 
## Asset Definition @@ -17,7 +17,8 @@ An `AssetDefinition` contains: - `mintable`: the mintability policy - `logo`: optional `SoraFS` URI - `metadata`: arbitrary key-value metadata -- `balance_scope_policy`: whether balances are global or dataspace-restricted +- `balance_scope_policy`: whether balances are global or + dataspace-restricted - `owned_by`: the account that registered or owns the definition - `total_quantity`: total issued quantity - `confidential_policy`: policy for shielded asset operations @@ -31,8 +32,8 @@ address. An `Asset` contains: -- `id`: an `AssetId`, which combines the asset definition, holder account, and - optional balance scope +- `id`: an `AssetId`, which combines the asset definition, holder account, + and optional balance scope - `value`: a `Numeric` balance The holder account is canonical and domainless. The asset definition may be @@ -43,16 +44,16 @@ projected under a dataspace-qualified domain, for example Asset definitions support these mintability modes: -| Mode | Meaning | -| --- | --- | -| `Infinitely` | Elastic supply. The asset can be minted and burned repeatedly. | -| `Once` | Fixed-supply token. It can be minted once and then burned. | -| `Not` | Fixed-supply token that can be burned but not minted again. | +| Mode | Meaning | +| ------------ | ----------------------------------------------------------------- | +| `Infinitely` | Elastic supply. The asset can be minted and burned repeatedly. | +| `Once` | Fixed-supply token. It can be minted once and then burned. | +| `Not` | Fixed-supply token that can be burned but not minted again. | | `Limited(n)` | Minting is allowed for a limited number of additional operations. | Use `Infinitely` for normal elastic assets and `Once` or `Limited(n)` for -fixed-supply or bounded-supply assets. Do not use `Not` as an initial policy -unless the asset supply is already established. +fixed-supply or bounded-supply assets. 
Do not use `Not` as an initial +policy unless the asset supply is already established. ## Balance Scope @@ -64,10 +65,59 @@ The `balance_scope_policy` controls how balances are bucketed: Dataspace-restricted balances are useful when the same asset definition is used across multiple Nexus dataspaces but balances must remain isolated. +## Try It on Taira + +These read-only calls show real asset definitions on the public Taira testnet: + +```bash +TAIRA_ROOT=https://taira.sora.org + +curl -fsS "$TAIRA_ROOT/v1/assets/definitions?limit=10" \ + | jq -r '.items[] | [.id, .name, .mintable, .total_quantity] | @tsv' +``` + +Find the current Taira XOR fee asset definition: + +```bash +curl -fsS "$TAIRA_ROOT/v1/assets/definitions?limit=100" \ + | jq '.items[] + | select(.name == "XOR") + | {id, name, total_quantity, mintable, confidential_policy: .confidential_policy.mode}' +``` + +Look for definitions that carry metadata: + +```bash +curl -fsS "$TAIRA_ROOT/v1/assets/definitions?limit=100" \ + | jq '.items[] + | select((.metadata | length) > 0) + | {id, name, metadata}' +``` + +All three examples are reads. To mint, burn, or transfer assets on Taira, use a +faucet-funded account and the guarded flow in +[Connect to SORA Nexus Dataspaces](/get-started/sora-nexus-dataspaces.md). + +For a fee-paying Taira asset example, save the faucet helper from +[Get Testnet XOR on Taira](/get-started/sora-nexus-dataspaces.md#_4-get-testnet-xor-on-taira) +as `taira_faucet_claim.py`, then claim the faucet asset first and use it as the +transaction gas asset: + +```bash +export TAIRA_ACCOUNT_ID='' +export TAIRA_FEE_ASSET=6TEAJqbb8oEPmLncoNiMRbLEK6tw + +python3 taira_faucet_claim.py "$TAIRA_ACCOUNT_ID" +printf '{"gas_asset_id":"%s"}\n' "$TAIRA_FEE_ASSET" > taira.tx-metadata.json +``` + +Then include `--metadata ./taira.tx-metadata.json` on `ledger asset mint`, +`ledger asset burn`, and `ledger asset transfer` commands. 
+ ## Instructions -Assets can be registered, minted, burned, and transferred with Iroha Special -Instructions: +Assets can be registered, minted, burned, and transferred with Iroha +Special Instructions: - [`Register` and `Unregister`](/blockchain/instructions.md#un-register) - [`Mint` and `Burn`](/blockchain/instructions.md#mint-burn) @@ -81,3 +131,4 @@ See also: - [Python tutorial](/guide/tutorials/python.md) - [JavaScript/TypeScript tutorial](/guide/tutorials/javascript.md) - [Data model](/blockchain/data-model.md) +- [NFTs](/blockchain/nfts.md) diff --git a/src/blockchain/consensus.md b/src/blockchain/consensus.md index 198a9139c..d25fe63ea 100644 --- a/src/blockchain/consensus.md +++ b/src/blockchain/consensus.md @@ -1,3 +1,7 @@ + + # Consensus Each time you send a transaction to Iroha, it gets put into a queue. When @@ -31,6 +35,119 @@ without the use of ISI, the good™ peers cannot know of them. They won't be able to reproduce the hash of the world state, and thus consensus will fail. The same thing happens if the peers have different instructions. +## Sumeragi + +Sumeragi is Iroha's Byzantine-fault-tolerant consensus engine. It takes +transactions from the queue, has validator peers agree on the same ordered +block, and finalizes that block only after enough validators have +reproduced the same result and signed the commit certificate. + +Sumeragi proposal-to-commit data flow + +### Proposal and commit path + +Sumeragi runs the ledger forward one block height at a time. At each height, +one validator acts as proposer for the current view. The proposer drains +eligible transactions from the queue, builds a candidate block, and announces +the proposal to the active validator set. + +The same Sumeragi pipeline is used in both permissioned and Nominated +Proof-of-Stake (NPoS) deployments: + +1. A validator proposes a block from queued transactions. +2. Validators validate the proposal by executing the transactions against + the same world state. +3. 
Validators exchange votes and quorum certificates for the current height + and view. +4. Once the commit quorum is reached, peers commit the block and update + their world state. + +Validators sign only data they can reproduce locally. Before voting, a +validator checks that the proposal belongs to the expected chain, height, and +view; that transaction signatures and limits are valid; that lane routing and +executor validation are deterministic; and that executing the payload produces +the expected state transition. If the local result differs, the validator +rejects the proposal instead of voting for it. + +Votes are small signed consensus messages. They refer to the proposed block, +the height, the view, and the validator identity. Collectors aggregate those +votes into a quorum certificate or commit certificate. The certificate is the +durable proof that enough validators observed the same result for the same +block. + +### Quorum, collectors, and observers + +The voting validator count `n` defines the Byzantine fault budget. For +networks with at least four validators, the budget is `f = floor((n - 1) / 3)` +and the commit quorum is `2f + 1`. For one to three validators, all validators +are required for commit, which is useful for development but has no practical +offline slack. + +Collectors are a fanout optimization. Instead of every validator sending every +vote to every other validator, Sumeragi can select one or more collectors for a +height. The collectors assemble votes, publish quorum progress, and reduce the +amount of duplicate vote traffic. The effective collector settings are exposed +through `ops sumeragi collectors` and `/v1/sumeragi/collectors`. + +Observer peers can synchronize committed blocks, but they do not propose, +vote, collect votes, or count toward the commit quorum. Use observers when a +deployment needs local query capacity, indexing, monitoring, or regional block +replication without increasing the number of voting validators. 
+ +### View changes and recovery + +A view is Sumeragi's attempt to finalize one height with a particular proposer +and timing plan. If proposal, payload, vote, or commit progress stalls, the +pacemaker can move the height to a later view. A view change does not rewrite a +committed block. It changes how validators try to finish the uncommitted +height, carrying forward the highest known quorum or commit evidence so peers +do not finalize conflicting blocks. + +Payload recovery is separate from the finality decision. A peer might receive +a quorum or commit certificate before it has the full block payload. In that +case, the peer uses reliable broadcast (RBC) or block sync to recover the +payload, verifies it against the advertised hashes, and only then applies the +block to the world state and Kura. + +### Consensus modes + +The selected mode controls how the validator set is formed and operated. It +is declared in genesis through [`consensus_mode`](/reference/genesis.md) +and in peer configuration through `sumeragi.consensus_mode`. Treat it as +network-wide state: validators need the same signed genesis, topology, +trusted peer data, and effective Sumeragi parameters. 
+ +Sumeragi consensus mode data flow + +| Mode | Best fit | Validator set | Operational focus | +| ------------ | -------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------- | +| Permissioned | Private, consortium, and operator-managed networks | Validators come from the trusted peer topology agreed by the deployment | Keep all validators on the same signed genesis, trusted peers, peer keys, and Sumeragi parameters | +| NPoS | Public or Nexus-oriented networks where validation follows nomination and stake policy | Validators are selected by the NPoS profile, usually across epochs, and require BLS keys plus Proofs-of-Possession | Keep stake snapshots, epoch parameters, validator PoPs, and NPoS phase timeouts aligned across the network | + +::: tip Permissioned mode + +Use permissioned mode when the validator roster is an explicit operational +choice. This is the usual starting point for self-hosted Iroha networks +because membership changes are deliberate governance or administrator +actions. The important operational rule is that every validator must run with +the same view of genesis, trusted peers, BLS Proofs-of-Possession, and +Sumeragi parameters. A single peer with a different topology or signed genesis +can prevent the network from committing. + +::: + +::: tip NPoS mode + +Use NPoS mode when the deployment profile expects validator participation +to be driven by nomination and stake state. Public SORA Nexus deployments +use NPoS, and their generated profiles include the BLS validator +identities, Proofs-of-Possession, epoch settings, and Sumeragi NPoS +parameters needed at startup. 
Epoch changes can replace the active validator +set at defined heights, so operators need to monitor both consensus health and +the stake or nomination state that feeds the next roster. + +::: + ## Multilane consensus Iroha's multilane consensus path is implemented through Nexus lane and @@ -79,6 +196,46 @@ settlement commitments and relay envelopes that bind the block header, commit certificate, data-availability commitment hash, settlement proof, and lane payload size. +## Reliable broadcast (RBC) + +Reliable broadcast (RBC) is Sumeragi's payload dissemination and recovery +path. It helps validators and observers obtain the block body that belongs +to a proposal or commit certificate, especially when a `BlockCreated` +message, block-sync update, or direct payload transfer is delayed or lost. + +RBC works at the payload level. The proposer announces an RBC session for a +block height, view, and payload hash, then sends payload chunks across the +commit topology. Peers track chunk receipt, validate the recovered payload +against the advertised hash, and exchange `READY` and `DELIVER` signals +once enough validators have observed the same payload. Sessions are bounded +by TTL, chunk, fanout, pending-stash, and persisted-store limits so +recovery traffic cannot grow without limit. + +RBC is not a separate consensus decision and it does not replace the commit +certificate. A block still finalizes only when the peer has a valid commit +certificate and the matching payload locally. When data availability is +enabled, RBC contributes availability evidence and payload recovery, but +commit progress is driven by the commit certificate plus local payload. If +the certificate arrives before the payload, the peer can recover the +payload through RBC or block sync and then commit. 
+ +Operationally, RBC is useful for diagnosing missing-payload and +data-availability bottlenecks: + +- `iroha --output-format text ops sumeragi rbc status` shows aggregate RBC + session and throughput counters. +- `iroha --output-format text ops sumeragi rbc sessions` lists active + sessions, including chunk progress, readiness, delivery state, and + lane/dataspace backlog. +- `GET /v1/sumeragi/rbc` and `GET /v1/sumeragi/rbc/sessions` expose the + same data over Torii; see + [Torii endpoints](/reference/torii-endpoints.md). +- Prometheus signals such as `sumeragi_rbc_store_pressure`, + `sumeragi_rbc_backpressure_deferrals_total`, and per-lane or + per-dataspace RBC backlog gauges help separate network loss, chunk + recovery, and storage pressure; see + [Performance and metrics](/guide/advanced/metrics.md). + Kura uses the derived lane configuration for storage layout. Each lane receives deterministic storage names such as `blocks/lane_000_core` and `merge_ledger/lane_000_core_merge.log`; lane lifecycle changes can @@ -87,5 +244,5 @@ block order. [^1]: For prospective wizards, the - [Iroha 2 Whitepaper](https://github.com/hyperledger-iroha/iroha/blob/main/docs/source/iroha_2_whitepaper.md) + [Iroha 2 Whitepaper](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/source/iroha_2_whitepaper.md) is a good start. diff --git a/src/blockchain/data-model.md b/src/blockchain/data-model.md index 658d01051..8f0e4ff3e 100644 --- a/src/blockchain/data-model.md +++ b/src/blockchain/data-model.md @@ -1,8 +1,8 @@ # Data Model Iroha stores ledger state in the `World`. 
The current model keeps the same -high-level entities as Iroha 2 while changing several identifiers for Iroha 3 -and Nexus flows: +high-level entities as Iroha 2 while changing several identifiers for Iroha +3 and Nexus flows: - domains are dataspace-qualified, for example `payments.universal` - accounts are canonical and domainless; the account ID is derived from the @@ -10,6 +10,11 @@ and Nexus flows: - asset definitions can keep a domain/name projection, but their canonical textual address is an opaque Base58 identifier - assets are balances held by accounts for a specific asset definition +- NFTs are uniquely owned records with domain-qualified IDs and metadata + content +- RWAs are generated-ID lots that represent off-chain assets with current + owner, quantity, provenance, metadata, holds, freezes, and lifecycle + controls ```mermaid classDiagram @@ -42,16 +47,39 @@ class Asset { id: AssetId value } +class Nft { + id: NftId + content: Metadata + owned_by: AccountId +} +class Rwa { + id: RwaId + owned_by: AccountId + quantity + spec + primary_reference + status + metadata + parents + controls + is_frozen + held_quantity +} World *-- Domain : registers World *-- Account : registers World *-- AssetDefinition : registers World *-- Asset : stores balances +World *-- Nft : registers +World *-- Rwa : registers lots Account --> AccountController : authorized by Domain --> Account : owned_by AssetDefinition --> Domain : optional projection Asset --> AssetDefinition : definition Asset --> Account : held by +Nft --> Domain : scoped by +Nft --> Account : owned_by +Rwa --> Account : owned_by ``` ## Example @@ -59,9 +87,10 @@ Asset --> Account : held by In an Iroha 3 network, `wonderland.universal` is a domain inside the `universal` dataspace. `alice` and `rabbit` are not encoded as `alice@wonderland`; they are canonical accounts controlled by their keys or -policies. 
A projected asset definition can still be constructed from a domain -and name such as `rose` in `wonderland.universal`, while the canonical asset -definition address used on the wire is the generated Base58 address. +policies. A projected asset definition can still be constructed from a +domain and name such as `rose` in `wonderland.universal`, while the +canonical asset definition address used on the wire is the generated Base58 +address. ```mermaid classDiagram @@ -88,13 +117,53 @@ account_alice --> asset_rose : holds balance account_rabbit --> asset_rose : may receive balance ``` +## Aliases + +Aliases are human-facing names layered over canonical ledger identifiers. +They are useful at API, CLI, wallet, and explorer boundaries, but canonical +IDs remain the stable identifiers stored in strict ledger fields. + +| Target | Canonical target | Alias literal | Backing model | +| -------------- | --------------------------------------------------- | ------------------------------------------------------ | ----------------------------------------------------------------------------- | +| User account | domainless `AccountId` encoded as an I105 address | `name@domain.dataspace` or `name@dataspace` | `AccountAlias`; primary alias is `Account.label`, extra aliases are bindings | +| Asset definition | canonical `AssetDefinitionId` Base58 address | `name#domain.dataspace` or `name#dataspace` | `AssetDefinitionAlias` bound to an asset definition | +| Contract | canonical Bech32m `ContractAddress` | `name::domain.dataspace` or `name::dataspace` | `ContractAlias` bound to a deployed contract address | +| Domain name | `DomainId` in `domain.dataspace` form | `domain.dataspace` | SNS `domain` namespace record | +| Dataspace name | numeric `DataSpaceId` from the active Nexus catalog | dataspace alias such as `universal`, `paynet`, or `zk` | SNS `dataspace` namespace record plus the active dataspace catalog | + +Account aliases are the user-facing account names. 
They survive account +rekeying because the alias points at the active account ID through world-state +indexes and account rekey records. Use `SetPrimaryAccountAlias` for the +account's primary label, `SetAccountAliasBinding` for additional non-primary +aliases, and `FindAccountByAlias` or `FindAliasesByAccountId` for reads. +Account aliases normally require an active SNS account-alias lease acquired +with `AcquireAccountAliasLease` and renewed with `RenewAccountAliasLease`. + +Asset aliases name asset definitions, not individual account balances. Asset +aliases and contract aliases are direct bindings from a readable name to an +existing canonical target. Asset aliases are set with `SetAssetDefinitionAlias`; +the alias name segment must match the asset definition display name or +projected definition name. Contract aliases are set with `SetContractAlias`; +the alias dataspace must match the dataspace encoded in the contract address. +Both bindings can carry `lease_expiry_ms`; after expiry they stop resolving +when the grace window elapses and are swept from world-state indexes. + +Domains do not have a separate `DomainAlias` object. A domain identifier is +already a dataspace-qualified name such as `payments.universal`. SNS tracks +lease ownership for domain names in the `domain` namespace and for dataspace +aliases in the `dataspace` namespace. The reserved `universal` dataspace alias +must remain defined. 
+ ## Related docs -| Topic | Where to go | -| --- | --- | -| Domains | [Domains](/blockchain/domains.md) | -| Accounts | [Accounts](/blockchain/accounts.md) | -| Assets | [Assets](/blockchain/assets.md) | -| Metadata | [Metadata](/blockchain/metadata.md) | +| Topic | Where to go | +| -------------------------------------- | ------------------------------------------- | +| Domains | [Domains](/blockchain/domains.md) | +| Accounts | [Accounts](/blockchain/accounts.md) | +| Assets | [Assets](/blockchain/assets.md) | +| NFTs | [NFTs](/blockchain/nfts.md) | +| Real-world assets | [Real-World Assets](/blockchain/rwas.md) | +| Metadata | [Metadata](/blockchain/metadata.md) | | Registration and transfer instructions | [Instructions](/blockchain/instructions.md) | -| Runtime permissions | [Permissions](/blockchain/permissions.md) | +| Runtime permissions | [Permissions](/blockchain/permissions.md) | +| Naming rules | [Naming rules](/reference/naming.md) | diff --git a/src/blockchain/domains.md b/src/blockchain/domains.md index 5b92417ca..5c728b29f 100644 --- a/src/blockchain/domains.md +++ b/src/blockchain/domains.md @@ -41,6 +41,48 @@ under the active runtime validator. Domain metadata can be updated with [`SetKeyValue` and `RemoveKeyValue`](/blockchain/instructions.md#setkeyvalue-removekeyvalue) when the authority has permission to modify that domain. +## Try It on Taira + +List the domains currently visible on the public Taira testnet: + +```bash +curl -fsS 'https://taira.sora.org/v1/domains?limit=20' \ + | jq -r '.items[].id' +``` + +Map the public lane catalog back to dataspace aliases: + +```bash +curl -fsS https://taira.sora.org/status \ + | jq -r '.teu_lane_commit[] + | [.lane_id, .alias, .dataspace_alias, .visibility, .block_height, .finality_lag_slots] + | @tsv' +``` + +Use the first command when an app needs to check whether a domain exists. Use +the lane catalog when you need to confirm whether a dataspace is public, +restricted, or lagging behind the core lane. 
+ +Domain registration is a fee-paying write. Before trying it on Taira, save the +faucet helper from +[Get Testnet XOR on Taira](/get-started/sora-nexus-dataspaces.md#_4-get-testnet-xor-on-taira) +as `taira_faucet_claim.py`, fund the signer through the public faucet, and +attach fee metadata: + +```bash +export TAIRA_ACCOUNT_ID='' +export TAIRA_FEE_ASSET=6TEAJqbb8oEPmLncoNiMRbLEK6tw + +python3 taira_faucet_claim.py "$TAIRA_ACCOUNT_ID" +printf '{"gas_asset_id":"%s"}\n' "$TAIRA_FEE_ASSET" > taira.tx-metadata.json + +iroha --config ./taira.client.toml \ + --metadata ./taira.tx-metadata.json \ + ledger domain register --id docs-example.universal +``` + +Use a unique domain name for repeated testnet runs. + ## Relationship to other entities Domains group ledger objects and provide a namespace for domain-scoped data. diff --git a/src/blockchain/events.md b/src/blockchain/events.md index e5d3405f8..49b0fa543 100644 --- a/src/blockchain/events.md +++ b/src/blockchain/events.md @@ -18,6 +18,32 @@ and status. The status can be either `Validating` (validation in progress), `Rejected`, or `Committed`. If an entity was rejected, the reason for the rejection is provided. +### Try It on Taira + +Check that the public pipeline event stream is mounted: + +```bash +curl -fsSI https://taira.sora.org/v1/events/sse \ + | sed -n '1,12p' +``` + +For a snapshot you can inspect without keeping a stream open, read recent +explorer transactions: + +```bash +curl -fsS 'https://taira.sora.org/v1/explorer/transactions?page=1&per_page=5' \ + | jq '{pagination, txs: [.items[] | {hash, block, status, executable}]}' +``` + +Open the SSE route in a terminal when you need live events: + +```bash +curl -fsS -N https://taira.sora.org/v1/events/sse +``` + +If no transactions are submitted while the stream is open, the command can stay +quiet even though the route is healthy. 
+ ## Data Events Data events are emitted when there is a change related to ledger data such diff --git a/src/blockchain/fastpq.md b/src/blockchain/fastpq.md index 961190f0b..a7ead27f2 100644 --- a/src/blockchain/fastpq.md +++ b/src/blockchain/fastpq.md @@ -107,6 +107,119 @@ $$ \operatorname{le64}(\operatorname{Hash}(D)[0..8])\bmod p $$ +Here `Hash` means Iroha's `iroha_crypto::Hash::new`, a 32-byte Blake2bVar +digest, unless a formula explicitly names Poseidon2 or SHA-256. + +### Field Arithmetic + +The Rust code represents field elements as canonical `u64` values in +`[0,p)`. Addition and subtraction are: + +$$ +a +_F b = (a+b)\bmod p +$$ + +$$ +a -_F b = (a-b)\bmod p +$$ + +Multiplication first computes the 128-bit product: + +$$ +a\cdot b = \operatorname{lo} + 2^{64}\operatorname{hi} +$$ + +Goldilocks reduction then uses the identity: + +$$ +2^{64}\equiv2^{32}-1\pmod p +$$ + +If: + +$$ +\operatorname{hi}=\operatorname{hi}_{lo}+2^{32}\operatorname{hi}_{hi} +$$ + +then the reducer computes: + +$$ +\operatorname{lo} ++2^{32}\operatorname{hi}_{lo} +-\operatorname{hi}_{lo} +-\operatorname{hi}_{hi} +\pmod p +$$ + +The implementation conditionally adds or subtracts `p` until the result is +canonical. Signed integers, such as balance deltas, are embedded by: + +$$ +\operatorname{field}(x)=x\bmod p,\qquad 0\leq\operatorname{field}(x)
<p,\qquad \operatorname{field}(x)=p+x\ (x<
0) +$$ + +For smaller domains derived from the catalogue root, the generator is: + +$$ +\omega_{\ell}=\omega_{\max}^{2^{k_{\max}-\ell}} +$$ + +### Row and Leaf Hashes + +After LDE, FastPQ hashes each row across all LDE columns. For `m` columns: + +$$ +r_i = +H_F(i,m,x_{i,0},x_{i,1},\ldots,x_{i,m-1}) +$$ + +If row hashes are still on the trace domain rather than the evaluation +domain, the prover interpolates and extends that single row-hash column +with the same coset LDE process. + ### Merkle Openings LDE values are grouped into chunks of: @@ -605,6 +776,29 @@ $$ Odd levels duplicate the last node. Query paths verify by hashing left or right according to the query leaf index parity at each level. +For a leaf at index `i`, a path `(s_0,\ldots,s_{d-1})` verifies against +root `R` by the recurrence: + +$$ +y_0=L_i +$$ + +$$ +y_{k+1}= +\begin{cases} +H_F(\operatorname{seed}(\texttt{fastpq:v1:trace:node}),y_k,s_k), +& \lfloor i/2^k\rfloor \equiv 0 \pmod 2\\ +H_F(\operatorname{seed}(\texttt{fastpq:v1:trace:node}),s_k,y_k), +& \lfloor i/2^k\rfloor \equiv 1 \pmod 2 +\end{cases} +$$ + +The check passes only when: + +$$ +y_d=R +$$ + AIR trace row leaves are: $$ @@ -618,6 +812,21 @@ $$ L^{\text{comp}}_i = H_D(i\|A_i) $$ +The LDE query opening also checks that the value opened at evaluation index +`i` is present in its authenticated chunk: + +$$ +\operatorname{chunk\_index}=\left\lfloor\frac{i}{B_{\text{lde}}}\right\rfloor +$$ + +$$ +\operatorname{chunk\_offset}=i\bmod B_{\text{lde}} +$$ + +$$ +\operatorname{chunk}[\operatorname{chunk\_offset}]=v_i +$$ + ### FRI Folding FRI commits to AIR composition evaluations. For each round `l`, the @@ -676,6 +885,83 @@ $$ The sampled set is returned in sorted order. 
+### Verifier Replay + +The verifier first recomputes the batch commitment: + +$$ +\operatorname{commitment}_{expected} +=\operatorname{trace\_commitment}(\operatorname{params},\operatorname{batch}) +$$ + +and requires: + +$$ +\operatorname{commitment}_{expected} +=\operatorname{proof.trace\_commitment} +$$ + +It also rebuilds public IO: + +$$ +\operatorname{PublicIO}= +(\operatorname{dsid},\operatorname{slot},\operatorname{old\_root}, +\operatorname{new\_root},\operatorname{perm\_root}, +\operatorname{tx\_set\_hash},\operatorname{ordering\_hash}, +\operatorname{permission\_hashes}) +$$ + +Every field must match the proof's public IO byte-for-byte. The verifier +then reconstructs the same transcript and derives the same: + +$$ +\gamma,\quad \alpha_0,\alpha_1,\quad +\beta_0,\ldots,\beta_{\ell-1},\quad +q_0,\ldots,q_{t-1} +$$ + +For each sampled query `q`, it checks: + +$$ +\operatorname{MerkleVerify}( +R_{\text{lde}}, +L_{\lfloor q/B_{\text{lde}}\rfloor}, +\lfloor q/B_{\text{lde}}\rfloor, +\pi_{\text{lde}} +) +$$ + +$$ +\operatorname{MerkleVerify}( +R_{\text{air}}, +L^{\text{air}}_q, +q, +\pi_{\text{air,current}} +) +$$ + +$$ +\operatorname{MerkleVerify}( +R_{\text{air}}, +L^{\text{air}}_{q+1\bmod N_{\text{eval}}}, +q+1\bmod N_{\text{eval}}, +\pi_{\text{air,next}} +) +$$ + +and: + +$$ +A_q = +\operatorname{AIRComposition}( +\operatorname{row}_q,\operatorname{row}_{q+1},\alpha_0,\alpha_1 +) +$$ + +The AIR composition opening must authenticate under `R_air_composition`. +The FRI chain then starts from the same `A_q` and must end in an +authenticated final FRI leaf under the terminal FRI root. + ## What The Prover Checks Before building the trace, the FastPQ prover canonicalizes the batch order @@ -762,6 +1048,135 @@ height, block header hash, settlement hash, and manifest root. A relay is merge admissible only when it has both a QC and valid FastPQ proof material. 
+### AXT Binding Math + +For Nexus AXT envelopes, `AxtFastpqBinding` is canonicalized before proof +replay. Empty parameter values default to `fastpq-lane-balanced`; empty +verifier id and version default to `fastpq` and `v1`; claim type is trimmed +and lowercased. + +The AXT FastPQ public inputs are deterministic byte hashes: + +$$ +\operatorname{dsid}=\operatorname{dsid\_bytes}(\operatorname{source\_dsid}) +$$ + +$$ +\operatorname{slot}=\operatorname{le64}(\operatorname{source\_tx\_commitment}[0..8]) +$$ + +$$ +\operatorname{old\_root} = +\operatorname{Hash}( +\texttt{fastpq-json:old\_root}\| +\operatorname{source\_tx\_commitment}\| +\operatorname{policy\_commitment}\| +\operatorname{effect\_type} +) +$$ + +$$ +\operatorname{new\_root} = +\operatorname{Hash}( +\texttt{fastpq-json:new\_root}\| +\operatorname{source\_tx\_commitment}\| +\operatorname{claim\_digest}\| +\operatorname{effect\_type} +) +$$ + +$$ +\operatorname{perm\_root} = +\operatorname{Hash}( +\texttt{fastpq-json:perm\_root}\| +\operatorname{policy\_commitment}\| +\operatorname{verifier\_id}\| +\operatorname{verifier\_version} +) +$$ + +$$ +\operatorname{tx\_set\_hash} = +\operatorname{Hash}( +\texttt{fastpq-json:tx\_set\_hash}\| +\operatorname{source\_tx\_commitment}\| +\operatorname{claim\_digest}\| +\operatorname{witness\_commitment} +) +$$ + +AXT transition keys are: + +$$ +\operatorname{key}(\operatorname{prefix},x,y)= +\operatorname{prefix}\|\texttt{/}\|x\|\texttt{/}\|y +$$ + +The `authorization` claim inserts a role-grant row: + +$$ +\operatorname{role\_id}=\operatorname{claim\_digest} +$$ + +$$ +\operatorname{permission\_id}=\operatorname{witness\_commitment} +$$ + +$$ +\operatorname{epoch}= +\operatorname{le64}(\operatorname{policy\_commitment}[0..8]) +$$ + +and a metadata row binding the authorization policy. The `compliance` claim +inserts two metadata rows: one for policy and one for target dataspaces. 
+ +For `tx_predicate` and `value_conservation`, an explicit effect amount is +used when the binding contains a positive source or destination amount. +Otherwise the code derives a bounded deterministic amount: + +$$ +\operatorname{bounded}(d,\min,\operatorname{span}) += +\min + (\operatorname{le64}(d[0..8])\bmod\max(\operatorname{span},1)) +$$ + +Then the same transfer equations are used: + +$$ +\operatorname{sender\_after}=\operatorname{sender\_before}-a +$$ + +$$ +\operatorname{receiver\_after}=\operatorname{receiver\_before}+a +$$ + +The synthetic sender and receiver account ids are generated from key seeds: + +$$ +\operatorname{seed}= +\operatorname{Hash}(\operatorname{label}\|\operatorname{entropy})[0..32] +$$ + +The transfer batch hash is: + +$$ +\operatorname{batch\_hash} = +\operatorname{Hash}( +\operatorname{label}\| +\operatorname{corridor}\| +\operatorname{source\_tx\_commitment}\| +\operatorname{claim\_digest} +) +$$ + +The AXT batch manifest digest is SHA-256 over the Norito encoding of the +canonical binding: + +$$ +\operatorname{manifest\_digest} = +\operatorname{SHA256}(E(\operatorname{canonical\_binding})) +$$ + ## SCCP Transparent Message Proofs The SCCP helper crate also uses FastPQ for transparent cross-chain message @@ -788,6 +1203,76 @@ Its public inputs are derived from the SCCP transparent inner proof: | `perm_root` | Finality block hash | | `tx_set_hash` | Statement hash | +The SCCP canonical encoders write integers little-endian and encode +variable-length byte arrays as: + +$$ +\operatorname{vec}(x)=\operatorname{le32}(|x|)\|x +$$ + +The transparent public input byte string is: + +$$ +P = +\operatorname{version}\| +\operatorname{message\_id}\| +\operatorname{payload\_hash}\| +\operatorname{le32}(\operatorname{target\_domain})\| +\operatorname{commitment\_root}\| +\operatorname{le64}(\operatorname{finality\_height})\| +\operatorname{finality\_block\_hash} +$$ + +The transparent statement bytes are the concatenation of version, chain 
+family, local and counterparty domains, security model, anchor governance, +account codec, finality model, verifier target, verifier backend family, +length-prefixed chain/backend/manifest fields, destination binding hash, +account codec key, payload kind, public input bytes, and payload hash. The +statement hash is: + +$$ +\operatorname{statement\_hash} = +\operatorname{Blake2bVar}_{32}( +\texttt{sccp:transparent:statement:v1}\|\operatorname{statement} +) +$$ + +The FastPQ dataspace id for this proof path is the first sixteen bytes of +another prefixed Blake2b digest: + +$$ +\operatorname{dsid} = +\operatorname{Blake2bVar}_{32}( +\texttt{sccp:transparent:fastpq:dsid:v1}\|\operatorname{statement\_hash} +)[0..16] +$$ + +The SCCP FastPQ batch is exactly: + +$$ +(\texttt{sccp:transparent:v1:statement},\varnothing,\operatorname{statement},\operatorname{MetaSet}) +$$ + +$$ +(\texttt{sccp:transparent:v1:context},\varnothing,E(\operatorname{inner\_proof}),\operatorname{MetaSet}) +$$ + +$$ +(\texttt{sccp:transparent:v1:payload},\varnothing,\operatorname{canonical\_payload},\operatorname{MetaSet}) +$$ + +then sorted by the same FastPQ ordering rule. + +The OpenVerify verifier commitment is SHA-256 over the SCCP message backend +name and the canonical FastPQ verifier descriptor: + +$$ +\operatorname{vk\_hash} = +\operatorname{SHA256}( +\operatorname{message\_backend}\|\operatorname{verifier\_descriptor} +) +$$ + The raw FastPQ proof is Norito-encoded into a `StarkFriOpenProofV1`, then wrapped in an `OpenVerifyEnvelope` with backend `Stark`. SCCP verification rebuilds the same FastPQ batch from the bundle and manifest, checks the @@ -896,7 +1381,8 @@ signals listed in [Performance and Metrics](/guide/advanced/metrics.md). 
## Related Reference -- [Data Model Schema](/reference/data-model-schema.md) for generated type details +- [Data Model Schema](/reference/data-model-schema.md) for generated type + details - `FastpqTransitionBatch` - `FastpqPublicInputs` - `TransferTranscript` diff --git a/src/blockchain/instructions.md b/src/blockchain/instructions.md index 536424b3c..7b56d9446 100644 --- a/src/blockchain/instructions.md +++ b/src/blockchain/instructions.md @@ -35,29 +35,30 @@ you transfer assets, you always need to specify to which account you are transferring them. On the other hand, when you are registering something, all you need is the object that you want to register. -| Instruction | Objects | Destination | -| --------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | -------------------- | -| [Register/Unregister](#un-register) | domains, accounts, asset definitions, NFTs, roles, triggers, peers | | -| [Mint/Burn](#mint-burn) | numeric assets, trigger repetitions | accounts or triggers | -| [SetKeyValue/RemoveKeyValue](#setkeyvalue-removekeyvalue) | objects that have [metadata](./metadata.md): domains, accounts, asset definitions, NFTs, triggers | | -| [SetParameter](#setparameter) | chain parameters | | -| [Grant/Revoke](#grant-revoke) | [roles, permission tokens](/blockchain/permissions.md) | accounts or roles | -| [Transfer](#transfer) | domains, asset definitions, numeric assets, NFTs | accounts | -| [ExecuteTrigger](#executetrigger) | triggers | | -| [Log/Custom/Upgrade](#other-instructions) | logs, executor-specific payloads, executor upgrades | | +| Instruction | Objects | Destination | +| --------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | -------------------- | +| [Register/Unregister](#un-register) | domains, accounts, asset definitions, NFTs, roles, 
triggers, peers | | +| [Mint/Burn](#mint-burn) | numeric assets, trigger repetitions | accounts or triggers | +| [SetKeyValue/RemoveKeyValue](#setkeyvalue-removekeyvalue) | objects that have [metadata](./metadata.md): domains, accounts, asset definitions, NFTs, RWAs, triggers | | +| [SetParameter](#setparameter) | chain parameters | | +| [Grant/Revoke](#grant-revoke) | [roles, permission tokens](/blockchain/permissions.md) | accounts or roles | +| [Transfer](#transfer) | domains, asset definitions, numeric assets, NFTs | accounts | +| [ExecuteTrigger](#executetrigger) | triggers | | +| [Log/Custom/Upgrade](#other-instructions) | logs, executor-specific payloads, executor upgrades | | There is also another way of looking at ISI, in terms of the ledger object they touch: -| Target | Instructions | -| ---------------- | --------------------------------------------------------------------------------------------------------- | -| Account | register/unregister accounts, receive assets, update account metadata, grant/revoke permissions and roles | -| Domain | register/unregister domains, transfer domain ownership, update domain metadata | -| Asset definition | register/unregister definitions, transfer ownership, update metadata | -| Asset | mint/burn numeric quantity, transfer numeric quantity | -| NFT | register/unregister NFTs, transfer ownership, update metadata | -| Trigger | register/unregister, mint/burn trigger repetitions, execute trigger, update trigger metadata | -| World | register/unregister peers and roles, set parameters, upgrade the executor | +| Target | Instructions | +| ---------------- | ------------------------------------------------------------------------------------------------------------ | +| Account | register/unregister accounts, receive assets, update account metadata, grant/revoke permissions and roles | +| Domain | register/unregister domains, transfer domain ownership, update domain metadata | +| Asset definition | register/unregister 
definitions, transfer ownership, update metadata | +| Asset | mint/burn numeric quantity, transfer numeric quantity | +| NFT | register/unregister NFTs, transfer ownership, update metadata | +| RWA | register lots, transfer quantity, hold/release, freeze/unfreeze, redeem, merge, update metadata and controls | +| Trigger | register/unregister, mint/burn trigger repetitions, execute trigger, update trigger metadata | +| World | register/unregister peers and roles, set parameters, upgrade the executor | ## CLI Examples @@ -80,11 +81,28 @@ export PEER_KEY="" export PEER_POP="" ``` -When targeting the public Taira testnet, use a Taira client configuration -and attach the required gas asset metadata to write transactions: +When targeting the public Taira testnet, use a Taira client configuration. +Before running fee-paying examples, save the faucet helper from +[Get Testnet XOR on Taira](/get-started/sora-nexus-dataspaces.md#_4-get-testnet-xor-on-taira) +as `taira_faucet_claim.py`, then claim testnet XOR from the faucet: ```bash -printf '{"gas_asset_id":"6TEAJqbb8oEPmLncoNiMRbLEK6tw"}\n' > taira.tx-metadata.json +export TAIRA_ACCOUNT_ID="" +export TAIRA_FEE_ASSET="6TEAJqbb8oEPmLncoNiMRbLEK6tw" + +curl -fsS https://taira.sora.org/v1/accounts/faucet/puzzle | jq . +python3 taira_faucet_claim.py "$TAIRA_ACCOUNT_ID" + +iroha --config ./taira.client.toml ledger asset get \ + --definition "$TAIRA_FEE_ASSET" \ + --account "$TAIRA_ACCOUNT_ID" +``` + +After the faucet-funded asset is visible, attach the required gas asset +metadata to write transactions: + +```bash +printf '{"gas_asset_id":"%s"}\n' "$TAIRA_FEE_ASSET" > taira.tx-metadata.json cargo run --bin iroha -- \ --config ./taira.client.toml \ @@ -112,6 +130,10 @@ proof of possession for the peer key. Check our [naming conventions](/reference/naming.md) to learn about the restrictions put on entity names. +RWA lots are created through the dedicated `RegisterRwa` instruction. 
The +current code does not expose an `UnregisterRwa` instruction; use +`RedeemRwa` to retire represented quantity. + ::: info Note that depending on how you decide to set up your @@ -288,8 +310,10 @@ cargo run --bin iroha -- --config ./defaults/client.toml \ ## Transfer -Transfers move ownership or value between accounts. Current transfer -variants cover domains, asset definitions, numeric assets, and NFTs. +Transfers move ownership or value between accounts. Generic transfer +variants cover domains, asset definitions, numeric assets, and NFTs. RWA +quantity movement uses the dedicated `TransferRwa` and `ForceTransferRwa` +instructions described in [Real-World Assets](/blockchain/rwas.md). To do this, an account have to be granted the [permission to transfer assets](/reference/permissions.md). Refer to an @@ -383,8 +407,8 @@ cargo run --bin iroha -- --config ./defaults/client.toml \ ledger domain meta remove --id docs.universal --key environment ``` -The same pattern is available for accounts, asset definitions, NFTs, and -triggers: +The same pattern is available for accounts, asset definitions, NFTs, RWAs, +and triggers: ```bash printf '{"display_name":"Alice"}\n' | diff --git a/src/blockchain/metadata.md b/src/blockchain/metadata.md index 07c4ebeb1..24bb7e6cb 100644 --- a/src/blockchain/metadata.md +++ b/src/blockchain/metadata.md @@ -9,6 +9,8 @@ The following objects can carry metadata: - accounts - assets - asset definitions +- NFTs +- RWAs - triggers - transactions @@ -16,9 +18,35 @@ Use metadata for small descriptive or indexing fields that belong in ledger state. Large payloads should be stored outside the WSV and referenced by a digest, URI, or SoraFS path. -For guidance on choosing metadata, assets, NFTs, or off-chain storage, see +For guidance on choosing metadata, assets, NFTs, RWAs, or off-chain +storage, see [Metadata and Ledger Storage Choices](/guide/configure/metadata-and-store-assets.md). 
+## Try It on Taira + +Metadata is visible through normal resource reads. This command lists Taira +asset definitions that currently have metadata: + +```bash +curl -fsS 'https://taira.sora.org/v1/assets/definitions?limit=100' \ + | jq '.items[] + | select((.metadata | length) > 0) + | {id, name, metadata}' +``` + +Use the same pattern for domains and accounts: + +```bash +curl -fsS 'https://taira.sora.org/v1/domains?limit=20' \ + | jq '.items[] | select((.metadata // {} | length) > 0)' + +curl -fsS 'https://taira.sora.org/v1/accounts?limit=20' \ + | jq '.items[] | select((.metadata // {} | length) > 0)' +``` + +Treat empty output as a valid result. It means the current page of Taira +objects does not carry metadata, not that the endpoint failed. + ## Updating Metadata Metadata is changed with Iroha Special Instructions: @@ -28,8 +56,8 @@ Metadata is changed with Iroha Special Instructions: - [`RemoveKeyValue`](/blockchain/instructions.md#setkeyvalue-removekeyvalue) removes a key -The authority submitting the transaction must have the permission required by -the active runtime validator. For the default permission surface, see +The authority submitting the transaction must have the permission required +by the active runtime validator. For the default permission surface, see [Permission Tokens](/reference/permissions.md). ## Events @@ -66,9 +94,13 @@ matters to an integration. Metadata is returned as part of the queried object. For example, use [`FindAccountById`](/reference/queries.md#accounts-and-permissions), [`FindDomainById`](/reference/queries.md#domains-and-peers), or -[`FindAssetDefinitionById`](/reference/queries.md#assets-nfts-and-rwas) and -then read the object's `metadata` field. +[`FindAssetDefinitionById`](/reference/queries.md#assets-nfts-and-rwas). 
+Use [`FindNfts`](/reference/queries.md#assets-nfts-and-rwas) or +[`FindNftsByAccountId`](/reference/queries.md#assets-nfts-and-rwas) for +NFTs, and [`FindRwas`](/reference/queries.md#assets-nfts-and-rwas) for RWA +lots. Then read the object's metadata field. NFT query responses expose the +NFT `content` map as the record metadata. Metadata keys are part of the ledger state, so keep them stable and avoid -encoding application-specific version churn into the key name when a JSON value -can carry that version explicitly. +encoding application-specific version churn into the key name when a JSON +value can carry that version explicitly. diff --git a/src/blockchain/nfts.md b/src/blockchain/nfts.md new file mode 100644 index 000000000..45fb80d01 --- /dev/null +++ b/src/blockchain/nfts.md @@ -0,0 +1,195 @@ +# NFTs + +An Iroha NFT is a unique ledger object with one owner. Use NFTs when a +record needs its own identity, metadata, lifecycle events, and ownership +transfer semantics, but does not need a numeric balance. + +Unlike a numeric [asset](/blockchain/assets.md), an NFT does not have +precision, mintability, or per-account quantities. The NFT exists as one +registered object, and ownership is tracked directly on that object. + +## Structure + +A registered `Nft` contains: + +- `id`: an `NftId` +- `content`: metadata that describes the NFT +- `owned_by`: the account that owns the NFT + +The `content` field is a `Metadata` map. Keep it compact: store descriptive +fields, stable references, hashes, URIs, or SoraFS paths there. Store large +documents, media, or high-churn application state off-chain and keep only a +verifiable reference on the NFT. 
+ +## Try It on Taira + +Check whether the public Taira testnet currently has NFT records: + +```bash +curl -fsS 'https://taira.sora.org/v1/nfts?limit=5' \ + | jq '{total, nft_ids: [.items[].id]}' +``` + +Check the live OpenAPI document for NFT routes exposed by the node: + +```bash +curl -fsS https://taira.sora.org/openapi.json \ + | jq -r '.paths | keys[] | select(startswith("/v1/nfts") or startswith("/v1/explorer/nfts"))' +``` + +An empty `items` array is a valid response on a public testnet. It means there +are no NFTs in the current page, not that NFT instructions are unavailable. + +## NFT IDs + +`NftId` uses this text form: + +```text +name$domain +name$domain.dataspace +``` + +For example, `badge$docs.universal` identifies the `badge` NFT in the +`docs.universal` domain. If the dataspace is omitted, the current parser +uses the `universal` dataspace, so `badge$docs` resolves to +`badge$docs.universal`. + +Use stable names for NFT IDs. The ID is the object identity used by +instructions, queries, permissions, event filters, and application +references. + +## Lifecycle + +NFT lifecycle operations use Iroha Special Instructions: + +- [`Register`](/blockchain/instructions.md#un-register) creates the NFT + with initial `content`. +- [`Unregister`](/blockchain/instructions.md#un-register) removes the NFT. +- [`Transfer`](/blockchain/instructions.md#transfer) changes `owned_by`. +- [`SetKeyValue` and `RemoveKeyValue`](/blockchain/instructions.md#setkeyvalue-removekeyvalue) + update NFT metadata. + +## Try It Locally + +These examples assume you have launched a local network and have the +generated client configuration from the +[CLI guide](/get-started/operate-iroha-2-via-cli.md): + +```bash +export IROHA_CONFIG=./localnet/client.toml +export NFT_DOMAIN=nft_demo.universal +export NFT_ID='badge_intro$nft_demo.universal' +``` + +Register a domain for the example. If it already exists, skip this command +or choose a different `NFT_DOMAIN`. 
+ +```bash +cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger domain register --id "$NFT_DOMAIN" +``` + +Register an NFT. Registration reads the initial content JSON from standard +input: + +```bash +printf '{"kind":"badge","level":"intro","issuer":"docs"}\n' | + cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger nft register --id "$NFT_ID" +``` + +Inspect the NFT directly and then list all NFTs with full entries: + +```bash +cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger nft get --id "$NFT_ID" + +cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger nft list all --verbose +``` + +Add a metadata key and read the NFT again: + +```bash +printf '{"color":"blue","rarity":"tutorial"}\n' | + cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger nft meta set --id "$NFT_ID" --key traits + +cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger nft get --id "$NFT_ID" +``` + +Remove the metadata key: + +```bash +cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger nft meta remove --id "$NFT_ID" --key traits +``` + +Optionally transfer the NFT. Use `ledger nft get` to read the current owner +from `owned_by`, and use `ledger account list all` to find a destination +account ID. + +```bash +cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger account list all + +export CURRENT_OWNER='' +export NEW_OWNER='' + +cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger nft transfer --id "$NFT_ID" --from "$CURRENT_OWNER" --to "$NEW_OWNER" +``` + +Clean up when you are done. If you transferred the NFT, run this command +with the current owner's account configuration or transfer the NFT back +first. + +```bash +cargo run --bin iroha -- --config "$IROHA_CONFIG" \ + ledger nft unregister --id "$NFT_ID" +``` + +## Queries and Events + +Use [`FindNfts`](/reference/queries.md#assets-nfts-and-rwas) to list NFTs +and [`FindNftsByAccountId`](/reference/queries.md#assets-nfts-and-rwas) to +list NFTs owned by an account. 
+ +NFT registration, deletion, transfer, and metadata updates emit NFT data +events. Use the `Nft` data event filter when subscribing to ledger changes +or building triggers that react to NFT lifecycle events. + +## Permissions + +The default permission surface includes NFT-specific tokens: + +- `CanRegisterNft` +- `CanUnregisterNft` +- `CanTransferNft` +- `CanModifyNftMetadata` + +Permission checks are enforced by the active runtime validator, so a +network can customize authorization by upgrading the executor. See +[Permission Tokens](/reference/permissions.md) for the current default +token list. + +## Choosing NFTs + +Use an NFT for records where uniqueness and ownership matter: + +- certificates, badges, licenses, and attestations +- membership or access records +- identity-bound or account-owned application records +- references to off-chain media, documents, or manifests + +Use a numeric asset for fungible balances, and use plain +[metadata](/blockchain/metadata.md) when the data is only a compact +attribute of an existing ledger object. + +See also: + +- [Assets](/blockchain/assets.md) +- [Metadata](/blockchain/metadata.md) +- [Instructions](/blockchain/instructions.md) +- [Queries](/blockchain/queries.md) diff --git a/src/blockchain/queries.md b/src/blockchain/queries.md index 913a22284..5a647e397 100644 --- a/src/blockchain/queries.md +++ b/src/blockchain/queries.md @@ -29,6 +29,28 @@ lexicographically on metadata keys. Filtering can be done on a variety of principles, from domain-specific (individual IP address filter masks) to sub-string methods like `begins_with` combined using logical operations. +## Try It on Taira + +Taira exposes read-only query helpers over JSON for common resources. 
Use them +to practice pagination and response handling before wiring an SDK: + +```bash +TAIRA_ROOT=https://taira.sora.org + +curl -fsS "$TAIRA_ROOT/v1/accounts?limit=3" \ + | jq '{total, ids: [.items[].id]}' + +curl -fsS "$TAIRA_ROOT/v1/domains?limit=3" \ + | jq '{total, domains: [.items[].id]}' + +curl -fsS "$TAIRA_ROOT/v1/assets/definitions?limit=3" \ + | jq '{total, assets: [.items[] | {id, name, total_quantity}]}' +``` + +For app diagnostics, keep these smoke checks separate from signed transaction +tests. A read-only query failure usually points to endpoint availability, +network reachability, or route compatibility before it points to signer setup. + ## Create a query Use typed query builders from the SDK or CLI. For example, the current data diff --git a/src/blockchain/rwas.md b/src/blockchain/rwas.md new file mode 100644 index 000000000..b9371332c --- /dev/null +++ b/src/blockchain/rwas.md @@ -0,0 +1,506 @@ +# Real-World Assets + +Real-world assets (RWAs) model off-chain assets whose ownership or control +is tracked on-chain. In Iroha, an RWA is a registered ledger lot with a +generated identifier, an owner account, a quantity, business metadata, +provenance, and optional lifecycle controls. + +RWAs are different from numeric asset balances: + +- a numeric asset is a fungible balance held by an account +- an NFT is a unique on-chain record with one owner +- an RWA is a lot that can carry business metadata, quantity, holds, + freezes, redemption state, provenance, and controller policy + +Use RWAs when the ledger needs to represent a specific off-chain lot +instead of only a fungible balance. 
+ +## RWA Lot + +An RWA lot contains: + +- `id`: the generated canonical RWA identifier, displayed as + `<hash>$<domain>` +- `owned_by`: the account that currently owns the lot +- `quantity`: the outstanding quantity represented by the lot +- `spec`: quantity specification, such as decimal scale +- `primary_reference`: the main off-chain receipt, certificate, invoice, or + registry reference +- `status`: optional business status text +- `metadata`: compact JSON fields used for business context and indexing +- `parents`: source lots used to derive this lot +- `controls`: controller accounts, controller roles, and enabled controller + operations +- `is_frozen` and `held_quantity`: lifecycle state enforced by the runtime + +Keep the on-chain payload compact. Store large legal documents, inspection +reports, and audit bundles outside the WSV, then put a digest, URI, SoraFS +path, or manifest reference in RWA metadata. + +## Identifiers + +`RegisterRwa` does not accept a caller-chosen `id`, and it does not accept +an `owner` field. The transaction authority becomes the initial `owned_by` +account, and the runtime generates the `RwaId` in the target domain. + +The textual form of an RWA ID is: + +```text +<hash>$<domain> +``` + +For example: + +```text +0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef$commodities.universal +``` + +Applications should store their business identifier in `primary_reference` +or `metadata`, then discover the generated `RwaId` from +`RwaEvent::Created`, `FindRwas`, `/v1/rwas`, or the explorer route set +after the transaction commits. + +## Lifecycle + +Common RWA workflows include: + +| Operation | Implemented behavior | +| ------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------- | +| `RegisterRwa` | Create a generated-ID lot in a domain; the transaction authority becomes `owned_by`. | +| `TransferRwa` | Move quantity to another account. 
A full transfer can change `owned_by`; a partial transfer creates a generated child lot. | +| `HoldRwa` | Reserve quantity. Requires a configured controller and `hold_enabled`. | +| `ReleaseRwa` | Remove held quantity. Requires a configured controller and `hold_enabled`. | +| `FreezeRwa` | Block ordinary owner operations. Requires a configured controller and `freeze_enabled`. | +| `UnfreezeRwa` | Re-enable ordinary owner operations. Requires a configured controller and `freeze_enabled`. | +| `RedeemRwa` | Retire quantity. Requires the owner or a controller and `redeem_enabled`. | +| `MergeRwas` | Combine quantities from parent lots with the same domain and spec into a generated child lot. | +| `ForceTransferRwa` | Move quantity through a controller flow. Requires a configured controller and `force_transfer_enabled`. | +| `SetRwaControls` | Replace the lot control policy. Requires the owner or a controller. | +| `SetKeyValue` / `RemoveKeyValue` | Update lot metadata. Requires the owner or a controller; frozen lots require a controller. | + +There is no `UnregisterRwa` instruction in the current code. Retire an +off-chain lot with `RedeemRwa` when the represented quantity is delivered, +consumed, settled, or otherwise removed from circulation. 
+ +## Metadata and Controls + +Use metadata for compact facts that help applications identify and verify +the lot: + +- asset class, issuer, custodian, or registry reference +- warehouse, vault, ISIN, invoice, or certificate identifiers +- content hashes for attestations and legal documents +- SoraFS paths or manifest references for larger evidence bundles +- maturity, jurisdiction, or compliance tags used by off-chain services + +The implemented `RwaControlPolicy` has these fields: + +```json +{ + "controller_accounts": [], + "controller_roles": [], + "freeze_enabled": true, + "hold_enabled": true, + "force_transfer_enabled": false, + "redeem_enabled": true +} +``` + +Controller accounts and roles are allowed to perform only the controller +operations enabled by the corresponding boolean flag. The current control +payload is not an allow-list transfer policy and does not contain nested +`transfers` rules. + +## Queries, Events, and APIs + +Use [`FindRwas`](/reference/queries.md#assets-nfts-and-rwas) to list +registered RWA lots. Applications that need live updates can subscribe to +[`Rwa` data events](/blockchain/filters.md#data-event-filters) for created, +owner-changed, split, merged, redeemed, frozen, unfrozen, held, released, +force-transferred, controls-changed, and metadata events. + +Torii exposes chain-state routes such as `/v1/rwas` and `/v1/rwas/query`, +plus explorer routes such as `/v1/explorer/rwas` and +`/v1/explorer/rwas/{rwa_id}` when that route family is enabled. Generated +clients should prefer the live +[`/openapi`](/reference/torii-endpoints.md#common-endpoints) document for +the exact response shape exposed by a node. 
+ +### Try It on Taira + +Check whether public Taira currently has registered RWA lots: + +```bash +curl -fsS 'https://taira.sora.org/v1/rwas?limit=5' \ + | jq '{total, rwa_ids: [.items[].id]}' +``` + +List the RWA routes exposed by the live Taira OpenAPI document: + +```bash +curl -fsS https://taira.sora.org/openapi.json \ + | jq -r '.paths | keys[] | select(startswith("/v1/rwas") or startswith("/v1/explorer/rwas"))' +``` + +Empty `items` output is expected when no public lots have been registered yet. +Registration, transfer, hold, freeze, and redemption are signed transactions. + +## Try It + +The examples below use the Python SDK surfaces from +[Shared Setup](/guide/tutorials/python.md#shared-setup). Replace the +account IDs, private keys, and generated lot IDs with values from your own +network before submitting a transaction. + +### Discover RWA API Routes + +This read-only example asks a running Torii node which app-facing RWA +routes are enabled: + +```python +from iroha_python import create_torii_client + +client = create_torii_client("https://taira.sora.org") +openapi = client.request_json("GET", "/openapi", expected_status=(200,)) + +rwa_paths = sorted( + path for path in openapi.get("paths", {}) if path.startswith("/v1/rwas") +) + +for path in rwa_paths: + print(path) +``` + +If the list is empty, the node may still support RWA instructions and +queries through other Torii APIs, but it is not exposing the optional JSON +route family. + +### Register a Warehouse Receipt + +Use a draft when one business action should become one signed transaction. +The business receipt number goes in `primary_reference`; the ledger ID is +generated after the transaction commits. 
+ +```python +from iroha_python import TransactionConfig, TransactionDraft + +config = TransactionConfig( + chain_id=CHAIN_ID, + authority=alice, + metadata={**TX_METADATA, "source": "rwa-docs"}, +) + +draft = TransactionDraft(config) +draft.register_rwa( + { + "domain": "commodities.universal", + "quantity": "100", + "spec": {"scale": 0}, + "primary_reference": "warehouse-receipt-001", + "status": "active", + "metadata": { + "asset_class": "commodity", + "commodity": "copper", + "warehouse": "DXB-01", + "inspection_report": "sorafs://reports/copper-001.json", + }, + "parents": [], + "controls": { + "controller_accounts": [alice], + "controller_roles": [], + "freeze_enabled": True, + "hold_enabled": True, + "force_transfer_enabled": False, + "redeem_enabled": True, + }, + } +) + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +After the transaction commits, list generated RWA IDs. Chain-state routes +expose the canonical IDs; use events or explorer detail routes when you +need to match an ID back to `primary_reference` or metadata: + +```python +page = client.list_rwas_typed(limit=20, offset=0) + +for lot in page.items: + print(lot.id) +``` + +Explorer-enabled nodes can also return richer projections: + +```python +page = client.list_explorer_rwas_typed(domain="commodities.universal") + +for lot in page.items: + print(lot.id, lot.primary_reference, lot.owned_by, lot.quantity) +``` + +### Transfer With a Temporary Hold + +Use the generated RWA ID returned by the chain. This example assumes +`alice` is the owner and is also configured as a controller with +`hold_enabled`. 
+ +```python +warehouse_lot_id = ( + "0123456789abcdef0123456789abcdef" + "0123456789abcdef0123456789abcdef$commodities.universal" +) + +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) + +draft.transfer_rwa(warehouse_lot_id, quantity="10", destination=bob) +draft.hold_rwa(warehouse_lot_id, quantity="5") + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +Release the hold when the off-chain process is complete: + +```python +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) +draft.release_rwa(warehouse_lot_id, quantity="5") + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +### Add Controls and Audit Metadata + +Controls and metadata are separate. Use controls for controller policy, and +metadata for facts that applications or auditors need to display: + +```python +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) + +draft.set_rwa_controls( + warehouse_lot_id, + { + "controller_accounts": [alice], + "controller_roles": [], + "freeze_enabled": True, + "hold_enabled": True, + "force_transfer_enabled": True, + "redeem_enabled": True, + }, +) +draft.set_rwa_key_value(warehouse_lot_id, "auditor", "alice") +draft.set_rwa_key_value( + warehouse_lot_id, + "proof_hash", + "sha256:2b1c7a4e...", +) + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +### Redeem or Retire Quantity + +Redeem quantity when the represented off-chain asset has been delivered, +consumed, retired, or otherwise removed from circulation. The lot must have +`redeem_enabled`, and the signer must be the owner or a controller. 
+ +```python +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) +draft.redeem_rwa(warehouse_lot_id, quantity="1") + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +### Freeze During Compliance Review + +Freeze a lot when an off-chain review must block ordinary owner operations. +The signer must be a controller and the lot must have `freeze_enabled`. + +```python +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) +draft.freeze_rwa(warehouse_lot_id) +draft.set_rwa_key_value( + warehouse_lot_id, + "review", + { + "status": "frozen", + "reason": "custodian inventory check", + "case_id": "OPS-2026-0042", + }, +) + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +Unfreeze it when the review passes: + +```python +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) +draft.unfreeze_rwa(warehouse_lot_id) +draft.set_rwa_key_value( + warehouse_lot_id, + "review", + {"status": "cleared", "case_id": "OPS-2026-0042"}, +) + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +### Invoice Receivable + +Represent an invoice as an RWA lot by storing the invoice number in +`primary_reference` and metadata. After registration, use the generated ID +for transfer and redemption. 
+ +```python +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) +draft.register_rwa( + { + "domain": "receivables.universal", + "quantity": "50000", + "spec": {"scale": 2}, + "primary_reference": "INV-2026-0007", + "status": "issued", + "metadata": { + "asset_class": "invoice", + "currency": "USD", + "debtor": "example-buyer", + "due_date": "2026-06-30", + "document_hash": "sha256:4df4c8...", + }, + "parents": [], + "controls": { + "controller_accounts": [alice], + "controller_roles": [], + "freeze_enabled": True, + "hold_enabled": False, + "force_transfer_enabled": False, + "redeem_enabled": True, + }, + } +) + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +When the receivable is financed or paid, use the generated invoice lot ID: + +```python +invoice_lot_id = ( + "fedcba9876543210fedcba9876543210" + "fedcba9876543210fedcba9876543210$receivables.universal" +) + +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) +draft.transfer_rwa(invoice_lot_id, quantity="50000", destination=bob) + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +Redeem the represented amount after off-chain settlement: + +```python +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=bob, metadata=TX_METADATA) +) +draft.redeem_rwa(invoice_lot_id, quantity="50000") + +envelope = draft.sign_with_keypair(bob_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +### Carbon Credit Retirement + +Use redemption to retire credits after they are claimed. 
The metadata +points to the off-chain certificate or registry proof: + +```python +carbon_lot_id = ( + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa$carbon.universal" +) + +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) +draft.redeem_rwa(carbon_lot_id, quantity="250") +draft.set_rwa_key_value( + carbon_lot_id, + "retirement_certificate", + "sorafs://certificates/carbon-credit-2026-001-retired.json", +) + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +### Merge Two Lots + +Merge lots when two off-chain positions are consolidated. The parents must +be in the same domain and use the same quantity spec. The runtime generates +the child lot ID. + +```python +warehouse_lot_id_2 = ( + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb$commodities.universal" +) + +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) +draft.merge_rwas( + { + "parents": [ + {"rwa": warehouse_lot_id, "quantity": "40"}, + {"rwa": warehouse_lot_id_2, "quantity": "60"}, + ], + "primary_reference": "warehouse-receipt-003", + "status": "merged", + "metadata": { + "asset_class": "commodity", + "commodity": "copper", + "warehouse": "DXB-01", + "merge_reason": "same custodian and quality grade", + }, + } +) + +envelope = draft.sign_with_keypair(alice_pair) +client.submit_transaction_envelope_and_wait(envelope) +``` + +For the full Python transaction example, see +[Real-World Assets](/guide/tutorials/python.md#real-world-assets). 
+ +## Related Docs + +- [Assets](/blockchain/assets.md) +- [Metadata](/blockchain/metadata.md) +- [Iroha Special Instructions](/blockchain/instructions.md) +- [Queries](/reference/queries.md#assets-nfts-and-rwas) +- [Torii endpoints](/reference/torii-endpoints.md#app-and-sora-route-families) diff --git a/src/blockchain/sora-nexus-services.md b/src/blockchain/sora-nexus-services.md index aeb08f42f..9ebd76461 100644 --- a/src/blockchain/sora-nexus-services.md +++ b/src/blockchain/sora-nexus-services.md @@ -1,37 +1,44 @@ # SORA Nexus Services -SORA Nexus adds app-facing service planes around Iroha 3. These services are -not separate ledgers. They are anchored by Iroha world state, Norito manifests, -governance records, and Torii route families. +SORA Nexus adds app-facing service planes around Iroha 3. These services +are not separate ledgers. They are anchored by Iroha world state, Norito +manifests, governance records, and Torii route families. Availability depends on the node build and network profile. Use -[`/openapi`](/reference/torii-endpoints.md#app-and-sora-route-families) on the -target node as the authoritative list of enabled routes. +[`/openapi`](/reference/torii-endpoints.md#app-and-sora-route-families) on +the target node as the authoritative list of enabled routes. ## Component Map -| Component | Role | Main surfaces | -| --- | --- | --- | -| Soracloud | Application deployment, hosted services, private model/runtime state, and service lifecycle control. | `/v1/soracloud/*`, `/api/*`, `iroha app soracloud ...` | -| Inrou | Soracloud hosted HTTP runtime for service revisions that need a live HTTP plane. | Soracloud runtime config, host capability adverts, replica runtime state | -| SoraNet | Privacy and transport overlay for circuits, relay traffic, VPN, Connect sessions, and streaming routes. 
| `/v1/connect/*`, `/v1/vpn/*`, SoraNet route metadata | -| SoraFS | Content-addressed storage fabric for manifests, CAR payloads, pinned content, gateway fetches, and proof-of-retrievability flows. | `/v1/sorafs/*`, `/sorafs/*`, `FindSorafsProviderOwner` | -| SoraDNS | Deterministic naming and resolver-attestation layer for SORA-hosted services and content. | `/v1/soradns/*`, `/soradns/*`, resolver directory events | +| Component | Role | Main surfaces | +| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| Soracloud | Application deployment, hosted services, private model/runtime state, and service lifecycle control. | `/v1/soracloud/*`, `/api/*`, `iroha app soracloud ...` | +| Inrou | Soracloud hosted HTTP runtime for service revisions that need a live HTTP plane. | Soracloud runtime config, host capability adverts, replica runtime state | +| SoraNet | Privacy and transport overlay for circuits, relay traffic, VPN, Connect sessions, and streaming routes. | `/v1/connect/*`, `/v1/vpn/*`, SoraNet route metadata | +| Data Availability (DA) | Availability evidence, commitment, and pin-intent layer for payloads that are referenced by Nexus lanes, SoraFS manifests, and proof flows. | `/v1/da/*`, `FindDaPinIntent*`, `[sumeragi.da]` | +| SoraFS | Content-addressed storage fabric for manifests, CAR payloads, pinned content, gateway fetches, and proof-of-retrievability flows. | `/v1/sorafs/*`, `/sorafs/*`, `FindSorafsProviderOwner` | +| SoraDNS | Deterministic naming and resolver-attestation layer for SORA-hosted services and content. | `/v1/soradns/*`, `/soradns/*`, resolver directory events | +| Aitai | App-level fiat and asset settlement corridor backed by native escrow records, not by a separate ledger. 
| `OpenAssetEscrow`, `FindAssetEscrow*`, `EscrowEventFilter`, Kotodama `escrow_*` builtins | ```mermaid flowchart LR app["Application or user"] --> dns["SoraDNS name resolution"] + app --> aitai["Aitai escrow app"] dns --> route["Soracloud route"] dns --> content["SoraFS content gateway"] route --> ivm["Deterministic IVM service"] route --> inrou["Inrou hosted HTTP service"] - content --> storage["SoraFS providers"] + aitai --> escrow["Native escrow records"] + content --> da["DA pin intents and commitments"] + da --> storage["SoraFS providers"] app --> net["SoraNet private route"] net --> content net --> route ledger["Iroha world state and governance"] --> dns ledger --> route ledger --> content + ledger --> da + escrow --> ledger ``` ## Common Flows @@ -41,21 +48,22 @@ flowchart LR A typical mixed-plane app uses all of the pieces together: 1. Static frontend assets are packaged and pinned through SoraFS. -2. The public host, for example `<name>.sora`, is registered through SoraDNS. +2. The public host, for example `<name>.sora`, is registered through + SoraDNS. 3. Soracloud routes `/api/v1/search` or `/api/v1/stream` to an Inrou HTTP service. 4. Soracloud routes `/api/auth` and `/api/v1/user` to deterministic IVM handlers. -5. Clients that need privacy can reach the same content or API route through - a SoraNet circuit. +5. Clients that need privacy can reach the same content or API route + through a SoraNet circuit. 
-| Path | Backing plane | Why | -| --- | --- | --- | -| `/` | SoraFS static content | Reproducible content root and gateway caching | -| `/assets/*` | SoraFS static content | Content-addressed assets and manifest proofs | -| `/api/auth*` | Soracloud IVM | Replay-safe auth and wallet challenge state | -| `/api/v1/user*` | Soracloud IVM | Governance-sensitive state mutations | -| `/api/v1/search*` | Soracloud Inrou | Live HTTP service, cache, SSE, or collector state | +| Path | Backing plane | Why | +| ----------------- | --------------------- | ------------------------------------------------- | +| `/` | SoraFS static content | Reproducible content root and gateway caching | +| `/assets/*` | SoraFS static content | Content-addressed assets and manifest proofs | +| `/api/auth*` | Soracloud IVM | Replay-safe auth and wallet challenge state | +| `/api/v1/user*` | Soracloud IVM | Governance-sensitive state mutations | +| `/api/v1/search*` | Soracloud Inrou | Live HTTP service, cache, SSE, or collector state | ### Content Publication @@ -65,7 +73,9 @@ SoraFS publication produces durable artifacts before a name points at them: 2. Pack it into a CAR archive and chunk plan. 3. Build a Norito manifest with pin policy and governance data. 4. Submit the manifest to Torii. -5. Bind the manifest to a SoraDNS name or Soracloud static frontend route. +5. Record a DA pin intent or availability commitment when the target + profile requires explicit evidence. +6. Bind the manifest to a SoraDNS name or Soracloud static frontend route. ### Private Fetch or Streaming Route @@ -74,30 +84,106 @@ SoraNet can sit in front of SoraFS or Soracloud: 1. The client resolves the name or manifest. 2. A guard directory or route manifest chooses entry and exit relays. 3. Traffic is padded and sent through the SoraNet circuit. -4. The exit relay reaches the SoraFS gateway, Torii stream, or Soracloud route. +4. The exit relay reaches the SoraFS gateway, Torii stream, or Soracloud + route. 
+ +## Aitai + +Aitai is the SORA app corridor for marketplace-style settlement where a +buyer and seller coordinate an off-chain payment while Iroha controls the +on-chain asset custody. It should use the native escrow instruction family +instead of a contract-owned escrow account for new numeric-asset custody +flows. + +Native escrow keeps custody in the ledger: + +1. The seller opens an offer with `OpenAssetEscrow`, selecting an + `EscrowId`, asset definition, amount, and optional evidence hashes. +2. Iroha moves the seller's numeric asset into a deterministic protocol + custody account and records an `AssetEscrowRecord`. +3. The buyer accepts with `AcceptAssetEscrow` and marks the off-chain + payment as sent with `MarkEscrowPaymentSent`. +4. The seller releases the funds with `ReleaseAssetEscrow`, cancels before + payment is marked with `CancelAssetEscrow`, or a party opens a dispute + with `OpenEscrowDispute`. +5. A resolver with `CanResolveEscrowDispute` can close a disputed escrow + with `ResolveEscrowDispute`, splitting the locked amount between buyer + and seller. + +While an escrow is active, generic asset debits from the custody account +are rejected. Release, cancellation, and dispute resolution are the +intended custody exit paths. Evidence fields store hashes, not invoice +files, chat logs, or other off-chain payloads; publish larger evidence +bundles through SoraFS or another audited storage path and attach the +digest to the escrow. + +| Aitai surface | Use it for | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `OpenAssetEscrow`, `AcceptAssetEscrow`, `MarkEscrowPaymentSent`, `ReleaseAssetEscrow`, `CancelAssetEscrow` | Transparent numeric asset offers, including XOR-denominated settlement flows. 
| +| `OpenAnonymousAssetEscrow`, `AcceptAnonymousAssetEscrow`, `MarkAnonymousEscrowPaymentSent`, `ReleaseAnonymousAssetEscrow`, `CancelAnonymousAssetEscrow` | Shielded offers where the funding and closing movements are carried by proof attachments. | +| `OpenEscrowDispute`, `ResolveEscrowDispute`, `OpenAnonymousEscrowDispute`, `ResolveAnonymousEscrowDispute` | Dispute entry and court-style resolution. | +| `FindAssetEscrowById`, `FindAssetEscrowsBySeller`, `FindAssetEscrowsByBuyer`, `FindAssetEscrowsByStatus` | App status pages, reconciliation jobs, and support tooling. | +| `EscrowEventFilter` | Live lifecycle subscriptions by escrow id, seller, buyer, status, or event kind. | +| Kotodama `escrow_open_offer`, `escrow_accept`, `escrow_mark_payment_sent`, `escrow_release`, `escrow_cancel`, `escrow_open_dispute`, `escrow_resolve_dispute` | Contract wrapper calls that still need IVM/Kotodama compatibility. | + +An SDK-backed transparent offer follows this shape: + +```rust +use iroha::data_model::{ + isi::escrow::{ + AcceptAssetEscrow, MarkEscrowPaymentSent, OpenAssetEscrow, + ReleaseAssetEscrow, + }, + prelude::*, +}; +use iroha_crypto::Hash; + +let escrow_id = EscrowId::new(Hash::new("aitai-offer-001")); +let asset_definition_id: AssetDefinitionId = + "62Fk4FPcMuLvW5QjDGNF2a4jAmjM".parse()?; + +seller_client.submit_blocking(OpenAssetEscrow::with_evidence_hashes( + escrow_id, + asset_definition_id, + Numeric::from(40_u64), + vec![Hash::new("fiat-invoice")], +))?; + +buyer_client.submit_blocking(AcceptAssetEscrow::new(escrow_id))?; +buyer_client.submit_blocking(MarkEscrowPaymentSent::new(escrow_id))?; +seller_client.submit_blocking(ReleaseAssetEscrow::new(escrow_id))?; + +let record = seller_client.query_single(FindAssetEscrowById::new(escrow_id))?; +assert_eq!(record.status, AssetEscrowStatus::Released); +``` + +For public Taira or Minamoto usage, treat the off-chain payment rail and +any support or court workflow as application policy. 
Iroha records the +custody state, lifecycle events, evidence hashes, and final asset movement; +it does not verify fiat settlement by itself. ## Check a Target Node -Before using examples from this page, confirm that the route family exists on -the node you are targeting: +Before using examples from this page, confirm that the route family exists +on the node you are targeting: ```bash export TORII_URL=https://taira.sora.org curl -fsS "$TORII_URL/openapi.json" \ - | jq '.paths | keys[] | select(test("^/v1/(soracloud|sorafs|soradns|connect|vpn)/"))' + | jq '.paths | keys[] | select(test("^/v1/(soracloud|sorafs|soradns|connect|vpn|da)/"))' curl -fsS "$TORII_URL/status" | jq . ``` -If `/openapi.json` is not exposed by the profile, try `/openapi`. Exact route -availability depends on build features and network configuration. +If `/openapi.json` is not exposed by the profile, try `/openapi`. Exact +route availability depends on build features and network configuration. ### Taira Read-Only Smoke Checks -The public Taira endpoint is useful for read-side checks, but do not use it for -mutating examples unless you are operating an authorized account and intend to -change live state. +The public Taira endpoint is useful for read-side checks, but do not use it +for mutating examples unless you are operating an authorized account and +intend to change live state. ```bash export TORII_URL=https://taira.sora.org @@ -117,34 +203,35 @@ curl -fsS -H 'Accept: application/json' "$TORII_URL/v1/soracloud/status" \ | jq '.control_plane | {service_count, services: [.services[] | {service_name, current_version}]}' ``` -Taira may expose compatibility or control-plane routes that are not listed in -the OpenAPI path map. Treat `/openapi` as the primary generated API contract, -then confirm any compatibility route directly before documenting it as live. +Taira may expose compatibility or control-plane routes that are not listed +in the OpenAPI path map. 
Treat `/openapi` as the primary generated API +contract, then confirm any compatibility route directly before documenting +it as live. ## Soracloud -Soracloud is the SORA application control plane. It tracks deployment bundles, -service revisions, routing, rollout state, authoritative config entries, -encrypted service secrets, model registry records, private inference sessions, -and runtime receipts. +Soracloud is the SORA application control plane. It tracks deployment +bundles, service revisions, routing, rollout state, authoritative config +entries, encrypted service secrets, model registry records, private +inference sessions, and runtime receipts. Soracloud uses two execution planes: -| Execution plane | Runtime | Use it for | -| --- | --- | --- | -| `DeterministicService` | `Ivm` | Auth, vault state, certified reads, ordered mailbox handlers, governance-sensitive mutations | -| `HttpService` | `Inrou` | Live HTTP APIs, collector-heavy work, cache-backed services, SSE, browser-assisted flows | +| Execution plane | Runtime | Use it for | +| ---------------------- | ------- | -------------------------------------------------------------------------------------------- | +| `DeterministicService` | `Ivm` | Auth, vault state, certified reads, ordered mailbox handlers, governance-sensitive mutations | +| `HttpService` | `Inrou` | Live HTTP APIs, collector-heavy work, cache-backed services, SSE, browser-assisted flows | -The control plane is authoritative. Deploy, upgrade, rollback, config, secret, -model, and status commands submit through Torii and read committed world state; -they do not rely on a separate CLI-local mirror. Public routing is -longest-prefix based, so one registered host can split traffic between hosted -HTTP routes and deterministic API routes. +The control plane is authoritative. 
Deploy, upgrade, rollback, config, +secret, model, and status commands submit through Torii and read committed +world state; they do not rely on a separate CLI-local mirror. Public +routing is longest-prefix based, so one registered host can split traffic +between hosted HTTP routes and deterministic API routes. ### Scaffold a Split App -The split-app template creates a static frontend plus one hosted live API and -one deterministic vault/API service: +The split-app template creates a static frontend plus one hosted live API +and one deterministic vault/API service: ```bash iroha app soracloud app init \ @@ -161,9 +248,9 @@ iroha app soracloud app doctor \ --manifest ./apps/solswap-indexer/app_manifest.json ``` -`local-plan` prints the route split, child service manifests, workspace script -paths, and the expected frontend publication mode. `doctor` validates the local -release contract before you involve Torii. +`local-plan` prints the route split, child service manifests, workspace +script paths, and the expected frontend publication mode. `doctor` +validates the local release contract before you involve Torii. ### Deploy and Inspect App State @@ -194,9 +281,9 @@ iroha app soracloud rollback \ ### Config and Secret Material -Soracloud config and secret entries are part of authoritative deployment state. -Deploy, upgrade, and rollback fail closed when required config or secret -bindings are missing or inconsistent with the active manifests. +Soracloud config and secret entries are part of authoritative deployment +state. Deploy, upgrade, and rollback fail closed when required config or +secret bindings are missing or inconsistent with the active manifests. ```bash iroha app soracloud config-set \ @@ -224,17 +311,19 @@ iroha app soracloud secret-set --help Inrou is the hosted HTTP runtime used by Soracloud. 
An Iroha node with the embedded Soracloud runtime projects admitted Soracloud state into a local materialization plan, starts assigned hosted-service replicas as loopback -services, and reports replica runtime state back into the authoritative model. +services, and reports replica runtime state back into the authoritative +model. -Use Inrou for workloads that need a live HTTP surface, such as collector-heavy -APIs, SSE streams, cache-backed handlers, or browser-assisted services. +Use Inrou for workloads that need a live HTTP surface, such as +collector-heavy APIs, SSE streams, cache-backed handlers, or +browser-assisted services. ### Runtime Requirements - Container manifest runtime must be `Inrou`. - Service manifest execution plane must be `HttpService`. -- `HttpService + Inrou` requires exactly one - `PersistentRootLeaseVolume` mounted at `/`. +- `HttpService + Inrou` requires exactly one `PersistentRootLeaseVolume` + mounted at `/`. - Replicated Inrou services also need shared service or confidential lease storage when they retain mutable shared state. - Production hosting nodes should advertise real Inrou capacity instead of @@ -242,8 +331,8 @@ APIs, SSE streams, cache-backed handlers, or browser-assisted services. ### Manifest Fragment -The example below shows the shape of the two manifests. It is a fragment, not -a complete deployment bundle. +The example below shows the shape of the two manifests. It is a fragment, +not a complete deployment bundle. ```jsonc // container_manifest.json @@ -254,7 +343,7 @@ a complete deployment bundle. "entrypoint": "/app/bin/launch-indexer.sh", "args": [], "env": { - "RUST_LOG": "info" + "RUST_LOG": "info", }, "inrou": { "schema_version": 1, @@ -263,20 +352,20 @@ a complete deployment bundle. 
"x86_64": { "kernel_image_path": "/inrou/x86_64/vmlinux", "rootfs_image_path": "/inrou/x86_64/rootfs.ext4", - "initrd_image_path": null + "initrd_image_path": null, }, "aarch64": { "kernel_image_path": "/inrou/aarch64/vmlinux", "rootfs_image_path": "/inrou/aarch64/rootfs.ext4", - "initrd_image_path": null - } - } + "initrd_image_path": null, + }, + }, }, "lifecycle": { "start_grace_secs": 60, "stop_grace_secs": 30, - "healthcheck_path": "/api/indexer/v1/health" - } + "healthcheck_path": "/api/indexer/v1/health", + }, } ``` @@ -293,29 +382,32 @@ a complete deployment bundle. "path_prefix": "/api/v1/search", "service_port": 8080, "visibility": { "visibility": "Public", "value": null }, - "tls_mode": { "tls": "Required", "value": null } + "tls_mode": { "tls": "Required", "value": null }, }, "lease_volumes": [ { "volume_name": "root_disk", - "kind": { "lease_volume": "PersistentRootLeaseVolume", "value": null }, + "kind": { + "lease_volume": "PersistentRootLeaseVolume", + "value": null, + }, "storage_class": { "storage_class": "Warm", "value": null }, "mount_path": "/", - "max_total_bytes": 8589934592 + "max_total_bytes": 8589934592, }, { "volume_name": "index_state", "kind": { "lease_volume": "ServiceLeaseVolume", "value": null }, "storage_class": { "storage_class": "Warm", "value": null }, "mount_path": "/var/lib/solswap-indexer", - "max_total_bytes": 1073741824 - } - ] + "max_total_bytes": 1073741824, + }, + ], } ``` -At runtime, each mounted lease volume is exposed through environment variables -derived from the volume name: +At runtime, each mounted lease volume is exposed through environment +variables derived from the volume name: ```text SORACLOUD_LEASE_VOLUME_ROOT_DISK_DIR @@ -326,16 +418,16 @@ SORACLOUD_LEASE_VOLUME_INDEX_STATE_MOUNT_PATH ## SoraNet -SoraNet is the privacy and transport overlay. It provides relay-based routes -for traffic that should not connect directly to the target gateway or service. 
-The transport design uses entry, middle, and exit relay roles, QUIC transport, -a Noise-based hybrid handshake, capability negotiation, relay directory -metadata, and fixed-size padded cells. +SoraNet is the privacy and transport overlay. It provides relay-based +routes for traffic that should not connect directly to the target gateway +or service. The transport design uses entry, middle, and exit relay roles, +QUIC transport, a Noise-based hybrid handshake, capability negotiation, +relay directory metadata, and fixed-size padded cells. -In Nexus deployments, SoraNet can carry content fetches, gateway traffic, VPN -or Connect sessions, and Norito streaming routes. Directory entries can mark -relays that support `norito-stream`, which lets clients prefer routes suitable -for Torii RPC or streaming traffic. +In Nexus deployments, SoraNet can carry content fetches, gateway traffic, +VPN or Connect sessions, and Norito streaming routes. Directory entries can +mark relays that support `norito-stream`, which lets clients prefer routes +suitable for Torii RPC or streaming traffic. ### Streaming Configuration @@ -356,14 +448,14 @@ provision_window_segments = 4 provision_queue_capacity = 256 ``` -Use `access_kind = "read-only"` for content routes that do not require viewer -authentication. Use `authenticated` when the exit relay must enforce tickets or -viewer identity before bridging to Torii or a hosted service. +Use `access_kind = "read-only"` for content routes that do not require +viewer authentication. Use `authenticated` when the exit relay must enforce +tickets or viewer identity before bridging to Torii or a hosted service. 
### SoraNet-Aware SoraFS Fetch -The SoraFS fetch CLI can emit a local proxy manifest and spool SoraNet route -metadata for browser extensions or SDK adapters: +The SoraFS fetch CLI can emit a local proxy manifest and spool SoraNet +route metadata for browser extensions or SDK adapters: ```bash sorafs_cli fetch \ @@ -381,20 +473,211 @@ sorafs_cli fetch \ --retry-budget=4 ``` -The summary records provider reports, chunk receipts, local proxy metadata, and -the effective route settings used for the fetch. +The summary records provider reports, chunk receipts, local proxy metadata, +and the effective route settings used for the fetch. + +## Data Availability (DA) + +DA is the availability-evidence layer for payloads that are too large, too +privacy-sensitive, or too service-specific to place directly in world +state. It records deterministic commitments and retrieval obligations so +validators, gateways, and clients can agree on which bytes were promised, +which policy applies, and which evidence has been observed. + +DA does not replace Kura or SoraFS: + +- Kura stores the finalized block stream and consensus recovery data. +- SoraFS stores and serves content-addressed bytes, CAR payloads, and + manifests. +- DA records commitments, proof policies, proof openings, and pin intents + that let those bytes be scheduled, audited, and linked back to ledger + state. + +Use DA when an application or Nexus lane needs a ledger-visible promise +that off-chain data remains retrievable. Common examples include lane +payload commitments for settlement flows, SoraFS pin intents for published +content, proof bundles that must be retained for later verification, and +application artifacts whose public state should be a digest rather than the +full payload. 
+ +### Lifecycle + +| Stage | What is recorded | +| ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | +| Intent | A ticket, manifest reference, alias, lane/epoch/sequence reference, retention policy, or replication target. | +| Commitment | Digest material that binds the manifest, lane payload, proof bundle, or content root to the ledger-visible record. | +| Evidence | Availability votes, proof openings, provider attestations, or other profile-specific evidence accepted by the target network. | +| Query | Pin-intent lookups through `FindDaPinIntentByTicket`, `FindDaPinIntentByManifest`, `FindDaPinIntentByAlias`, or `FindDaPinIntentByLaneEpochSequence`. | + +A typical DA-backed publication flow is: + +1. Build or receive the payload outside the WSV, for example a SoraFS CAR + file or Nexus lane payload. +2. Hash and describe the payload in a Norito manifest or route-specific + commitment record. +3. Submit the manifest, pin intent, or commitment through `/v1/da/*` when + that route family is enabled, or through the network's signed + transaction path. +4. Let validators or availability providers collect the evidence required + by the active proof policy. +5. Query the resulting pin intent or commitment before promoting an alias, + settlement proof, or gateway route that depends on the payload. + +### Algorithmic Model + +DA turns a payload into a signed, replay-protected, block-indexed commitment. +The important algorithms are deterministic so validators and gateways can +recompute the same digests from the same bytes. + +1. **Canonicalize the submitted payload.** Torii accepts an ingest request with + `(lane_id, epoch, sequence)`, payload bytes, compression metadata, chunk + size, erasure profile, retention policy, and submitter signature. 
The node + decompresses gzip, deflate, or Zstandard payloads when requested, then + verifies that the canonical byte length equals `total_size`. +2. **Validate lane and chunk parameters.** The lane must exist in the Nexus + lane catalog. `chunk_size` must be a non-zero power of two, at least two + bytes, and no larger than the configured maximum. The erasure profile must + include data shards and at least two parity shards. The lane catalog selects + the proof scheme, either `merkle_sha256` or `kzg_bls12_381`. +3. **Apply network policy.** The node enforces the configured replication and + retention baseline for the blob class. Public metadata must stay plaintext; + governance-only metadata is encrypted with the node's configured governance + metadata key before it is written into the manifest. +4. **Chunk and commit.** The canonical payload is chunked with a fixed-size + profile derived from `chunk_size`. Torii computes the payload digest, the + proof-of-retrievability tree root, and per-chunk commitments. Data chunks + carry BLAKE3 commitments over their bytes. +5. **Add erasure commitments.** Chunks are grouped into stripes of + `data_shards`. Missing cells in the final stripe are zero padded for parity + calculation. RS(16) parity creates row/global parity shards; optional + `row_parity_stripes` add column-style stripe parity across the matrix. + Parity shard commitments are BLAKE3 digests of little-endian `u16` symbols. +6. **Build the manifest.** `DaManifestV1` records the lane, epoch, blob class, + codec, payload digest, chunk root, chunk size, erasure profile, retention + policy, rent quote, chunk commitments, optional IPA commitment, metadata, + and issue time. The storage ticket is deterministic: the node first hashes a + manifest template with an empty ticket, then writes that fingerprint back as + the final `storage_ticket`. +7. **Reject replay conflicts.** The replay key is + `(lane_id, epoch, sequence, manifest_fingerprint)`. 
A duplicate with the + same fingerprint is idempotent. A stale sequence or the same sequence with a + different fingerprint is rejected. +8. **Emit signed artifacts.** Torii computes a PDP commitment, signs a + `DaIngestReceipt`, builds a `DaCommitmentRecord`, and writes spool artifacts + for the manifest, PDP commitment, commitment record, commitment schedule, + pin intent, receipt file, and receipt log. The receipt cursor advances + monotonically per `(lane_id, epoch)`. + +Commitment records are what blocks carry. A record binds: + +- lane, epoch, and sequence +- caller blob ID and canonical manifest hash +- lane proof scheme +- chunk root +- optional KZG commitment for KZG lanes +- PDP/proof digest +- retention class and storage ticket +- Torii DA acknowledgement signature + +Before a block embeds DA records, the block assembly path validates the bundle: + +- `(lane_id, epoch, sequence)` must be unique inside the bundle. +- Manifest hashes must be non-zero and unique inside the bundle. +- The commitment proof scheme must match the configured lane policy. +- Merkle lanes reject KZG commitments; KZG lanes require a non-zero KZG + commitment. +- Pin intents are canonicalized, sorted, and filtered by lane, manifest hash, + storage ticket, owner account, and alias-collision rules. + +The block header stores hashes for DA proof policies, commitments, and pin +intents. For membership proofs, the commitment bundle also exposes a Merkle +root whose leaves are hashes of canonical Norito-encoded +`DaCommitmentRecord` values. Parent nodes hash the concatenation of left and +right children; an odd leaf is promoted unchanged to the next layer. + +### Proof Verification + +`/v1/da/commitments/prove` can produce a proof for one commitment in a block. +The proof contains the commitment, block height, index in the bundle, bundle +hash, bundle length, Merkle root, and sibling path. Verification checks: + +1. The proof bundle hash matches the block header's DA commitment hash. +2. 
The proof block height matches the referenced block header. +3. The index is in bounds and the commitment equals the bundle entry at that + index. +4. The lane proof policy accepts the commitment. +5. Folding the sibling path from the commitment leaf reconstructs the supplied + root. +6. The reconstructed root equals the bundle root. + +This proves that a specific availability commitment was included in a specific +block payload; it does not prove that every replica is currently online. Live +retrievability is checked separately through SoraFS provider fetches, PDP/PoTR +checks, or profile-specific availability evidence. + +### Consensus Interaction + +DA is coupled to Sumeragi through reliable broadcast (RBC), but it is not a +second finality protocol. RBC disseminates and recovers proposal payloads: +the proposer announces a session for `(height, view, payload_hash)`, peers +exchange chunks, and `READY`/`DELIVER` signals track whether enough validators +observed the same payload. + +With DA enabled, a peer considers the pending block payload available when +either: + +- the local pending block bytes hash to the expected payload hash, or +- RBC has recovered a payload matching the block hash, height, view, and + payload hash. + +If neither condition holds, the peer records `missing_local_data`, keeps trying +to recover the payload through RBC or block sync, and reports the DA gate in +status and telemetry. In the current implementation these DA signals are +advisory for finality: a block still finalizes from the commit certificate plus +the matching local payload, not from a separate DA quorum certificate. + +DA timing widens recovery windows. The effective DA quorum timeout is derived +from the configured block and commit timings, then multiplied by +`sumeragi.advanced.da.quorum_timeout_multiplier`. The availability timeout is +`max(quorum_timeout, availability_timeout_floor_ms) * availability_timeout_multiplier`. 
+Before that availability timeout expires, the node favors payload recovery and +avoids premature rescheduling; after it expires, normal recovery and +view-change paths can proceed. + +### Operator Notes + +Consensus profiles that enable DA add RBC-backed payload dissemination, +manifest guards, DA bundle validation, and recovery telemetry. The peer +template exposes `[sumeragi.da]` limits for commitments and proof openings per +block, plus `[sumeragi.advanced.da]` timeout multipliers for quorum and +availability behavior. Keep these settings consistent across validators in one +network profile. + +For route discovery, start with the node's OpenAPI document: + +```bash +curl -fsS "$TORII_URL/openapi.json" \ + | jq '.paths | keys[] | select(startswith("/v1/da/"))' +``` + +Use the +[query reference](/reference/queries.md#nexus-data-availability-and-packages) +for the current DA query names, and the +[peer configuration template](/reference/peer-config/) for the local +`[sumeragi.da]` knobs exposed by your build. ## SoraFS -SoraFS is the decentralized content-addressed storage fabric. It packages bytes -into deterministic chunks, CAR archives, and Norito manifests that bind content -roots, chunking profiles, pin policies, and governance attestations. Storage -providers advertise capacity and content availability, while gateways verify -manifests and chunk commitments before serving content. +SoraFS is the decentralized content-addressed storage fabric. It packages +bytes into deterministic chunks, CAR archives, and Norito manifests that +bind content roots, chunking profiles, pin policies, and governance +attestations. Storage providers advertise capacity and content +availability, while gateways verify manifests and chunk commitments before +serving content. -Typical SoraFS uses include static application assets, documentation builds, -zone bundles, model or artifact references, and governance evidence bundles. 
-The Iroha data model exposes SoraFS gateway events and a +Typical SoraFS uses include static application assets, documentation +builds, zone bundles, model or artifact references, and governance evidence +bundles. The Iroha data model exposes SoraFS gateway events and a [`FindSorafsProviderOwner`](/reference/queries.md#nexus-data-availability-and-packages) query for provider ownership resolution. @@ -436,9 +719,9 @@ cargo run -p sorafs_car --features cli --bin sorafs_cli -- \ --response-out artifacts/site.manifest.submit.body ``` -If `/v1/sorafs/pin/register` is not routed on the target node, the CLI can fall -back to a signed `/transaction` submission and wait for a terminal pipeline -status. +If `/v1/sorafs/pin/register` is not routed on the target node, the CLI can +fall back to a signed `/transaction` submission and wait for a terminal +pipeline status. ### Verify and Fetch @@ -481,28 +764,30 @@ sorafs_cli por trigger \ ## SoraDNS SoraDNS is the deterministic naming layer for SORA services and content. It -normalizes names, anchors resolver directory updates in Iroha, and distributes -signed zone or resolver bundles through SoraFS. Resolvers and gateways verify -resolver attestation documents before trusting discovery metadata. +normalizes names, anchors resolver directory updates in Iroha, and +distributes signed zone or resolver bundles through SoraFS. Resolvers and +gateways verify resolver attestation documents before trusting discovery +metadata. -For browser access, SoraDNS derives gateway hosts from a registered FQDN. The -registered vanity host remains the canonical application origin, while gateway -profiles can expose compatibility hosts for clients that cannot resolve SoraDNS -names directly yet. +For browser access, SoraDNS derives gateway hosts from a registered FQDN. 
The registered vanity host remains the canonical application origin, while
+gateway profiles can expose compatibility hosts for clients that cannot
+resolve SoraDNS names directly yet.
 
 ### Host Forms
 
-| Form | Example | Purpose |
-| --- | --- | --- |
-| Vanity origin | `https://<fqdn>/` | Canonical app URL recorded in manifests and release notes |
-| Taira browser gateway | `https://<alias>.mon.taira.sora.net/` | Public browser access when the alias is active and native SoraDNS resolution is unavailable |
-| Torii fallback path | `https://taira.sora.org/soradns/<alias>/` | Transitional compatibility gateway when the alias is active |
-| Canonical hash gateway | `<hash>.gw.sora.id` | Deterministic gateway identity and GAR verification |
+| Form                   | Example                                   | Purpose                                                                                     |
+| ---------------------- | ----------------------------------------- | ------------------------------------------------------------------------------------------- |
+| Vanity origin          | `https://<fqdn>/`                         | Canonical app URL recorded in manifests and release notes                                   |
+| Taira browser gateway  | `https://<alias>.mon.taira.sora.net/`     | Public browser access when the alias is active and native SoraDNS resolution is unavailable |
+| Torii fallback path    | `https://taira.sora.org/soradns/<alias>/` | Transitional compatibility gateway when the alias is active                                 |
+| Canonical hash gateway | `<hash>.gw.sora.id`                       | Deterministic gateway identity and GAR verification                                         |
 
-The `/soradns/<alias>/...` fallback is not the preferred public URL. Tooling,
-app manifests, and frontend configuration should prefer the vanity host itself.
-If an alias is not active on Taira, the browser gateway or fallback path can
-return `404` or fail TLS before application routing starts.
+The `/soradns/<alias>/...` fallback is not the preferred public URL.
+Tooling, app manifests, and frontend configuration should prefer the vanity
+host itself. If an alias is not active on Taira, the browser gateway or
+fallback path can return `404` or fail TLS before application routing
+starts.
### Derive Gateway Hosts @@ -510,27 +795,27 @@ return `404` or fail TLS before application routing starts. import { deriveSoradnsGatewayHosts, hostPatternsCoverDerivedHosts, -} from "@iroha/iroha-js"; +} from '@iroha/iroha-js' -const derived = deriveSoradnsGatewayHosts("docs.sora"); -console.log(derived.canonicalHost); -console.log(derived.prettyHost); +const derived = deriveSoradnsGatewayHosts('docs.sora') +console.log(derived.canonicalHost) +console.log(derived.prettyHost) -const taira = deriveSoradnsGatewayHosts("solswap-indexer.sora", { - prettySuffix: "mon.taira.sora.net", -}); -console.log(taira.prettyHost); +const taira = deriveSoradnsGatewayHosts('solswap-indexer.sora', { + prettySuffix: 'mon.taira.sora.net', +}) +console.log(taira.prettyHost) const patterns = [ derived.canonicalHost, derived.canonicalWildcard, derived.prettyHost, -]; -console.log(hostPatternsCoverDerivedHosts(patterns, derived)); +] +console.log(hostPatternsCoverDerivedHosts(patterns, derived)) ``` -GAR payloads should cover the canonical hash host, the canonical wildcard, and -the selected pretty host. +GAR payloads should cover the canonical hash host, the canonical wildcard, +and the selected pretty host. ### Fetch a Resolver Directory Snapshot @@ -546,20 +831,22 @@ soradns_resolver rad verify \ --rad ./state/soradns-directory/rad/resolver-a.norito ``` -Gateways should reject resolvers whose resolver attestation document is missing, -expired, unsigned, or not anchored in the latest directory Merkle root. -On a network where no resolver directory has been published yet, +Gateways should reject resolvers whose resolver attestation document is +missing, expired, unsigned, or not anchored in the latest directory Merkle +root. On a network where no resolver directory has been published yet, `/v1/soradns/directory/latest` can return `404` even though the route is enabled. ### Public DNS Delegation -SoraDNS host derivation does not replace regular internet DNS delegation. 
If a -public DNS name should point at a SoraDNS gateway: +SoraDNS host derivation does not replace regular internet DNS delegation. +If a public DNS name should point at a SoraDNS gateway: - for subdomains, publish a CNAME to the selected pretty host -- for apex names, use ALIAS/ANAME or A/AAAA records to the gateway anycast IPs -- keep the canonical hash host under the SoraDNS gateway domain for GAR checks +- for apex names, use ALIAS/ANAME or A/AAAA records to the gateway anycast + IPs +- keep the canonical hash host under the SoraDNS gateway domain for GAR + checks ## FHE and UAID @@ -567,60 +854,62 @@ Iroha exposes two FHE-related surfaces for Nexus services: - `iroha_crypto::fhe_bfv` implements deterministic BFV support for scalar ciphertext evaluation. Identifier resolution uses - `BfvIdentifierPublicParameters` and `BfvIdentifierCiphertext`, where slot 0 - stores the input byte length and later slots store one encrypted byte each. + `BfvIdentifierPublicParameters` and `BfvIdentifierCiphertext`, where slot + 0 stores the input byte length and later slots store one encrypted byte + each. - Soracloud state and job schemas model FHE ciphertext workloads with governance-managed parameter sets, execution policies, ciphertext commitments, query envelopes, and disclosure requests. -The BFV identifier path is used for privacy-preserving enrollment. A client can -submit an encrypted identifier to the Torii resolver. The resolver evaluates it -under the active identifier policy, derives an `OpaqueAccountId`, and emits a -receipt. `ClaimIdentifier` then binds that receipt to the UAID attached to the -target account. - -The UAID is the identity and capability anchor around that flow. In the data -model, `UniversalAccountId` is hash-backed and displays as `uaid:`. -Parsers accept either `uaid:` or the raw 64-hex digest. `Account` and -`NewAccount` include optional `uaid` and `opaque_ids` fields. 
Runtime
-registration enforces a one-to-one UAID-to-account index, rejects duplicate or
-colliding opaque identifiers, and rejects opaque identifiers without a UAID.
-Whenever a UAID account binding changes, the runtime rebuilds Space Directory
-dataspace bindings for that UAID.
+The BFV identifier path is used for privacy-preserving enrollment. A client
+can submit an encrypted identifier to the Torii resolver. The resolver
+evaluates it under the active identifier policy, derives an
+`OpaqueAccountId`, and emits a receipt. `ClaimIdentifier` then binds that
+receipt to the UAID attached to the target account.
+
+The UAID is the identity and capability anchor around that flow. In the
+data model, `UniversalAccountId` is hash-backed and displays as
+`uaid:<digest>`. Parsers accept either `uaid:<digest>` or the raw 64-hex
+digest. `Account` and `NewAccount` include optional `uaid` and `opaque_ids`
+fields. Runtime registration enforces a one-to-one UAID-to-account index,
+rejects duplicate or colliding opaque identifiers, and rejects opaque
+identifiers without a UAID. Whenever a UAID account binding changes, the
+runtime rebuilds Space Directory dataspace bindings for that UAID.
 
 Space Directory manifests attach capabilities to a UAID. An
-`AssetPermissionManifest` names the UAID, dataspace, activation and optional
-expiry epoch, and ordered allow/deny entries scoped by dataspace, program,
-method, asset, and AMX role. Evaluation is deny-wins: the first matching deny
-rejects the request, otherwise the latest matching allow candidate is checked
-against any amount limit. Publishing, expiring, and revoking these manifests is
-guarded by `CanPublishSpaceDirectoryManifest`.
+`AssetPermissionManifest` names the UAID, dataspace, activation and
+optional expiry epoch, and ordered allow/deny entries scoped by dataspace,
+program, method, asset, and AMX role. 
Evaluation is deny-wins: the first +matching deny rejects the request, otherwise the latest matching allow +candidate is checked against any amount limit. Publishing, expiring, and +revoking these manifests is guarded by `CanPublishSpaceDirectoryManifest`. For Soracloud FHE state, the implemented schemas are: -| Schema | What it controls | -| --- | --- | -| `SoraStateBindingV1` with `FheCiphertext` | Declares that values under a state key prefix are FHE ciphertexts. | -| `FheParamSetV1` | Names the scheme, backend, modulus chain, polynomial degree, slot count, security target, lifecycle, and parameter digest. | -| `FheExecutionPolicyV1` | Bounds ciphertext size, plaintext size, input/output count, multiplication depth, rotations, bootstraps, and rounding mode. | -| `FheGovernanceBundleV1` | Couples one parameter set with one execution policy for admission validation. | -| `FheJobSpecV1` | Describes deterministic `Add`, `Multiply`, `RotateLeft`, or `Bootstrap` work over ciphertext state keys and commitments. | -| `CiphertextQuerySpecV1` | Queries ciphertext-only state by service, binding, key prefix, result limit, metadata level, and optional inclusion proof. | -| `DecryptionRequestV1` | Requests disclosure for one ciphertext commitment under a decryption-authority policy. | - -`FheJobSpecV1::validate_for_execution` checks that the job, execution policy, -and parameter set agree before admission. It also enforces operation-specific -rules: add and multiply need at least two inputs, rotate and bootstrap need -exactly one input, and requested depth, rotation count, bootstrap count, input -count, payload bytes, and deterministic output size must stay within policy -bounds. Ciphertext query results must not return plaintext rows. 
+| Schema | What it controls | +| ----------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- | +| `SoraStateBindingV1` with `FheCiphertext` | Declares that values under a state key prefix are FHE ciphertexts. | +| `FheParamSetV1` | Names the scheme, backend, modulus chain, polynomial degree, slot count, security target, lifecycle, and parameter digest. | +| `FheExecutionPolicyV1` | Bounds ciphertext size, plaintext size, input/output count, multiplication depth, rotations, bootstraps, and rounding mode. | +| `FheGovernanceBundleV1` | Couples one parameter set with one execution policy for admission validation. | +| `FheJobSpecV1` | Describes deterministic `Add`, `Multiply`, `RotateLeft`, or `Bootstrap` work over ciphertext state keys and commitments. | +| `CiphertextQuerySpecV1` | Queries ciphertext-only state by service, binding, key prefix, result limit, metadata level, and optional inclusion proof. | +| `DecryptionRequestV1` | Requests disclosure for one ciphertext commitment under a decryption-authority policy. | + +`FheJobSpecV1::validate_for_execution` checks that the job, execution +policy, and parameter set agree before admission. It also enforces +operation-specific rules: add and multiply need at least two inputs, rotate +and bootstrap need exactly one input, and requested depth, rotation count, +bootstrap count, input count, payload bytes, and deterministic output size +must stay within policy bounds. Ciphertext query results must not return +plaintext rows. UAID is not the ciphertext and not the FHE policy itself. It is the stable -account capability anchor used to find the account, opaque identifier claims, -and Space Directory bindings that authorize a service or dataspace flow. 
FHE -schemas govern encrypted payload admission and execution separately through -parameter sets, execution policies, ciphertext commitments, and decryption -authority policies. +account capability anchor used to find the account, opaque identifier +claims, and Space Directory bindings that authorize a service or dataspace +flow. FHE schemas govern encrypted payload admission and execution +separately through parameter sets, execution policies, ciphertext +commitments, and decryption authority policies. Relevant Torii surfaces include: @@ -635,26 +924,29 @@ Relevant Torii surfaces include: - `/v1/soracloud/model/run-private/finalize` - `/v1/soracloud/model/decrypt-output` -The public metadata boundary is explicit in the schemas: UAID bindings, opaque -identifier records, manifest lifecycle, state-key digests, ciphertext sizes, -ciphertext commitments, policy names, parameter-set versions, job operations, -output state keys, and disclosure request metadata can be visible. Identifier -plaintexts, decrypted state, model inputs and outputs, and FHE secret keys are -outside these public query records. +The public metadata boundary is explicit in the schemas: UAID bindings, +opaque identifier records, manifest lifecycle, state-key digests, +ciphertext sizes, ciphertext commitments, policy names, parameter-set +versions, job operations, output state keys, and disclosure request +metadata can be visible. Identifier plaintexts, decrypted state, model +inputs and outputs, and FHE secret keys are outside these public query +records. ## Operational Checklist -- Confirm enabled service families with `/openapi` on the target Torii node. +- Confirm enabled service families with `/openapi` on the target Torii + node. - Treat Soracloud deployment manifests, SoraFS manifests, SoraDNS resolver - directory records, and SoraNet relay directory records as governance-sensitive - artifacts. 
+ directory records, SoraNet relay directory records, and DA pin intents or + availability commitments as governance-sensitive artifacts. - Use the same SORA Nexus profile consistently across validators in one network. -- Keep Inrou root and shared lease volumes in manifests instead of relying on - ad hoc node-local paths. +- Keep Inrou root and shared lease volumes in manifests instead of relying + on ad hoc node-local paths. - Use SoraFS proof verification before promoting content aliases. -- Monitor SoraNet handshake failures, SoraFS gateway refusals, SoraDNS RAD - freshness, and Soracloud rollout health. +- Monitor SoraNet handshake failures, DA quorum or availability timeouts, + SoraFS gateway refusals, SoraDNS RAD freshness, and Soracloud rollout + health. - For public Taira or Minamoto usage, start with [Connect to SORA Nexus dataspaces](/get-started/sora-nexus-dataspaces.md). diff --git a/src/blockchain/transactions.md b/src/blockchain/transactions.md index 233480248..04d83f3ed 100644 --- a/src/blockchain/transactions.md +++ b/src/blockchain/transactions.md @@ -20,6 +20,121 @@ For proof evidence over selected transparent execution effects, see transaction execution and builds deterministic proof batches for supported state transitions. 
+## Try It on Taira
+
+Use the explorer routes to inspect recent public Taira blocks and transaction
+statuses without a signing account:
+
+```bash
+curl -fsS 'https://taira.sora.org/v1/explorer/blocks?page=1&per_page=3' \
+  | jq '{pagination, blocks: [.items[] | {height, hash, transactions_total, transactions_rejected}]}'
+
+curl -fsS 'https://taira.sora.org/v1/explorer/transactions?page=1&per_page=5' \
+  | jq '{pagination, txs: [.items[] | {hash, block, status, executable}]}'
+```
+
+To follow a transaction your app submitted earlier, copy the `hash` from the
+list and inspect the explorer detail route:
+
+```bash
+TX_HASH='<hash>'
+
+curl -fsS "https://taira.sora.org/v1/explorer/transactions/$TX_HASH" \
+  | jq '{hash, block, status, authority, executable}'
+```
+
+This is still read-only. Submitting a transaction requires a signed Norito
+envelope, correct chain ID, fee metadata, and a faucet-funded Taira account.
+
+For fee-paying examples on Taira, save the faucet helper from
+[Get Testnet XOR on Taira](/get-started/sora-nexus-dataspaces.md#_4-get-testnet-xor-on-taira)
+as `taira_faucet_claim.py`, then fund the signer through the public faucet
+first:
+
+```bash
+export TAIRA_ACCOUNT_ID='<account-id>'
+export TAIRA_FEE_ASSET=6TEAJqbb8oEPmLncoNiMRbLEK6tw
+
+curl -fsS https://taira.sora.org/v1/accounts/faucet/puzzle | jq .
+python3 taira_faucet_claim.py "$TAIRA_ACCOUNT_ID"
+
+iroha --config ./taira.client.toml ledger asset get \
+  --definition "$TAIRA_FEE_ASSET" \
+  --account "$TAIRA_ACCOUNT_ID"
+```
+
+If the faucet puzzle or claim route returns `502`, wait and retry before
+debugging the transaction itself.
+ +Then attach the Taira fee asset metadata when submitting the transaction: + +```bash +printf '{"gas_asset_id":"%s"}\n' "$TAIRA_FEE_ASSET" > taira.tx-metadata.json + +iroha --config ./taira.client.toml \ + --metadata ./taira.tx-metadata.json \ + ledger transaction ping --msg "faucet-funded taira transaction" +``` + +## Offline Transactions + +Iroha has two offline transaction workflows: + +- **Offline signing** creates a normal signed transaction while the signing + device is disconnected. The transaction is not processed until an online + client submits the signed envelope to Torii, so it still needs the + correct chain ID, authority, permissions, fees, and transaction lifetime. +- **Offline V2 notes** support offline value transfer through ledger-backed + bearer notes. Online transactions reserve value into escrow, later audit + or redeem offline payment tokens, and enforce replay protection when the + token reaches the ledger. + +Offline V2 is the maintained offline payment surface. Torii exposes +`GET /v1/offline/v2/readiness` for feature discovery; legacy offline +allowance, reserve, revocation, transfer-history, and cash HTTP routes are +not published. Offline V2 note issuance, audit, and redemption are +submitted as normal transaction instructions; the instruction set is +listed in the table below. + +First, check the public Taira readiness flags: + +```bash +curl -fsS https://taira.sora.org/v1/offline/v2/readiness \ + | jq '{offline_note_v2, offline_one_use_keys, offline_recursive_note_proof, offline_sync_optional}' +``` + +| Instruction | Purpose | +| --------------------- | ------------------------------------------------------------------------------------------------------------------- | +| `IssueOfflineNoteV2` | Reserve an online asset amount into offline escrow and record a note commitment bound to a one-use key certificate. | +| `AuditOfflineNoteV2` | Optionally record an offline payment token, its consumed nullifiers, output commitments, and recursive proof.
| +| `RedeemOfflineNoteV2` | Verify the final offline note proof, consume replay keys and nullifiers, and credit the recipient from escrow. | + +The typical flow is: + +1. Check Offline V2 readiness on the target Torii endpoint. +2. Enable offline support for the asset and configure or derive its offline + escrow account. +3. Register an active Offline V2 recursive verifier key and grant + `CanManageOfflineEscrow` to the account that issues notes. +4. Submit `IssueOfflineNoteV2`. The ledger debits the note owner's asset, + credits escrow, records replay keys, and emits + `OfflineNoteEvent::NoteIssued`. +5. Exchange the offline payment token outside the ledger. Wallets carry the + one-use key certificate, nullifiers, output commitments, and recursive + proof through their chosen transport, such as QR or a local hand-off. +6. Submit `AuditOfflineNoteV2` when operators or wallets want an online + audit record before final redemption. Audit is optional for offline + finality. +7. Submit `RedeemOfflineNoteV2` when the recipient comes online. Validators + check the verifier key, proof binding, issued claim, amount, recipient, + and nullifier uniqueness before crediting the recipient. + +Replay protection is enforced when audit or redemption reaches the ledger. +Validators reject duplicate note issues, duplicate issued key certificates, +duplicate nullifiers, already redeemed issued claims, and conflicting audit +tokens. Until a token is audited or redeemed, the ledger cannot observe an +offline conflict, so wallet and operator policies should limit value, +expiry, accepted issuers, and reconciliation windows. + Here is an example of creating a new transaction with the `Grant` instruction. In this transaction, Mouse is granting Alice the specified role (`role_id`). 
Check diff --git a/src/documenting/snippets.md b/src/documenting/snippets.md index 138cb9f75..e67d97f05 100644 --- a/src/documenting/snippets.md +++ b/src/documenting/snippets.md @@ -10,12 +10,13 @@ located in other repositories, where they are built, run, and tested. Snippet sources are defined in [`snippet_sources.ts`](https://github.com/hyperledger-iroha/iroha-2-docs/blob/main/etc/snippet-sources.ts). -The `snippet_sources.ts` file is located in the documentation repository. By -default, Iroha snippets are loaded from pinned raw GitHub sources so CI and -preview builds do not require a sibling checkout. Override `IROHA_REV` or -`IROHA_RAW_BASE` to point snippets at another published revision. Set -`IROHA_SOURCE_DIR` when the data-model schema snapshot is empty and you want to -regenerate that page from a local Iroha source checkout. +The `snippet_sources.ts` file is located in the documentation repository. +By default, Iroha snippets are loaded from pinned raw GitHub sources so CI +and preview builds do not depend on a local repository layout. Override +`IROHA_REV` or `IROHA_RAW_BASE` to point snippets at another published +revision. Set `IROHA_SOURCE_DIR` when the data-model schema snapshot is +empty and you want to regenerate that page from a local Iroha source +checkout. It has the following format: @@ -39,8 +40,8 @@ export default [ - `src` defines the source file location and could be either an HTTP(s) URI or a relative file path. - `filename` (optional) explicitly defines the local filename. -- `transform` (optional) can derive a snippet from generated source data. The - data-model reference uses this to render the current schema. +- `transform` (optional) can derive a snippet from generated source data. + The data-model reference uses this to render the current schema. ### Fetching Snippets @@ -108,8 +109,8 @@ Let's add a code snippet from Iroha JavaScript SDK. 
For example, this one: ::: tip Since `snippet_sources.ts` is a TypeScript file, it can use small helper - functions. Keep those helpers focused: snippets should continue to reflect - built and tested source files, not hand-written copies. + functions. Keep those helpers focused: snippets should continue to + reflect built and tested source files, not hand-written copies. ::: diff --git a/src/get-started/index.md b/src/get-started/index.md index e12dada43..7505f7dcf 100644 --- a/src/get-started/index.md +++ b/src/get-started/index.md @@ -49,7 +49,7 @@ The pages you will use most often while running a network are: ## Learn More -- [Main Iroha repository](https://github.com/hyperledger-iroha/iroha) -- [Workspace docs index](https://github.com/hyperledger-iroha/iroha/blob/main/docs/README.md) -- [Iroha 3 whitepaper](https://github.com/hyperledger-iroha/iroha/blob/main/docs/source/iroha_3_whitepaper.md) -- [Iroha 2 whitepaper](https://github.com/hyperledger-iroha/iroha/blob/main/docs/source/iroha_2_whitepaper.md) +- [Iroha `i23-features` branch](https://github.com/hyperledger-iroha/iroha/tree/i23-features) +- [Workspace docs index](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/README.md) +- [Iroha 3 whitepaper](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/source/iroha_3_whitepaper.md) +- [Iroha 2 whitepaper](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/source/iroha_2_whitepaper.md) diff --git a/src/get-started/install-iroha-2.md b/src/get-started/install-iroha-2.md index 2e70bbca6..64219ab3b 100644 --- a/src/get-started/install-iroha-2.md +++ b/src/get-started/install-iroha-2.md @@ -15,7 +15,7 @@ Install these first: ## 2. 
Clone the Workspace ```bash -git clone https://github.com/hyperledger-iroha/iroha.git +git clone --branch i23-features https://github.com/hyperledger-iroha/iroha.git cd iroha ``` diff --git a/src/get-started/operate-iroha-2-via-cli.md b/src/get-started/operate-iroha-2-via-cli.md index 7370a7430..fb4c05990 100644 --- a/src/get-started/operate-iroha-2-via-cli.md +++ b/src/get-started/operate-iroha-2-via-cli.md @@ -43,7 +43,74 @@ The `ledger` group also contains domain-specific transaction helpers such as Use `--output-format text` for human-readable operator output and `--machine` for strict automation mode. -## 3. Basic Ledger Commands +## 3. Try the Public Taira Testnet + +You can try read-only Taira checks before running a local peer or creating a +signer. These commands use public Torii JSON routes and do not spend testnet +XOR. + +Check Taira health: + +```bash +curl -fsS https://taira.sora.org/status \ + | jq '{blocks, txs_approved, txs_rejected, queue_size, peers}' +``` + +List public domains in the `universal` dataspace: + +```bash +curl -fsS 'https://taira.sora.org/v1/domains?limit=10' \ + | jq -r '.items[].id' +``` + +List a few asset definitions and their current supply: + +```bash +curl -fsS 'https://taira.sora.org/v1/assets/definitions?limit=10' \ + | jq -r '.items[] | [.id, .name, .mintable, .total_quantity] | @tsv' +``` + +If you have the current `iroha` binary, run the Taira diagnostics helper: + +```bash +iroha taira doctor --public-root https://taira.sora.org --json +``` + +Create `taira.client.toml` only when you are ready to test signed commands. +See [Connect to SORA Nexus Dataspaces](/get-started/sora-nexus-dataspaces.md) +for the config, faucet, and canary flow. Do not run write commands against +Taira until the account is funded with the faucet fee asset. 
+ +For any fee-paying Taira CLI example, save the faucet helper from +[Get Testnet XOR on Taira](/get-started/sora-nexus-dataspaces.md#_4-get-testnet-xor-on-taira) +as `taira_faucet_claim.py`, then claim testnet XOR first: + +```bash +export TAIRA_ACCOUNT_ID='' +export TAIRA_FEE_ASSET=6TEAJqbb8oEPmLncoNiMRbLEK6tw + +curl -fsS https://taira.sora.org/v1/accounts/faucet/puzzle | jq . +python3 taira_faucet_claim.py "$TAIRA_ACCOUNT_ID" + +iroha --config ./taira.client.toml ledger asset get \ + --definition "$TAIRA_FEE_ASSET" \ + --account "$TAIRA_ACCOUNT_ID" +``` + +If the faucet puzzle or claim route returns `502`, wait and retry. That is a +public testnet availability issue, not a signal to regenerate the account keys. + +After the balance is visible, attach the fee asset metadata to writes: + +```bash +printf '{"gas_asset_id":"%s"}\n' "$TAIRA_FEE_ASSET" > taira.tx-metadata.json + +iroha --config ./taira.client.toml \ + --metadata ./taira.tx-metadata.json \ + ledger transaction ping --msg "hello from faucet-funded taira" +``` + +## 4. Basic Ledger Commands List all domains: @@ -71,7 +138,7 @@ cargo run --bin iroha -- --config ./localnet/client.toml ledger blocks 1 --timeo cargo run --bin iroha -- --config ./localnet/client.toml ledger events block ``` -## 4. Operator Commands +## 5. Operator Commands Consensus status: @@ -99,12 +166,12 @@ cargo run --bin iroha -- --config ./localnet/client.toml ops sumeragi collectors cargo run --bin iroha -- --config ./localnet/client.toml ops sumeragi params ``` -## 5. Where to Go Next +## 6. 
Where to Go Next - [SDK tutorials](/guide/tutorials/) - [Torii endpoints](/reference/torii-endpoints.md) - [Working with Iroha binaries](/reference/binaries.md) -- [CLI README](https://github.com/hyperledger-iroha/iroha/blob/main/crates/iroha_cli/README.md) +- [CLI README](https://github.com/hyperledger-iroha/iroha/blob/i23-features/crates/iroha_cli/README.md) To regenerate a full Markdown help snapshot from the source checkout, run: diff --git a/src/get-started/private-dataspace-fee-sponsor.md b/src/get-started/private-dataspace-fee-sponsor.md index 4ce8b2d2f..e5cbdf519 100644 --- a/src/get-started/private-dataspace-fee-sponsor.md +++ b/src/get-started/private-dataspace-fee-sponsor.md @@ -342,6 +342,23 @@ iroha --config ./treasury.client.toml \ --quantity 1000 ``` +For Taira rehearsals, save the faucet helper from +[Get Testnet XOR on Taira](/get-started/sora-nexus-dataspaces.md#_4-get-testnet-xor-on-taira) +as `taira_faucet_claim.py`, then fund the sponsor with the public faucet +instead of a treasury transfer: + +```bash +export SPONSOR='' +export XOR_ASSET=6TEAJqbb8oEPmLncoNiMRbLEK6tw + +python3 taira_faucet_claim.py "$SPONSOR" + +iroha --config ./sponsor.client.toml \ + ledger asset get \ + --definition "$XOR_ASSET" \ + --account "$SPONSOR" +``` + Check the sponsor's XOR balance: ```bash diff --git a/src/get-started/sora-nexus-dataspaces.md b/src/get-started/sora-nexus-dataspaces.md index a860faade..3a2bbf3e0 100644 --- a/src/get-started/sora-nexus-dataspaces.md +++ b/src/get-started/sora-nexus-dataspaces.md @@ -84,6 +84,146 @@ curl -fsS https://taira.sora.org/status \ Use the same command with `https://minamoto.sora.org/status` for mainnet. +## Taira MCP for Agents + +Taira also exposes a Torii-native Model Context Protocol (MCP) bridge for +agent runtimes. Use it when an agent needs live testnet reads, scripted +diagnostics, or tightly reviewed write rehearsals without building a custom +Torii client first. 
+ +| Setting | Value | +| --- | --- | +| MCP endpoint | `https://taira.sora.org/v1/mcp` | +| Network root | `https://taira.sora.org` | +| Intended use | Taira testnet reads and faucet-funded write rehearsals | +| Production equivalent | Do not point this entry at Minamoto unless a mainnet MCP endpoint and release controls are explicitly approved | + +Check the bridge metadata before adding signing material: + +```bash +curl -fsS https://taira.sora.org/v1/mcp \ + | jq '{protocolVersion, server: .serverInfo.name, tools: .capabilities.tools.count}' +``` + +Configure the URL as a user-local MCP server in the agent runtime. Do not +commit agent MCP config, API tokens, forwarded auth headers, `authority`, or +`private_key` values into this docs repo or an application repo. + +Agent prompt rules that work well with Taira: + +- Discover tools from the MCP server before calling them; re-discover if the + server reports `listChanged`. +- Prefer the curated `iroha.*` tools over raw `torii.*` tools. +- Start read-only: inspect status, accounts, assets, aliases, blocks, + governance state, and transaction status before proposing writes. +- Require an explicit human instruction before live testnet mutations. For + pre-signed transaction envelopes, use `iroha.transactions.submit_and_wait` + so the agent waits for the result instead of only submitting. +- Summarize transaction hashes, final status, and server validation errors in + the agent response. + +### Development Workflow With Agents + +Use agents as development helpers for Iroha clients, transaction builders, +diagnostic scripts, and testnet runbooks. Keep the agent's authority narrow: +it can inspect code, read Taira state, propose changes, and run local tests, +but it should not mutate a live network until a human approves the exact +operation. + +A practical workflow is: + +1. Ask the agent to inspect the relevant docs, SDK code, CLI command, or MCP + tool schema before it writes code. +2. 
Have the agent write the smallest client path first: status check, account + lookup, alias resolution, or balance lookup. +3. Add transaction-building code only after read-only calls work against + Taira. +4. Keep live-network tests opt-in, for example behind `TAIRA_LIVE=1`, so a + normal unit test run never spends testnet funds or depends on network + availability. +5. Require the agent to report the network root, chain, authority account, + instruction summary, fee asset, and expected state change before it submits + any transaction. +6. Review generated code for secret handling, retry behavior, idempotency, and + rejection handling before promoting it to CI or mainnet workflows. + +Useful read-only MCP tools for development include account asset lookups, +alias resolution, block lookup, transaction lookup, transaction lists, and +pipeline status checks. Use these to build confidence before submitting any +signed payload. + +```text +Use Taira MCP as a read-only inspector while developing this Iroha feature. +Inspect available iroha.* tools, verify the target account and asset state, +then update the client code. Do not submit transactions unless I explicitly +say "submit this transaction". +``` + +### Transaction Workflow Through Agents + +The MCP bridge can submit a signed Iroha transaction, but it does not remove +the normal transaction requirements. A transaction still needs a correct +authority, permissions, fee funding, chain ID, metadata, and signature. + +For raw Iroha transactions, build and sign the transaction envelope with an +SDK or CLI first, then give the agent only the canonical signed transaction +bytes encoded as `body_base64`. The agent can submit the envelope with +`iroha.transactions.submit_and_wait`, or submit with +`iroha.transactions.submit` and poll with `iroha.transactions.wait`. + +Do not paste private keys into an agent prompt. 
If an agent needs to build a +transaction, point it at local code that loads secrets from the user's runtime +environment, keychain, hardware signer, or ignored testnet config file. The +agent should never write the key material into Markdown, fixtures, logs, or +commits. + +Before submitting a transaction, make the agent produce a short transaction +plan: + +- `network`: Taira testnet root and chain ID +- `authority`: account that signs and pays fees +- `instructions`: register, mint, burn, transfer, metadata, permission, or + contract call summary +- `fee asset`: asset that will be charged on Taira +- `preflight reads`: account, asset balance, permissions, alias, or block + checks already performed +- `expected result`: the state that should be visible after confirmation +- `idempotency`: what happens if the same request is retried + +After submission, make the agent wait for a terminal status, then verify the +state change with a read query. A useful completion report includes: + +- transaction hash +- terminal status such as `Committed`, `Applied`, `Rejected`, or `Expired` +- block or explorer detail when available +- verification read results +- rejection message and whether the failure looks like permissions, fees, + validation, stale state, or endpoint availability + +Example guarded prompt: + +```text +Prepare a Taira transaction plan, but do not submit yet. Use MCP reads to +verify the authority account, fee balance, target asset or alias, and current +transaction status if a hash already exists. Show the exact instructions and +expected post-state. Wait for my explicit "submit" message before calling +iroha.transactions.submit_and_wait. +``` + +When the signed envelope is already prepared: + +```text +Submit this pre-signed Taira transaction envelope with +iroha.transactions.submit_and_wait. Use the provided body_base64 only; do not +ask for private keys. 
Wait for a terminal status, then verify the resulting +state with read-only iroha.* tools and report the hash, status, and +verification result. +``` + +Treat Taira MCP as a public testnet control surface. Taira keys, testnet XOR, +faucet accounts, and canary signers are disposable and must stay separate from +Minamoto keys and production release workflows. + ## Toy Examples You Can Try Now These examples are read-only unless noted. They work before you generate @@ -317,6 +457,10 @@ Fetch the puzzle: curl -fsS https://taira.sora.org/v1/accounts/faucet/puzzle | jq . ``` +The faucet is a public testnet service. If the puzzle or claim endpoint +returns `502`, a timeout, or another gateway-level error, wait and retry +before changing your keys or client config. + The response has this shape: ```json diff --git a/src/guide/advanced/chaos-testing.md b/src/guide/advanced/chaos-testing.md new file mode 100644 index 000000000..21333c6e8 --- /dev/null +++ b/src/guide/advanced/chaos-testing.md @@ -0,0 +1,216 @@ +# Chaos Testing with Izanami + +Izanami is the chaosnet orchestrator in the upstream Iroha workspace. It +starts a disposable local Iroha cluster, submits a configurable workload, +and injects faults into selected peers so operators can check whether the +network keeps making progress under controlled failure. + +Use Izanami for pre-production resilience checks, regression reproduction, +and consensus tuning. Do not point it at a production network: the tool is +designed to own the peers it starts, including peer restarts, storage +wipes, artificial packet loss, and local CPU or disk pressure. 
+ +## Prerequisites + +Run Izanami from the +[`i23-features` branch of the Iroha repository](https://github.com/hyperledger-iroha/iroha/tree/i23-features), +not from this documentation repository: + +```bash +git clone --branch i23-features https://github.com/hyperledger-iroha/iroha.git +cd iroha +cargo build -p izanami +``` + +The binary must be explicitly allowed to create and manipulate networked +peers. Pass `--allow-net` for every non-TUI run, or enable `allow_net` in +the TUI. + +```bash +cargo run -p izanami -- --allow-net --peers 4 --faulty 1 --duration 120s +``` + +For an interactive run configuration: + +```bash +cargo run -p izanami -- --tui --allow-net +``` + +Izanami persists TUI and CLI settings under the user config directory, so +review the displayed settings before reusing a previous profile. + +## Baseline Run + +Start with one reproducible baseline before adding severe faults: + +```bash +cargo run -p izanami -- \ + --allow-net \ + --peers 4 \ + --faulty 1 \ + --duration 5m \ + --target-blocks 100 \ + --progress-interval 15s \ + --progress-timeout 120s \ + --latency-p95-threshold 2s \ + --tps 15 \ + --max-inflight 32 \ + --submitters 1 \ + --seed 42 +``` + +This run succeeds only if the cluster reaches the requested block target, +keeps making progress within the timeout, and stays under the optional p95 +block interval threshold. + +Record the command, seed, Iroha commit, peer count, faulty-peer count, +workload profile, target TPS, and latency threshold with the logs. Without +these values, another operator cannot replay the same failure pattern. 
+ +## Workload Profiles + +Izanami has two workload profiles: + +| Profile | Use it for | Notes | +| -------- | -------------------------------------------------- | -------------------------------------- | +| `stable` | Long soak runs and reproducible performance checks | Favors execution-safe recipes | +| `chaos` | Failure-path coverage | Includes intentionally invalid recipes | + +Use the stable profile first: + +```bash +cargo run -p izanami -- --allow-net --workload-profile stable --seed 42 +``` + +Switch to the chaos profile when the baseline is already understood: + +```bash +cargo run -p izanami -- --allow-net --workload-profile chaos --seed 42 +``` + +Contract deployment recipes are disabled in stable runs unless explicitly +allowed: + +```bash +cargo run -p izanami -- \ + --allow-net \ + --workload-profile stable \ + --allow-contract-deploy-in-stable +``` + +Use `--nexus` when the run should use the embedded SORA Nexus defaults from +the upstream workspace. + +## Fault Controls + +When `--faulty` is greater than zero, at least one fault scenario must be +enabled. Fault toggles default to enabled, and boolean flags can be +disabled with `=false`. 
+ +| Fault | CLI flag | What it exercises | +| ------------------------ | ------------------------------------------ | ------------------------------------------ | +| Crash and restart | `--fault-enable-crash-restart` | Peer process loss and recovery | +| Wipe storage and restart | `--fault-enable-wipe-storage` | Recovery from missing local state | +| Invalid transaction spam | `--fault-enable-spam-invalid-transactions` | Admission and rejection paths | +| Network latency | `--fault-enable-network-latency` | Slow gossip and delayed consensus messages | +| Network partition | `--fault-enable-network-partition` | Temporary trusted-peer isolation | +| P2P packet loss | `--fault-enable-network-packet-loss` | Dropped application-frame traffic | +| CPU stress | `--fault-enable-cpu-stress` | Local validation and scheduling pressure | +| Disk saturation | `--fault-enable-disk-saturation` | Local storage pressure | + +For a packet-loss-only run: + +```bash +cargo run -p izanami -- \ + --allow-net \ + --peers 20 \ + --faulty 5 \ + --duration 800s \ + --fault-window-start 133s \ + --fault-window-end 266s \ + --tps 200 \ + --submitters 20 \ + --max-inflight 512 \ + --fault-enable-crash-restart=false \ + --fault-enable-wipe-storage=false \ + --fault-enable-spam-invalid-transactions=false \ + --fault-enable-network-latency=false \ + --fault-enable-network-partition=false \ + --fault-enable-network-packet-loss=true \ + --fault-enable-cpu-stress=false \ + --fault-enable-disk-saturation=false \ + --fault-network-packet-loss-percent 75 \ + --seed 42 +``` + +Use `--fault-window-start` and `--fault-window-end` to keep a controlled +steady-state period before and after the injected failure. This makes it +easier to distinguish startup noise from the effect of the fault. + +## Scenario Shapes + +The upstream Izanami catalog maps common blockchain communication-failure +shapes to CLI profiles. 
You can model them with the same flags: + +| Scenario | Typical shape | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| Targeted load | `--faulty 0`, high `--tps`, one submitter, high `--max-inflight` | +| Transient failure | Enable crash/restart only inside a bounded fault window | +| Packet loss | Enable packet loss only, usually with the default 75% loss rate | +| Stopping and recovery | Use a large faulty-peer population with crash/restart | +| Leader isolation | Use exactly one faulty peer with only network-partition or packet-loss faults; Izanami follows Sumeragi leader telemetry | + +Keep one variable fixed at a time. If you change peer count, workload +profile, fault window, and TPS in the same run, the result is difficult to +interpret. + +## What to Watch + +During the run, watch the same signals used for performance validation: + +- block-height progress across every running peer +- submitted, accepted, rejected, and timed-out transactions +- queue depth, queue saturation, and endpoint backpressure +- view changes, recovery paths, missing blocks, and missing quorum + certificates +- RBC backlog, pending sessions, and dropped or delayed consensus traffic +- CPU, memory, disk, and network saturation on the host running the peers + +For validation-latency analysis, enable main-loop debug logs: + +```bash +RUST_LOG=iroha_core::sumeragi::main_loop=debug \ + cargo run -p izanami -- --allow-net --seed 42 +``` + +Each block should emit `block validation timings` with `stateless_ms`, +`execution_ms`, and `total_ms`. Compare those timings with p95 block +intervals, view-change counters, and queue pressure before changing +consensus timers. + +## Interpreting Results + +Treat a run as healthy when all selected peers continue to commit blocks, +backlog does not grow without bound, and faults stop causing new recovery +activity after the configured window ends. 
+ +Treat a run as a failure when: + +- block progress stalls longer than `--progress-timeout` +- peer heights diverge and do not reconverge +- p95 latency exceeds `--latency-p95-threshold` +- queues grow for the rest of the run after a fault window closes +- rejected or timed-out transactions are not explained by the selected + workload +- peer restart, storage wipe, or packet-loss recovery requires manual + cleanup + +After a failure, rerun with the same seed and one fewer fault type. This +keeps the workload and timing reproducible while narrowing the failure +surface. + +## Related Pages + +- [Performance and Metrics](./metrics.md) +- [Running Iroha on Bare Metal](./running-iroha-on-bare-metal.md) +- [Torii endpoints](../../reference/torii-endpoints.md) diff --git a/src/guide/advanced/metrics.md b/src/guide/advanced/metrics.md index f584ca538..80e46f1ec 100644 --- a/src/guide/advanced/metrics.md +++ b/src/guide/advanced/metrics.md @@ -30,6 +30,25 @@ curl -s "$TORII/v1/sumeragi/params" | jq . curl -s "$TORII/metrics" > metrics.prom ``` +You can try the same read-only pattern against public Taira: + +```bash +TAIRA=https://taira.sora.org + +curl -fsS "$TAIRA/status" \ + | jq '{blocks, txs_approved, txs_rejected, queue_size, peers}' + +curl -fsS "$TAIRA/v1/time/status" \ + | jq '{healthy: .health.healthy, peers, samples_used, rtt_count: .rtt.count}' + +curl -fsS "$TAIRA/metrics" \ + | grep -E '^(block_height|queue_size|sumeragi_tx_queue_depth|txs|view_changes)' \ + | head -n 20 +``` + +Public Taira metrics are useful for learning the signal names. Do not use them +as production capacity numbers for your own deployment. + The same consensus snapshots are available through the CLI: ```bash @@ -251,6 +270,7 @@ Without these details, a TPS number should be treated as anecdotal. 
## Related Pages +- [Chaos Testing with Izanami](./chaos-testing.md) - [Torii endpoints](../../reference/torii-endpoints.md) - [Operate Iroha 3 via CLI](../../get-started/operate-iroha-2-via-cli.md) - [Peer configuration reference](../../reference/peer-config/params.md) diff --git a/src/guide/best-practices/application-development.md b/src/guide/best-practices/application-development.md new file mode 100644 index 000000000..f807e5c4b --- /dev/null +++ b/src/guide/best-practices/application-development.md @@ -0,0 +1,79 @@ +# Application Development + +Iroha applications should make transaction behavior explicit, keep signing +state contained, and use queries and events in ways that are easy to +observe in production. + +## Client Setup + +- Store client configuration outside application source code. Load the + chain ID, Torii URL, signing account, and transaction settings from + environment-specific config. +- Keep `client.toml` files separate for localnet, Taira, Minamoto, and + private networks. A copied testnet signer should never become a mainnet + signer. +- Set transaction lifetimes and status timeouts deliberately. A very short + lifetime can expire under normal network jitter, while a very long one + can make duplicate submissions harder to reason about. +- Use `nonce = true` only when repeated transactions should have distinct + hashes. For idempotent business operations, store and reuse an + application request ID so retries are traceable. + +See [Client Configuration](/guide/configure/client-configuration.md) for +the current TOML fields. + +## Transactions + +- Build transactions from typed SDK instructions where possible instead of + raw JSON or string-assembled payloads. +- Preflight important writes with read-only queries: account existence, + asset balances, permission state, fee asset availability, and target + object state. +- Record the transaction hash, authority account, instruction summary, and + expected state change before submitting. 
+- Treat `Rejected`, `Expired`, and timeout outcomes differently. A timeout + means the client did not observe a final status; it does not prove that + the network ignored the transaction. +- After a successful write, verify the resulting state with a query or + event checkpoint that matches the business operation. + +For transaction mechanics, see [Transactions](/blockchain/transactions.md). + +## Queries and Events + +- Use queries for current state and event streams for change notifications. + Avoid replacing event handling with repeated broad queries. +- Paginate broad iterable queries such as account, asset, and block + listings. +- Prefer narrow filters for subscriptions and triggers. Broad filters are + useful for diagnostics but can add unnecessary execution and client-side + processing. +- Keep read-only smoke checks separate from signed transaction tests so + endpoint availability is easier to diagnose. + +See [Queries](/blockchain/queries.md), [Events](/blockchain/events.md), and +[Filters](/blockchain/filters.md). + +## Agent-Assisted Development + +- Let agents inspect docs, SDK code, and read-only network state before + asking them to write transaction code. +- Keep live-network tests opt-in behind an environment flag such as + `TAIRA_LIVE=1`. +- Do not paste private keys, account recovery material, API tokens, or + forwarded auth headers into prompts. +- Require a transaction plan before any agent submits a live testnet + transaction. The plan should name the network, authority, instructions, + fee asset, preflight reads, expected result, and retry behavior. + +For the Taira MCP workflow, see +[Build on SORA 3: Taira and Minamoto](/get-started/sora-nexus-dataspaces.md#taira-mcp-for-agents). + +## SDK Hygiene + +- Pin SDK and binary versions together using the + [Compatibility Matrix](/reference/compatibility-matrix.md). 
+- Keep generated client code, snippets, and examples synchronized with the + upstream workspace rather than copying older Iroha 2 examples forward. +- Add unit tests for transaction-building code and integration tests for + the smallest read and write paths your application depends on. diff --git a/src/guide/best-practices/data-modeling.md b/src/guide/best-practices/data-modeling.md new file mode 100644 index 000000000..8ebe8459b --- /dev/null +++ b/src/guide/best-practices/data-modeling.md @@ -0,0 +1,75 @@ +# Data Modeling + +Ledger data should be modeled around ownership, transfer behavior, +permission boundaries, and query patterns. Choose the smallest on-chain +representation that can support auditability and deterministic execution. + +## Domains and Accounts + +- Use domains to represent administrative and policy boundaries. Keep + domain names stable because they appear in account and asset identifiers. +- Avoid overloading a single account with unrelated responsibilities. Use + separate accounts for users, services, triggers, operators, and fee + sponsors. +- Use canonical account and domain identifiers in config and tests. Iroha + names are case-sensitive after canonical parsing. +- Keep test and production identities visibly distinct in names, domains, + and configuration file paths. + +See [Domains](/blockchain/domains.md), [Accounts](/blockchain/accounts.md), +and [Naming](/reference/naming.md). + +## Assets and NFTs + +- Use numeric assets for fungible balances and transferable quantities. +- Use NFTs or domain-specific objects for uniquely owned records. +- Avoid encoding value-bearing state only in metadata. Assets and NFTs + provide lifecycle events, transfer semantics, and permission checks that + metadata does not. +- Define precision, supply policy, issuer responsibility, and burn/mint + authority before exposing an asset to applications. + +See [Assets](/blockchain/assets.md), [NFTs](/blockchain/nfts.md), and +[RWAs](/blockchain/rwas.md). 
+ +## Metadata + +- Use metadata for compact attributes of ledger objects, such as labels, + integration IDs, policy flags, hashes, URIs, or content-addressed + references. +- Keep metadata keys stable and documented. Changing key names after + clients depend on them creates a migration problem. +- Do not store large documents, logs, private user data, or high-churn + application state directly in metadata. +- When metadata points to off-chain data, store a verifiable reference such + as a content hash, URI, SoraFS path, manifest reference, or compact + commitment. + +See +[Metadata and Ledger Storage Choices](/guide/configure/metadata-and-store-assets.md) +and [Metadata](/blockchain/metadata.md). + +## Permissions by Model + +- Design roles around business operations, not around implementation + conveniences. A role named after a job or service is easier to audit than + a role named after a broad technical capability. +- Scope permission tokens to the smallest object that satisfies the + workflow. +- Treat permissions for minting, burning, peer management, executor + changes, trigger management, and metadata mutation as high-impact + permissions. +- Add explicit revocation and rotation procedures for temporary + permissions. + +See [Permissions](/blockchain/permissions.md) and +[Permission Tokens](/reference/permissions.md). + +## Query Shape + +- Choose identifiers and metadata keys that support the queries your + application will need most often. +- Paginate broad result sets and avoid user interfaces that require + unrestricted ledger-wide scans for normal actions. +- Keep off-chain indexes reconstructible from ledger data and events + whenever they are used for critical application behavior. 
diff --git a/src/guide/best-practices/index.md b/src/guide/best-practices/index.md new file mode 100644 index 000000000..1e167b6c0 --- /dev/null +++ b/src/guide/best-practices/index.md @@ -0,0 +1,43 @@ +# Best Practices + +This section collects production-oriented guidance for Iroha applications +and networks. It is organized by the decision you need to make, not by the +feature that happens to implement it. + +Use it as a checklist before a shared testnet rehearsal, a production +launch, or a major client release. + +## Categories + +| Category | Focus | +| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | +| [Application Development](./application-development.md) | Client configuration, transaction submission, retries, events, queries, and agent-assisted development | +| [Data Modeling](./data-modeling.md) | Domains, accounts, assets, NFTs, metadata, off-chain data, and naming conventions | +| [Network Deployment](./network-deployment.md) | Genesis, topology, peer keys, Torii exposure, consensus settings, and environment separation | +| [Operations](./operations.md) | Observability, runbooks, backups, change management, capacity checks, and incident handling | +| [Security and Access](./security-and-access.md) | Secret handling, permissions, technical accounts, network access, and audit trails | +| [Release Readiness](./release-readiness.md) | Localnet, Taira, Minamoto, compatibility checks, live-network safeguards, and rollback planning | + +## Cross-Cutting Rules + +- Keep local development, shared testnet, and production configuration + separate. +- Treat genesis, peer topology, executor policy, and key material as + controlled deployment artifacts. +- Model durable ledger state intentionally. Do not use metadata as a + dumping ground for large, private, or high-churn data. 
+- Submit transactions through idempotent workflows that can handle + rejection, expiry, retries, and delayed status. +- Prefer narrow permissions, dedicated technical accounts, and explicit + operational runbooks over broad administrator access. +- Prove behavior on a disposable local network first, then rehearse on + Taira or another shared testnet before any mainnet operation. + +## Related References + +- [Configuration and Management](/guide/configure/overview.md) +- [Security](/guide/security/) +- [Performance and Metrics](/guide/advanced/metrics.md) +- [Compatibility Matrix](/reference/compatibility-matrix.md) +- [Torii Endpoints](/reference/torii-endpoints.md) +- [Permission Tokens](/reference/permissions.md) diff --git a/src/guide/best-practices/network-deployment.md b/src/guide/best-practices/network-deployment.md new file mode 100644 index 000000000..0e59ec427 --- /dev/null +++ b/src/guide/best-practices/network-deployment.md @@ -0,0 +1,74 @@ +# Network Deployment + +Treat an Iroha network as a coordinated system. Validators must agree on +genesis, topology, trusted peers, and consensus-relevant configuration +before the network can start and keep finalizing blocks. + +## Environment Separation + +- Maintain separate config bundles for local development, shared testnet, + staging, and production. +- Generate fresh keys for every non-disposable environment. Do not reuse + localnet or Taira key material in production. +- Keep peer config, client config, signed genesis, scripts, and deployment + notes together as a versioned release artifact. +- Store private keys outside repositories and deployment templates. + +See +[Keys for Network Deployment](/guide/configure/keys-for-network-deployment.md). + +## Genesis and Topology + +- Make every validator use the same signed genesis transaction, trusted + peer set, topology, and validator Proofs-of-Possession when the profile + requires them. 
+- Use at least four validators for a minimum Byzantine-fault-tolerant + deployment. +- Separate validators from observers in capacity planning. Observers do not + vote, propose, or collect, but they still consume storage, block sync, + and network bandwidth. +- Treat genesis, executor, and topology changes as coordinated migrations + rather than single-peer edits. + +See [Genesis](/reference/genesis.md), +[Peer Management](/guide/configure/peer-management.md), and +[Performance and Metrics](/guide/advanced/metrics.md#node-count-and-quorum). + +## Torii and Network Access + +- Put Torii behind a reverse proxy or firewall when it is exposed outside + the host or private network. +- Terminate TLS and apply basic authentication, rate limiting, and + request-size controls at the edge when the deployment requires them. +- Publish only the endpoints needed by the environment. Operator and + telemetry routes should be more restricted than public read-only routes. +- Bind listener addresses to host-local interfaces when peers should not + accept remote traffic directly. + +See [Torii Endpoints](/reference/torii-endpoints.md) and +[Virtual Private Networks](/guide/security/vpn.md). + +## Consensus and Capacity + +- Measure the deployment before tuning consensus timers. Lower timeouts can + reduce latency only while network, storage, and execution layers keep up. +- Watch whether queue depth is growing or shrinking, not just short samples + of throughput. A queue that + grows during steady load means the network is overloaded. +- Record effective Sumeragi parameters, telemetry profile, validator count, + network RTT, workload shape, and hardware details for each benchmark. +- Increase collector fanout only after comparing latency, traffic, and + backpressure signals. + +See [Performance and Metrics](/guide/advanced/metrics.md). + +## Bare-Metal and Process Management + +- Keep each peer's `config.toml`, private key, storage directory, and ports + separate.
+- Use process managers such as systemd with explicit restart, logging, and + resource policies. +- Preserve generated README and start commands from Kagami localnet bundles + when translating a test topology to managed hosts. + +See +[Running Iroha on Bare Metal](/guide/advanced/running-iroha-on-bare-metal.md). diff --git a/src/guide/best-practices/operations.md b/src/guide/best-practices/operations.md new file mode 100644 index 000000000..6ac02469d --- /dev/null +++ b/src/guide/best-practices/operations.md @@ -0,0 +1,67 @@ +# Operations + +Operational readiness means that the network can be observed, changed, +backed up, and recovered without relying on improvised access to validator +hosts. + +## Observability + +- Enable telemetry profiles intentionally. Use `extended` when `/metrics` + is needed and `full` during test runs that need detailed Sumeragi + operator routes. +- Dashboard accepted throughput, rejected throughput, commit latency, queue + depth, queue saturation, view changes, dropped consensus messages, and + storage pressure. +- Keep status snapshots, metrics scrapes, logs, and deployment + configuration in the same incident or benchmark artifact set. +- Alert on sustained queue growth, unexpected rejection spikes, stalled + block height, view-change churn, and peer health changes. + +See [Performance and Metrics](/guide/advanced/metrics.md). + +## Runbooks + +- Write runbooks for peer restart, Torii degradation, key compromise, + permission mistakes, fee sponsor depletion, stuck queues, and network + partition symptoms. +- Include exact read-only checks before write operations, especially for + peer registration, permission grants, and parameter changes. +- Keep emergency contacts and escalation rules outside the docs repo if + they include private operational data. +- Review runbooks after every incident, rehearsal, or major upgrade. + +See [Operational Security](/guide/security/operational-security.md). 
+ +## Backups and Recovery + +- Back up peer storage according to the recovery point required by the + deployment. Validate restores on non-production hosts. +- Keep signed genesis, release metadata, peer config, and key custody + records recoverable even if a validator host is unavailable. +- Document whether a recovery procedure rebuilds from genesis, restores + from a snapshot, or replaces a failed peer with a new identity. +- Never test restore procedures for the first time during a production + incident. + +## Change Management + +- Treat on-chain configuration changes as transactions that require review, + preflight reads, authorization, and post-change verification. +- Roll out peer binary upgrades with a compatibility plan and a rollback + decision point. +- Avoid changing peer topology, consensus timing, and application workload + in the same maintenance window unless the migration plan requires it. +- Record the transaction hashes and block heights for operational changes. + +See [Hot Reload](/guide/advanced/hot-reload.md) and +[Compatibility Matrix](/reference/compatibility-matrix.md). + +## Capacity Reviews + +- Re-run load checks when validator count, hardware, network placement, + workload mix, or consensus parameters change. +- Measure warm-up, steady state, and expected peak load rather than relying + on a short best-case throughput sample. +- Compare accepted throughput with committed throughput and queue depth. If + accepted TPS exceeds committed TPS and queues grow, the network is past + its sustainable envelope.
diff --git a/src/guide/best-practices/release-readiness.md b/src/guide/best-practices/release-readiness.md new file mode 100644 index 000000000..eb7e7fa89 --- /dev/null +++ b/src/guide/best-practices/release-readiness.md @@ -0,0 +1,67 @@ +# Release Readiness + +Before promoting an Iroha application or network change, prove the behavior +in the smallest environment that can expose the relevant risk, then move +through shared testnet and production gates deliberately. + +## Localnet Gate + +- Launch a disposable local network with the same Iroha track and the + closest practical validator count. +- Run unit tests for transaction builders, query parsing, rejection + handling, and config loading. +- Exercise the smallest successful read and write paths through the same + SDK or CLI shape the application will use later. +- Capture expected transaction hashes, statuses, events, and state reads in + test artifacts. + +See [Launch Iroha 3](/get-started/launch-iroha-2.md) and +[SDK Tutorials](/guide/tutorials/). + +## Shared Testnet Gate + +- Use Taira or another shared testnet for endpoint behavior, fees, account + funding, latency, and operational rehearsals. +- Keep live testnet writes opt-in so ordinary test runs do not depend on + network availability or spend testnet funds. +- Verify signer funding, fee asset metadata, authority permissions, and + expected state before submitting each live test transaction. +- Wait for a terminal status, then verify the resulting state with a + read-only query. + +See +[Build on SORA 3: Taira and Minamoto](/get-started/sora-nexus-dataspaces.md). + +## Mainnet or Production Gate + +- Use separate production signers, funding, domains, and config paths. Do + not promote testnet keys or faucet assumptions. +- Confirm SDK, CLI, peer, and network compatibility with the + [Compatibility Matrix](/reference/compatibility-matrix.md). 
+- Review permissions, fee sponsorship, rate limits, monitoring, backup + status, and rollback criteria before the release window. +- Require a written transaction or migration plan for high-impact writes. + +## Rollback and Recovery + +- Define which changes can be rolled back by code deploy, which require an + on-chain transaction, and which cannot be undone directly. +- For on-chain data changes, prepare compensating transactions or migration + scripts before the first production write. +- For network changes, keep the previous binary, config bundle, signed + genesis, and operational runbook available during the release. +- Set a decision point for aborting the rollout based on objective signals + such as rejection rate, queue growth, latency, or peer health. + +## Final Checklist + +- Configuration is environment-specific and does not contain test-only + secrets. +- Transaction retry behavior is idempotent or explicitly bounded. +- The application can distinguish rejection, expiry, timeout, and endpoint + availability failures. +- Monitoring covers throughput, latency, queue depth, rejections, view + changes, and relevant business events. +- Operators have runbooks for expected failure modes. +- Security review covered key custody, permissions, network exposure, and + automation authority. diff --git a/src/guide/best-practices/security-and-access.md b/src/guide/best-practices/security-and-access.md new file mode 100644 index 000000000..abc63e561 --- /dev/null +++ b/src/guide/best-practices/security-and-access.md @@ -0,0 +1,74 @@ +# Security and Access + +Security practice in Iroha should be based on narrow authority, controlled +key custody, explicit network exposure, and auditable changes. + +## Key Custody + +- Generate production keys with production-grade entropy and store private + keys outside repositories, issue trackers, prompts, chat logs, and CI + output. 
+- Use separate key material for clients, peers, genesis signing, + validators, fee sponsors, and technical accounts. +- Rotate keys according to a written process and rehearse recovery before a + live incident. +- Use hardware-backed or operating-system-backed storage for high-value + signing keys when the deployment risk justifies it. + +See +[Generating Cryptographic Keys](/guide/security/generating-cryptographic-keys.md) +and +[Storing Cryptographic Keys](/guide/security/storing-cryptographic-keys.md). + +## Permissions + +- Grant the smallest permission token or role that supports the workflow. +- Prefer dedicated technical accounts for services, triggers, agents, and + automation. Avoid running long-lived automation through a personal + operator account. +- Review permissions for peer management, metadata mutation, minting, + burning, trigger registration, executor changes, and SORA/Nexus + governance before production launch. +- Revoke temporary permissions after the maintenance window or migration + that required them. + +See [Permissions](/blockchain/permissions.md) and +[Permission Tokens](/reference/permissions.md). + +## Network Exposure + +- Restrict peer-to-peer, Torii, telemetry, and operator routes according to + the environment. Public read access does not imply public write or + operator access. +- Use VPNs, firewalls, reverse proxies, TLS termination, and rate limits + where appropriate for the deployment. +- Keep basic-auth credentials, proxy tokens, and forwarded headers out of + committed config. +- Test that unauthorized clients cannot reach restricted routes. + +See [Virtual Private Networks](/guide/security/vpn.md) and +[Torii Endpoints](/reference/torii-endpoints.md). + +## Fraud and Abuse Monitoring + +- Monitor ledger events and operational signals for unexpected asset + movement, permission grants, trigger changes, peer changes, and repeated + rejected transactions. 
+- Preserve evidence with transaction hashes, block heights, event records, + logs, and status snapshots. +- Route alerts to the security, operations, and business owners responsible + for the affected assets or workflows. + +See [Fraud Monitoring](/guide/security/fraud-monitoring.md). + +## Agent and Automation Guardrails + +- Start automation with read-only permissions and add write authority only + after the workflow is reviewed. +- Require explicit human approval for live-network mutations unless the + automation is a deliberately deployed production service. +- Do not expose private keys to agent prompts. Use local code that loads + secrets from environment variables, keychains, hardware signers, or + ignored config files. +- Log automation decisions in a way that supports audits without leaking + secret material. diff --git a/src/guide/configure/metadata-and-store-assets.md b/src/guide/configure/metadata-and-store-assets.md index c7119bc88..f44e33dbd 100644 --- a/src/guide/configure/metadata-and-store-assets.md +++ b/src/guide/configure/metadata-and-store-assets.md @@ -1,34 +1,37 @@ # Metadata and Ledger Storage Choices -Older Iroha documentation described a separate `Store` asset type for arbitrary -key-value data. The current data model does not use that asset type. Use the -following options instead. +Older Iroha documentation described a separate `Store` asset type for +arbitrary key-value data. The current data model does not use that asset +type. Use the following options instead. ## Metadata -Use [metadata](/blockchain/metadata.md) for small JSON fields that belong to a -ledger object: +Use [metadata](/blockchain/metadata.md) for small JSON fields that belong +to a ledger object: - display names and labels - integration IDs - small policy flags - hashes, URIs, CIDs, or SoraFS paths that point to larger payloads -Metadata is part of world state and is returned with the object that owns it. -Keep keys stable, values compact, and permissions explicit. 
Do not store large -documents, logs, or high-churn application state directly in metadata. +Metadata is part of world state and is returned with the object that owns +it. Keep keys stable, values compact, and permissions explicit. Do not +store large documents, logs, or high-churn application state directly in +metadata. ## Numeric Assets and NFTs -Use assets when the state is value-bearing: +Use [assets](/blockchain/assets.md) and [NFTs](/blockchain/nfts.md) when +the state is value-bearing: - numeric assets for fungible balances - NFTs for uniquely owned records -- RWA and other domain-specific objects when the active data model exposes them +- [RWAs](/blockchain/rwas.md) and other domain-specific objects when the + active data model exposes them -Assets and NFTs have their own IDs, lifecycle events, transfer behavior, and -permission checks. They are better than metadata when ownership, scarcity, -or transfer history matters. +Assets and NFTs have their own IDs, lifecycle events, transfer behavior, +and permission checks. They are better than metadata when ownership, +scarcity, or transfer history matters. ## Off-Chain Data @@ -40,8 +43,8 @@ reference on-chain, such as: - a SoraFS path or manifest reference - a compact commitment used by an application proof -This keeps the WSV small while still allowing applications to verify that the -off-chain payload matches the on-chain reference. +This keeps the WSV small while still allowing applications to verify that +the off-chain payload matches the on-chain reference. ## Choosing a Location @@ -50,7 +53,8 @@ Use this rule of thumb: - If it is a compact attribute of a ledger object, use metadata. - If it is value-bearing or transferable, model it as an asset, NFT, or domain-specific object. -- If it is large, high-churn, or application-private, store it outside the WSV - and put a verifiable reference on-chain. 
+- If it is large, high-churn, or application-private, store it outside the + WSV and put a verifiable reference on-chain. -For metadata permissions, see [Permission Tokens](/reference/permissions.md). +For metadata permissions, see +[Permission Tokens](/reference/permissions.md). diff --git a/src/guide/index.md b/src/guide/index.md new file mode 100644 index 000000000..924559920 --- /dev/null +++ b/src/guide/index.md @@ -0,0 +1,34 @@ +# Guide + +Use this section when you are building, operating, or integrating with +Iroha. Start with the SDK tutorials for a first client, then move to the +best practices and operator references before deploying against a shared +network. + +## Sections + +| Section | Use it for | +| ------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------- | +| [SDK Tutorials](/guide/tutorials/) | Language-specific client setup and sample applications | +| [Best Practices](/guide/best-practices/) | Production-oriented guidance grouped by development, data modeling, deployment, operations, security, and release readiness | +| [Configuration and Management](/guide/configure/overview.md) | Local peer configuration, genesis, client configuration, keys, and peer management | +| [Security](/guide/security/) | Key handling, operational security, VPNs, fraud monitoring, and permission hygiene | +| [Advanced Operations](/guide/advanced/metrics.md) | Metrics, performance checks, chaos testing, hot reload, and bare-metal operation | + +## Recommended Path + +1. [Install Iroha 3](/get-started/install-iroha-2.md) and + [launch a local network](/get-started/launch-iroha-2.md). +2. Pick an [SDK tutorial](/guide/tutorials/) and submit a small + transaction. +3. 
Review + [Application Development](/guide/best-practices/application-development.md) + and [Data Modeling](/guide/best-practices/data-modeling.md) before + shaping an application API. +4. Use [Network Deployment](/guide/best-practices/network-deployment.md), + [Operations](/guide/best-practices/operations.md), and + [Security and Access](/guide/best-practices/security-and-access.md) + before running a shared or production network. +5. Follow [Release Readiness](/guide/best-practices/release-readiness.md) + when promoting from local development to Taira, Minamoto, or another + live deployment. diff --git a/src/guide/security/fraud-monitoring.md b/src/guide/security/fraud-monitoring.md new file mode 100644 index 000000000..e523a5afd --- /dev/null +++ b/src/guide/security/fraud-monitoring.md @@ -0,0 +1,134 @@ +# Fraud Monitoring + +Fraud monitoring for an Iroha deployment is an operational control built around +ledger events, queries, permissions, and application context. Iroha records what +was submitted, accepted, rejected, and committed. Your monitoring system decides +which patterns are suspicious for your business process and routes those cases +to reviewers or automated response controls. + +Treat fraud monitoring as a separate service rather than logic embedded in a +validator. The service should subscribe to ledger activity, enrich it with +off-chain risk context, persist evidence, and submit response transactions only +through accounts that have explicit permissions. + +## Monitoring Model + +A useful monitoring pipeline has four stages: + +1. **Collect** ledger and operator signals from Torii event streams, queries, + and metrics. +2. **Enrich** events with off-chain context such as customer status, + counterparty lists, application session identifiers, expected limits, and + case IDs. +3. **Detect** suspicious behavior with deterministic rules, reviewer queues, or + risk scoring. +4. 
**Respond** by alerting operators, pausing application-side workflows, + revoking unnecessary permissions, or submitting compensating transactions + when your governance process allows it. + +Keep policy decisions outside consensus unless every validator must replay the +same decision. Runtime validation should enforce permissions and transaction +validity. Fraud monitoring should explain risk, preserve evidence, and help +operators act quickly. + +## Signals to Collect + +Start with narrow subscriptions and add broader streams only for investigation: + +| Signal | Source | Use | +| --- | --- | --- | +| Transaction status | Pipeline events | Detect repeated rejections, failed authorization attempts, and unusual submission patterns | +| Account lifecycle and metadata | Data events and account queries | Detect new accounts, alias changes, identity updates, and unexpected metadata edits | +| Asset balances and transfers | Asset data events and asset queries | Detect high-value movement, rapid fan-out, balance drains, and unusual counterparties | +| Roles and permissions | Role and permission queries, role data events | Detect privilege escalation, emergency grants, and stale high-risk access | +| Trigger and contract changes | Trigger, contract, and executor events | Detect new automation, changed execution paths, and suspicious upgrade activity | +| Configuration and peer changes | Configuration and peer events | Detect governance changes that affect validation, networking, or operator visibility | +| Operator health | `/metrics` and Sumeragi status routes | Separate suspicious user behavior from node overload, queue pressure, or network faults | + +Use [event filters](/blockchain/filters.md) to avoid processing the entire event +stream when a rule only needs accounts, assets, roles, or configuration changes. +For periodic reconciliation, combine the stream with paginated +[queries](/blockchain/queries.md) so the monitor can recover after downtime. 
+ +## Detection Rules + +Common rule families include: + +| Rule family | Example condition | Typical response | +| --- | --- | --- | +| Velocity | An account transfers more than the expected amount or count within a short window | Alert reviewers and pause application-side withdrawals for that account | +| Fan-out | Funds move from one account to many newly seen accounts | Require manual approval before allowing additional transfers | +| Balance drain | A large share of an account balance leaves shortly after a key, alias, or metadata change | Escalate as possible account takeover | +| Privilege escalation | A high-risk permission or role is granted outside a change window | Alert operators and review the grant transaction | +| Rejection burst | One signer or client produces repeated rejected transactions | Check for credential abuse, integration errors, or probing | +| Automation change | A trigger, contract, or executor-related object changes unexpectedly | Pause dependent workflows until the change is reviewed | +| Governance-sensitive change | Peer, configuration, or runtime state changes occur without an approved ticket | Compare against the governance record and incident process | + +Rules should be explicit about the evidence they require, the time window they +evaluate, the action they take, and the person or system that can close the +case. Thresholds that depend on customer risk, asset type, or jurisdiction +belong in your monitoring service configuration, not in ad hoc scripts. + +## Response Controls + +Design response actions before enabling alerts. 
A high-severity fraud case +should have a documented path from detection to containment: + +- notify the security, operations, and business owners responsible for the + affected domain or asset definition +- preserve the event cursor, block hash, transaction hash, authority, payload, + and query snapshot used by the detection rule +- pause application-side actions that are outside the ledger, such as checkout, + withdrawal, signing, bridge, or settlement workflows +- revoke roles or permissions that are no longer justified by the incident + response plan +- submit follow-up ledger transactions only when the active governance policy + and permission model allow them +- rotate keys when the evidence suggests signer compromise + +Avoid giving the monitoring service broad write access. Use a dedicated +technical account with the smallest set of permissions required for the response +actions it is allowed to perform. Human approval should remain part of any +workflow that can move assets, change permissions, or alter validator-facing +configuration. + +## Evidence and Retention + +Store monitoring evidence in an append-only system that is separate from the +validator data directory. Each alert should include: + +- event stream name and cursor +- block height or block hash when available +- transaction hash and authority +- affected account, domain, asset, role, trigger, or configuration ID +- raw event payload or a canonical hash of it +- query snapshots used to enrich the alert +- rule name, version, threshold, score, and reviewer decision + +Do not store sensitive investigation notes as public ledger metadata unless the +network's data governance policy explicitly allows it. If you need to link an +off-chain case to on-chain state, prefer a case identifier, signed attestation, +or hash commitment that does not expose private details. + +## Implementation Checklist + +- Enable the telemetry profile needed for `/metrics` and operator routes. 
+- Subscribe to Torii event streams with narrow filters for the objects you + monitor. +- Persist event cursors so the monitor can resume without gaps. +- Reconcile streams with paginated queries on a regular schedule. +- Keep risk thresholds and allow lists in version-controlled configuration. +- Test alert rules against historical blocks before enabling automated actions. +- Use dedicated technical accounts for response actions. +- Review role and permission grants on a recurring schedule. +- Include fraud-monitoring alerts in the incident response process. + +## Related Pages + +- [Events](/blockchain/events.md) +- [Filters](/blockchain/filters.md) +- [Queries](/blockchain/queries.md) +- [Permissions](/blockchain/permissions.md) +- [Performance and Metrics](/guide/advanced/metrics.md) +- [Torii endpoints](/reference/torii-endpoints.md) +- [Operational Security](/guide/security/operational-security.md) diff --git a/src/guide/security/index.md b/src/guide/security/index.md index 9e7fbe13b..3b8895d1c 100644 --- a/src/guide/security/index.md +++ b/src/guide/security/index.md @@ -10,10 +10,18 @@ In this section you can learn about various aspects of securing your Iroha netwo The core security principles that individuals and organizations can adopt to protect their data and decrease the chance of a breach and/or leak. +- [Virtual Private Networks](./vpn.md): + + How to use a VPN to restrict peer-to-peer, Torii, and operator access in private or consortium deployments. + - [Operational Security](./operational-security.md): Best practices for securing the day-to-day operations of your network, including access controls, monitoring, incident responses, the use of browsers, etc. +- [Fraud Monitoring](./fraud-monitoring.md): + + How to use ledger events, queries, permissions, and operational signals to detect suspicious activity and preserve response evidence. 
+ - [Password Security](./password-security.md): A deep-dive into password entropy, creating strong passwords and avoiding password vulnerabilities. diff --git a/src/guide/security/security-principles.md b/src/guide/security/security-principles.md index 8452cced9..e2fca7d73 100644 --- a/src/guide/security/security-principles.md +++ b/src/guide/security/security-principles.md @@ -4,7 +4,7 @@ Organisations and individual users need to work together to ensure secure intera ## General Security Principles -1. Use a Virtual Private Network (VPN): +1. Use a [Virtual Private Network](./vpn.md) (VPN): - Whenever accessing sensitive data or resources, especially over public networks, use a VPN to establish a secure connection that safeguards your information. diff --git a/src/guide/security/vpn.md b/src/guide/security/vpn.md new file mode 100644 index 000000000..cb7a90a81 --- /dev/null +++ b/src/guide/security/vpn.md @@ -0,0 +1,120 @@ +# Virtual Private Networks + +A VPN is a network control that +limits who can reach Iroha services. It is most useful for private and +consortium deployments where validators, application backends, and operators +should communicate over private addresses instead of open internet routes. + +A VPN does not replace Iroha peer keys, account keys, permissions, firewall +rules, monitoring, or secure key storage. Treat it as one layer in the +deployment boundary: the VPN narrows network reachability, while Iroha +configuration and governance decide which peers and accounts are trusted. 
+ +## When to Use a VPN + +Use a VPN when: + +- validators are operated by different organizations or in different hosting + environments +- Torii should only be reachable by application backends, operators, or trusted + clients +- metrics, logs, SSH, or other administration endpoints must stay on a private + operator network +- a test or staging network should resemble production access controls without + exposing public endpoints + +A VPN is not required for every deployment. Public networks may intentionally +expose Torii through a public gateway, load balancer, or reverse proxy. Even in +that case, keep validator peer-to-peer traffic and administration endpoints on a +restricted network whenever possible. + +::: tip + +A browser VPN only protects traffic from that browser. It does not protect +`irohad`, CLI, SDK, SSH, metrics, or backup traffic unless those processes are +routed through the same private network. + +::: + +## Deployment Pattern + +For a private validator mesh, give every validator a stable VPN address or +private DNS name. Configure peers so their advertised peer-to-peer addresses are +reachable from the other validators over that network: + +```toml +trusted_peers = [ + "PUBLIC_KEY_1@10.20.0.11:1337", + "PUBLIC_KEY_2@10.20.0.12:1337", + "PUBLIC_KEY_3@10.20.0.13:1337", + "PUBLIC_KEY_4@10.20.0.14:1337", +] + +[network] +address = "10.20.0.11:1337" +public_address = "10.20.0.11:1337" + +[torii] +address = "10.20.0.11:8080" +``` + +Use the address assigned to the current peer in `network.address` and +`network.public_address`. Each peer should list the same trusted peer identities, +but with addresses that are reachable from its own VPN route table. 
+ +Client and CLI configurations should point at a Torii endpoint reachable through +the VPN or through a controlled internal gateway: + +```toml +torii_url = "http://10.20.0.11:8080" +``` + +If Torii must be available outside the VPN, put it behind a reverse proxy or +load balancer that provides TLS, authentication, rate limiting, and logging. +Avoid exposing raw peer-to-peer ports or administration endpoints directly to the +public internet. + +## Firewall Rules + +Use host and cloud firewall rules even when a VPN is present: + +| Service | Recommended access | +| --- | --- | +| Peer-to-peer port | Other validator VPN addresses only | +| Torii | Application backends, operators, or trusted client VPN ranges | +| Metrics and health checks | Monitoring systems on the operator network | +| SSH and administration | Bastion host, privileged operator VPN range, or break-glass process | +| Backups and storage replication | Backup systems on a private network | + +Default-deny rules are easier to audit than broad allow rules. When a new peer +joins the network, update the VPN membership, firewall allow list, and Iroha +trusted peer configuration as one coordinated change. + +## Operational Checklist + +- Choose an audited and actively maintained VPN implementation, such as + WireGuard, IPsec, or an organization-approved managed private network. +- Use unique VPN credentials for each host and operator. Do not share VPN keys + between validators. +- Keep VPN credentials separate from Iroha private keys and genesis signing + material. +- Monitor VPN latency, packet loss, reconnects, and route changes. Consensus is + sensitive to sustained network instability. +- Test the effective MTU. Packet fragmentation can look like intermittent peer + or Torii failures. +- Document which VPN ranges are allowed to reach peer-to-peer, Torii, metrics, + SSH, and backup endpoints. +- Rotate VPN credentials when a host, operator account, or organization leaves + the network. 
+- Avoid a single VPN gateway as the only route between validators. Plan + redundant gateways or site-to-site routes for production networks. +- Include VPN failures in incident response drills so operators know when to + distinguish a network partition from an Iroha process failure. + +## Related Pages + +- [Security Principles](/guide/security/security-principles.md) +- [Operational Security](/guide/security/operational-security.md) +- [Keys for Network Deployment](/guide/configure/keys-for-network-deployment.md) +- [Peer Management](/guide/configure/peer-management.md) +- [Peer Configuration Reference](/reference/peer-config/index.md) diff --git a/src/guide/tutorials/index.md b/src/guide/tutorials/index.md index 921aaf36b..912bf0dae 100644 --- a/src/guide/tutorials/index.md +++ b/src/guide/tutorials/index.md @@ -19,12 +19,17 @@ from the upstream repository. complete client application reference. 5. Use [Embed Kaigi](/guide/tutorials/kaigi.md) when you want to add wallet-backed audio/video meetings to your own app. +6. Use [Musubi packages](/guide/tutorials/musubi.md) when you need reusable + Kotodama source libraries with pinned on-chain registry dependencies. ## Sample Apps We maintain sample applications for JavaScript desktop, Android, and iOS client -flows. The JavaScript demo is the most complete current reference; the mobile -demos are useful for point-app layout and historical context. +flows. The JavaScript demo is the most complete external reference. Swift/iOS +examples exist in the upstream workspace under `examples/ios/`, but their +checked-in project manifests are currently out of sync with the package API and +dependency layout. The external mobile point demos are useful mostly for layout +and historical context. 
- [Sample apps overview](/guide/tutorials/sample-apps.md) - [Embed Kaigi in a JavaScript app](/guide/tutorials/kaigi.md) @@ -38,6 +43,7 @@ All SDK pages here are derived from the current upstream workspace: - `javascript/iroha_js` - `java/iroha_android` - `IrohaSwift` +- `crates/musubi` When in doubt, prefer the README and package metadata in those directories over older Iroha 2-era examples. diff --git a/src/guide/tutorials/javascript.md b/src/guide/tutorials/javascript.md index b768d057d..c20899bcb 100644 --- a/src/guide/tutorials/javascript.md +++ b/src/guide/tutorials/javascript.md @@ -2,7 +2,7 @@ The current JavaScript SDK is published as `@iroha/iroha-js`. It is the Node.js-first SDK for Torii, Norito builders, signing, pagination, Connect -previews, and offline-envelope workflows. +previews, and offline readiness plus QR stream workflows. ## Install @@ -44,13 +44,56 @@ const keys = generateKeyPair(); console.log(keys.publicKey); ``` +## Try Taira Read-Only + +Use built-in `fetch` in Node.js 18+ to probe Taira before adding signing and +Norito transaction code: + +```js +const root = "https://taira.sora.org"; + +const status = await fetch(`${root}/status`).then((res) => res.json()); +console.log({ + blocks: status.blocks, + queueSize: status.queue_size, + peers: status.peers, +}); + +const domains = await fetch(`${root}/v1/domains?limit=5`).then((res) => + res.json(), +); +console.log(domains.items.map((domain) => domain.id)); + +const assets = await fetch(`${root}/v1/assets/definitions?limit=5`).then((res) => + res.json(), +); +for (const asset of assets.items) { + console.log(asset.id, asset.name, asset.total_quantity); +} +``` + +Save it as `taira-readonly.mjs`, then run it: + +```bash +node taira-readonly.mjs +``` + +Move to signed SDK calls only after these read-only checks work. Public Taira +can temporarily return a saturated queue or gateway error, so keep live-network +tests opt-in in CI. 
+ Useful subpath imports: ```js import { ToriiClient } from "@iroha/iroha-js/torii"; import { noritoEncodeInstruction } from "@iroha/iroha-js/norito"; import { generateKeyPair } from "@iroha/iroha-js/crypto"; -import { buildOfflineEnvelope } from "@iroha/iroha-js/offline"; +``` + +Offline QR stream helpers are exported from the package root: + +```js +import { OfflineQrStream } from "@iroha/iroha-js"; ``` For browser-only Connect bootstrap, use `@iroha/iroha-js/connect-browser` @@ -65,7 +108,7 @@ The SDK focuses on: - Ed25519 signing and key generation - pagination and retry helpers - Connect browser bootstrap helpers -- offline envelope tooling +- Offline V2 readiness and QR stream tooling ## Upstream References diff --git a/src/guide/tutorials/kaigi.md b/src/guide/tutorials/kaigi.md index 3c9afbcbb..1f57826ac 100644 --- a/src/guide/tutorials/kaigi.md +++ b/src/guide/tutorials/kaigi.md @@ -39,8 +39,9 @@ npm install npm run dev ``` -The demo loads `@iroha/iroha-js` from `../iroha/javascript/iroha_js`. If the -native binding changes, rebuild it: +Use the demo with +[`@iroha/iroha-js`](https://github.com/hyperledger-iroha/iroha/tree/i23-features/javascript/iroha_js) +from the Iroha `i23-features` branch. If the native binding changes, rebuild it: ```bash (cd node_modules/@iroha/iroha-js && npm run build:native) diff --git a/src/guide/tutorials/kotlin-java.md b/src/guide/tutorials/kotlin-java.md index 131416014..a1c9fc5f5 100644 --- a/src/guide/tutorials/kotlin-java.md +++ b/src/guide/tutorials/kotlin-java.md @@ -17,10 +17,13 @@ repositories { dependencies { implementation("org.hyperledger.iroha:iroha-android:") - implementation("org.hyperledger.iroha:iroha-android-jvm:") } ``` +The checked-in Android and JVM publication scripts currently use the +`iroha-android` artifact ID. There is no separate `iroha-android-jvm` artifact +ID in the source build. 
+ ## Local Sample Build ```bash @@ -44,6 +47,43 @@ System.out.println(formats.i105); System.out.println(formats.i105Warning); ``` +## Try Taira Read-Only + +For a plain JVM smoke test, use Java's built-in HTTP client before adding SDK +transaction signing: + +```java +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; + +public class TairaProbe { + public static void main(String[] args) throws Exception { + var client = HttpClient.newHttpClient(); + var request = HttpRequest.newBuilder() + .uri(URI.create("https://taira.sora.org/status")) + .GET() + .build(); + + var response = client.send(request, HttpResponse.BodyHandlers.ofString()); + System.out.println(response.statusCode()); + System.out.println(response.body()); + } +} +``` + +Save it as `TairaProbe.java`, then run it with JDK 11 or newer: + +```bash +javac TairaProbe.java +java TairaProbe +``` + +Extend the same pattern to read `https://taira.sora.org/v1/domains?limit=5` or +`https://taira.sora.org/v1/assets/definitions?limit=5`. Use the Android SDK +for key handling and signed transactions after the read-only route is reachable. + ## Current Coverage The Android/JVM SDK currently focuses on: diff --git a/src/guide/tutorials/musubi.md b/src/guide/tutorials/musubi.md new file mode 100644 index 000000000..5cf5c8921 --- /dev/null +++ b/src/guide/tutorials/musubi.md @@ -0,0 +1,212 @@ +# Musubi Kotodama Packages + +Musubi is the package manager for Kotodama source packages. It gives +developers a Cargo-like workflow for sharing composable Kotodama functions +while keeping package identity tied to SORA and Iroha namespaces instead of +a global first-come name table. 
+
+Use Musubi when you need to:
+
+- publish reusable Kotodama source libraries
+- pin exact transitive source dependencies in `Musubi.lock`
+- reconstruct dependency source from verified SoraFS archive commitments
+- connect a package namespace to dapp contract aliases in the same
+  namespace
+- inspect, publish, yank, or alias packages through the on-chain registry
+
+## Package Names
+
+Canonical package ids use:
+
+```text
+namespace/package
+```
+
+Exact release references use:
+
+```text
+namespace/package@version
+```
+
+There is no leading `@` before a namespace. The `@` separator is reserved
+for the version suffix.
+
+The namespace segment matches the suffix used by Kotodama dapp contract
+aliases:
+
+| Package id                | Related contract alias shape |
+| ------------------------- | ---------------------------- |
+| `universal/math`          | `router::universal`          |
+| `dex.universal/swap-core` | `router::dex.universal`      |
+
+Namespaces have either `<name>` or `<sub>.<name>` form. When a
+package has a dapp link, Musubi checks that every linked contract alias
+uses the same namespace suffix as the package.
+
+## Manifest
+
+A package starts with `Musubi.toml`:
+
+```toml
+[package]
+namespace = "dex.universal"
+name = "swap-core"
+version = "0.1.0"
+
+[dependencies.math]
+package = "std.universal/math"
+version = "^1.0.0"
+
+[exports]
+functions = ["quote"]
+
+[dapp]
+namespace = "dex.universal"
+contracts = ["router::dex.universal"]
+```
+
+Dependencies may use exact versions, caret requirements, tilde
+requirements, wildcards such as `1.*`, or comparator lists such as
+`>=1.0.0,<2.0.0`.
+
+`Musubi.lock` records the selected transitive graph from the on-chain
+registry. Each locked node stores its canonical package ref, selected
+requirement, SoraFS manifest digest, source archive hash, byte count, file
+count, exported functions, deterministic source archive plan, and
+dependency aliases. Short aliases are resolved before they enter the
+lockfile.
+ +## Local Workflow + +From the upstream Iroha workspace root, run Musubi through Cargo: + +```bash +cargo run -p musubi -- init --namespace dex.universal --name swap-core --dapp +cargo run -p musubi -- add std.universal/math --version '^1.0.0' --alias math +cargo run -p musubi -- install --config client.toml +cargo run -p musubi -- build src/lib.ko --manifest-out target/lib.contract.json +cargo run -p musubi -- pack \ + --car-out source.car \ + --sorafs-manifest-out manifest.norito \ + --source-plan-out source-plan.norito +``` + +Use `install --offline` to write an unresolved lockfile for exact-version +dependencies without querying a node. Use `install --locked` in CI to +reject a stale lockfile. + +`build` links cached dependency sources by rewriting calls such as +`math::add()` to deterministic internal Kotodama function names. It rejects +calls to functions that the dependency did not export. Musubi v1 libraries +are function-only: dependency sources that contain state declarations, +triggers, kotoba blocks, constants, or other non-function contract items +are rejected. + +## Fetching Source Archives + +Musubi can fetch missing dependency sources while resolving or later +through the cache subcommands: + +```bash +cargo run -p musubi -- install --config client.toml --fetch \ + --provider-payload math.payload + +cargo run -p musubi -- cache import math --source-root ../math +cargo run -p musubi -- cache fetch math --provider-payload math.payload +``` + +Live gateway fetches use one or more SoraFS gateway provider specs: + +```bash +cargo run -p musubi -- install --config client.toml --fetch \ + --gateway-provider 'name=hot-a,provider-id=1111111111111111111111111111111111111111111111111111111111111111,base-url=https://gw.example,stream-token=BASE64,package=math' +``` + +Provider payload files and gateway providers are mutually exclusive for one +fetch operation. 
If more than one locked package is missing, scope every
+gateway provider with `package=<name>`,
+`package=<alias>`, `package=<namespace/package>`, or
+`manifest=<64-hex SoraFS manifest digest>`.
+
+Gateway `base-url` and `privacy-url` values must use `https://` by default.
+Local test gateways can use `http://localhost`, `http://127.0.0.1`, or
+`http://[::1]` only with `--gateway-allow-insecure-localhost`. Stream
+tokens are runtime credentials and are not written into `Musubi.lock`.
+
+## Publishing
+
+`pack` computes the deterministic BLAKE3-256 source archive hash plus the
+source byte and file counts. When `--car-out`, `--sorafs-manifest-out`, or
+`--source-plan-out` is supplied, it also builds the deterministic SoraFS
+CAR payload, SoraFS manifest, and Musubi source archive plan from the same
+source file set.
+
+Use a dry run before publishing:
+
+```bash
+cargo run -p musubi -- publish --config client.toml --dry-run
+```
+
+Without `--dry-run`, `publish` writes default artifacts under
+`.musubi/dist/<namespace>/<package>/<version>/`, optionally uploads the
+manifest and payload through Torii's SoraFS storage-pin endpoint with
+`--upload`, registers the generated SoraFS pin, and submits
+`PublishMusubiRelease` through the configured Iroha client.
+ +Published releases must include: + +- a non-empty canonical source archive +- a deterministic source archive plan +- at least one exported Kotodama function +- dependency records that do not select yanked releases +- a dapp link, when present, whose contract aliases match the package + namespace + +## Registry Queries and Lifecycle + +Search and inspect the registry with: + +```bash +cargo run -p musubi -- search swap --config client.toml +cargo run -p musubi -- versions dex.universal/swap-core --config client.toml +cargo run -p musubi -- alias resolve swap --config client.toml +``` + +Yanking hides a release from new resolution, but keeps existing lockfiles +reproducible: + +```bash +cargo run -p musubi -- yank dex.universal/swap-core@0.1.0 \ + --reason "bad archive" \ + --config client.toml \ + --dry-run +``` + +Musubi avoids global name squatting by making `namespace/package` the +canonical package name. Publishing into a namespace must be authorized by +the same ownership or delegated permission model used for that Kotodama +dapp namespace. Curated global short aliases are separate from package +ownership: `SetMusubiShortAlias` requires the `CanSetMusubiShortAlias` +permission, and the target package must already have at least one active +release. + +## Iroha Surfaces + +Musubi uses first-class Iroha instructions and queries: + +| Surface | Purpose | +| ---------------------------- | -------------------------------------------------- | +| `PublishMusubiRelease` | Publish an immutable package release. | +| `YankMusubiRelease` | Mark an existing release as yanked. | +| `SetMusubiShortAlias` | Bind a curated global short alias to a package id. | +| `AssertMusubiReleaseExists` | Require a concrete package version to exist. | +| `FindMusubiReleaseByRef` | Fetch a release by exact package reference. | +| `FindMusubiPackageVersions` | List versions for a package id. | +| `FindMusubiPackageReleases` | List release summaries for a package id. 
| +| `SearchMusubiPackages` | Search package summaries by namespace and text. | +| `FindMusubiShortAliasByName` | Resolve a curated short alias. | + +Torii exposes the Musubi HTTP route family under `/v1/musubi/*`. +Agent-facing MCP tools are exposed as `iroha.musubi.*` aliases. See +[Torii endpoints](/reference/torii-endpoints.md) and +[query reference](/reference/queries.md) for the broader API map. diff --git a/src/guide/tutorials/python.md b/src/guide/tutorials/python.md index 257e7a41f..9fe2571ec 100644 --- a/src/guide/tutorials/python.md +++ b/src/guide/tutorials/python.md @@ -59,6 +59,7 @@ from iroha_python import ( client = create_torii_client("https://taira.sora.org") +# Public reads do not need an authority or private key. status = client.request_json("GET", "/status", expected_status=(200,)) accounts = client.list_accounts_typed(limit=5) @@ -91,9 +92,11 @@ TORII_URL = "https://taira.sora.org" CHAIN_ID = "taira" AUTH_TOKEN = None +# Replace these placeholders with the real signing keys for your accounts. alice_pair = Ed25519KeyPair.from_private_key(bytes.fromhex("")) bob_pair = Ed25519KeyPair.from_private_key(bytes.fromhex("")) +# The authority string must identify the same account as the private key. alice = "" bob = "" @@ -110,6 +113,7 @@ client = create_torii_client(TORII_URL, auth_token=AUTH_TOKEN) def submit(*instructions): + # This is the network boundary: build, sign, submit, and wait for status. return client.build_and_submit_transaction( chain_id=CHAIN_ID, authority=alice, @@ -136,6 +140,7 @@ Fee metadata belongs on the transaction, not on individual instructions. The ```python TX_METADATA = { + # Taira expects the fee asset definition in transaction metadata. "gas_asset_id": "6TEAJqbb8oEPmLncoNiMRbLEK6tw", } @@ -143,6 +148,7 @@ envelope, status = client.build_and_submit_transaction( chain_id=CHAIN_ID, authority=alice, private_key=alice_pair.private_key, + # Fee metadata is attached to the transaction, not the instruction. 
instructions=[Instruction.register_domain("wonderland")], metadata=TX_METADATA, wait=True, @@ -155,9 +161,11 @@ shape: ```python FEE_ASSET_DEFINITION = "6TEAJqbb8oEPmLncoNiMRbLEK6tw" +# The faucet returns the concrete account asset ID to check here. FEE_ASSET_ID = "" TX_METADATA = {"gas_asset_id": FEE_ASSET_DEFINITION} +# Fail before submitting if the signer cannot pay gas. fee_assets = client.list_account_assets_typed( alice, limit=10, @@ -175,6 +183,7 @@ when you build a transaction: ```python APP_METADATA = {"source": "python-docs"} +# Merge app metadata with required fee metadata before building the draft. metadata = {**TX_METADATA, **APP_METADATA} draft = TransactionDraft( @@ -197,13 +206,16 @@ These calls returned successfully against public Taira: ```python client = create_torii_client("https://taira.sora.org") +# Use raw requests for endpoints that do not need a typed wrapper. status = client.request_json("GET", "/status", expected_status=(200,)) parameters = client.request_json("GET", "/v1/parameters", expected_status=(200,)) +# Typed helpers parse pagination and records into dataclasses. accounts = client.list_accounts_typed(limit=1) domains = client.list_domains_typed(limit=1) definitions = client.query_asset_definitions_typed(limit=1) +# These calls inspect live node subsystems without mutating state. time_now = client.get_time_now_typed() time_status = client.get_time_status_typed() sumeragi = client.get_sumeragi_status_typed() @@ -251,6 +263,7 @@ the target domain. On a shared network such as Taira, use a domain and account namespace assigned to you. ```python +# Submit related registrations together when they share one authority. submit( Instruction.register_domain("wonderland", {"environment": "dev"}), Instruction.register_account(alice, {"display_name": "Alice"}), @@ -275,8 +288,13 @@ These calls use an existing asset ID. Register the asset definition first, then build the concrete asset ID for the account that owns the asset. 
```python +# Increase the account's asset balance. submit(Instruction.mint_asset_numeric(ROSE_ASSET, "100.00")) + +# Move part of the balance to another account. submit(Instruction.transfer_asset_numeric(ROSE_ASSET, "25.50", bob)) + +# Decrease the remaining balance. submit(Instruction.burn_asset_numeric(ROSE_ASSET, "10.00")) ``` @@ -286,6 +304,7 @@ Ownership transfers change who controls the domain, asset definition, or NFT. Use the current owner as the transaction authority. ```python +# The first argument is the current owner; the last is the new owner. submit(Instruction.transfer_domain(alice, "wonderland", bob)) submit(Instruction.transfer_asset_definition(alice, ROSE_DEFINITION, bob)) submit(Instruction.transfer_nft(alice, BADGE_NFT, bob)) @@ -297,6 +316,7 @@ Metadata values must be JSON-serializable. When you use `TransactionDraft`, the authority in `TransactionConfig` becomes the default target account. ```python +# Values are encoded as JSON metadata under the target account. submit( Instruction.set_account_key_value( alice, @@ -305,6 +325,7 @@ submit( ) ) +# Removing the key deletes the metadata entry from the account. submit(Instruction.remove_account_key_value(alice, "profile")) ``` @@ -314,68 +335,137 @@ The high-level draft helper targets the transaction authority by default: draft = TransactionDraft( TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) ) +# With a draft, account metadata methods default to the draft authority. draft.set_account_key_value("nickname", "Queen Alice") draft.remove_account_key_value("nickname") ``` ### Real-World Assets -RWA helpers use JSON-serializable payloads for asset-specific controls and -metadata: +RWA helpers use JSON-serializable payloads for asset-specific metadata, +provenance, and controller policy. `register_rwa` does not accept an `id` or +`owner`: the runtime generates the `RwaId`, and the transaction authority +becomes the initial owner. 
```python draft = TransactionDraft( TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) ) +# Register the lot in a domain. Store business identifiers in primary_reference +# or metadata, then query the generated RWA ID after the transaction commits. draft.register_rwa( { - "id": "warehouse-receipt-001#wonderland", - "owner": alice, + "domain": "commodities.universal", "quantity": "100", - "metadata": {"commodity": "copper", "warehouse": "DXB-01"}, + "spec": {"scale": 0}, + "primary_reference": "warehouse-receipt-001", + "status": "active", + "metadata": { + "commodity": "copper", + "warehouse": "DXB-01", + }, + "parents": [], + "controls": { + "controller_accounts": [alice], + "controller_roles": [], + "freeze_enabled": True, + "hold_enabled": True, + "force_transfer_enabled": True, + "redeem_enabled": True, + }, } ) +``` + +After the registration transaction commits, use `FindRwas`, `/v1/rwas`, an RWA +event, or the explorer route set to discover the generated ID: + +```python +page = client.list_rwas_typed(limit=20, offset=0) + +for lot in page.items: + print(lot.id) +``` + +Subsequent operations use the generated `hash$domain` ID: + +```python +registered_rwa_id = ( + "0123456789abcdef0123456789abcdef" + "0123456789abcdef0123456789abcdef$commodities.universal" +) + +draft = TransactionDraft( + TransactionConfig(chain_id=CHAIN_ID, authority=alice, metadata=TX_METADATA) +) + +# Transfer, hold, release, freeze, and redeem model the lot lifecycle. 
draft.transfer_rwa( - "warehouse-receipt-001#wonderland", + registered_rwa_id, quantity="10", destination=bob, ) -draft.hold_rwa("warehouse-receipt-001#wonderland", quantity="5") -draft.release_rwa("warehouse-receipt-001#wonderland", quantity="5") -draft.freeze_rwa("warehouse-receipt-001#wonderland") -draft.unfreeze_rwa("warehouse-receipt-001#wonderland") -draft.redeem_rwa("warehouse-receipt-001#wonderland", quantity="1") -draft.set_rwa_key_value("warehouse-receipt-001#wonderland", "auditor", "alice") -draft.remove_rwa_key_value("warehouse-receipt-001#wonderland", "auditor") +draft.hold_rwa(registered_rwa_id, quantity="5") +draft.release_rwa(registered_rwa_id, quantity="5") +draft.freeze_rwa(registered_rwa_id) +draft.unfreeze_rwa(registered_rwa_id) +draft.redeem_rwa(registered_rwa_id, quantity="1") + +# RWA metadata and controls are separate from account metadata. +draft.set_rwa_key_value(registered_rwa_id, "auditor", "alice") +draft.remove_rwa_key_value(registered_rwa_id, "auditor") draft.set_rwa_controls( - "warehouse-receipt-001#wonderland", - {"transfers": {"allow_list": [alice, bob]}}, + registered_rwa_id, + { + "controller_accounts": [alice], + "controller_roles": [], + "freeze_enabled": True, + "hold_enabled": True, + "force_transfer_enabled": True, + "redeem_enabled": True, + }, ) + +# Merge consumes quantities from parent lots with the same domain and spec. The +# child lot gets a generated ID. 
draft.merge_rwas( { - "sources": [ - "warehouse-receipt-001#wonderland", - "warehouse-receipt-002#wonderland", + "parents": [ + {"rwa": registered_rwa_id, "quantity": "40"}, + { + "rwa": "fedcba9876543210fedcba9876543210" + "fedcba9876543210fedcba9876543210$commodities.universal", + "quantity": "60", + }, ], - "destination": "warehouse-receipt-003#wonderland", + "primary_reference": "warehouse-receipt-003", + "status": "merged", + "metadata": {"merge_reason": "same custodian and quality grade"}, } ) + +# Force transfer requires a configured controller and force_transfer_enabled. draft.force_transfer_rwa( - "warehouse-receipt-001#wonderland", + registered_rwa_id, quantity="1", destination=bob, ) ``` +Full transfers can change `owned_by` on the existing lot. Partial transfers and +merges create generated child lots. + ### Triggers Use trigger registration helpers when the executable is another instruction sequence: ```python +# The trigger executable is just another instruction payload. reward = Instruction.mint_asset_numeric(ROSE_ASSET, "1") +# Time triggers run on a schedule once registered. register_hourly = Instruction.register_time_trigger( "hourly_reward", alice, @@ -387,6 +477,7 @@ register_hourly = Instruction.register_time_trigger( ) submit(register_hourly) +# Precommit triggers run during the transaction pipeline. register_precommit = Instruction.register_precommit_trigger( "precommit_reward", alice, @@ -396,6 +487,7 @@ register_precommit = Instruction.register_precommit_trigger( ) submit(register_precommit) +# Trigger execution and repetition changes are also transactions. 
submit(Instruction.execute_trigger("hourly_reward", args={"reason": "manual"})) submit(Instruction.mint_trigger_repetitions("hourly_reward", 5)) submit(Instruction.burn_trigger_repetitions("hourly_reward", 1)) @@ -405,6 +497,7 @@ submit(Instruction.unregister_trigger("hourly_reward")) Torii also exposes REST helpers for trigger inventory: ```python +# Inventory helpers are reads; they do not unregister or execute triggers. registered = client.list_triggers_typed(limit=20) for trigger in registered.items: print(trigger.id, trigger.authority) @@ -434,11 +527,13 @@ from iroha_python import ( config = TransactionConfig( chain_id=CHAIN_ID, authority=alice, + # Keep repo and settlement examples bounded by a short TTL. ttl_ms=120_000, metadata=TX_METADATA, ) draft = TransactionDraft(config) +# Each repo leg describes one side of the financing agreement. cash = RepoCashLeg(asset_definition_id="usd#wonderland", quantity="1000") collateral = RepoCollateralLeg( asset_definition_id="bond#wonderland", @@ -447,6 +542,7 @@ collateral = RepoCollateralLeg( ) governance = RepoGovernance(haircut_bps=1500, margin_frequency_secs=86_400) +# Domain-specific draft methods append the corresponding instructions. draft.repo_initiate( agreement_id="daily_repo", initiator=alice, @@ -467,6 +563,7 @@ draft.repo_unwind( settlement_timestamp_ms=1_704_086_400_000, ) +# DVP/PVP settlement plans encode ordering and atomicity for both legs. delivery = SettlementLeg( asset_definition_id="bond#wonderland", quantity="10", @@ -526,6 +623,7 @@ instruction_box_json = """ instruction = Instruction.from_json(instruction_box_json) submit(instruction) +# Use TransactionBuilder when you need lower-level control than TransactionDraft. 
builder = TransactionBuilder(CHAIN_ID, alice) builder.set_metadata(TX_METADATA) builder.add_instruction_json(instruction_box_json) @@ -537,6 +635,7 @@ For generated or opaque instructions, round-trip through JSON before storing fixtures: ```python +# Round trips are useful for validating fixtures generated by another tool. payload = Instruction.mint_asset_numeric(ROSE_ASSET, "1").to_json() same_instruction = Instruction.from_json(payload) print(same_instruction.as_dict()) @@ -552,12 +651,14 @@ signing. A draft lets you keep transaction-level settings such as `ttl_ms`, config = TransactionConfig( chain_id=CHAIN_ID, authority=alice, + # TTL and nonce are transaction-level properties shared by all instructions. ttl_ms=120_000, nonce=1, metadata={**TX_METADATA, "source": "python-docs"}, ) draft = TransactionDraft(config) +# Draft methods append instructions but do not submit anything yet. draft.register_domain("wonderland", metadata={"owner": "docs"}) draft.register_account(bob, metadata={"role": "user"}) draft.register_asset_definition_numeric( @@ -569,6 +670,7 @@ draft.register_asset_definition_numeric( draft.mint_asset_numeric(ROSE_ASSET, "100") draft.transfer_asset_numeric(ROSE_ASSET, "25", destination=bob) +# Signing freezes the draft into an envelope ready for Torii. envelope = draft.sign_with_keypair(alice_pair) receipt = client.submit_transaction_envelope(envelope) status = client.wait_for_transaction_status(envelope.hash_hex(), timeout=30) @@ -581,6 +683,7 @@ Export a deterministic manifest for review, auditing, or wallet handoff: import json from pathlib import Path +# Manifests are review artifacts; they are not submitted by themselves. 
manifest = draft.to_manifest_dict(include_creation_time=True) print(json.dumps(manifest, indent=2)) @@ -593,6 +696,7 @@ Path("transaction_manifest.json").write_text( Attach a lane privacy proof before signing when the target lane requires it: ```python +# Attach the proof before signing so it is covered by the transaction hash. draft.add_lane_privacy_merkle_proof( commitment_id=7, leaf=bytes.fromhex("aa" * 32), @@ -612,6 +716,7 @@ are the easiest way to start because the SDK parses pagination and common record fields for you: ```python +# Typed pages expose `.items` plus pagination metadata such as `.total`. accounts = client.list_accounts_typed(limit=25, sort="id") for account in accounts.items: print(account.id, account.metadata) @@ -625,6 +730,7 @@ Use the generic request helpers when a Torii endpoint does not yet have a typed wrapper: ```python +# Drop to raw JSON when you need an endpoint before a typed helper exists. payload = client.request_json("GET", "/v1/parameters", expected_status=(200,)) metrics = client.get_metrics(as_text=True) ``` @@ -635,6 +741,7 @@ explorer or raw endpoint returns an ID that the SDK rejects, resolve it to a canonical account ID before calling these helpers: ```python +# These helpers expect a canonical account ID or an alias the SDK can normalize. assets = client.list_account_assets_typed(alice, limit=10) transactions = client.query_account_transactions_typed(alice, limit=5) permissions = client.list_account_permissions_typed(alice, limit=20) @@ -653,11 +760,13 @@ enabled and active. ```python from iroha_python import DataEventFilter, EventCursor +# Narrow the stream to proof events with the expected backend and proof hash. proof_filter = DataEventFilter.proof( backend="halo2/ipa", proof_hash_hex="deadbeef" * 8, ) +# Persist the latest SSE id so a reconnect can resume from the same point. 
cursor = EventCursor() for event in client.stream_events( filter=proof_filter, @@ -693,14 +802,17 @@ from iroha_python import ( ) from iroha_python.address import AccountAddress +# Key derivation and signing are local; no network call is made here. ed_pair = derive_keypair_from_seed(b"alice", ED25519_ALGORITHM) signature = ed_pair.sign(b"payload") assert verify(ED25519_ALGORITHM, ed_pair.public_key, b"payload", signature) +# Account addresses combine a domain and public key into canonical I105 form. address = AccountAddress.from_account(domain="wonderland", public_key=ed_pair.public_key) print(address.canonical_hex()) print(address.to_i105(0x02F1)) +# Confidential key helpers derive local viewing/spending material. confidential = derive_confidential_keyset_from_hex("01" * 32) print(confidential.as_hex()) print(hash_blake2b_32(b"payload").hex()) @@ -726,17 +838,21 @@ from iroha_python import ( message = b"iroha multi-algorithm signing" +# Iterate the algorithms compiled into the installed native extension. for algorithm in supported_crypto_algorithms(): keypair = derive_keypair_from_seed(f"docs:{algorithm}".encode(), algorithm) signature = keypair.sign(message) + # Both the object method and the generic helper verify the same signature. assert keypair.verify(message, signature) assert verify(algorithm, keypair.public_key, message, signature) + # Loading a private key should reconstruct the same public key. loaded = load_keypair(keypair.private_key, algorithm) assert loaded.public_key == keypair.public_key assert sign(algorithm, loaded.private_key, message) != b"" + # Prefixed multihashes carry the algorithm label with the key bytes. 
public_multihash = public_key_multihash( algorithm, keypair.public_key, @@ -752,6 +868,7 @@ for algorithm in supported_crypto_algorithms(): private_algorithm, private_key = parse_private_key_multihash(private_multihash) restored = CryptoKeyPair.from_private_key_multihash(private_multihash) + # Round-trip checks catch mismatched algorithm labels or key encodings. assert public_algorithm == algorithm assert public_key == keypair.public_key assert private_algorithm == algorithm @@ -779,8 +896,10 @@ from iroha_python import ( capabilities = client.get_node_capabilities_typed() sm = capabilities.crypto.sm if capabilities.crypto else None +# Use the node's default SM2 distinguishing ID when the node advertises one. distid = sm.sm2_distid_default if sm else SM2_DEFAULT_DISTINGUISHED_ID +# The SM2-specific helper accepts the distinguishing ID explicitly. pair = derive_sm2_keypair_from_seed(bytes.fromhex("11" * 32), distid=distid) message = b"iroha-sm2-example" signature = pair.sign(message) @@ -789,6 +908,7 @@ assert pair.verify(message, signature) assert verify_sm2(pair.public_key, message, signature, distid=distid) assert sign_sm2(pair.private_key, message, distid=distid) != b"" +# The generic API works when you only need the canonical `sm2` label. generic_pair = derive_keypair_from_seed(bytes.fromhex("22" * 32), SM2_ALGORITHM) generic_signature = sign(SM2_ALGORITHM, generic_pair.private_key, message) assert verify(SM2_ALGORITHM, generic_pair.public_key, message, generic_signature) @@ -804,6 +924,7 @@ status, which is useful when deciding whether to enable SM2-specific flows: ```python capabilities = client.get_node_capabilities_typed() +# `enabled` is the submit-time policy flag, not just local SDK support. 
if capabilities.crypto and capabilities.crypto.sm.enabled: sm = capabilities.crypto.sm print(sm.default_hash) @@ -840,6 +961,7 @@ from iroha_python.address import AccountAddress CHAIN_DISCRIMINANT = 0x02F1 message = b"iroha gost and post-quantum example" +# Crypto helpers use canonical labels; account addresses use compact aliases. GOST_ADDRESS_ALIASES = { GOST_3410_2012_256_PARAMSET_A_ALGORITHM: "gost-256-a", GOST_3410_2012_256_PARAMSET_B_ALGORITHM: "gost-256-b", @@ -848,6 +970,7 @@ GOST_ADDRESS_ALIASES = { GOST_3410_2012_512_PARAMSET_B_ALGORITHM: "gost-512-b", } +# Derive and verify one local keypair for every GOST parameter set. for crypto_algorithm, address_algorithm in GOST_ADDRESS_ALIASES.items(): keypair = derive_keypair_from_seed( f"docs:{crypto_algorithm}".encode(), @@ -868,6 +991,7 @@ for crypto_algorithm, address_algorithm in GOST_ADDRESS_ALIASES.items(): print(address.to_i105(CHAIN_DISCRIMINANT)) print(keypair.prefixed_public_key_multihash) +# ML-DSA follows the same generic signing and address flow. mldsa_keypair = derive_keypair_from_seed(b"docs:ml-dsa", ML_DSA_ALGORITHM) mldsa_signature = mldsa_keypair.sign(message) assert verify(ML_DSA_ALGORITHM, mldsa_keypair.public_key, message, mldsa_signature) @@ -892,6 +1016,7 @@ capabilities = client.request_json( ) crypto = capabilities.get("crypto", {}) sm = crypto.get("sm", {}) +# Nodes advertise the signing algorithms they will accept for transactions. allowed = set(sm.get("allowed_signing", [])) GOST_ALGORITHMS = { @@ -902,6 +1027,7 @@ GOST_ALGORITHMS = { "gost3410-2012-512-paramset-b", } +# Local support is not enough; submit only when the node advertises support. 
supports_gost = bool(allowed & GOST_ALGORITHMS) supports_post_quantum = "ml-dsa" in allowed supports_sm2 = "sm2" in allowed and bool(sm.get("enabled", False)) @@ -927,62 +1053,39 @@ from iroha_python import create_torii_client, resolve_torii_client_config with open("iroha_config.json", "r", encoding="utf-8") as handle: raw_config = json.load(handle) +# Override only the fields that vary by environment. resolved = resolve_torii_client_config( config=raw_config, overrides={"timeout_ms": 2_000, "max_retries": 5}, ) +# Pass the resolved config into the same client constructor used elsewhere. client = create_torii_client( raw_config.get("torii", {}).get("address", TORII_URL), resolved_config=resolved, ) ``` -## Offline Allowances +## Offline V2 Readiness -Offline allowance endpoints issue and register wallet certificates. If the -certificate is already signed, call `register_offline_allowance` or -`renew_offline_allowance` directly instead of the top-up helpers. These are -mutating wallet-service calls and require a signing account. +The current Python SDK exposes Torii's Offline V2 readiness endpoint. It does +not expose high-level offline allowance registration or renewal helpers. 
```python -draft_certificate = { - "controller": alice, - "allowance": { - "asset": "usd#wonderland", - "amount": "10", - "commitment": [1, 2], - }, - "spend_public_key": "ed0120deadbeef", - "attestation_report": [3, 4], - "issued_at_ms": 100, - "expires_at_ms": 200, - "policy": {"max_balance": "10", "max_tx_value": "5", "expires_at_ms": 200}, - "metadata": {}, -} - -top_up = client.top_up_offline_allowance( - certificate=draft_certificate, - authority=alice, - private_key=alice_pair.private_key_hex, -) -print(top_up.registration.certificate_id_hex) - -renewed = client.top_up_offline_allowance_renewal( - certificate_id_hex=top_up.registration.certificate_id_hex, - certificate=draft_certificate, - authority=alice, - private_key=alice_pair.private_key_hex, -) -print(renewed.registration.certificate_id_hex) +readiness = client.get_offline_v2_readiness() +print(readiness.offline_note_v2) +print(readiness.offline_one_use_keys) +print(readiness.offline_fountain_qr_v1) ``` ## Subscriptions -Subscription helpers are also mutating service calls. Use IDs and assets that -exist on the network you target. +Subscription helpers are mutating service calls inherited from the shared Torii +client used by `iroha_python.ToriiClient`. Use IDs and assets that exist on the +network you target. ```python +# The plan defines billing cadence, retry policy, and usage pricing. usage_plan = { "provider": alice, "billing": { @@ -1005,18 +1108,23 @@ usage_plan = { }, } +# The provider signs plan creation. client.create_subscription_plan( authority=alice, private_key=alice_pair.private_key_hex, plan_id="compute#wonderland", plan=usage_plan, ) + +# The subscriber signs subscription creation. client.create_subscription( authority=bob, private_key=bob_pair.private_key_hex, subscription_id="sub-001", plan_id="compute#wonderland", ) + +# Usage is recorded by the provider and then charged on demand. 
client.record_subscription_usage( "sub-001", authority=alice, @@ -1039,6 +1147,7 @@ Taira: ```python from iroha_python.connect import ConnectUri, build_connect_uri, parse_connect_uri +# Connect URIs are what an app hands to a wallet to start a session. uri = build_connect_uri( ConnectUri( sid="base64url-session-id", @@ -1047,6 +1156,7 @@ uri = build_connect_uri( ) ) parsed = parse_connect_uri(uri) +# Status tells you whether the node currently exposes Connect. status = client.get_connect_status_typed() assert parsed.chain_id == "taira" @@ -1068,12 +1178,14 @@ from iroha_python import ( generate_connect_keypair, ) +# The app keypair is separate from the account key used for transactions. connect_pair = generate_connect_keypair() info = client.create_connect_session_info( {"role": "app", "sid": connect_pair.public_key.hex()} ) print(info.app_uri, info.wallet_token, info.expires_at) +# Control frames negotiate permissions before encrypted messages are sent. frame = ConnectFrame( sid=bytes.fromhex("01" * 32), direction=ConnectDirection.APP_TO_WALLET, @@ -1087,6 +1199,7 @@ frame = ConnectFrame( payload = encode_connect_frame(frame) assert decode_connect_frame(payload) == frame +# Closing the control channel is explicit and carries a reason code. client.send_connect_control_frame( "base64url-session-id", ConnectControlClose(role="App", code=4100, reason="finished", retryable=False), @@ -1103,6 +1216,7 @@ from iroha_python import ( ConnectSignRequestRawPayload, ) +# Derive symmetric session keys from both parties' keys and the session ID. keys = ConnectSessionKeys.derive( local_private_key=bytes.fromhex("11" * 32), peer_public_key=bytes.fromhex("22" * 32), @@ -1112,6 +1226,7 @@ session = ConnectSession( sid=bytes.fromhex("33" * 32), keys=keys, ) +# Encrypt application payloads after the session is approved. 
encrypted = session.encrypt_app_to_wallet( ConnectSignRequestRawPayload(domain_tag="SIGN", payload=b"hash") ) @@ -1126,6 +1241,7 @@ These read-only calls returned successfully against public Taira: ```python client = create_torii_client("https://taira.sora.org") +# Governance reads return either current settings or typed not-found wrappers. protected = client.get_protected_namespaces() referendum = client.get_governance_referendum_typed("ref-1") tally = client.get_governance_tally_typed("ref-1") @@ -1135,6 +1251,7 @@ unlock_stats = client.get_governance_unlock_stats_typed() print(protected, referendum.found) print(tally.approve, list(locks.locks), unlock_stats.expired_locks_now) +# Runtime reads expose the active ABI and any pending upgrade records. abi = client.get_runtime_abi_active_typed() abi_hash = client.get_runtime_abi_hash_typed() runtime_metrics = client.get_runtime_metrics_typed() @@ -1153,9 +1270,10 @@ account and token are authorized: admin = create_torii_client( TORII_URL, auth_token="admin-token", - api_token="torii-token", +    api_token="torii-token", ) +# Propose creates the upgrade instructions; activation/cancel are operator actions. upgrade = admin.propose_runtime_upgrade( { "name": "Refresh runtime provenance", @@ -1177,9 +1295,11 @@ admin.cancel_runtime_upgrade("feedface" * 4) ## Status, Consensus, and Network Telemetry ```python +# `/status` is the public node snapshot endpoint on Taira. status = client.request_json("GET", "/status", expected_status=(200,)) print(status["blocks"], status["txs_approved"]) +# Sumeragi and time endpoints expose consensus and clock diagnostics. sumeragi = client.get_sumeragi_status_typed() print(sumeragi.highest_qc.height, sumeragi.tx_queue.saturated) @@ -1197,9 +1317,11 @@ Nexus/SORA endpoints. Treat empty lists as a valid response: public Taira may have the route enabled without data for the sample manifest or UAID. ```python +# SoraFS status queries are reads scoped by manifest and status.
por_status = client.get_sorafs_por_status(manifest_hex="ab" * 32, status="verified") print(len(por_status)) +# UAID helpers inspect wallet/data-space bindings for one identifier. uaid = "aabb" * 16 bindings = client.get_uaid_bindings_typed(uaid) manifests = client.list_space_directory_manifests_typed( @@ -1209,6 +1331,7 @@ manifests = client.list_space_directory_manifests_typed( ) print(len(bindings.dataspaces), len(manifests.manifests)) +# Kaigi health summarizes relay availability when the route is enabled. health = client.get_kaigi_relays_health_typed() print(health.healthy_total, health.failovers_total) ``` @@ -1222,6 +1345,7 @@ transaction template: ```python from iroha_python import NoritoRpcClient, NoritoRpcConfig +# Use the binary RPC client for endpoints that expect Norito bytes. with NoritoRpcClient(NoritoRpcConfig(TORII_URL, timeout=5.0)) as rpc: response_bytes = rpc.call("/v1/transaction", envelope.signed_transaction_versioned) print(len(response_bytes)) @@ -1233,6 +1357,7 @@ can fall back to scalar implementations: ```python from iroha_python import bn254_add_cuda, cuda_available, poseidon2_cuda +# Always probe CUDA availability before calling optional GPU helpers. 
if cuda_available(): print(poseidon2_cuda(1, 2)) print(bn254_add_cuda((1, 0, 0, 0), (2, 0, 0, 0))) @@ -1244,9 +1369,10 @@ The Python SDK already includes helpers for: - Torii submission, status, query, and admin flows - typed instruction builders for common ISI and domain-specific extensions -- transaction drafts, manifests, signing, and offline envelope workflows +- transaction drafts, manifests, signing, and signed transaction envelope + workflows - streaming events, filters, and resumable cursors -- offline allowances and subscriptions +- Offline V2 readiness and Torii subscription helpers - account address, all-algorithm signing helpers, multihash round trips, SM2, GOST, ML-DSA, BLS, and confidential key handling - Connect URIs, sessions, frames, encryption helpers, and registry admin diff --git a/src/guide/tutorials/rust.md b/src/guide/tutorials/rust.md index 21912ea31..b632aa9be 100644 --- a/src/guide/tutorials/rust.md +++ b/src/guide/tutorials/rust.md @@ -17,24 +17,48 @@ For the current state of the project, start with the reference CLI and the workspace itself: ```bash -git clone https://github.com/hyperledger-iroha/iroha.git +git clone --branch i23-features https://github.com/hyperledger-iroha/iroha.git cd iroha cargo build --workspace ``` -Run the reference client against a generated local network: +Run the reference client with the checked-in default client config: ```bash -cargo run --bin iroha -- --config ./localnet/client.toml ledger domain list all +cargo run --bin iroha -- --config ./defaults/client.toml ledger domain list all ``` +## Try Taira Read-Only + +From the same workspace checkout, try the public Taira diagnostics helper: + +```bash +cargo run --bin iroha -- taira doctor \ + --public-root https://taira.sora.org \ + --json +``` + +For route-level checks, use Torii's JSON API directly: + +```bash +curl -fsS https://taira.sora.org/status \ + | jq '{blocks, txs_approved, txs_rejected, queue_size, peers}' + +curl -fsS 
'https://taira.sora.org/v1/assets/definitions?limit=5' \ + | jq -r '.items[] | [.id, .name, .total_quantity] | @tsv' +``` + +After you create `taira.client.toml`, the same binary can run signed canary +commands against Taira. Keep those separate from ordinary unit tests because +they require a faucet-funded account and live testnet availability. + ## Using the Rust Client Crate -When developing inside the monorepo, depend on the local crate directly: +For the current source state, depend on the `i23-features` branch directly: ```toml [dependencies] -iroha = { path = "../iroha/crates/iroha" } +iroha = { git = "https://github.com/hyperledger-iroha/iroha.git", branch = "i23-features", package = "iroha" } ``` If you need the most complete examples of how the Rust surfaces are used in diff --git a/src/guide/tutorials/sample-apps.md b/src/guide/tutorials/sample-apps.md index 1329928ce..3bf44cc60 100644 --- a/src/guide/tutorials/sample-apps.md +++ b/src/guide/tutorials/sample-apps.md @@ -1,8 +1,8 @@ # Sample Apps -These repositories show complete client applications built around Iroha. Use -them when you want to see SDK setup, account flows, signing, Torii calls, and UI -integration in a larger codebase than the minimal tutorials. +These repositories show complete client applications built around Iroha. +Use them when you want to see SDK setup, account flows, signing, Torii +calls, and UI integration in a larger codebase than the minimal tutorials. The sample apps are examples, not production wallet templates. Review their dependency versions, network assumptions, and key-storage choices before @@ -10,17 +10,21 @@ copying code into a real product. 
## Available Apps -| App | Platform | What it demonstrates | Status | -| --- | --- | --- | --- | -| [Iroha Demo JavaScript](https://github.com/soramitsu/iroha-demo-javascript) | Desktop app with Electron, Vue 3, Pinia, and Vite | Direct Torii connectivity through `@iroha/iroha-js`, local transaction signing, wallet balances and history, send/receive QR flows, staking, governance, explorer, and live E2E checks | Most complete current sample | -| [Iroha Demo Android](https://github.com/soramitsu/iroha-demo-android) | Android point app | Native Android project structure for a point-transfer style mobile application | Older mobile demo; use the [Android, Kotlin, and Java SDK page](/guide/tutorials/kotlin-java.md) for current SDK setup | -| [Iroha Demo iOS](https://github.com/soramitsu/iroha-demo-ios) | iOS point app | Xcode/CocoaPods project structure for a point-transfer style mobile application | The repository README marks this demo as out of date with the latest Iroha | +| App | Platform | What it demonstrates | Status | +| ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Iroha Demo JavaScript](https://github.com/soramitsu/iroha-demo-javascript) | Desktop app with Electron, Vue 3, Pinia, and Vite | Direct Torii connectivity through `@iroha/iroha-js`, local transaction signing, wallet balances and history, send/receive QR flows, staking, governance, explorer, and live E2E checks | Most complete current sample | +| [Iroha Demo 
Android](https://github.com/soramitsu/iroha-demo-android) | Android point app | Native Android project structure for a point-transfer style mobile application | Older mobile demo; use the [Android, Kotlin, and Java SDK page](/guide/tutorials/kotlin-java.md) for current SDK setup | +| [`examples/ios/ConnectMinimalApp`](https://github.com/hyperledger-iroha/iroha/tree/i23-features/examples/ios/ConnectMinimalApp) | SwiftPM executable | `NoritoNativeBridge` availability check, `ConnectSession` event stream intent, and diagnostics export intent | Iroha repository harness, but currently out of sync: the package path resolves to `examples/IrohaSwift`, and source references Connect helpers absent from `IrohaSwift/Sources` | +| [`examples/ios/NoritoDemo`](https://github.com/hyperledger-iroha/iroha/tree/i23-features/examples/ios/NoritoDemo) | SwiftUI iOS template | XcodeGen template with conditional `NoritoBridge` linkage and Iroha Connect UI code | Iroha repository template, but the project manifest does not declare the `IrohaSwift` package dependency imported by the sources | +| [`examples/ios/NoritoDemoXcode`](https://github.com/hyperledger-iroha/iroha/tree/i23-features/examples/ios/NoritoDemoXcode) | SwiftUI Xcode project | Generated Xcode project with Swift sources importing `IrohaSwift` and conditionally using `NoritoBridgeKit` | Iroha repository demo, but the checked-in Xcode project does not declare the `IrohaSwift` package dependency imported by the sources | +| [Iroha Demo iOS](https://github.com/soramitsu/iroha-demo-ios) | iOS point app | Xcode/CocoaPods project structure for a point-transfer style mobile application | Historical external demo; use the in-tree Swift examples and [Swift and iOS SDK page](/guide/tutorials/swift.md) for current setup | ## JavaScript Desktop Demo Start with the JavaScript demo if you want a working reference for current -application flows. 
It is a desktop client that talks directly to Torii through -the in-repo JavaScript SDK, without a separate backend. The app includes: +application flows. It is a desktop client that talks directly to Torii +through the in-repo JavaScript SDK, without a separate backend. The app +includes: - first-run account setup and key import or generation - endpoint settings for SORA Nexus networks @@ -30,20 +34,26 @@ the in-repo JavaScript SDK, without a separate backend. The app includes: - staking and governance screens - live Electron E2E checks against configured Torii endpoints -The JavaScript demo requires Node.js 20+ and a Rust toolchain for the native -`iroha_js_host` module. Its README contains the current install, build, test, -and live E2E commands. +The JavaScript demo requires Node.js 20+ and a Rust toolchain for the +native `iroha_js_host` module. Its README contains the current install, +build, test, and live E2E commands. -## Mobile Point Demos +## Mobile Samples -The Android and iOS repositories are useful as historical examples of the -original point-app concept and mobile project layout. Treat them as reference -material for UI and project organization, then use the current SDK pages for -new application setup: +The external Android and iOS point-app repositories are historical examples +of the original point-app concept and mobile project layout. Swift/iOS +sample code also exists in the +[Iroha repository's `examples/ios/` directory](https://github.com/hyperledger-iroha/iroha/tree/i23-features/examples/ios), +but its checked-in project manifests are currently out of sync with the +package API and dependency layout. The current Android SDK lives under +`java/iroha_android/`. 
+ +Use the SDK pages for new application setup: - [Android, Kotlin, and Java](/guide/tutorials/kotlin-java.md) - [Swift and iOS](/guide/tutorials/swift.md) For new mobile work, confirm the SDK version, Torii endpoint shape, account -format, and transaction format against the current upstream workspace before -porting code from either mobile demo. +format, and transaction format against the current +[Iroha `i23-features` branch](https://github.com/hyperledger-iroha/iroha/tree/i23-features) +before porting code from either external mobile demo. diff --git a/src/guide/tutorials/swift.md b/src/guide/tutorials/swift.md index ff96429c8..2b22e04fd 100644 --- a/src/guide/tutorials/swift.md +++ b/src/guide/tutorials/swift.md @@ -1,60 +1,213 @@ # Swift and iOS -The Apple-platform SDK in the upstream workspace is `IrohaSwift`. It targets -Iroha 2-era flows and the current Iroha 3 / Sora Nexus Torii surfaces. +The Swift SDK shipped by the upstream workspace is the `IrohaSwift` Swift +package under `IrohaSwift/`. Its package manifest defines one library product, +`IrohaSwift`, and targets iOS 15+ and macOS 12+ with Swift tools 5.9. + +The package depends on the native `NoritoBridge` binary target. Package +resolution validates `../dist/NoritoBridge.xcframework` before building, and +transaction or Connect crypto paths throw bridge-unavailable errors when the +native symbols are not loaded. ## Swift Package Manager -Add the package in Xcode or in `Package.swift`: +When developing against a checked-out workspace, point SwiftPM at the local +`IrohaSwift/` package directory. 
The package identity used by +`Package.swift` is `IrohaSwift`: ```swift dependencies: [ - .package( - url: "https://github.com/hyperledger/iroha-swift", - branch: "main" - ) + .package(name: "IrohaSwift", path: "/path/to/iroha/IrohaSwift") ], targets: [ .target( name: "YourApp", dependencies: [ - .product(name: "IrohaSwift", package: "iroha-swift") + .product(name: "IrohaSwift", package: "IrohaSwift") ] ) ] ``` -When working inside the monorepo, use the local path dependency instead. +Adjust the path for your app. Do not copy the current +`examples/ios/ConnectMinimalApp` path as-is; that manifest resolves +`../../IrohaSwift` to `examples/IrohaSwift`. + +Before resolving the package, make sure the bridge exists at the workspace root: + +```bash +cd /path/to/iroha +make bridge-xcframework +``` + +This produces `dist/NoritoBridge.xcframework`; `IrohaSwift/Package.swift` +references it as `../dist/NoritoBridge.xcframework`. -`IrohaSwift` requires the native `dist/NoritoBridge.xcframework`. SwiftPM and -the CocoaPods lint flow fail fast when that bridge is missing. +## CocoaPods + +The codebase also contains `IrohaSwift/IrohaSwift.podspec`. It declares the +`IrohaSwift` pod, Swift 5.9, and iOS 15. The podspec pulls Swift sources from +the main repository; the native bridge still has to be present and linked for +transaction encoding, non-Ed25519 signing, and Connect crypto. ## Quickstart ```swift +import Foundation import IrohaSwift let torii = ToriiClient(baseURL: URL(string: "http://127.0.0.1:8080")!) 
-let sdk = IrohaSDK(baseURL: torii.baseURL) +let sdk = IrohaSDK(toriiClient: torii) + let keypair = try Keypair.generate() +let accountId = try keypair.accountId() -print(keypair.publicKey) +if #available(iOS 15.0, macOS 12.0, *) { + let balances = try await torii.getAssets(accountId: accountId) + print("balances:", balances) +} ``` +## Try Taira Read-Only + +Start with a plain HTTP probe to confirm the device or simulator can reach the +public Taira endpoint: + +```swift +import Foundation + +if #available(iOS 15.0, macOS 12.0, *) { + let url = URL(string: "https://taira.sora.org/status")! + let (data, response) = try await URLSession.shared.data(from: url) + + if let http = response as? HTTPURLResponse { + print("status:", http.statusCode) + } + print(String(decoding: data, as: UTF8.self)) +} +``` + +Use the same `URLSession` check for +`https://taira.sora.org/v1/assets/definitions?limit=5` while you are building +UI and retry behavior. Switch to `IrohaSDK` submit helpers only after the +app loads signer material from secure storage and the account is funded on +Taira. + +To build and submit a transaction, use the `IrohaSDK` helpers. These call the +native bridge-backed transaction encoder: + +```swift +let transfer = TransferRequest( + chainId: "00000000-0000-0000-0000-000000000000", + authority: accountId, + assetDefinitionId: "66owaQmAQMuHxPzxUN3bqZ6FJfDa", + quantity: "1", + destination: accountId, + description: "demo" +) + +if #available(iOS 15.0, macOS 12.0, *) { + let status = try await sdk.submitAndWait( + transfer: transfer, + keypair: keypair + ) + print(status.content.status.kind) +} +``` + +`TransferRequest`, `MintRequest`, `BurnRequest`, `ShieldRequest`, and +`UnshieldRequest` validate canonical account IDs and canonical unprefixed +Base58 asset-definition IDs before signing. + +## Signing + +`Keypair` is the Ed25519 convenience API. 
For other algorithms, construct an +`IrohaSDK` with `defaultSigningAlgorithm` and use `generateSigningKey()` or +`signingKey(fromSeed:)`: + +```swift +let pqSdk = IrohaSDK( + baseURL: torii.baseURL, + defaultSigningAlgorithm: .mlDsa +) +let signingKey = try pqSdk.generateSigningKey() +``` + +The `SigningAlgorithm` enum currently includes Ed25519, secp256k1, BLS normal +and small variants, ML-DSA, GOST R 34.10-2012 parameter sets, and SM2. Native +bridge support is required outside the Ed25519 convenience path. + +## Connect + +The Connect client is implemented in Swift source, with crypto and frame codecs +backed by `NoritoBridge`: + +```swift +let sessionID = Data(repeating: 0, count: 32) // replace with the session bytes +let sid = "" +let request = try ConnectClient.makeWebSocketRequest( + baseURL: URL(string: "https://node.example")!, + sid: sid, + role: .app, + token: "" +) + +let client = ConnectClient(request: request) +await client.start() + +let session = ConnectSession(sessionID: sessionID, client: client) +let keyPair = try ConnectCrypto.generateKeyPair() +``` + +`ConnectSession` handles open and close controls, encrypted envelope reads, +direction keys, flow control, event streams, balance streams, and diagnostics +journals. 
+ ## Current Coverage -The Swift SDK already includes: +The Swift source currently includes: + +- `ToriiClient` HTTP helpers for accounts, assets, aliases, explorer pages, + RWA, contracts, multisig, governance, subscriptions, data availability, + confidential assets, node/runtime status, health, metrics, and SSE streams +- `IrohaSDK` transaction builders and submit/poll helpers for transfer, mint, + burn, shield, unshield, ZK transfer, ZK asset registration, metadata, + identifier claims, multisig registration, and governance instructions +- pending transaction queue support through `PendingTransactionQueue` and + `FilePendingTransactionQueue` +- account-address and I105 helpers through `AccountAddress` and `AccountId` +- Ed25519, secp256k1, ML-DSA, BLS, GOST, and SM2 signing surfaces, with native + bridge support where required +- Connect WebSocket, frame, crypto, session, queue, replay, and diagnostics + helpers +- Offline V2 note, receipt, QR stream, and transaction models +- SoraFS, data-availability, and proof-attachment helpers + +## In-Tree Samples + +The upstream workspace contains Swift/iOS example directories under +`examples/ios/`, but the project manifests are not a reliable source of current +setup instructions: + +- `examples/ios/ConnectMinimalApp` is a SwiftPM executable harness, but its + package manifest currently resolves `../../IrohaSwift` to + `examples/IrohaSwift`, and its source references Connect helpers that are not + present in `IrohaSwift/Sources`. +- `examples/ios/NoritoDemo` and `examples/ios/NoritoDemoXcode` contain SwiftUI + code that imports `IrohaSwift` and conditionally uses `NoritoBridgeKit`, but + their checked-in project manifests do not declare the `IrohaSwift` package + dependency. 
-- Torii HTTP helpers -- Norito envelope encoding -- Ed25519 and ML-DSA signing helpers -- offline allowance helpers -- runtime capability and event helpers -- subscription and explorer helpers -- Connect, pending-transaction, and Norito RPC helpers +Use `IrohaSwift/Sources/IrohaSwift` and `IrohaSwift/Tests/IrohaSwiftTests` as +the current API references until those sample manifests are brought back in sync. -## Upstream References +## Source References -- `IrohaSwift/README.md` - `IrohaSwift/Package.swift` -- `docs/connect_swift_ios.md` +- `IrohaSwift/IrohaSwift.podspec` +- `IrohaSwift/Sources/IrohaSwift/ToriiClient.swift` +- `IrohaSwift/Sources/IrohaSwift/TxBuilder.swift` +- `IrohaSwift/Sources/IrohaSwift/TransactionEncoder.swift` +- `IrohaSwift/Sources/IrohaSwift/ConnectClient.swift` +- `IrohaSwift/Sources/IrohaSwift/ConnectSession.swift` +- `examples/ios/ConnectMinimalApp/Package.swift` diff --git a/src/help/integration-issues.md b/src/help/integration-issues.md index 93b4a0c47..c96549a82 100644 --- a/src/help/integration-issues.md +++ b/src/help/integration-issues.md @@ -22,6 +22,20 @@ If the peer runs in Docker or Kubernetes, use the host or service address that is reachable from the client process. `127.0.0.1` inside a container is not the host machine. +For public Taira tests, start with an unsigned endpoint probe: + +```bash +curl -fsS https://taira.sora.org/status \ + | jq '{blocks, txs_approved, txs_rejected, queue_size, peers}' + +curl -fsS 'https://taira.sora.org/v1/domains?limit=5' \ + | jq -r '.items[].id' +``` + +If these commands fail with `502`, TLS, DNS, or timeout errors, fix network +reachability or wait for the public testnet endpoint before debugging account +keys or transaction payloads. 
+ ## Transactions are rejected Most transaction failures are caused by identity or authorization mismatch: diff --git a/src/index.md b/src/index.md index 527f612a2..27ab87e65 100644 --- a/src/index.md +++ b/src/index.md @@ -4,39 +4,49 @@ layout: home hero: name: Hyperledger Iroha 3 text: Documentation - tagline: Deterministic blockchain platform for SORA Nexus, SDKs, and operator workflows + tagline: + Deterministic blockchain platform for SORA Nexus, SDKs, and operator + workflows image: src: /icon.svg alt: Hyperledger Iroha 3 logo #actions: #- theme: alt # text: View on GitHub - # link: https://github.com/hyperledger-iroha/iroha/ + # link: https://github.com/hyperledger-iroha/iroha/tree/i23-features features: - icon: - dark: /start.svg - light: /start-light.svg + dark: /start.svg + light: /start-light.svg title: Get Started - details: Build the current workspace, launch a local network, and start using the Iroha 3 CLI + details: + Build the current workspace, launch a local network, and start using + the Iroha 3 CLI link: /get-started/ - icon: - dark: /build.svg - light: /build-light.svg - title: SDKs - details: Find the current Rust, Python, JavaScript, Android, and Swift entry points - link: /guide/tutorials/ + dark: /build.svg + light: /build-light.svg + title: Guide + details: + Find SDKs, best practices, configuration, security, and operator + workflows + link: /guide/ - icon: - dark: /explained.svg - light: /explained-light.svg + dark: /explained.svg + light: /explained-light.svg title: Architecture - details: Understand Torii, Sumeragi, Norito, IVM, and the Nexus data-space model + details: + Understand Torii, Sumeragi, Norito, IVM, and the Nexus data-space + model link: /blockchain/iroha-explained - icon: - dark: /reference.svg - light: /reference-light.svg + dark: /reference.svg + light: /reference-light.svg title: Reference - details: Consult the current binary, genesis, Torii, and compatibility reference pages + details: + Consult the current binary, 
genesis, Torii, and compatibility + reference pages link: /reference/ # - title: Cookbook # (TBA) diff --git a/src/public/compat-matrix.json b/src/public/compat-matrix.json index 958e06767..51431510a 100644 --- a/src/public/compat-matrix.json +++ b/src/public/compat-matrix.json @@ -1,32 +1,11 @@ { "source": { - "kind": "local-snapshot", - "repo": "../iroha", + "repo": "hyperledger-iroha/iroha", + "repo_url": "https://github.com/hyperledger-iroha/iroha", "branch": "i23-features", - "commit": "6be123546b7c", - "dirty": true, - "generated_at": "2026-04-28", - "basis": [ - "SDK READMEs, in-tree tests, fixtures, and module layout in ../iroha.", - "Uncommitted genesis, schema, and Python SDK changes were present when this snapshot was generated." - ], - "dirty_files": [ - "crates/iroha_data_model/src/events/mod.rs", - "crates/iroha_genesis/src/lib.rs", - "crates/iroha_kagami/src/genesis/sign.rs", - "crates/iroha_kagami/src/schema.rs", - "docs/source/references/peer.template.toml", - "python/iroha_python/DESIGN.md", - "python/iroha_python/README.md", - "python/iroha_python/iroha_python_rs/Cargo.toml", - "python/iroha_python/iroha_python_rs/src/lib.rs", - "python/iroha_python/src/iroha_python/__init__.py", - "python/iroha_python/src/iroha_python/crypto.py", - "python/iroha_python/tests/crypto_algorithms_test.py", - "python/iroha_torii_client/__pycache__/__init__.cpython-311.pyc", - "python/iroha_torii_client/__pycache__/client.cpython-311.pyc", - "python/norito_py/src/iroha_norito.egg-info/SOURCES.txt" - ] + "branch_url": "https://github.com/hyperledger-iroha/iroha/tree/i23-features", + "commit": "b530168a7468", + "generated_at": "2026-04-28" }, "included_sdks": [ { @@ -65,7 +44,7 @@ "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { "status": "ok" @@ -85,16 +64,16 @@ "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { 
"status": "ok" @@ -146,7 +125,7 @@ "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { "status": "ok" @@ -169,7 +148,7 @@ "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { "status": "ok" @@ -241,7 +220,7 @@ "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { "status": "ok" @@ -270,7 +249,7 @@ "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { "status": "ok" @@ -282,7 +261,7 @@ "status": "ok" }, { - "status": "no-data" + "status": "ok" } ] }, @@ -293,16 +272,16 @@ "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { "status": "ok" @@ -342,22 +321,22 @@ "name": "Live testnet smoke lane", "results": [ { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { - "status": "no-data" + "status": "ok" }, { "status": "ok" diff --git a/src/reference/binaries.md b/src/reference/binaries.md index e5cc943ee..bc8ce2948 100644 --- a/src/reference/binaries.md +++ b/src/reference/binaries.md @@ -3,9 +3,9 @@ The current Iroha 2 and Iroha 3 operator workflow revolves around three primary binaries: -- [`irohad`](https://github.com/hyperledger-iroha/iroha/tree/main/crates/irohad) for running a peer daemon -- [`iroha`](https://github.com/hyperledger-iroha/iroha/tree/main/crates/iroha_cli) for CLI and operator commands -- [`kagami`](https://github.com/hyperledger-iroha/iroha/tree/main/crates/iroha_kagami) for keys, genesis, localnets, and profiles +- [`irohad`](https://github.com/hyperledger-iroha/iroha/tree/i23-features/crates/irohad) for running a peer daemon +- [`iroha`](https://github.com/hyperledger-iroha/iroha/tree/i23-features/crates/iroha_cli) for CLI and operator commands +- 
[`kagami`](https://github.com/hyperledger-iroha/iroha/tree/i23-features/crates/iroha_kagami) for keys, genesis, localnets, and profiles The source tree also exposes track-specific aliases: diff --git a/src/reference/compatibility-matrix.md b/src/reference/compatibility-matrix.md index 2c9dcbcc0..aabfc7e4c 100644 --- a/src/reference/compatibility-matrix.md +++ b/src/reference/compatibility-matrix.md @@ -1,8 +1,8 @@ # Compatibility Matrix The compatibility matrix shows cross-SDK scenario coverage for the current -Iroha 3 docs set. By default, the page loads the bundled snapshot generated -from the local sibling `../iroha` checkout. +Iroha 3 docs set. By default, the page loads the bundled snapshot for the +[`hyperledger-iroha/iroha` `i23-features` branch](https://github.com/hyperledger-iroha/iroha/tree/i23-features). The matrix consists of: diff --git a/src/reference/data-model-schema.md b/src/reference/data-model-schema.md index 383c87a32..c4688ee4d 100644 --- a/src/reference/data-model-schema.md +++ b/src/reference/data-model-schema.md @@ -1,8 +1,11 @@ # Data Model Schema -This page is generated from the current Iroha source checkout. `pnpm -get-snippets` reads `../iroha/docs/source/references/schema.json` when it is -available. If that snapshot is empty, the snippet tool runs `kagami advanced -schema` against the local `../iroha` workspace and renders the live schema. +This page is generated from the +[`hyperledger-iroha/iroha` `i23-features` branch](https://github.com/hyperledger-iroha/iroha/tree/i23-features). +`pnpm get-snippets` reads +[`docs/source/references/schema.json`](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/source/references/schema.json) +from that source when it is available. If that snapshot is empty, regenerate +this page from the same branch with the snippet tooling after the upstream +schema generator succeeds. 
diff --git a/src/reference/genesis.md b/src/reference/genesis.md index 1d5dde835..e8b778920 100644 --- a/src/reference/genesis.md +++ b/src/reference/genesis.md @@ -75,5 +75,5 @@ public_key = "" For the full upstream details, see: -- [docs/genesis.md](https://github.com/hyperledger-iroha/iroha/blob/main/docs/genesis.md) -- [crates/iroha_kagami/README.md](https://github.com/hyperledger-iroha/iroha/blob/main/crates/iroha_kagami/README.md) +- [docs/genesis.md](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/genesis.md) +- [crates/iroha_kagami/README.md](https://github.com/hyperledger-iroha/iroha/blob/i23-features/crates/iroha_kagami/README.md) diff --git a/src/reference/index.md b/src/reference/index.md index 824c753a9..65147ffbd 100644 --- a/src/reference/index.md +++ b/src/reference/index.md @@ -8,10 +8,11 @@ Start here for: - [Working with Iroha binaries](/reference/binaries.md) - [Genesis reference](/reference/genesis.md) - [Torii endpoints](/reference/torii-endpoints.md) +- [Torii API console](/reference/torii-api-console.md) - [Norito](/reference/norito.md) - [Compatibility matrix](/reference/compatibility-matrix.md) For the broader upstream documentation map, see: -- [docs/README.md](https://github.com/hyperledger-iroha/iroha/blob/main/docs/README.md) -- [docs/source/README.md](https://github.com/hyperledger-iroha/iroha/blob/main/docs/source/README.md) +- [docs/README.md](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/README.md) +- [docs/source/README.md](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/source/README.md) diff --git a/src/reference/naming.md b/src/reference/naming.md index 7ede56303..f7dc85502 100644 --- a/src/reference/naming.md +++ b/src/reference/naming.md @@ -3,14 +3,38 @@ When you are naming accounts, domains, or assets, you have to keep in mind the following conventions used in Iroha: -1. There is a number of reserved characters that are used for specific +1. 
There are a number of reserved separators that are used for specific
    types of constructs:
-   - `@` is reserved for account address and legacy account-selector forms
-   - `#` is reserved for asset aliases and asset balance literals
+   - `@` is reserved for account aliases and scoped account/public-key forms
+   - `#` is reserved for asset definition aliases and asset balance literals
+   - `::` is reserved for contract aliases
+   - `.` is reserved for domain and dataspace qualification
    - `$` is reserved for trigger-scoped textual forms
    - `%` is reserved for validator-scoped textual forms
 2. The maximum number of characters (including UTF-8 characters) a name
    can have is limited by two factors: `[0, u32::MAX]` and the currently
    allocated stack space.
+
+## Try It on Taira
+
+Resolve a public asset alias into its canonical asset definition ID:
+
+```bash
+curl -fsS https://taira.sora.org/v1/assets/aliases/resolve \
+  -H 'content-type: application/json' \
+  -d '{"alias":"usd#wonderland"}' \
+  | jq '{alias, asset_definition_id, asset_name, status: .alias_binding.status}'
+```
+
+Compare that with the asset definition list:
+
+```bash
+curl -fsS 'https://taira.sora.org/v1/assets/definitions?limit=20' \
+  | jq -r '.items[] | select(.alias != null) | [.alias, .id, .name] | @tsv'
+```
+
+The `#` character separates an asset alias from the domain context. Keep it out
+of plain names unless you are intentionally writing an asset alias or asset
+balance literal.
diff --git a/src/reference/norito.md b/src/reference/norito.md
index 2c89bf51b..4f569a97a 100644
--- a/src/reference/norito.md
+++ b/src/reference/norito.md
@@ -253,6 +253,6 @@ Norito so routing, billing, replay, and audit evidence stay reproducible.
## Upstream References -- [Norito format specification](https://github.com/hyperledger-iroha/iroha/blob/main/norito.md) -- [Norito crate README](https://github.com/hyperledger-iroha/iroha/blob/main/crates/norito/README.md) -- [Norito streaming design notes](https://github.com/hyperledger-iroha/iroha/blob/main/docs/source/norito_streaming.md) +- [Norito format specification](https://github.com/hyperledger-iroha/iroha/blob/i23-features/norito.md) +- [Norito crate README](https://github.com/hyperledger-iroha/iroha/blob/i23-features/crates/norito/README.md) +- [Norito streaming design notes](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/source/norito_streaming.md) diff --git a/src/reference/torii-api-console.md b/src/reference/torii-api-console.md new file mode 100644 index 000000000..7ec6e6751 --- /dev/null +++ b/src/reference/torii-api-console.md @@ -0,0 +1,53 @@ +--- +aside: false +pageClass: torii-api-console-page +--- + +# Torii API Console + +Use the live OpenAPI document from a running Torii endpoint to inspect routes, +send test requests, copy curl commands, and generate client code. + + + +## Requirements + +- The Torii endpoint must expose `/openapi.json`. +- Browser testing requires CORS to allow this docs origin. +- The browser must be able to reach the endpoint directly. +- Code generation requires Node.js, pnpm, and a Java runtime for OpenAPI + Generator. + +The console defaults to `https://taira.sora.org`. Local development usually +works with `http://127.0.0.1:8080` when you run Torii on your machine. 
+ +## Try Taira First + +Before generating a client, check that the public OpenAPI document is reachable +from your machine: + +```bash +curl -fsS https://taira.sora.org/openapi.json -o /tmp/taira-openapi.json +jq '{title: .info.title, version: .info.version, paths: (.paths | length)}' \ + /tmp/taira-openapi.json +``` + +Then paste `https://taira.sora.org/openapi.json` into the console and try a +read-only route such as `GET /status`, `GET /v1/domains`, or +`GET /v1/assets/definitions`. Save signed transaction and private-key flows for +an SDK or CLI client that loads secrets from your runtime environment. + +## Generated Clients + +The generator command uses the same live OpenAPI document that the console +loads. This is useful for JSON operator, explorer, app, and telemetry routes. + +For signed ledger transactions, signed queries, and Norito-native payloads, +prefer the official Iroha SDKs. OpenAPI clients do not assemble signatures, +manage account keys, or encode Norito transaction bodies for you. + +To inspect every generator supported by OpenAPI Generator, run: + +```bash +pnpm dlx @openapitools/openapi-generator-cli list +``` diff --git a/src/reference/torii-endpoints.md b/src/reference/torii-endpoints.md index ed40ee73d..44766ed0f 100644 --- a/src/reference/torii-endpoints.md +++ b/src/reference/torii-endpoints.md @@ -38,6 +38,43 @@ Norito RPC guidance, see the [Norito reference](/reference/norito.md). `/openapi` is the authoritative endpoint list for a running node. The exact surface depends on build features and runtime configuration, so generated clients should prefer the live OpenAPI document over a hand-copied route list. +Use the [Torii API console](/reference/torii-api-console.md) to load that live +document, test JSON routes, copy curl requests, and generate client code from +the current schema. + +## Try Live Taira Routes + +The public Taira testnet exposes the same Torii JSON surface that application +clients use for read-only exploration. 
These commands do not require keys: + +```bash +TAIRA_ROOT=https://taira.sora.org + +curl -fsS "$TAIRA_ROOT/status" \ + | jq '{blocks, txs_approved, txs_rejected, queue_size, peers}' + +curl -fsS "$TAIRA_ROOT/openapi.json" \ + | jq -r '.paths | keys[]' \ + | grep '^/v1/' \ + | head -n 20 + +curl -fsS "$TAIRA_ROOT/v1/node/capabilities" \ + | jq '{abi_version, data_model_version, query: .query.aggregate.supported_resources}' +``` + +Try resource reads against the current world state: + +```bash +curl -fsS "$TAIRA_ROOT/v1/domains?limit=5" \ + | jq -r '.items[].id' + +curl -fsS "$TAIRA_ROOT/v1/assets/definitions?limit=5" \ + | jq -r '.items[] | [.id, .name, .total_quantity] | @tsv' +``` + +If a public testnet route returns `502`, times out, or reports a saturated +queue, treat it as an endpoint availability issue and retry later before +debugging your client code. ## Consensus and Runtime Endpoints @@ -103,6 +140,62 @@ families are not all enabled on every network profile. | `/v1/offline/*`, `/v1/repo/*`, `/v1/space-directory/*`, `/v1/ram-lfe/*` | Offline readiness, repository agreements, dataspace manifests, and RAM LFE helpers | | `/v1/kaigi/*`, `/v1/webhooks/*`, `/v1/notify/*`, `/v1/telemetry/*` | Collaboration, webhook, push notification, and live telemetry integrations | +## ISO 20022 Bridge + +Torii exposes the ISO 20022 bridge under `/v1/iso20022/*` when the app-facing +API and bridge runtime are enabled. The bridge is intentionally scoped: it is +not a general-purpose ISO 20022 clearing gateway, but a supported subset for +turning selected payment messages into signed Iroha transfers and for tracking +their ledger status. 
+ +### Torii Ingestion Endpoints + +| ISO 20022 message | Endpoint | Purpose | +| --- | --- | --- | +| `pacs.008.001.08` (`pacs.008`) | `POST /v1/iso20022/pacs008` | Submit an FI-to-FI customer credit transfer and build the matching Iroha asset transfer | +| `pacs.009.001.10` (`pacs.009`) | `POST /v1/iso20022/pacs009` | Submit an FI-to-FI credit transfer used for PvP or securities-related cash funding | +| `pacs.002`-style status | `GET /v1/iso20022/status/{msg_id}` | Read the bridge state for a submitted message, including the derived `pacs002_code`, transaction hash, rejection detail, and resolved ledger context | + +`pacs.008` submissions must provide the message ID, interbank settlement +amount, currency, settlement date, debtor and creditor IBANs, and debtor and +creditor BICs. When reference data is configured, the bridge also checks the +BIC, IBAN, and ISO 4217 currency crosswalks before the generated transaction +enters the pipeline. + +`pacs.009` submissions must provide the business message ID, message definition +ID, creation time, interbank settlement amount, currency, settlement date, +instructing and instructed agent BICs, and debtor and creditor IBANs. If the +message includes `Purp`, the bridge currently accepts securities-purpose funding +only: `Purp=SECU`. + +Both ingestion endpoints accept XML ISO envelopes or the flat field format used +by the bridge tests. Optional `SplmtryData` fields can pin the target Iroha +ledger, source and target account IDs or addresses, and asset definition ID. +The response is `202 Accepted` with `message_id`, `transaction_hash`, `status`, +`pacs002_code`, and the resolved ledger/account/asset context. + +### Parser and Mapping Support + +The IVM ISO helper also validates and materializes additional message families +used by bridge tests, settlement mapping, or downstream reconciliation. These +messages do not have standalone Torii ingestion endpoints unless listed above. 
+ +| Message family | Current support | +| --- | --- | +| `head.001` | Business application header validation for ISO envelopes, including `BizMsgIdr`, `MsgDefIdr`, creation time, and optional sender/receiver BIC fields | +| `pacs.002` | Payment status report parsing and status-code vocabulary used by `GET /v1/iso20022/status/{msg_id}` | +| `pacs.004` | Payment return parsing for return/unwind flows | +| `pacs.007`, `pacs.028`, `pacs.029` | Payment reversal, status request, and resolution/status scaffolding for investigation flows | +| `pain.001`, `pain.002` | Customer payment initiation and payment status report validation scaffolding | +| `camt.052`, `camt.053`, `camt.054`, `camt.056` | Account report, statement, notification, and cancellation-request validation scaffolding | +| `sese.023`, `sese.025` | Securities settlement instruction and confirmation mapping for DvP/PvP flows | +| `colr.007` | Collateral substitution confirmation mapping | + +Settlement choreography may refer to related market messages such as +`sese.024`, `sese.030`, `sese.031`, `colr.010`, `colr.011`, `colr.012`, or +`camt.029`. Treat those as integration-level workflow references until a Torii +endpoint or IVM schema is added for the specific message. + ## Kaigi Sessions Kaigi provides paid, real-time audio/video rooms on SORA Nexus. Use it when @@ -174,8 +267,9 @@ desktop demo for an end-to-end wallet test. The demo is an Electron and Vue application that talks directly to Torii through the local `@iroha/iroha-js` binding and includes a `/kaigi` route for browser-native one-to-one media. 
-Prepare the demo beside a checkout of the Iroha source tree, because its -`@iroha/iroha-js` dependency is loaded from `../iroha/javascript/iroha_js`: +Use the demo with +[`@iroha/iroha-js`](https://github.com/hyperledger-iroha/iroha/tree/i23-features/javascript/iroha_js) +from the Iroha `i23-features` branch: ```bash git clone https://github.com/soramitsu/iroha-demo-javascript.git @@ -315,5 +409,7 @@ iroha --config ./localnet/client.toml ops sumeragi collectors ## Upstream References -- [README.md API and Observability](https://github.com/hyperledger-iroha/iroha/blob/main/README.md) -- [docs/source/telemetry.md](https://github.com/hyperledger-iroha/iroha/blob/main/docs/source/telemetry.md) +- [README.md API and Observability](https://github.com/hyperledger-iroha/iroha/blob/i23-features/README.md) +- [docs/source/telemetry.md](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/source/telemetry.md) +- [ISO 20022 bridge implementation](https://github.com/hyperledger-iroha/iroha/blob/i23-features/crates/iroha_torii/src/iso20022_bridge.rs) +- [Settlement ISO mapping](https://github.com/hyperledger-iroha/iroha/blob/i23-features/docs/portal/docs/finance/settlement-iso-mapping.md)