diff --git a/.changeset/warm-eagles-count.md b/.changeset/warm-eagles-count.md
new file mode 100644
index 00000000000..df5473a9fc4
--- /dev/null
+++ b/.changeset/warm-eagles-count.md
@@ -0,0 +1,5 @@
+---
+'@iota/dapp-kit': patch
+---
+
+Sync typography styling in both dark and light themes
diff --git a/.github/workflows/_move_tests.yml b/.github/workflows/_move_tests.yml
index a98c7ad8f3f..c7d4b59e5b3 100644
--- a/.github/workflows/_move_tests.yml
+++ b/.github/workflows/_move_tests.yml
@@ -17,7 +17,6 @@ jobs:
# to Move code but not Rust code (If there are Rust changes, they
# will be run as part of a larger test suite).
move-test:
- timeout-minutes: 10
runs-on: [self-hosted]
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
@@ -25,5 +24,24 @@ jobs:
with:
tool: nextest
- name: Run move tests
- run: |
- cargo nextest run -p iota-framework-tests -- unit_tests::
+ run: >
+ cargo nextest run --profile ci -E
+ 'package(iota-framework-tests)
+ or (package(iota-core) and test(quorum_driver::))
+ or package(iota-benchmark)
+ or test(move_tests::)'
+
+ move-simtest:
+ runs-on: [self-hosted]
+ steps:
+ - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
+ - uses: taiki-e/install-action@375e0c7f08a66b8c2ba7e7eef31a6f91043a81b0 # v2.44.38
+ with:
+ tool: nextest
+ - name: Run move tests
+ run: >
+ scripts/simtest/cargo-simtest simtest --profile ci -E
+ 'package(iota-framework-tests)
+ or (package(iota-core) and test(quorum_driver::))
+ or package(iota-benchmark)
+ or test(move_tests::)'
diff --git a/.github/workflows/apps_wallet_prod_build.yml b/.github/workflows/apps_wallet_prod_build.yml
index f4714cd53e1..9a8052331d8 100644
--- a/.github/workflows/apps_wallet_prod_build.yml
+++ b/.github/workflows/apps_wallet_prod_build.yml
@@ -20,6 +20,11 @@ jobs:
steps:
- name: Checking out the repository
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
+ with:
+ # Number of commits to fetch. 0 indicates all history for all branches and tags. Default: 1
+ fetch-depth: 0
+ # Whether to fetch tags, even if fetch-depth > 0.
+ fetch-tags: "true"
- uses: pnpm/action-setup@fe02b34f77f8bc703788d5817da081398fad5dd2 # v4.0.0
- name: Install Nodejs
uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
@@ -61,8 +66,9 @@ jobs:
echo "No previous tag found. Skipping changelog generation."
echo "changelog=No previous tag found. Changelog generation skipped." >> $GITHUB_OUTPUT
else
- echo "## Changelog" >> CHANGELOG.md
- git log ${{ env.PREV_TAG }}..${{ env.CURRENT_TAG }} --pretty=format:"- %s in #%h" -- ./apps/wallet > CHANGELOG.md
+ echo "## Changelog" > CHANGELOG.md
+ git log ${{ env.PREV_TAG }}..${{ env.CURRENT_TAG }} --pretty=format:"- %s in #%h" -- ./apps/wallet >> CHANGELOG.md
+ cat CHANGELOG.md
fi
- name: Get version from tag
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 3e02cfc233f..e35ec912be6 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -81,9 +81,6 @@ jobs:
external-tests:
uses: ./.github/workflows/_external_rust_tests.yml
- move-tests:
- uses: ./.github/workflows/_move_tests.yml
-
deny:
uses: ./.github/workflows/_cargo_deny.yml
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2b5e2c8a2b8..b989c1cd964 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,7 +4,7 @@ Thanks for considering making a contribution to the IOTA network or its document
## Contribute to IOTA
-See [IOTA Environment Setup](https://github.com/iotaledger/iota/blob/main/docs/content/guides/developer/getting-started/iota-environment.mdx) for approach to submitting code fixes and enhancements.
+See [IOTA Environment Setup](https://github.com/iotaledger/iota/blob/main/docs/content/developer/getting-started/iota-environment.mdx) for approach to submitting code fixes and enhancements.
Found a bug or security vulnerability? Create a [GitHub issue](https://github.com/iotaledger/iota/issues/new/choose).
diff --git a/apps/core/src/constants/coins.constants.ts b/apps/core/src/constants/coins.constants.ts
index 11fba0c12a0..b0e38e586cd 100644
--- a/apps/core/src/constants/coins.constants.ts
+++ b/apps/core/src/constants/coins.constants.ts
@@ -3,3 +3,4 @@
export const COINS_QUERY_REFETCH_INTERVAL = 20_000;
export const COINS_QUERY_STALE_TIME = 20_000;
+export const COIN_TYPE = '0x2::coin::Coin';
diff --git a/apps/explorer/src/components/top-validators-card/TopValidatorsCard.tsx b/apps/explorer/src/components/top-validators-card/TopValidatorsCard.tsx
index 2f4f653a9cc..bd777c19e41 100644
--- a/apps/explorer/src/components/top-validators-card/TopValidatorsCard.tsx
+++ b/apps/explorer/src/components/top-validators-card/TopValidatorsCard.tsx
@@ -3,9 +3,18 @@
// SPDX-License-Identifier: Apache-2.0
import { useIotaClientQuery } from '@iota/dapp-kit';
-import { PlaceholderTable, TableCard } from '~/components/ui';
+import { Link, PlaceholderTable, TableCard } from '~/components/ui';
import { generateValidatorsTableColumns } from '~/lib/ui';
-import { InfoBox, InfoBoxStyle, InfoBoxType, Panel, Title } from '@iota/apps-ui-kit';
+import {
+ Button,
+ ButtonSize,
+ ButtonType,
+ InfoBox,
+ InfoBoxStyle,
+ InfoBoxType,
+ Panel,
+ Title,
+} from '@iota/apps-ui-kit';
import { ErrorBoundary } from '../error-boundary/ErrorBoundary';
import { Warning } from '@iota/ui-icons';
@@ -19,6 +28,9 @@ type TopValidatorsCardProps = {
export function TopValidatorsCard({ limit, showIcon }: TopValidatorsCardProps): JSX.Element {
const { data, isPending, isSuccess, isError } = useIotaClientQuery('getLatestIotaSystemState');
+ const topActiveValidators =
+ data?.activeValidators.slice(0, limit || NUMBER_OF_VALIDATORS) ?? [];
+
const tableColumns = generateValidatorsTableColumns({
atRiskValidators: [],
validatorEvents: [],
@@ -42,26 +54,33 @@ export function TopValidatorsCard({ limit, showIcon }: TopValidatorsCardProps):
return (
-
-
-
- {isPending && (
-
- )}
+
+
+
+
+
+
+
- {isSuccess && (
-
-
+ {isPending && (
+
-
- )}
+ )}
+
+ {isSuccess && (
+
+
+
+ )}
+
);
diff --git a/apps/wallet-dashboard/app/(protected)/assets/page.tsx b/apps/wallet-dashboard/app/(protected)/assets/page.tsx
index 7007cc86c5c..9af8669a717 100644
--- a/apps/wallet-dashboard/app/(protected)/assets/page.tsx
+++ b/apps/wallet-dashboard/app/(protected)/assets/page.tsx
@@ -4,7 +4,7 @@
'use client';
import { Panel, Title, Chip, TitleSize } from '@iota/apps-ui-kit';
-import { hasDisplayData, useGetOwnedObjects } from '@iota/core';
+import { COIN_TYPE, hasDisplayData, useGetOwnedObjects } from '@iota/core';
import { useCurrentAccount } from '@iota/dapp-kit';
import { IotaObjectData } from '@iota/iota-sdk/client';
import { useState } from 'react';
@@ -31,25 +31,28 @@ export default function AssetsDashboardPage(): React.JSX.Element {
const account = useCurrentAccount();
const { data, isFetching, fetchNextPage, hasNextPage, refetch } = useGetOwnedObjects(
account?.address,
- undefined,
+ {
+ MatchNone: [{ StructType: COIN_TYPE }],
+ },
OBJECTS_PER_REQ,
);
- const assets: IotaObjectData[] = [];
-
- for (const page of data?.pages || []) {
- for (const asset of page.data) {
- if (asset.data && asset.data.objectId) {
- if (selectedCategory == AssetCategory.Visual) {
- if (hasDisplayData(asset)) {
- assets.push(asset.data);
- }
- } else if (selectedCategory == AssetCategory.Other) {
- assets.push(asset.data);
- }
+ const assets = (data?.pages || [])
+ .flatMap((page) => page.data)
+ .filter((asset) => {
+ if (!asset.data || !asset.data.objectId) {
+ return false;
}
- }
- }
+ if (selectedCategory === AssetCategory.Visual) {
+ return hasDisplayData(asset);
+ }
+ if (selectedCategory === AssetCategory.Other) {
+ return !hasDisplayData(asset);
+ }
+ return false;
+ })
+ .map((asset) => asset.data)
+ .filter((data): data is IotaObjectData => data !== null && data !== undefined);
function onAssetClick(asset: IotaObjectData) {
setSelectedAsset(asset);
diff --git a/apps/wallet-dashboard/app/(protected)/migrations/page.tsx b/apps/wallet-dashboard/app/(protected)/migrations/page.tsx
index b042465c49e..e2cd767e663 100644
--- a/apps/wallet-dashboard/app/(protected)/migrations/page.tsx
+++ b/apps/wallet-dashboard/app/(protected)/migrations/page.tsx
@@ -25,12 +25,14 @@ import { STARDUST_BASIC_OUTPUT_TYPE, STARDUST_NFT_OUTPUT_TYPE, useFormatCoin } f
import { IOTA_TYPE_ARG } from '@iota/iota-sdk/utils';
import { StardustOutputMigrationStatus } from '@/lib/enums';
import { MigrationObjectsPanel, MigrationDialog } from '@/components';
+import { useRouter } from 'next/navigation';
function MigrationDashboardPage(): JSX.Element {
const account = useCurrentAccount();
const address = account?.address || '';
const queryClient = useQueryClient();
const iotaClient = useIotaClient();
+ const router = useRouter();
const [isMigrationDialogOpen, setIsMigrationDialogOpen] = useState(false);
const [selectedStardustObjectsCategory, setSelectedStardustObjectsCategory] = useState<
StardustOutputMigrationStatus | undefined
@@ -139,6 +141,11 @@ function MigrationDashboardPage(): JSX.Element {
setSelectedStardustObjectsCategory(undefined);
}
+ function handleMigrationDialogClose() {
+ setIsMigrationDialogOpen(false);
+ router.push('/');
+ }
+
return (
)}
diff --git a/apps/wallet-dashboard/components/Dialogs/MigrationDialog.tsx b/apps/wallet-dashboard/components/Dialogs/MigrationDialog.tsx
deleted file mode 100644
index f1f661c2db4..00000000000
--- a/apps/wallet-dashboard/components/Dialogs/MigrationDialog.tsx
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright (c) 2024 IOTA Stiftung
-// SPDX-License-Identifier: Apache-2.0
-
-import React from 'react';
-import { VirtualList } from '@/components';
-import { useCurrentAccount, useSignAndExecuteTransaction } from '@iota/dapp-kit';
-import { IotaObjectData } from '@iota/iota-sdk/client';
-import { useMigrationTransaction } from '@/hooks/useMigrationTransaction';
-import {
- Button,
- Dialog,
- Header,
- InfoBox,
- InfoBoxStyle,
- InfoBoxType,
- KeyValueInfo,
- LoadingIndicator,
- Panel,
- Title,
- TitleSize,
-} from '@iota/apps-ui-kit';
-import { useGroupedMigrationObjectsByExpirationDate } from '@/hooks';
-import { Loader, Warning } from '@iota/ui-icons';
-import { DialogLayout, DialogLayoutBody, DialogLayoutFooter } from './layout';
-import { MigrationObjectDetailsCard } from '../migration/migration-object-details-card';
-import { Collapsible, useFormatCoin } from '@iota/core';
-import { IOTA_TYPE_ARG } from '@iota/iota-sdk/utils';
-import { summarizeMigratableObjectValues } from '@/lib/utils';
-import toast from 'react-hot-toast';
-
-interface MigrationDialogProps {
- basicOutputObjects: IotaObjectData[] | undefined;
- nftOutputObjects: IotaObjectData[] | undefined;
- onSuccess: (digest: string) => void;
- setOpen: (bool: boolean) => void;
- open: boolean;
- isTimelocked: boolean;
-}
-
-export function MigrationDialog({
- basicOutputObjects = [],
- nftOutputObjects = [],
- onSuccess,
- open,
- setOpen,
- isTimelocked,
-}: MigrationDialogProps): JSX.Element {
- const account = useCurrentAccount();
- const {
- data: migrateData,
- isPending: isMigrationPending,
- isError: isMigrationError,
- } = useMigrationTransaction(account?.address || '', basicOutputObjects, nftOutputObjects);
-
- const {
- data: resolvedObjects = [],
- isLoading,
- error: isGroupedMigrationError,
- } = useGroupedMigrationObjectsByExpirationDate(
- [...basicOutputObjects, ...nftOutputObjects],
- isTimelocked,
- );
-
- const { mutateAsync: signAndExecuteTransaction, isPending: isSendingTransaction } =
- useSignAndExecuteTransaction();
- const { totalNotOwnedStorageDepositReturnAmount } = summarizeMigratableObjectValues({
- basicOutputs: basicOutputObjects,
- nftOutputs: nftOutputObjects,
- address: account?.address || '',
- });
-
- const [gasFee, gasFeeSymbol] = useFormatCoin(migrateData?.gasBudget, IOTA_TYPE_ARG);
- const [totalStorageDepositReturnAmountFormatted, totalStorageDepositReturnAmountSymbol] =
- useFormatCoin(totalNotOwnedStorageDepositReturnAmount.toString(), IOTA_TYPE_ARG);
-
- async function handleMigrate(): Promise {
- if (!migrateData) return;
- signAndExecuteTransaction(
- {
- transaction: migrateData.transaction,
- },
- {
- onSuccess: (tx) => {
- onSuccess(tx.digest);
- },
- },
- )
- .then(() => {
- toast.success('Migration transaction has been sent');
- })
- .catch(() => {
- toast.error('Migration transaction was not sent');
- });
- }
-
- return (
-
- );
-}
diff --git a/apps/wallet-dashboard/components/Dialogs/index.ts b/apps/wallet-dashboard/components/Dialogs/index.ts
index db380d17927..1475376b5ec 100644
--- a/apps/wallet-dashboard/components/Dialogs/index.ts
+++ b/apps/wallet-dashboard/components/Dialogs/index.ts
@@ -7,4 +7,4 @@ export * from './Staking';
export * from './unstake';
export * from './vesting';
export * from './settings';
-export * from './MigrationDialog';
+export * from './migration';
diff --git a/apps/wallet-dashboard/components/Dialogs/migration/MigrationDialog.tsx b/apps/wallet-dashboard/components/Dialogs/migration/MigrationDialog.tsx
new file mode 100644
index 00000000000..3fd91b0eb35
--- /dev/null
+++ b/apps/wallet-dashboard/components/Dialogs/migration/MigrationDialog.tsx
@@ -0,0 +1,88 @@
+// Copyright (c) 2024 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+import React, { useState } from 'react';
+import { useCurrentAccount, useSignAndExecuteTransaction } from '@iota/dapp-kit';
+import { IotaObjectData } from '@iota/iota-sdk/client';
+import { useMigrationTransaction } from '@/hooks/useMigrationTransaction';
+import { Dialog } from '@iota/apps-ui-kit';
+import toast from 'react-hot-toast';
+import { TransactionDialogView } from '../TransactionDialog';
+import { MigrationDialogView } from './enums';
+import { ConfirmMigrationView } from './views';
+
+interface MigrationDialogProps {
+ handleClose: () => void;
+ basicOutputObjects: IotaObjectData[] | undefined;
+ nftOutputObjects: IotaObjectData[] | undefined;
+ onSuccess: (digest: string) => void;
+ setOpen: (bool: boolean) => void;
+ open: boolean;
+ isTimelocked: boolean;
+}
+
+export function MigrationDialog({
+ handleClose,
+ basicOutputObjects = [],
+ nftOutputObjects = [],
+ onSuccess,
+ open,
+ setOpen,
+ isTimelocked,
+}: MigrationDialogProps): JSX.Element {
+ const account = useCurrentAccount();
+ const [txDigest, setTxDigest] = useState('');
+ const [view, setView] = useState(MigrationDialogView.Confirmation);
+
+ const {
+ data: migrateData,
+ isPending: isMigrationPending,
+ isError: isMigrationError,
+ } = useMigrationTransaction(account?.address || '', basicOutputObjects, nftOutputObjects);
+
+ const { mutateAsync: signAndExecuteTransaction, isPending: isSendingTransaction } =
+ useSignAndExecuteTransaction();
+
+ async function handleMigrate(): Promise {
+ if (!migrateData) return;
+ signAndExecuteTransaction(
+ {
+ transaction: migrateData.transaction,
+ },
+ {
+ onSuccess: (tx) => {
+ onSuccess(tx.digest);
+ setTxDigest(tx.digest);
+ setView(MigrationDialogView.TransactionDetails);
+ },
+ },
+ )
+ .then(() => {
+ toast.success('Migration transaction has been sent');
+ })
+ .catch(() => {
+ toast.error('Migration transaction was not sent');
+ });
+ }
+
+ return (
+
+ );
+}
diff --git a/apps/wallet-dashboard/components/Dialogs/migration/enums/index.ts b/apps/wallet-dashboard/components/Dialogs/migration/enums/index.ts
new file mode 100644
index 00000000000..6f408e39b8c
--- /dev/null
+++ b/apps/wallet-dashboard/components/Dialogs/migration/enums/index.ts
@@ -0,0 +1,4 @@
+// Copyright (c) 2024 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+export * from './view.enums';
diff --git a/apps/wallet-dashboard/components/Dialogs/migration/enums/view.enums.ts b/apps/wallet-dashboard/components/Dialogs/migration/enums/view.enums.ts
new file mode 100644
index 00000000000..5b16d31b836
--- /dev/null
+++ b/apps/wallet-dashboard/components/Dialogs/migration/enums/view.enums.ts
@@ -0,0 +1,7 @@
+// Copyright (c) 2024 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+export enum MigrationDialogView {
+ Confirmation = 'Confirmation',
+ TransactionDetails = 'TransactionDetails',
+}
diff --git a/apps/wallet-dashboard/components/Dialogs/migration/index.ts b/apps/wallet-dashboard/components/Dialogs/migration/index.ts
new file mode 100644
index 00000000000..41dd3ff2b30
--- /dev/null
+++ b/apps/wallet-dashboard/components/Dialogs/migration/index.ts
@@ -0,0 +1,6 @@
+// Copyright (c) 2024 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+export * from './MigrationDialog';
+
+export * from './views';
diff --git a/apps/wallet-dashboard/components/Dialogs/migration/views/ConfirmMigrationView.tsx b/apps/wallet-dashboard/components/Dialogs/migration/views/ConfirmMigrationView.tsx
new file mode 100644
index 00000000000..7066a922187
--- /dev/null
+++ b/apps/wallet-dashboard/components/Dialogs/migration/views/ConfirmMigrationView.tsx
@@ -0,0 +1,169 @@
+// Copyright (c) 2024 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+import React from 'react';
+import { MigrationObjectLoading, VirtualList } from '@/components';
+import { useCurrentAccount } from '@iota/dapp-kit';
+import { IotaObjectData } from '@iota/iota-sdk/client';
+import {
+ Button,
+ Header,
+ InfoBox,
+ InfoBoxStyle,
+ InfoBoxType,
+ KeyValueInfo,
+ Panel,
+ Skeleton,
+ Title,
+ TitleSize,
+} from '@iota/apps-ui-kit';
+import { useGroupedMigrationObjectsByExpirationDate } from '@/hooks';
+import { Loader, Warning } from '@iota/ui-icons';
+import { Collapsible, useFormatCoin } from '@iota/core';
+import { IOTA_TYPE_ARG } from '@iota/iota-sdk/utils';
+import { summarizeMigratableObjectValues } from '@/lib/utils';
+import { MigrationObjectDetailsCard } from '@/components/migration/migration-object-details-card';
+import { DialogLayout, DialogLayoutBody, DialogLayoutFooter } from '../../layout';
+import { Transaction } from '@iota/iota-sdk/transactions';
+
+interface ConfirmMigrationViewProps {
+ basicOutputObjects: IotaObjectData[] | undefined;
+ nftOutputObjects: IotaObjectData[] | undefined;
+ onSuccess: () => void;
+ setOpen: (bool: boolean) => void;
+ isTimelocked: boolean;
+ migrateData:
+ | {
+ transaction: Transaction;
+ gasBudget: string | number | null;
+ }
+ | undefined;
+ isMigrationPending: boolean;
+ isMigrationError: boolean;
+ isSendingTransaction: boolean;
+}
+
+export function ConfirmMigrationView({
+ basicOutputObjects = [],
+ nftOutputObjects = [],
+ onSuccess,
+ setOpen,
+ isTimelocked,
+ migrateData,
+ isMigrationPending,
+ isMigrationError,
+ isSendingTransaction,
+}: ConfirmMigrationViewProps): JSX.Element {
+ const account = useCurrentAccount();
+
+ const {
+ data: resolvedObjects = [],
+ isLoading,
+ error: isGroupedMigrationError,
+ } = useGroupedMigrationObjectsByExpirationDate(
+ [...basicOutputObjects, ...nftOutputObjects],
+ isTimelocked,
+ );
+
+ const { totalNotOwnedStorageDepositReturnAmount } = summarizeMigratableObjectValues({
+ basicOutputs: basicOutputObjects,
+ nftOutputs: nftOutputObjects,
+ address: account?.address || '',
+ });
+
+ const [gasFee, gasFeeSymbol] = useFormatCoin(migrateData?.gasBudget, IOTA_TYPE_ARG);
+ const [totalStorageDepositReturnAmountFormatted, totalStorageDepositReturnAmountSymbol] =
+ useFormatCoin(totalNotOwnedStorageDepositReturnAmount.toString(), IOTA_TYPE_ARG);
+
+ return (
+
+ setOpen(false)} titleCentered />
+
+
+ {isGroupedMigrationError && !isLoading && (
+
}
+ />
+ )}
+ {isLoading ? (
+ <>
+
+
+
+
+
+
+
+
+
+
+
+
+ >
+ ) : (
+ <>
+
(
+
+ )}
+ >
+
+ 58}
+ render={(migrationObject) => (
+
+ )}
+ />
+
+
+
+
+
+
+
+
+ >
+ )}
+
+
+
+
+ ) : null
+ }
+ iconAfterText
+ fullWidth
+ />
+
+
+ );
+}
diff --git a/apps/wallet-dashboard/components/Dialogs/migration/views/index.ts b/apps/wallet-dashboard/components/Dialogs/migration/views/index.ts
new file mode 100644
index 00000000000..b5a03528f1e
--- /dev/null
+++ b/apps/wallet-dashboard/components/Dialogs/migration/views/index.ts
@@ -0,0 +1,4 @@
+// Copyright (c) 2024 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+export * from './ConfirmMigrationView';
diff --git a/apps/wallet-dashboard/components/migration/MigrationObjectLoading.tsx b/apps/wallet-dashboard/components/migration/MigrationObjectLoading.tsx
new file mode 100644
index 00000000000..297d1a8fa81
--- /dev/null
+++ b/apps/wallet-dashboard/components/migration/MigrationObjectLoading.tsx
@@ -0,0 +1,27 @@
+// Copyright (c) 2024 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+import { Card, CardImage, ImageShape, Skeleton } from '@iota/apps-ui-kit';
+
+export function MigrationObjectLoading() {
+ return (
+
+ {new Array(10).fill(0).map((_, index) => (
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ))}
+
+ );
+}
diff --git a/apps/wallet-dashboard/components/migration/MigrationObjectsPanel.tsx b/apps/wallet-dashboard/components/migration/MigrationObjectsPanel.tsx
index 915cc56dda5..a3864b329cf 100644
--- a/apps/wallet-dashboard/components/migration/MigrationObjectsPanel.tsx
+++ b/apps/wallet-dashboard/components/migration/MigrationObjectsPanel.tsx
@@ -12,15 +12,11 @@ import { StardustOutputDetailsFilter } from '@/lib/enums';
import {
Button,
ButtonType,
- Card,
- CardImage,
Chip,
- ImageShape,
InfoBox,
InfoBoxStyle,
InfoBoxType,
Panel,
- Skeleton,
Title,
} from '@iota/apps-ui-kit';
import type { IotaObjectData } from '@iota/iota-sdk/client';
@@ -30,6 +26,7 @@ import { useState } from 'react';
import { MigrationObjectDetailsCard } from './migration-object-details-card';
import VirtualList from '../VirtualList';
import { filterMigrationObjects } from '@/lib/utils';
+import { MigrationObjectLoading } from './MigrationObjectLoading';
const FILTERS = {
migratable: STARDUST_MIGRATABLE_OBJECTS_FILTER_LIST,
@@ -83,7 +80,7 @@ export function MigrationObjectsPanel({
- {isLoading &&
}
+ {isLoading &&
}
{isErrored && !isLoading && (
);
}
-
-function LoadingPanel() {
- return (
-
- {new Array(10).fill(0).map((_, index) => (
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- ))}
-
- );
-}
diff --git a/apps/wallet-dashboard/components/migration/index.ts b/apps/wallet-dashboard/components/migration/index.ts
index cf43709989c..2690e18d11b 100644
--- a/apps/wallet-dashboard/components/migration/index.ts
+++ b/apps/wallet-dashboard/components/migration/index.ts
@@ -2,3 +2,4 @@
// SPDX-License-Identifier: Apache-2.0
export * from './MigrationObjectsPanel';
+export * from './MigrationObjectLoading';
diff --git a/apps/wallet-dashboard/components/tiles/AssetTileLink.tsx b/apps/wallet-dashboard/components/tiles/AssetTileLink.tsx
index 64c4d805513..6d5d25d23b7 100644
--- a/apps/wallet-dashboard/components/tiles/AssetTileLink.tsx
+++ b/apps/wallet-dashboard/components/tiles/AssetTileLink.tsx
@@ -25,7 +25,7 @@ export function AssetTileLink({ asset, type, onClick }: AssetTileLinkProps): Rea
{type === AssetCategory.Visual ? (
} onClick={handleClick} />
) : (
-
+
)}
>
);
diff --git a/apps/wallet-dashboard/components/tiles/NonVisualAssetTile.tsx b/apps/wallet-dashboard/components/tiles/NonVisualAssetTile.tsx
index 0758714510e..c73c519bcd9 100644
--- a/apps/wallet-dashboard/components/tiles/NonVisualAssetTile.tsx
+++ b/apps/wallet-dashboard/components/tiles/NonVisualAssetTile.tsx
@@ -5,21 +5,25 @@ import { Card, CardAction, CardActionType, CardBody, CardType } from '@iota/apps
import { IotaObjectData } from '@iota/iota-sdk/client';
import { formatAddress, parseStructTag } from '@iota/iota-sdk/utils';
import { ArrowTopRight } from '@iota/ui-icons';
+import { ExplorerLink } from '../ExplorerLink';
+import { ExplorerLinkType } from '@iota/core';
type NonVisualAssetCardProps = {
asset: IotaObjectData;
-} & Pick, 'onClick'>;
+} & React.ComponentProps;
-export function NonVisualAssetCard({ asset, onClick }: NonVisualAssetCardProps): React.JSX.Element {
+export function NonVisualAssetCard({ asset }: NonVisualAssetCardProps): React.JSX.Element {
const { address, module, name } = parseStructTag(asset.type!);
return (
-
-
- } />
-
+
+
+
+ } />
+
+
);
}
diff --git a/apps/wallet/src/ui/app/components/receipt-card/TxnAmount.tsx b/apps/wallet/src/ui/app/components/receipt-card/TxnAmount.tsx
index 85d4ff481e0..cb17eab7d23 100644
--- a/apps/wallet/src/ui/app/components/receipt-card/TxnAmount.tsx
+++ b/apps/wallet/src/ui/app/components/receipt-card/TxnAmount.tsx
@@ -28,7 +28,7 @@ export function TxnAmount({ amount, coinType, subtitle, approximation }: TxnAmou
return Number(amount) !== 0 ? (
-
+
[!NOTE]
+> BigQuery and Snowflake are cloud-based data warehousing solutions.
+> After getting data there one can analyse it in the cloud using SQL queries.
+>
+> BigQuery is part of Google Cloud Platform: [https://cloud.google.com/bigquery]
+>
+> Snowflake isn't part of any large cloud provider: [https://snowflake.com]
+
+## **Relation to iota-indexer**
+
+### iota-indexer
+
+Currently iota-indexer is computing and storing analytical metrics about:
+
+- network statistics (amount of transactions, transactions per second)
+- (active) addresses (transactions senders/recipients)
+- move calls
+
+Those metrics are computed by a separate analytical worker instance of the indexer, but it uses the same DB as the main indexer instance.
+
+It seems that some of the values stored in main indexer tables by iota-indexer's `fullnode_sync_worker` are only stored there for analytical purposes (move calls, tx recipients) and could potentially be excluded from further processing if it was not for analytical reasons.
+
+### iota-analytics-indexer
+
+The `iota-analytics-indexer` is not computing any analytical metrics directly.
+It is only exporting data for further processing via external tools (BigQuery/SnowFlake).
+
+On this premise, the functionality in `iota-indexer` that is currently used for extracting analytics (and thus unrelated to the JSON-RPC/GraphQL service) could be moved out and delegated to another tool that processes data exported by `iota-analytics-indexer`.
+Then the sync logic in `iota-indexer` could be simplified as well to store only data that is needed for the purposes of the RPC APIs.
+
+## **Schemas**
+
+The crate provides:
+
+- [BigQuery Schemas](src/store/bq/schemas/)
+- [SnowFlake Schemas](src/store/snowflake/schemas/)
+- [Rust struct representations](src/tables.rs)
+
+for the data that it is exporting.
+
+The tables covered by the schemas:
+
+- CHECKPOINT
+- EVENT
+- MOVE_CALL
+- OBJECT
+- MOVE_PACKAGE
+- TRANSACTION_OBJECT - input and output objects for given transactions
+- TRANSACTION
+
+> [!NOTE]
+> The following rust structs currently do not have DB schemas prepared:
+>
+> - DynamicFieldEntry
+> - WrappedObjectEntry
+
+## **Architecture**
+
+When running the indexer, one needs to specify the object type to be extracted from checkpoints and uploaded to the cloud.
+
+The following object types are supported:
+
+- Checkpoint
+- Object
+- Transaction
+- TransactionObjects
+- Event
+- MoveCall
+- MovePackage
+- DynamicField
+- WrappedObject
+
+Only one object type can be passed in a given run; to process multiple object types, multiple analytics indexer instances must be run.
+
+In general, the data flow is as follows:
+
+- Checkpoints are read via JSON RPC using reused code from `iota_data_ingestion_core`.
+- Checkpoints are processed by an appropriate handler (e.g. `EventHandler`), which extracts relevant objects from each transaction of the checkpoint.
+- Objects are passed to the Writer, which writes the objects to a local temporary store in CSV or Parquet format.
+- The `AnalyticsProcessor` syncs the objects from the local store to the remote store (S3/GCS/Azure, or also local, for testing purposes).
+- Every 5 minutes the last processed checkpoint ID is fetched from BigQuery/Snowflake and reported as a metric.
+
+> [!NOTE]
+> It is assumed that data from the big object store will be readable from BigQuery/Snowflake automatically, the indexer is not putting the data in BigQuery/Snowflake tables explicitly.
+
+Here is a graph summarizing the data flow:
+
+```mermaid
+flowchart TD
+ FNODE["Fullnode/Indexer"] <-->|JSON RPC| CPREADER["`IndexerExecutor/CheckpointReader from the **iota_data_ingestion_core** package`"];
+ subgraph "`**iota-analytics-indexer**`"
+ CPREADER -->|"`Executor calls **AnalyticsProcessor** for each checkpoint, which in turn passes the checkpoint to appropriate Handler`"| HANDLER["CheckpointHandler/EventHandler etc., depending on indexer configuration"]
+ HANDLER -->|"`**AnalyticsProcessor** reads object data extracted from the checkpoint by the Handler and passes it to the Writer`"| WRITER["CSVWriter/ParquetWriter"]
+ WRITER -->|Writes objects to temporary local storage| DISK[Temporary Local Storage]
+ DISK --> REMOTESYNC["`Task inside of **AnalyticsProcessor** that removes files from Local Storage and uploads them to Remote Storage(S3/GCS/Azure)`"]
+ WRITER -->|"`Once every few checkpoints, **AnalyticsProcessor** calls cut() to prepare file to be sent, FileMetadata is sent to the Remote Sync Task which triggers the sync`"| REMOTESYNC
+ REMOTESYNC -->|Some process outside of analytics indexer makes the newly uploaded data available via BigQuery/Snowflake tables| BQSF["BigQuery/Snowflake"]
+ BQSF -->|"Every 5 minutes max processed checkpoint number is read from the tables"| METRICS[Analytics Indexer Prometheus Metrics]
+ end
+
+linkStyle 6 stroke:red,stroke-width:2px,stroke-dasharray:3;
+```
+
+## **Metrics**
+
+The following Prometheus metrics are served by `iota-analytics-indexer` to monitor the indexer execution:
+
+- **total_received**: count of checkpoints processed in a given run
+- **last_uploaded_checkpoint**: id of last checkpoint uploaded to the big object store
+- **max_checkpoint_on_store**: id of last checkpoint available via BigQuery/Snowflake tables
diff --git a/crates/iota-core/src/unit_tests/authority_aggregator_tests.rs b/crates/iota-core/src/unit_tests/authority_aggregator_tests.rs
index f0ec7181006..ac734a40e8f 100644
--- a/crates/iota-core/src/unit_tests/authority_aggregator_tests.rs
+++ b/crates/iota-core/src/unit_tests/authority_aggregator_tests.rs
@@ -345,254 +345,262 @@ fn effects_with_tx(digest: TransactionDigest) -> TransactionEffects {
effects
}
-/// The intent of this is to test whether client side timeouts
-/// have any impact on the server execution. Turns out because
-/// we spawn a tokio task on the server, client timing out and
-/// terminating the connection does not stop server from completing
-/// execution on its side
-#[sim_test(config = "constant_latency_ms(1)")]
-async fn test_quorum_map_and_reduce_timeout() {
- let build_config = BuildConfig::new_for_testing();
- let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
- path.extend(["src", "unit_tests", "data", "object_basics"]);
- let client_ip = make_socket_addr();
- let modules: Vec<_> = build_config
- .build(&path)
- .unwrap()
- .get_modules()
- .cloned()
- .collect();
- let pkg = Object::new_package_for_testing(
- &modules,
- TransactionDigest::genesis_marker(),
- BuiltInFramework::genesis_move_packages(),
- )
- .unwrap();
- let (addr1, key1): (_, AccountKeyPair) = get_key_pair();
- let gas_object1 = Object::with_owner_for_testing(addr1);
- let genesis_objects = vec![pkg.clone(), gas_object1.clone()];
- let (mut authorities, _, genesis, _) = init_local_authorities(4, genesis_objects).await;
- let rgp = reference_gas_price(&authorities);
- let pkg = genesis.object(pkg.id()).unwrap();
- let gas_object1 = genesis.object(gas_object1.id()).unwrap();
- let gas_ref_1 = gas_object1.compute_object_reference();
- let tx = create_object_move_transaction(addr1, &key1, addr1, 100, pkg.id(), gas_ref_1, rgp);
- let certified_tx = authorities
- .process_transaction(tx.clone(), Some(client_ip))
- .await;
- assert!(certified_tx.is_ok());
- let certificate = certified_tx.unwrap().into_cert_for_testing();
- // Send request with a very small timeout to trigger timeout error
- authorities.timeouts.pre_quorum_timeout = Duration::from_nanos(0);
- authorities.timeouts.post_quorum_timeout = Duration::from_nanos(0);
- let request = HandleCertificateRequestV1 {
- certificate: certificate.clone(),
- include_events: true,
- include_input_objects: false,
- include_output_objects: false,
- include_auxiliary_data: false,
- };
- let certified_effects = authorities
- .process_certificate(request, Some(client_ip))
- .await;
- // Ensure it is an error
- assert!(certified_effects.is_err());
- assert!(matches!(
- certified_effects,
- Err(AggregatorProcessCertificateError::RetryableExecuteCertificate { .. })
- ));
- tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
- let tx_info = TransactionInfoRequest {
- transaction_digest: *tx.digest(),
- };
- for (_, client) in authorities.authority_clients.iter() {
- let resp = client
- .handle_transaction_info_request(tx_info.clone())
+mod move_tests {
+ use super::*;
+
+ /// The intent of this is to test whether client side timeouts
+ /// have any impact on the server execution. Turns out because
+ /// we spawn a tokio task on the server, client timing out and
+ /// terminating the connection does not stop server from completing
+ /// execution on its side
+ #[sim_test(config = "constant_latency_ms(1)")]
+ async fn test_quorum_map_and_reduce_timeout() {
+ let build_config = BuildConfig::new_for_testing();
+ let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ path.extend(["src", "unit_tests", "data", "object_basics"]);
+ let client_ip = make_socket_addr();
+ let modules: Vec<_> = build_config
+ .build(&path)
+ .unwrap()
+ .get_modules()
+ .cloned()
+ .collect();
+ let pkg = Object::new_package_for_testing(
+ &modules,
+ TransactionDigest::genesis_marker(),
+ BuiltInFramework::genesis_move_packages(),
+ )
+ .unwrap();
+ let (addr1, key1): (_, AccountKeyPair) = get_key_pair();
+ let gas_object1 = Object::with_owner_for_testing(addr1);
+ let genesis_objects = vec![pkg.clone(), gas_object1.clone()];
+ let (mut authorities, _, genesis, _) = init_local_authorities(4, genesis_objects).await;
+ let rgp = reference_gas_price(&authorities);
+ let pkg = genesis.object(pkg.id()).unwrap();
+ let gas_object1 = genesis.object(gas_object1.id()).unwrap();
+ let gas_ref_1 = gas_object1.compute_object_reference();
+ let tx = create_object_move_transaction(addr1, &key1, addr1, 100, pkg.id(), gas_ref_1, rgp);
+ let certified_tx = authorities
+ .process_transaction(tx.clone(), Some(client_ip))
.await;
- // Server should return a signed effect even though previous calls
- // failed due to timeout
- assert!(resp.is_ok());
- assert!(resp.unwrap().is_executed());
+ assert!(certified_tx.is_ok());
+ let certificate = certified_tx.unwrap().into_cert_for_testing();
+ // Send request with a very small timeout to trigger timeout error
+ authorities.timeouts.pre_quorum_timeout = Duration::from_nanos(0);
+ authorities.timeouts.post_quorum_timeout = Duration::from_nanos(0);
+ let request = HandleCertificateRequestV1 {
+ certificate: certificate.clone(),
+ include_events: true,
+ include_input_objects: false,
+ include_output_objects: false,
+ include_auxiliary_data: false,
+ };
+ let certified_effects = authorities
+ .process_certificate(request, Some(client_ip))
+ .await;
+ // Ensure it is an error
+ assert!(certified_effects.is_err());
+ assert!(matches!(
+ certified_effects,
+ Err(AggregatorProcessCertificateError::RetryableExecuteCertificate { .. })
+ ));
+ tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
+ let tx_info = TransactionInfoRequest {
+ transaction_digest: *tx.digest(),
+ };
+ for (_, client) in authorities.authority_clients.iter() {
+ let resp = client
+ .handle_transaction_info_request(tx_info.clone())
+ .await;
+ // Server should return a signed effect even though previous calls
+ // failed due to timeout
+ assert!(resp.is_ok());
+ assert!(resp.unwrap().is_executed());
+ }
}
-}
-#[sim_test]
-async fn test_map_reducer() {
- let (authorities, _, _, _) = init_local_authorities(4, vec![]).await;
-
- // Test: mapper errors do not get propagated up, reducer works
- let res = quorum_map_then_reduce_with_timeout::<_, _, _, _, _, (), _, _, _>(
- authorities.committee.clone(),
- authorities.authority_clients.clone(),
- 0usize,
- |_name, _client| {
- Box::pin(async move {
- let res: Result = Err(IotaError::TooManyIncorrectAuthorities {
- errors: vec![],
- action: "".to_string(),
- });
- res
- })
- },
- |mut accumulated_state, _authority_name, _authority_weight, result| {
- Box::pin(async move {
- assert!(matches!(
- result,
- Err(IotaError::TooManyIncorrectAuthorities { .. })
- ));
- accumulated_state += 1;
- ReduceOutput::Continue(accumulated_state)
- })
- },
- Duration::from_millis(1000),
- )
- .await
- .unwrap_err();
- assert_eq!(4, res);
-
- // Test: early end
- let res = quorum_map_then_reduce_with_timeout(
- authorities.committee.clone(),
- authorities.authority_clients.clone(),
- 0usize,
- |_name, _client| Box::pin(async move { Ok::<(), anyhow::Error>(()) }),
- |mut accumulated_state, _authority_name, _authority_weight, _result| {
- Box::pin(async move {
- if accumulated_state > 2 {
- ReduceOutput::Success(accumulated_state)
- } else {
+ #[sim_test]
+ async fn test_map_reducer() {
+ let (authorities, _, _, _) = init_local_authorities(4, vec![]).await;
+
+ // Test: mapper errors do not get propagated up, reducer works
+ let res = quorum_map_then_reduce_with_timeout::<_, _, _, _, _, (), _, _, _>(
+ authorities.committee.clone(),
+ authorities.authority_clients.clone(),
+ 0usize,
+ |_name, _client| {
+ Box::pin(async move {
+ let res: Result =
+ Err(IotaError::TooManyIncorrectAuthorities {
+ errors: vec![],
+ action: "".to_string(),
+ });
+ res
+ })
+ },
+ |mut accumulated_state, _authority_name, _authority_weight, result| {
+ Box::pin(async move {
+ assert!(matches!(
+ result,
+ Err(IotaError::TooManyIncorrectAuthorities { .. })
+ ));
accumulated_state += 1;
ReduceOutput::Continue(accumulated_state)
- }
- })
- },
- Duration::from_millis(1000),
- )
- .await
- .unwrap();
- assert_eq!(3, res.0);
-
- // Test: Global timeout works
- let res = quorum_map_then_reduce_with_timeout::<_, _, _, _, _, (), _, _, _>(
- authorities.committee.clone(),
- authorities.authority_clients.clone(),
- 0usize,
- |_name, _client| {
- Box::pin(async move {
- // 10 mins
- tokio::time::sleep(Duration::from_secs(10 * 60)).await;
- Ok::<(), anyhow::Error>(())
- })
- },
- |_accumulated_state, _authority_name, _authority_weight, _result| {
- Box::pin(async move { ReduceOutput::Continue(0) })
- },
- Duration::from_millis(10),
- )
- .await
- .unwrap_err();
- assert_eq!(0, res);
-
- // Test: Local timeout works
- let bad_auth = *authorities.committee.sample();
- let res = quorum_map_then_reduce_with_timeout::<_, _, _, _, _, (), _, _, _>(
- authorities.committee.clone(),
- authorities.authority_clients.clone(),
- HashSet::new(),
- |_name, _client| {
- Box::pin(async move {
- // 10 mins
- if _name == bad_auth {
+ })
+ },
+ Duration::from_millis(1000),
+ )
+ .await
+ .unwrap_err();
+ assert_eq!(4, res);
+
+ // Test: early end
+ let res = quorum_map_then_reduce_with_timeout(
+ authorities.committee.clone(),
+ authorities.authority_clients.clone(),
+ 0usize,
+ |_name, _client| Box::pin(async move { Ok::<(), anyhow::Error>(()) }),
+ |mut accumulated_state, _authority_name, _authority_weight, _result| {
+ Box::pin(async move {
+ if accumulated_state > 2 {
+ ReduceOutput::Success(accumulated_state)
+ } else {
+ accumulated_state += 1;
+ ReduceOutput::Continue(accumulated_state)
+ }
+ })
+ },
+ Duration::from_millis(1000),
+ )
+ .await
+ .unwrap();
+ assert_eq!(3, res.0);
+
+ // Test: Global timeout works
+ let res = quorum_map_then_reduce_with_timeout::<_, _, _, _, _, (), _, _, _>(
+ authorities.committee.clone(),
+ authorities.authority_clients.clone(),
+ 0usize,
+ |_name, _client| {
+ Box::pin(async move {
+ // 10 mins
tokio::time::sleep(Duration::from_secs(10 * 60)).await;
- }
- Ok::<(), anyhow::Error>(())
- })
- },
- |mut accumulated_state, authority_name, _authority_weight, _result| {
- Box::pin(async move {
- accumulated_state.insert(authority_name);
- if accumulated_state.len() <= 3 {
- ReduceOutput::Continue(accumulated_state)
- } else {
- ReduceOutput::ContinueWithTimeout(accumulated_state, Duration::from_millis(10))
- }
- })
- },
- // large delay
- Duration::from_millis(10 * 60),
- )
- .await;
- assert_eq!(res.as_ref().unwrap_err().len(), 3);
- assert!(!res.as_ref().unwrap_err().contains(&bad_auth));
-}
+ Ok::<(), anyhow::Error>(())
+ })
+ },
+ |_accumulated_state, _authority_name, _authority_weight, _result| {
+ Box::pin(async move { ReduceOutput::Continue(0) })
+ },
+ Duration::from_millis(10),
+ )
+ .await
+ .unwrap_err();
+ assert_eq!(0, res);
+
+ // Test: Local timeout works
+ let bad_auth = *authorities.committee.sample();
+ let res = quorum_map_then_reduce_with_timeout::<_, _, _, _, _, (), _, _, _>(
+ authorities.committee.clone(),
+ authorities.authority_clients.clone(),
+ HashSet::new(),
+ |_name, _client| {
+ Box::pin(async move {
+ // 10 mins
+ if _name == bad_auth {
+ tokio::time::sleep(Duration::from_secs(10 * 60)).await;
+ }
+ Ok::<(), anyhow::Error>(())
+ })
+ },
+ |mut accumulated_state, authority_name, _authority_weight, _result| {
+ Box::pin(async move {
+ accumulated_state.insert(authority_name);
+ if accumulated_state.len() <= 3 {
+ ReduceOutput::Continue(accumulated_state)
+ } else {
+ ReduceOutput::ContinueWithTimeout(
+ accumulated_state,
+ Duration::from_millis(10),
+ )
+ }
+ })
+ },
+ // large delay
+ Duration::from_millis(10 * 60),
+ )
+ .await;
+ assert_eq!(res.as_ref().unwrap_err().len(), 3);
+ assert!(!res.as_ref().unwrap_err().contains(&bad_auth));
+ }
-#[sim_test]
-async fn test_process_transaction_fault_success() {
- // This test exercises the 4 different possible failing case when one authority
- // is faulty. A transaction is sent to all authories, however one of them
- // will error out either before or after processing the transaction.
- // A cert should still be created, and sent out to all authorities again. This
- // time a different authority errors out either before or after processing
- // the cert.
- for i in 0..4 {
- let mut config_before_process_transaction = LocalAuthorityClientFaultConfig::default();
- if i % 2 == 0 {
- config_before_process_transaction.fail_before_handle_transaction = true;
- } else {
- config_before_process_transaction.fail_after_handle_transaction = true;
- }
- let mut config_before_process_certificate = LocalAuthorityClientFaultConfig::default();
- if i < 2 {
- config_before_process_certificate.fail_before_handle_confirmation = true;
- } else {
- config_before_process_certificate.fail_after_handle_confirmation = true;
+ #[sim_test]
+ async fn test_process_transaction_fault_success() {
+ // This test exercises the 4 different possible failing cases when one authority
+ // is faulty. A transaction is sent to all authorities, however one of them
+ // will error out either before or after processing the transaction.
+ // A cert should still be created, and sent out to all authorities again. This
+ // time a different authority errors out either before or after processing
+ // the cert.
+ for i in 0..4 {
+ let mut config_before_process_transaction = LocalAuthorityClientFaultConfig::default();
+ if i % 2 == 0 {
+ config_before_process_transaction.fail_before_handle_transaction = true;
+ } else {
+ config_before_process_transaction.fail_after_handle_transaction = true;
+ }
+ let mut config_before_process_certificate = LocalAuthorityClientFaultConfig::default();
+ if i < 2 {
+ config_before_process_certificate.fail_before_handle_confirmation = true;
+ } else {
+ config_before_process_certificate.fail_after_handle_confirmation = true;
+ }
+ assert!(
+ execute_transaction_with_fault_configs(
+ &[(0, config_before_process_transaction)],
+ &[(1, config_before_process_certificate)],
+ )
+ .await
+ );
}
+ }
+
+ #[sim_test]
+ async fn test_process_transaction_fault_fail() {
+ // This test exercises the cases when 2 authorities are faulty,
+ // and hence no quorum could be formed. This is tested on the
+ // process_transaction phase.
+ let fail_before_process_transaction_config = LocalAuthorityClientFaultConfig {
+ fail_before_handle_transaction: true,
+ ..Default::default()
+ };
assert!(
- execute_transaction_with_fault_configs(&[(0, config_before_process_transaction)], &[(
- 1,
- config_before_process_certificate
- )],)
+ !execute_transaction_with_fault_configs(
+ &[
+ (0, fail_before_process_transaction_config),
+ (1, fail_before_process_transaction_config),
+ ],
+ &[],
+ )
.await
);
}
-}
-#[sim_test]
-async fn test_process_transaction_fault_fail() {
- // This test exercises the cases when there are 2 authorities faulty,
- // and hence no quorum could be formed. This is tested on the
- // process_transaction phase.
- let fail_before_process_transaction_config = LocalAuthorityClientFaultConfig {
- fail_before_handle_transaction: true,
- ..Default::default()
- };
- assert!(
- !execute_transaction_with_fault_configs(
- &[
- (0, fail_before_process_transaction_config),
- (1, fail_before_process_transaction_config),
- ],
- &[],
- )
- .await
- );
-}
-
-#[sim_test]
-async fn test_process_certificate_fault_fail() {
- // Similar to test_process_transaction_fault_fail but tested on the
- // process_certificate phase.
- let fail_before_process_certificate_config = LocalAuthorityClientFaultConfig {
- fail_before_handle_confirmation: true,
- ..Default::default()
- };
- assert!(
- !execute_transaction_with_fault_configs(&[], &[
- (0, fail_before_process_certificate_config),
- (1, fail_before_process_certificate_config),
- ],)
- .await
- );
+ #[sim_test]
+ async fn test_process_certificate_fault_fail() {
+ // Similar to test_process_transaction_fault_fail but tested on the
+ // process_certificate phase.
+ let fail_before_process_certificate_config = LocalAuthorityClientFaultConfig {
+ fail_before_handle_confirmation: true,
+ ..Default::default()
+ };
+ assert!(
+ !execute_transaction_with_fault_configs(&[], &[
+ (0, fail_before_process_certificate_config),
+ (1, fail_before_process_certificate_config),
+ ],)
+ .await
+ );
+ }
}
#[tokio::test(start_paused = true)]
diff --git a/crates/iota-core/src/unit_tests/execution_driver_tests.rs b/crates/iota-core/src/unit_tests/execution_driver_tests.rs
index 50363e411c1..e41d0583860 100644
--- a/crates/iota-core/src/unit_tests/execution_driver_tests.rs
+++ b/crates/iota-core/src/unit_tests/execution_driver_tests.rs
@@ -298,268 +298,300 @@ async fn execute_shared_on_first_three_authorities(
(cert, effects)
}
-#[tokio::test(flavor = "current_thread", start_paused = true)]
-async fn test_execution_with_dependencies() {
- telemetry_subscribers::init_for_testing();
-
- // ---- Initialize a network with three accounts, each with 10 gas objects.
-
- const NUM_ACCOUNTS: usize = 3;
- let accounts: Vec<(_, AccountKeyPair)> =
- (0..NUM_ACCOUNTS).map(|_| get_key_pair()).collect_vec();
-
- const NUM_GAS_OBJECTS_PER_ACCOUNT: usize = 10;
- let gas_objects = (0..NUM_ACCOUNTS)
- .map(|i| {
- (0..NUM_GAS_OBJECTS_PER_ACCOUNT)
- .map(|_| Object::with_owner_for_testing(accounts[i].0))
- .collect_vec()
- })
- .collect_vec();
- let all_gas_objects = gas_objects.clone().into_iter().flatten().collect_vec();
-
- let (aggregator, authorities, _genesis, package) =
- init_local_authorities(4, all_gas_objects.clone()).await;
- let authority_clients: Vec<_> = authorities
- .iter()
- .map(|a| aggregator.authority_clients[&a.name].clone())
- .collect();
- let rgp = authorities
- .first()
- .unwrap()
- .reference_gas_price_for_testing()
- .unwrap();
-
- // ---- Create an owned object and a shared counter.
-
- let mut executed_owned_certs = Vec::new();
- let mut executed_shared_certs = Vec::new();
-
- // Initialize an object owned by 1st account.
- let (addr1, key1): &(_, AccountKeyPair) = &accounts[0];
- let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0][0].id()).await;
- let tx1 = create_object_move_transaction(*addr1, key1, *addr1, 100, package, gas_ref, rgp);
- let (cert, effects1) =
- execute_owned_on_first_three_authorities(&authority_clients, &aggregator.committee, &tx1)
- .await;
- executed_owned_certs.push(cert);
- let mut owned_object_ref = effects1.created()[0].0;
-
- // Initialize a shared counter, re-using gas_ref_0 so it has to execute after
- // tx1.
- let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0][0].id()).await;
- let tx2 = TestTransactionBuilder::new(*addr1, gas_ref, rgp)
- .call_counter_create(package)
- .build_and_sign(key1);
- let (cert, effects2) =
- execute_owned_on_first_three_authorities(&authority_clients, &aggregator.committee, &tx2)
- .await;
- executed_owned_certs.push(cert);
- let (mut shared_counter_ref, owner) = effects2.created()[0];
- let shared_counter_initial_version = if let Owner::Shared {
- initial_shared_version,
- } = owner
- {
- // Because the gas object used has version 2, the initial lamport timestamp of
- // the shared counter is 3.
- assert_eq!(initial_shared_version.value(), 3);
- initial_shared_version
- } else {
- panic!("Not a shared object! {:?} {:?}", shared_counter_ref, owner);
- };
+mod move_tests {
+ use super::*;
+
+ #[tokio::test(flavor = "current_thread", start_paused = true)]
+ async fn test_execution_with_dependencies() {
+ telemetry_subscribers::init_for_testing();
+
+ // ---- Initialize a network with three accounts, each with 10 gas objects.
+
+ const NUM_ACCOUNTS: usize = 3;
+ let accounts: Vec<(_, AccountKeyPair)> =
+ (0..NUM_ACCOUNTS).map(|_| get_key_pair()).collect_vec();
+
+ const NUM_GAS_OBJECTS_PER_ACCOUNT: usize = 10;
+ let gas_objects = (0..NUM_ACCOUNTS)
+ .map(|i| {
+ (0..NUM_GAS_OBJECTS_PER_ACCOUNT)
+ .map(|_| Object::with_owner_for_testing(accounts[i].0))
+ .collect_vec()
+ })
+ .collect_vec();
+ let all_gas_objects = gas_objects.clone().into_iter().flatten().collect_vec();
+
+ let (aggregator, authorities, _genesis, package) =
+ init_local_authorities(4, all_gas_objects.clone()).await;
+ let authority_clients: Vec<_> = authorities
+ .iter()
+ .map(|a| aggregator.authority_clients[&a.name].clone())
+ .collect();
+ let rgp = authorities
+ .first()
+ .unwrap()
+ .reference_gas_price_for_testing()
+ .unwrap();
- // ---- Execute transactions with dependencies on first 3 nodes in the
- // dependency order.
+ // ---- Create an owned object and a shared counter.
- // In each iteration, creates an owned and a shared transaction that depends on
- // previous input and gas objects.
- for i in 0..100 {
- let source_index = i % NUM_ACCOUNTS;
- let (source_addr, source_key) = &accounts[source_index];
+ let mut executed_owned_certs = Vec::new();
+ let mut executed_shared_certs = Vec::new();
- let gas_ref = get_latest_ref(
- authority_clients[source_index].clone(),
- gas_objects[source_index][i * 3 % NUM_GAS_OBJECTS_PER_ACCOUNT].id(),
+ // Initialize an object owned by 1st account.
+ let (addr1, key1): &(_, AccountKeyPair) = &accounts[0];
+ let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0][0].id()).await;
+ let tx1 = create_object_move_transaction(*addr1, key1, *addr1, 100, package, gas_ref, rgp);
+ let (cert, effects1) = execute_owned_on_first_three_authorities(
+ &authority_clients,
+ &aggregator.committee,
+ &tx1,
)
.await;
- let (dest_addr, _) = &accounts[(i + 1) % NUM_ACCOUNTS];
- let owned_tx = make_transfer_object_move_transaction(
- *source_addr,
- source_key,
- *dest_addr,
- owned_object_ref,
- package,
- gas_ref,
- TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE,
- rgp,
- );
- let (cert, effects) = execute_owned_on_first_three_authorities(
+ executed_owned_certs.push(cert);
+ let mut owned_object_ref = effects1.created()[0].0;
+
+ // Initialize a shared counter, re-using gas_ref_0 so it has to execute after
+ // tx1.
+ let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0][0].id()).await;
+ let tx2 = TestTransactionBuilder::new(*addr1, gas_ref, rgp)
+ .call_counter_create(package)
+ .build_and_sign(key1);
+ let (cert, effects2) = execute_owned_on_first_three_authorities(
&authority_clients,
&aggregator.committee,
- &owned_tx,
+ &tx2,
)
.await;
executed_owned_certs.push(cert);
- owned_object_ref = effects.mutated_excluding_gas().first().unwrap().0;
+ let (mut shared_counter_ref, owner) = effects2.created()[0];
+ let shared_counter_initial_version = if let Owner::Shared {
+ initial_shared_version,
+ } = owner
+ {
+ // Because the gas object used has version 2, the initial lamport timestamp of
+ // the shared counter is 3.
+ assert_eq!(initial_shared_version.value(), 3);
+ initial_shared_version
+ } else {
+ panic!("Not a shared object! {:?} {:?}", shared_counter_ref, owner);
+ };
- let gas_ref = get_latest_ref(
- authority_clients[source_index].clone(),
- gas_objects[source_index][i * 7 % NUM_GAS_OBJECTS_PER_ACCOUNT].id(),
- )
- .await;
- let shared_tx = TestTransactionBuilder::new(*source_addr, gas_ref, rgp)
- .call_counter_increment(
+ // ---- Execute transactions with dependencies on first 3 nodes in the
+ // dependency order.
+
+ // In each iteration, creates an owned and a shared transaction that depends on
+ // previous input and gas objects.
+ for i in 0..100 {
+ let source_index = i % NUM_ACCOUNTS;
+ let (source_addr, source_key) = &accounts[source_index];
+
+ let gas_ref = get_latest_ref(
+ authority_clients[source_index].clone(),
+ gas_objects[source_index][i * 3 % NUM_GAS_OBJECTS_PER_ACCOUNT].id(),
+ )
+ .await;
+ let (dest_addr, _) = &accounts[(i + 1) % NUM_ACCOUNTS];
+ let owned_tx = make_transfer_object_move_transaction(
+ *source_addr,
+ source_key,
+ *dest_addr,
+ owned_object_ref,
package,
- shared_counter_ref.0,
- shared_counter_initial_version,
+ gas_ref,
+ TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE,
+ rgp,
+ );
+ let (cert, effects) = execute_owned_on_first_three_authorities(
+ &authority_clients,
+ &aggregator.committee,
+ &owned_tx,
)
- .build_and_sign(source_key);
- let (cert, effects) = execute_shared_on_first_three_authorities(
- &authority_clients,
- &aggregator.committee,
- &shared_tx,
- )
- .await;
- executed_shared_certs.push(cert);
- shared_counter_ref = effects.mutated_excluding_gas().first().unwrap().0;
- }
+ .await;
+ executed_owned_certs.push(cert);
+ owned_object_ref = effects.mutated_excluding_gas().first().unwrap().0;
- // ---- Execute transactions in reverse dependency order on the last authority.
+ let gas_ref = get_latest_ref(
+ authority_clients[source_index].clone(),
+ gas_objects[source_index][i * 7 % NUM_GAS_OBJECTS_PER_ACCOUNT].id(),
+ )
+ .await;
+ let shared_tx = TestTransactionBuilder::new(*source_addr, gas_ref, rgp)
+ .call_counter_increment(
+ package,
+ shared_counter_ref.0,
+ shared_counter_initial_version,
+ )
+ .build_and_sign(source_key);
+ let (cert, effects) = execute_shared_on_first_three_authorities(
+ &authority_clients,
+ &aggregator.committee,
+ &shared_tx,
+ )
+ .await;
+ executed_shared_certs.push(cert);
+ shared_counter_ref = effects.mutated_excluding_gas().first().unwrap().0;
+ }
- // Sets shared object locks in the executed order.
- for cert in executed_shared_certs.iter() {
- send_consensus_no_execution(&authorities[3], cert).await;
- }
+ // ---- Execute transactions in reverse dependency order on the last authority.
- // Enqueue certs out of dependency order for executions.
- for cert in executed_shared_certs.iter().rev() {
- authorities[3].enqueue_certificates_for_execution(
- vec![cert.clone()],
- &authorities[3].epoch_store_for_testing(),
- );
- }
- for cert in executed_owned_certs.iter().rev() {
- authorities[3].enqueue_certificates_for_execution(
- vec![cert.clone()],
- &authorities[3].epoch_store_for_testing(),
- );
- }
+ // Sets shared object locks in the executed order.
+ for cert in executed_shared_certs.iter() {
+ send_consensus_no_execution(&authorities[3], cert).await;
+ }
- // All certs should get executed eventually.
- let digests: Vec<_> = executed_shared_certs
- .iter()
- .chain(executed_owned_certs.iter())
- .map(|cert| *cert.digest())
- .collect();
- authorities[3]
- .get_transaction_cache_reader()
- .notify_read_executed_effects(&digests)
- .await
- .unwrap();
-}
+ // Enqueue certs out of dependency order for executions.
+ for cert in executed_shared_certs.iter().rev() {
+ authorities[3].enqueue_certificates_for_execution(
+ vec![cert.clone()],
+ &authorities[3].epoch_store_for_testing(),
+ );
+ }
+ for cert in executed_owned_certs.iter().rev() {
+ authorities[3].enqueue_certificates_for_execution(
+ vec![cert.clone()],
+ &authorities[3].epoch_store_for_testing(),
+ );
+ }
-fn make_socket_addr() -> std::net::SocketAddr {
- SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0)
-}
+ // All certs should get executed eventually.
+ let digests: Vec<_> = executed_shared_certs
+ .iter()
+ .chain(executed_owned_certs.iter())
+ .map(|cert| *cert.digest())
+ .collect();
+ authorities[3]
+ .get_transaction_cache_reader()
+ .notify_read_executed_effects(&digests)
+ .await
+ .unwrap();
+ }
-async fn try_sign_on_first_three_authorities(
- authority_clients: &[Arc>],
- committee: &Committee,
- txn: &Transaction,
-) -> IotaResult {
- for client in authority_clients.iter().take(3) {
- client
- .handle_transaction(txn.clone(), Some(make_socket_addr()))
- .await?;
+ fn make_socket_addr() -> std::net::SocketAddr {
+ SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0)
}
- extract_cert(authority_clients, committee, txn.digest())
- .await
- .try_into_verified_for_testing(committee, &Default::default())
-}
-#[tokio::test(flavor = "current_thread", start_paused = true)]
-async fn test_per_object_overload() {
- telemetry_subscribers::init_for_testing();
+ async fn try_sign_on_first_three_authorities(
+ authority_clients: &[Arc>],
+ committee: &Committee,
+ txn: &Transaction,
+ ) -> IotaResult {
+ for client in authority_clients.iter().take(3) {
+ client
+ .handle_transaction(txn.clone(), Some(make_socket_addr()))
+ .await?;
+ }
+ extract_cert(authority_clients, committee, txn.digest())
+ .await
+ .try_into_verified_for_testing(committee, &Default::default())
+ }
- // Initialize a network with 1 account and 2000 gas objects.
- let (addr, key): (_, AccountKeyPair) = get_key_pair();
- const NUM_GAS_OBJECTS_PER_ACCOUNT: usize = 2000;
- let gas_objects = (0..NUM_GAS_OBJECTS_PER_ACCOUNT)
- .map(|_| Object::with_owner_for_testing(addr))
- .collect_vec();
- let (aggregator, authorities, _genesis, package) =
- init_local_authorities(4, gas_objects.clone()).await;
- let rgp = authorities
- .first()
- .unwrap()
- .reference_gas_price_for_testing()
+ #[tokio::test(flavor = "current_thread", start_paused = true)]
+ async fn test_per_object_overload() {
+ telemetry_subscribers::init_for_testing();
+
+ // Initialize a network with 1 account and 2000 gas objects.
+ let (addr, key): (_, AccountKeyPair) = get_key_pair();
+ const NUM_GAS_OBJECTS_PER_ACCOUNT: usize = 2000;
+ let gas_objects = (0..NUM_GAS_OBJECTS_PER_ACCOUNT)
+ .map(|_| Object::with_owner_for_testing(addr))
+ .collect_vec();
+ let (aggregator, authorities, _genesis, package) =
+ init_local_authorities(4, gas_objects.clone()).await;
+ let rgp = authorities
+ .first()
+ .unwrap()
+ .reference_gas_price_for_testing()
+ .unwrap();
+ let authority_clients: Vec<_> = authorities
+ .iter()
+ .map(|a| aggregator.authority_clients[&a.name].clone())
+ .collect();
+
+ // Create a shared counter.
+ let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0].id()).await;
+ let create_counter_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
+ .call_counter_create(package)
+ .build_and_sign(&key);
+ let create_counter_cert = try_sign_on_first_three_authorities(
+ &authority_clients,
+ &aggregator.committee,
+ &create_counter_txn,
+ )
+ .await
.unwrap();
- let authority_clients: Vec<_> = authorities
- .iter()
- .map(|a| aggregator.authority_clients[&a.name].clone())
- .collect();
-
- // Create a shared counter.
- let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0].id()).await;
- let create_counter_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
- .call_counter_create(package)
- .build_and_sign(&key);
- let create_counter_cert = try_sign_on_first_three_authorities(
- &authority_clients,
- &aggregator.committee,
- &create_counter_txn,
- )
- .await
- .unwrap();
- for authority in authorities.iter().take(3) {
- send_consensus(authority, &create_counter_cert).await;
- }
- for authority in authorities.iter().take(3) {
- authority
+ for authority in authorities.iter().take(3) {
+ send_consensus(authority, &create_counter_cert).await;
+ }
+ for authority in authorities.iter().take(3) {
+ authority
+ .get_transaction_cache_reader()
+ .notify_read_executed_effects(&[*create_counter_cert.digest()])
+ .await
+ .unwrap()
+ .pop()
+ .unwrap();
+ }
+
+ // Signing and executing this transaction on the last authority should succeed.
+ authority_clients[3]
+ .handle_transaction(create_counter_txn.clone(), Some(make_socket_addr()))
+ .await
+ .unwrap();
+ send_consensus(&authorities[3], &create_counter_cert).await;
+ let create_counter_effects = authorities[3]
.get_transaction_cache_reader()
.notify_read_executed_effects(&[*create_counter_cert.digest()])
.await
.unwrap()
.pop()
.unwrap();
- }
+ let (shared_counter_ref, owner) = create_counter_effects.created()[0];
+ let Owner::Shared {
+ initial_shared_version: shared_counter_initial_version,
+ } = owner
+ else {
+ panic!("Not a shared object! {:?} {:?}", shared_counter_ref, owner);
+ };
- // Signing and executing this transaction on the last authority should succeed.
- authority_clients[3]
- .handle_transaction(create_counter_txn.clone(), Some(make_socket_addr()))
- .await
- .unwrap();
- send_consensus(&authorities[3], &create_counter_cert).await;
- let create_counter_effects = authorities[3]
- .get_transaction_cache_reader()
- .notify_read_executed_effects(&[*create_counter_cert.digest()])
- .await
- .unwrap()
- .pop()
- .unwrap();
- let (shared_counter_ref, owner) = create_counter_effects.created()[0];
- let Owner::Shared {
- initial_shared_version: shared_counter_initial_version,
- } = owner
- else {
- panic!("Not a shared object! {:?} {:?}", shared_counter_ref, owner);
- };
+ // Stop execution on the last authority, to simulate having a backlog.
+ authorities[3].shutdown_execution_for_test();
+ // Make sure execution driver has exited.
+ sleep(Duration::from_secs(1)).await;
+
+ // Sign and try execute 1000 txns on the first three authorities. And enqueue
+ // them on the last authority. First shared counter txn has input object
+ // available on authority 3. So to overload authority 3, 1 more
+ // txn is needed.
+ let num_txns = authorities[3]
+ .overload_config()
+ .max_transaction_manager_per_object_queue_length
+ + 1;
+ for gas_object in gas_objects.iter().take(num_txns) {
+ let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_object.id()).await;
+ let shared_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
+ .call_counter_increment(
+ package,
+ shared_counter_ref.0,
+ shared_counter_initial_version,
+ )
+ .build_and_sign(&key);
+ let shared_cert = try_sign_on_first_three_authorities(
+ &authority_clients,
+ &aggregator.committee,
+ &shared_txn,
+ )
+ .await
+ .unwrap();
+ for authority in authorities.iter().take(3) {
+ send_consensus(authority, &shared_cert).await;
+ }
+ send_consensus(&authorities[3], &shared_cert).await;
+ }
- // Stop execution on the last authority, to simulate having a backlog.
- authorities[3].shutdown_execution_for_test();
- // Make sure execution driver has exited.
- sleep(Duration::from_secs(1)).await;
-
- // Sign and try execute 1000 txns on the first three authorities. And enqueue
- // them on the last authority. First shared counter txn has input object
- // available on authority 3. So to overload authority 3, 1 more
- // txn is needed.
- let num_txns = authorities[3]
- .overload_config()
- .max_transaction_manager_per_object_queue_length
- + 1;
- for gas_object in gas_objects.iter().take(num_txns) {
- let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_object.id()).await;
+ // Trying to sign a new transaction would now fail.
+ let gas_ref =
+ get_latest_ref(authority_clients[0].clone(), gas_objects[num_txns].id()).await;
let shared_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
.call_counter_increment(
package,
@@ -567,125 +599,129 @@ async fn test_per_object_overload() {
shared_counter_initial_version,
)
.build_and_sign(&key);
- let shared_cert = try_sign_on_first_three_authorities(
+ let res = authorities[3]
+ .transaction_manager()
+ .check_execution_overload(authorities[3].overload_config(), shared_txn.data());
+ let message = format!("{res:?}");
+ assert!(
+ message.contains("TooManyTransactionsPendingOnObject"),
+ "{}",
+ message
+ );
+ }
+
+ #[tokio::test]
+ async fn test_txn_age_overload() {
+ telemetry_subscribers::init_for_testing();
+
+ // Initialize a network with 1 account and 3 gas objects.
+ let (addr, key): (_, AccountKeyPair) = get_key_pair();
+ let gas_objects = (0..3)
+ .map(|_| Object::with_owner_for_testing(addr))
+ .collect_vec();
+ let (aggregator, authorities, _genesis, package) =
+ init_local_authorities_with_overload_thresholds(
+ 4,
+ gas_objects.clone(),
+ AuthorityOverloadConfig {
+ max_txn_age_in_queue: Duration::from_secs(5),
+ ..Default::default()
+ },
+ )
+ .await;
+ let rgp = authorities
+ .first()
+ .unwrap()
+ .reference_gas_price_for_testing()
+ .unwrap();
+ let authority_clients: Vec<_> = authorities
+ .iter()
+ .map(|a| aggregator.authority_clients[&a.name].clone())
+ .collect();
+
+ // Create a shared counter.
+ let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0].id()).await;
+ let create_counter_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
+ .call_counter_create(package)
+ .build_and_sign(&key);
+ let create_counter_cert = try_sign_on_first_three_authorities(
&authority_clients,
&aggregator.committee,
- &shared_txn,
+ &create_counter_txn,
)
.await
.unwrap();
for authority in authorities.iter().take(3) {
- send_consensus(authority, &shared_cert).await;
+ send_consensus(authority, &create_counter_cert).await;
+ }
+ for authority in authorities.iter().take(3) {
+ authority
+ .get_transaction_cache_reader()
+ .notify_read_executed_effects(&[*create_counter_cert.digest()])
+ .await
+ .unwrap()
+ .pop()
+ .unwrap();
}
- send_consensus(&authorities[3], &shared_cert).await;
- }
-
- // Trying to sign a new transaction would now fail.
- let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[num_txns].id()).await;
- let shared_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
- .call_counter_increment(
- package,
- shared_counter_ref.0,
- shared_counter_initial_version,
- )
- .build_and_sign(&key);
- let res = authorities[3]
- .transaction_manager()
- .check_execution_overload(authorities[3].overload_config(), shared_txn.data());
- let message = format!("{res:?}");
- assert!(
- message.contains("TooManyTransactionsPendingOnObject"),
- "{}",
- message
- );
-}
-
-#[tokio::test]
-async fn test_txn_age_overload() {
- telemetry_subscribers::init_for_testing();
- // Initialize a network with 1 account and 3 gas objects.
- let (addr, key): (_, AccountKeyPair) = get_key_pair();
- let gas_objects = (0..3)
- .map(|_| Object::with_owner_for_testing(addr))
- .collect_vec();
- let (aggregator, authorities, _genesis, package) =
- init_local_authorities_with_overload_thresholds(
- 4,
- gas_objects.clone(),
- AuthorityOverloadConfig {
- max_txn_age_in_queue: Duration::from_secs(5),
- ..Default::default()
- },
- )
- .await;
- let rgp = authorities
- .first()
- .unwrap()
- .reference_gas_price_for_testing()
- .unwrap();
- let authority_clients: Vec<_> = authorities
- .iter()
- .map(|a| aggregator.authority_clients[&a.name].clone())
- .collect();
-
- // Create a shared counter.
- let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0].id()).await;
- let create_counter_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
- .call_counter_create(package)
- .build_and_sign(&key);
- let create_counter_cert = try_sign_on_first_three_authorities(
- &authority_clients,
- &aggregator.committee,
- &create_counter_txn,
- )
- .await
- .unwrap();
- for authority in authorities.iter().take(3) {
- send_consensus(authority, &create_counter_cert).await;
- }
- for authority in authorities.iter().take(3) {
- authority
+ // Signing and executing this transaction on the last authority should succeed.
+ authority_clients[3]
+ .handle_transaction(create_counter_txn.clone(), Some(make_socket_addr()))
+ .await
+ .unwrap();
+ send_consensus(&authorities[3], &create_counter_cert).await;
+ let create_counter_effects = authorities[3]
.get_transaction_cache_reader()
.notify_read_executed_effects(&[*create_counter_cert.digest()])
.await
.unwrap()
.pop()
.unwrap();
- }
+ let (shared_counter_ref, owner) = create_counter_effects.created()[0];
+ let Owner::Shared {
+ initial_shared_version: shared_counter_initial_version,
+ } = owner
+ else {
+ panic!("Not a shared object! {:?} {:?}", shared_counter_ref, owner);
+ };
- // Signing and executing this transaction on the last authority should succeed.
- authority_clients[3]
- .handle_transaction(create_counter_txn.clone(), Some(make_socket_addr()))
- .await
- .unwrap();
- send_consensus(&authorities[3], &create_counter_cert).await;
- let create_counter_effects = authorities[3]
- .get_transaction_cache_reader()
- .notify_read_executed_effects(&[*create_counter_cert.digest()])
- .await
- .unwrap()
- .pop()
- .unwrap();
- let (shared_counter_ref, owner) = create_counter_effects.created()[0];
- let Owner::Shared {
- initial_shared_version: shared_counter_initial_version,
- } = owner
- else {
- panic!("Not a shared object! {:?} {:?}", shared_counter_ref, owner);
- };
+ // Stop execution on the last authority, to simulate having a backlog.
+ authorities[3].shutdown_execution_for_test();
+ // Make sure execution driver has exited.
+ sleep(Duration::from_secs(1)).await;
+
+ // Sign and try execute 2 txns on the first three authorities. And enqueue them
+ // on the last authority. First shared counter txn has input object
+ // available on authority 3. So to put a txn in the queue, we
+ // will need another txn.
+ for gas_object in gas_objects.iter().take(2) {
+ let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_object.id()).await;
+ let shared_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
+ .call_counter_increment(
+ package,
+ shared_counter_ref.0,
+ shared_counter_initial_version,
+ )
+ .build_and_sign(&key);
+ let shared_cert = try_sign_on_first_three_authorities(
+ &authority_clients,
+ &aggregator.committee,
+ &shared_txn,
+ )
+ .await
+ .unwrap();
+ for authority in authorities.iter().take(3) {
+ send_consensus(authority, &shared_cert).await;
+ }
+ send_consensus(&authorities[3], &shared_cert).await;
+ }
+
+ // Sleep for 6 seconds to make sure the transaction is old enough since our
+ // threshold is 5.
+ tokio::time::sleep(Duration::from_secs(6)).await;
- // Stop execution on the last authority, to simulate having a backlog.
- authorities[3].shutdown_execution_for_test();
- // Make sure execution driver has exited.
- sleep(Duration::from_secs(1)).await;
-
- // Sign and try execute 2 txns on the first three authorities. And enqueue them
- // on the last authority. First shared counter txn has input object
- // available on authority 3. So to put a txn in the queue, we
- // will need another txn.
- for gas_object in gas_objects.iter().take(2) {
- let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_object.id()).await;
+ // Trying to sign a new transaction would now fail.
+ let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[2].id()).await;
let shared_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
.call_counter_increment(
package,
@@ -693,41 +729,16 @@ async fn test_txn_age_overload() {
shared_counter_initial_version,
)
.build_and_sign(&key);
- let shared_cert = try_sign_on_first_three_authorities(
- &authority_clients,
- &aggregator.committee,
- &shared_txn,
- )
- .await
- .unwrap();
- for authority in authorities.iter().take(3) {
- send_consensus(authority, &shared_cert).await;
- }
- send_consensus(&authorities[3], &shared_cert).await;
+ let res = authorities[3]
+ .transaction_manager()
+ .check_execution_overload(authorities[3].overload_config(), shared_txn.data());
+ let message = format!("{res:?}");
+ assert!(
+ message.contains("TooOldTransactionPendingOnObject"),
+ "{}",
+ message
+ );
}
-
- // Sleep for 6 seconds to make sure the transaction is old enough since our
- // threshold is 5.
- tokio::time::sleep(Duration::from_secs(6)).await;
-
- // Trying to sign a new transaction would now fail.
- let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[2].id()).await;
- let shared_txn = TestTransactionBuilder::new(addr, gas_ref, rgp)
- .call_counter_increment(
- package,
- shared_counter_ref.0,
- shared_counter_initial_version,
- )
- .build_and_sign(&key);
- let res = authorities[3]
- .transaction_manager()
- .check_execution_overload(authorities[3].overload_config(), shared_txn.data());
- let message = format!("{res:?}");
- assert!(
- message.contains("TooOldTransactionPendingOnObject"),
- "{}",
- message
- );
}
// Tests that when validator is in load shedding mode, it can pushback txn
diff --git a/crates/iota-e2e-tests/tests/full_node_migration_tests.rs b/crates/iota-e2e-tests/tests/full_node_migration_tests.rs
index e6c79e17779..c7f8c281b77 100644
--- a/crates/iota-e2e-tests/tests/full_node_migration_tests.rs
+++ b/crates/iota-e2e-tests/tests/full_node_migration_tests.rs
@@ -1,11 +1,24 @@
// Copyright (c) 2024 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
-use std::{path::PathBuf, str::FromStr};
+use std::{
+ fs::File,
+ io::{BufWriter, Write},
+ path::PathBuf,
+ str::FromStr,
+};
use anyhow::anyhow;
use bip32::DerivationPath;
-use iota_genesis_builder::SnapshotSource;
+use iota_genesis_builder::{
+ SnapshotSource,
+ stardust::{
+ migration::{Migration, MigrationTargetNetwork},
+ parse::HornetSnapshotParser,
+ process_outputs::scale_amount_for_iota,
+ types::address_swap_map::AddressSwapMap,
+ },
+};
use iota_json_rpc_types::{
IotaObjectDataFilter, IotaObjectDataOptions, IotaObjectResponseQuery,
IotaTransactionBlockResponse, IotaTransactionBlockResponseOptions,
@@ -21,7 +34,7 @@ use iota_types::{
gas_coin::GAS,
programmable_transaction_builder::ProgrammableTransactionBuilder,
quorum_driver_types::ExecuteTransactionRequestType,
- stardust::output::NftOutput,
+ stardust::{coin_type::CoinType, output::NftOutput},
transaction::{Argument, ObjectArg, Transaction, TransactionData},
};
use move_core_types::ident_str;
@@ -29,7 +42,10 @@ use shared_crypto::intent::Intent;
use tempfile::tempdir;
use test_cluster::TestClusterBuilder;
-const MIGRATION_DATA_PATH: &str = "tests/migration/stardust_object_snapshot.bin";
+const HORNET_SNAPSHOT_PATH: &str = "tests/migration/test_hornet_full_snapshot.bin";
+const ADDRESS_SWAP_MAP_PATH: &str = "tests/migration/address_swap.csv";
+const TEST_TARGET_NETWORK: &str = "alphanet-test";
+const MIGRATION_DATA_FILE_NAME: &str = "stardust_object_snapshot.bin";
/// Got from iota-genesis-builder/src/stardust/test_outputs/alias_ownership.rs
const MAIN_ADDRESS_MNEMONIC: &str = "few hood high omit camp keep burger give happy iron evolve draft few dawn pulp jazz box dash load snake gown bag draft car";
@@ -39,26 +55,67 @@ const SPONSOR_ADDRESS_MNEMONIC: &str = "okay pottery arch air egg very cave cash
#[sim_test]
async fn test_full_node_load_migration_data() -> Result<(), anyhow::Error> {
telemetry_subscribers::init_for_testing();
- let snapshot_source = SnapshotSource::Local(PathBuf::from_str(MIGRATION_DATA_PATH).unwrap());
+
+ // Setup the temporary dir and create the writer for the stardust object
+ // snapshot
+ let dir = tempdir()?;
+ let stardudst_object_snapshot_file_path = dir.path().join(MIGRATION_DATA_FILE_NAME);
+ let object_snapshot_writer =
+ BufWriter::new(File::create(&stardudst_object_snapshot_file_path)?);
+
+ // Generate the stardust object snapshot
+ genesis_builder_snapshot_generation(object_snapshot_writer)?;
+ // Then load it
+ let snapshot_source = SnapshotSource::Local(stardudst_object_snapshot_file_path);
+
+ // A new test cluster can be spawn with the stardust object snapshot
let test_cluster = TestClusterBuilder::new()
.with_migration_data(vec![snapshot_source])
.build()
.await;
+ // Use a client to issue a test transaction
let client = test_cluster.wallet.get_client().await.unwrap();
-
let tx_response = address_unlock_condition(client).await?;
-
let IotaTransactionBlockResponse {
confirmed_local_execution,
errors,
..
} = tx_response;
+
+ // The transaction must be successful
assert!(confirmed_local_execution.unwrap());
assert!(errors.is_empty());
Ok(())
}
+fn genesis_builder_snapshot_generation(
+ object_snapshot_writer: impl Write,
+) -> Result<(), anyhow::Error> {
+ let mut snapshot_parser =
+ HornetSnapshotParser::new::(File::open(HORNET_SNAPSHOT_PATH)?)?;
+ let total_supply = scale_amount_for_iota(snapshot_parser.total_supply()?)?;
+ let target_network = MigrationTargetNetwork::from_str(TEST_TARGET_NETWORK)?;
+ let coin_type = CoinType::Iota;
+ let address_swap_map = AddressSwapMap::from_csv(ADDRESS_SWAP_MAP_PATH)?;
+
+ // Migrate using the parser output stream
+ Migration::new(
+ snapshot_parser.target_milestone_timestamp(),
+ total_supply,
+ target_network,
+ coin_type,
+ address_swap_map,
+ )?
+ .run_for_iota(
+ snapshot_parser.target_milestone_timestamp(),
+ snapshot_parser.outputs(),
+ object_snapshot_writer,
+ )?;
+
+ Ok(())
+}
+
async fn address_unlock_condition(
iota_client: IotaClient,
) -> Result {
diff --git a/crates/iota-e2e-tests/tests/migration/address_swap.csv b/crates/iota-e2e-tests/tests/migration/address_swap.csv
new file mode 100644
index 00000000000..7ece8894744
--- /dev/null
+++ b/crates/iota-e2e-tests/tests/migration/address_swap.csv
@@ -0,0 +1,2 @@
+Origin,Destination
+iota1qp8h9augeh6tk3uvlxqfapuwv93atv63eqkpru029p6sgvr49eufyz7katr,0x4f72f788cdf4bb478cf9809e878e6163d5b351c82c11f1ea28750430752e7892
\ No newline at end of file
diff --git a/crates/iota-e2e-tests/tests/migration/stardust_object_snapshot.bin b/crates/iota-e2e-tests/tests/migration/stardust_object_snapshot.bin
deleted file mode 100644
index c7fa51debb7..00000000000
Binary files a/crates/iota-e2e-tests/tests/migration/stardust_object_snapshot.bin and /dev/null differ
diff --git a/crates/iota-e2e-tests/tests/migration/test_hornet_full_snapshot.bin b/crates/iota-e2e-tests/tests/migration/test_hornet_full_snapshot.bin
new file mode 100644
index 00000000000..bac42b91117
Binary files /dev/null and b/crates/iota-e2e-tests/tests/migration/test_hornet_full_snapshot.bin differ
diff --git a/crates/iota-framework/tests/build-system-packages.rs b/crates/iota-framework/tests/build-system-packages.rs
index 8e343a0e33f..aefc06ab5d7 100644
--- a/crates/iota-framework/tests/build-system-packages.rs
+++ b/crates/iota-framework/tests/build-system-packages.rs
@@ -198,12 +198,17 @@ fn build_packages_with_move_config(
&mut files_to_write,
);
create_category_file(framework_dir);
- create_category_file(stdlib_dir);
relocate_docs(
framework_dir,
&framework_pkg.package.compiled_docs.unwrap(),
&mut files_to_write,
);
+ create_category_file(stdlib_dir);
+ relocate_docs(
+ stdlib_dir,
+ &stdlib_pkg.package.compiled_docs.unwrap(),
+ &mut files_to_write,
+ );
create_category_file(bridge_dir);
relocate_docs(
bridge_dir,
diff --git a/crates/iota-genesis-builder/src/main.rs b/crates/iota-genesis-builder/src/main.rs
index e0adc0db630..3c79fdc329b 100644
--- a/crates/iota-genesis-builder/src/main.rs
+++ b/crates/iota-genesis-builder/src/main.rs
@@ -4,26 +4,20 @@
//! Creating a stardust objects snapshot out of a Hornet snapshot.
//! TIP that defines the Hornet snapshot file format:
//! https://github.com/iotaledger/tips/blob/main/tips/TIP-0035/tip-0035.md
-use std::{collections::BTreeMap, fs::File, io::BufWriter};
+use std::{fs::File, io::BufWriter};
-use anyhow::{Result, anyhow};
+use anyhow::Result;
use clap::{Parser, Subcommand};
use iota_genesis_builder::{
OBJECT_SNAPSHOT_FILE_PATH,
stardust::{
migration::{Migration, MigrationTargetNetwork},
parse::HornetSnapshotParser,
- types::{address_swap_map::AddressSwapMap, output_header::OutputHeader},
+ process_outputs::scale_amount_for_iota,
+ types::address_swap_map::AddressSwapMap,
},
};
-use iota_sdk::types::block::{
- address::Address,
- output::{
- AliasOutputBuilder, BasicOutputBuilder, FoundryOutputBuilder, NftOutputBuilder, Output,
- unlock_condition::{AddressUnlockCondition, StorageDepositReturnUnlockCondition},
- },
-};
-use iota_types::{stardust::coin_type::CoinType, timelock::timelock::is_vested_reward};
+use iota_types::stardust::coin_type::CoinType;
use tracing::Level;
use tracing_subscriber::FmtSubscriber;
@@ -104,182 +98,13 @@ fn main() -> Result<()> {
match coin_type {
CoinType::Iota => {
- struct MergingIterator {
- unlocked_address_balances: BTreeMap,
- snapshot_timestamp_s: u32,
- outputs: I,
- }
-
- impl MergingIterator {
- fn new(snapshot_timestamp_s: u32, outputs: I) -> Self {
- Self {
- unlocked_address_balances: Default::default(),
- snapshot_timestamp_s,
- outputs,
- }
- }
- }
-
- impl>> Iterator for MergingIterator {
- type Item = I::Item;
-
- fn next(&mut self) -> Option {
- // First process all the outputs, building the unlocked_address_balances map as
- // we go.
- for res in self.outputs.by_ref() {
- if let Ok((header, output)) = res {
- fn mergeable_address(
- header: &OutputHeader,
- output: &Output,
- snapshot_timestamp_s: u32,
- ) -> Option {
- // ignore all non-basic outputs and non vesting outputs
- if !output.is_basic()
- || !is_vested_reward(header.output_id(), output.as_basic())
- {
- return None;
- }
-
- if let Some(unlock_conditions) = output.unlock_conditions() {
- // check if vesting unlock period is already done
- if unlock_conditions.is_time_locked(snapshot_timestamp_s) {
- return None;
- }
- unlock_conditions.address().map(|uc| *uc.address())
- } else {
- None
- }
- }
-
- if let Some(address) =
- mergeable_address(&header, &output, self.snapshot_timestamp_s)
- {
- // collect the unlocked vesting balances
- self.unlocked_address_balances
- .entry(address)
- .and_modify(|x| x.balance += output.amount())
- .or_insert(OutputHeaderWithBalance {
- output_header: header,
- balance: output.amount(),
- });
- continue;
- } else {
- return Some(Ok((header, output)));
- }
- } else {
- return Some(res);
- }
- }
-
- // Now that we are out
- self.unlocked_address_balances.pop_first().map(
- |(address, output_header_with_balance)| {
- // create a new basic output which holds the aggregated balance from
- // unlocked vesting outputs for this address
- let basic = BasicOutputBuilder::new_with_amount(
- output_header_with_balance.balance,
- )
- .add_unlock_condition(AddressUnlockCondition::new(address))
- .finish()
- .expect("should be able to create a basic output");
-
- Ok((output_header_with_balance.output_header, basic.into()))
- },
- )
- }
- }
-
- let merged_outputs = MergingIterator::new(
+ migration.run_for_iota(
snapshot_parser.target_milestone_timestamp(),
snapshot_parser.outputs(),
- )
- .map(|res| {
- let (header, mut output) = res?;
- scale_output_amount_for_iota(&mut output)?;
-
- Ok::<_, anyhow::Error>((header, output))
- });
- itertools::process_results(merged_outputs, |outputs| {
- migration.run(outputs, object_snapshot_writer)
- })??;
+ object_snapshot_writer,
+ )?;
}
}
Ok(())
}
-
-struct OutputHeaderWithBalance {
- output_header: OutputHeader,
- balance: u64,
-}
-
-fn scale_output_amount_for_iota(output: &mut Output) -> Result<()> {
- *output = match output {
- Output::Basic(ref basic_output) => {
- // Update amount
- let mut builder = BasicOutputBuilder::from(basic_output)
- .with_amount(scale_amount_for_iota(basic_output.amount())?);
-
- // Update amount in potential storage deposit return unlock condition
- if let Some(sdr_uc) = basic_output
- .unlock_conditions()
- .get(StorageDepositReturnUnlockCondition::KIND)
- {
- let sdr_uc = sdr_uc.as_storage_deposit_return();
- builder = builder.replace_unlock_condition(
- StorageDepositReturnUnlockCondition::new(
- sdr_uc.return_address(),
- scale_amount_for_iota(sdr_uc.amount())?,
- u64::MAX,
- )
- .unwrap(),
- );
- };
-
- Output::from(builder.finish()?)
- }
- Output::Alias(ref alias_output) => Output::from(
- AliasOutputBuilder::from(alias_output)
- .with_amount(scale_amount_for_iota(alias_output.amount())?)
- .finish()?,
- ),
- Output::Foundry(ref foundry_output) => Output::from(
- FoundryOutputBuilder::from(foundry_output)
- .with_amount(scale_amount_for_iota(foundry_output.amount())?)
- .finish()?,
- ),
- Output::Nft(ref nft_output) => {
- // Update amount
- let mut builder = NftOutputBuilder::from(nft_output)
- .with_amount(scale_amount_for_iota(nft_output.amount())?);
-
- // Update amount in potential storage deposit return unlock condition
- if let Some(sdr_uc) = nft_output
- .unlock_conditions()
- .get(StorageDepositReturnUnlockCondition::KIND)
- {
- let sdr_uc = sdr_uc.as_storage_deposit_return();
- builder = builder.replace_unlock_condition(
- StorageDepositReturnUnlockCondition::new(
- sdr_uc.return_address(),
- scale_amount_for_iota(sdr_uc.amount())?,
- u64::MAX,
- )
- .unwrap(),
- );
- };
-
- Output::from(builder.finish()?)
- }
- Output::Treasury(_) => return Ok(()),
- };
- Ok(())
-}
-
-fn scale_amount_for_iota(amount: u64) -> Result {
- const IOTA_MULTIPLIER: u64 = 1000;
-
- amount
- .checked_mul(IOTA_MULTIPLIER)
- .ok_or_else(|| anyhow!("overflow multiplying amount {amount} by {IOTA_MULTIPLIER}"))
-}
diff --git a/crates/iota-genesis-builder/src/stardust/migration/migration.rs b/crates/iota-genesis-builder/src/stardust/migration/migration.rs
index 96ef8c7fd2a..a1e0d49dd6f 100644
--- a/crates/iota-genesis-builder/src/stardust/migration/migration.rs
+++ b/crates/iota-genesis-builder/src/stardust/migration/migration.rs
@@ -32,6 +32,7 @@ use crate::stardust::{
verification::{created_objects::CreatedObjects, verify_outputs},
},
native_token::package_data::NativeTokenPackageData,
+ process_outputs::get_merged_outputs_for_iota,
types::{address_swap_map::AddressSwapMap, output_header::OutputHeader},
};
@@ -163,6 +164,20 @@ impl Migration {
Ok(())
}
+ /// Run all stages of the migration coming from a Hornet snapshot with IOTA
+ /// coin type.
+ pub fn run_for_iota<'a>(
+ self,
+ target_milestone_timestamp: u32,
+ outputs: impl Iterator- > + 'a,
+ writer: impl Write,
+ ) -> Result<()> {
+ itertools::process_results(
+ get_merged_outputs_for_iota(target_milestone_timestamp, outputs),
+ |outputs| self.run(outputs, writer),
+ )?
+ }
+
/// The migration objects.
///
/// The system packages and underlying `init` objects
diff --git a/crates/iota-genesis-builder/src/stardust/mod.rs b/crates/iota-genesis-builder/src/stardust/mod.rs
index 56a24edab30..b4e832eea96 100644
--- a/crates/iota-genesis-builder/src/stardust/mod.rs
+++ b/crates/iota-genesis-builder/src/stardust/mod.rs
@@ -8,6 +8,7 @@
pub mod migration;
pub mod native_token;
pub mod parse;
+pub mod process_outputs;
#[cfg(feature = "test-outputs")]
pub mod test_outputs;
pub mod types;
diff --git a/crates/iota-genesis-builder/src/stardust/process_outputs.rs b/crates/iota-genesis-builder/src/stardust/process_outputs.rs
new file mode 100644
index 00000000000..bb4c93b7ad0
--- /dev/null
+++ b/crates/iota-genesis-builder/src/stardust/process_outputs.rs
@@ -0,0 +1,208 @@
+// Copyright (c) 2024 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+use std::collections::BTreeMap;
+
+use anyhow::{Result, anyhow};
+use iota_sdk::types::block::{
+ address::Address,
+ output::{
+ AliasOutputBuilder, BasicOutputBuilder, FoundryOutputBuilder, NftOutputBuilder, Output,
+ unlock_condition::{AddressUnlockCondition, StorageDepositReturnUnlockCondition},
+ },
+};
+use iota_types::timelock::timelock::is_vested_reward;
+
+use super::types::output_header::OutputHeader;
+
+/// Take an `amount` and scale it by a multiplier defined for the IOTA token.
+pub fn scale_amount_for_iota(amount: u64) -> Result {
+ const IOTA_MULTIPLIER: u64 = 1000;
+
+ amount
+ .checked_mul(IOTA_MULTIPLIER)
+ .ok_or_else(|| anyhow!("overflow multiplying amount {amount} by {IOTA_MULTIPLIER}"))
+}
+
+/// Processes and merges outputs from a Hornet snapshot considering balances as
+/// IOTA tokens.
+///
+/// This function uses the `MergingIterator` to filter and aggregate vesting
+/// balances and then scales the output amounts.
+pub fn get_merged_outputs_for_iota<'a>(
+ target_milestone_timestamp: u32,
+ outputs: impl Iterator
- > + 'a,
+) -> impl Iterator
- > + 'a {
+ MergingIterator::new(target_milestone_timestamp, outputs).map(|res| {
+ let (header, mut output) = res?;
+ // Scale the output amount according to IOTA token multiplier
+ scale_output_amount_for_iota(&mut output)?;
+ Ok((header, output))
+ })
+}
+
+struct OutputHeaderWithBalance {
+ output_header: OutputHeader,
+ balance: u64,
+}
+
+/// An iterator that processes outputs, aggregates balances for eligible
+/// addresses, and generates new "basic" outputs for unlocked vesting rewards.
+///
+/// `MergingIterator` filters outputs based on conditions:
+/// - Must be "basic" outputs.
+/// - Must represent vesting rewards that are timelocked relative to a snapshot
+/// timestamp.
+///
+/// Eligible balances are aggregated into a map, and once all inputs are
+/// processed, the iterator produces new outputs consolidating these balances.
+///
+/// Non-eligible outputs are returned as-is.
+struct MergingIterator {
+ /// Stores aggregated balances for eligible addresses.
+ unlocked_address_balances: BTreeMap,
+ /// Timestamp used to evaluate timelock conditions.
+ snapshot_timestamp_s: u32,
+ /// Iterator over `(OutputHeader, Output)` pairs.
+ outputs: I,
+}
+
+impl MergingIterator {
+ fn new(snapshot_timestamp_s: u32, outputs: I) -> Self {
+ Self {
+ unlocked_address_balances: Default::default(),
+ snapshot_timestamp_s,
+ outputs,
+ }
+ }
+}
+
+impl>> Iterator for MergingIterator {
+ type Item = I::Item;
+
+ fn next(&mut self) -> Option {
+ // First process all the outputs, building the unlocked_address_balances map as
+ // we go.
+ for res in self.outputs.by_ref() {
+ if let Ok((header, output)) = res {
+ fn mergeable_address(
+ header: &OutputHeader,
+ output: &Output,
+ snapshot_timestamp_s: u32,
+ ) -> Option {
+ // ignore all non-basic outputs and non vesting outputs
+ if !output.is_basic()
+ || !is_vested_reward(header.output_id(), output.as_basic())
+ {
+ return None;
+ }
+
+ if let Some(unlock_conditions) = output.unlock_conditions() {
+ // check if vesting unlock period is already done
+ if unlock_conditions.is_time_locked(snapshot_timestamp_s) {
+ return None;
+ }
+ unlock_conditions.address().map(|uc| *uc.address())
+ } else {
+ None
+ }
+ }
+
+ if let Some(address) =
+ mergeable_address(&header, &output, self.snapshot_timestamp_s)
+ {
+ // collect the unlocked vesting balances
+ self.unlocked_address_balances
+ .entry(address)
+ .and_modify(|x| x.balance += output.amount())
+ .or_insert(OutputHeaderWithBalance {
+ output_header: header,
+ balance: output.amount(),
+ });
+ continue;
+ } else {
+ return Some(Ok((header, output)));
+ }
+ } else {
+ return Some(res);
+ }
+ }
+
+ // Now that we are out
+ self.unlocked_address_balances
+ .pop_first()
+ .map(|(address, output_header_with_balance)| {
+ // create a new basic output which holds the aggregated balance from
+ // unlocked vesting outputs for this address
+ let basic = BasicOutputBuilder::new_with_amount(output_header_with_balance.balance)
+ .add_unlock_condition(AddressUnlockCondition::new(address))
+ .finish()
+ .expect("should be able to create a basic output");
+
+ Ok((output_header_with_balance.output_header, basic.into()))
+ })
+ }
+}
+
+fn scale_output_amount_for_iota(output: &mut Output) -> Result<()> {
+ *output = match output {
+ Output::Basic(ref basic_output) => {
+ // Update amount
+ let mut builder = BasicOutputBuilder::from(basic_output)
+ .with_amount(scale_amount_for_iota(basic_output.amount())?);
+
+ // Update amount in potential storage deposit return unlock condition
+ if let Some(sdr_uc) = basic_output
+ .unlock_conditions()
+ .get(StorageDepositReturnUnlockCondition::KIND)
+ {
+ let sdr_uc = sdr_uc.as_storage_deposit_return();
+ builder = builder.replace_unlock_condition(
+ StorageDepositReturnUnlockCondition::new(
+ sdr_uc.return_address(),
+ scale_amount_for_iota(sdr_uc.amount())?,
+ u64::MAX,
+ )
+ .unwrap(),
+ );
+ };
+
+ Output::from(builder.finish()?)
+ }
+ Output::Alias(ref alias_output) => Output::from(
+ AliasOutputBuilder::from(alias_output)
+ .with_amount(scale_amount_for_iota(alias_output.amount())?)
+ .finish()?,
+ ),
+ Output::Foundry(ref foundry_output) => Output::from(
+ FoundryOutputBuilder::from(foundry_output)
+ .with_amount(scale_amount_for_iota(foundry_output.amount())?)
+ .finish()?,
+ ),
+ Output::Nft(ref nft_output) => {
+ // Update amount
+ let mut builder = NftOutputBuilder::from(nft_output)
+ .with_amount(scale_amount_for_iota(nft_output.amount())?);
+
+ // Update amount in potential storage deposit return unlock condition
+ if let Some(sdr_uc) = nft_output
+ .unlock_conditions()
+ .get(StorageDepositReturnUnlockCondition::KIND)
+ {
+ let sdr_uc = sdr_uc.as_storage_deposit_return();
+ builder = builder.replace_unlock_condition(
+ StorageDepositReturnUnlockCondition::new(
+ sdr_uc.return_address(),
+ scale_amount_for_iota(sdr_uc.amount())?,
+ u64::MAX,
+ )
+ .unwrap(),
+ );
+ };
+
+ Output::from(builder.finish()?)
+ }
+ Output::Treasury(_) => return Ok(()),
+ };
+ Ok(())
+}
diff --git a/crates/iota-json-rpc-tests/tests/read_api.rs b/crates/iota-json-rpc-tests/tests/read_api.rs
index 8602e391c05..a38376ce97b 100644
--- a/crates/iota-json-rpc-tests/tests/read_api.rs
+++ b/crates/iota-json-rpc-tests/tests/read_api.rs
@@ -1487,104 +1487,108 @@ async fn try_get_past_object_version_not_found() {
assert!(at_least_one_version_not_found)
}
-#[sim_test]
-async fn try_get_past_object_deleted() {
- let cluster = TestClusterBuilder::new().build().await;
- let http_client = cluster.rpc_client();
- let address = cluster.get_address_0();
-
- let objects = cluster
- .get_owned_objects(address, Some(IotaObjectDataOptions::full_content()))
- .await
- .unwrap();
+mod move_tests {
+ use super::*;
- assert_eq!(5, objects.len());
+ #[sim_test]
+ async fn try_get_past_object_deleted() {
+ let cluster = TestClusterBuilder::new().build().await;
+ let http_client = cluster.rpc_client();
+ let address = cluster.get_address_0();
- let tx_block_response = publish_move_package(&cluster).await;
+ let objects = cluster
+ .get_owned_objects(address, Some(IotaObjectDataOptions::full_content()))
+ .await
+ .unwrap();
- let package_id = tx_block_response
- .object_changes
- .unwrap()
- .iter()
- .filter_map(|obj_change| match obj_change {
- ObjectChange::Published { package_id, .. } => Some(*package_id),
- _ => None,
- })
- .collect::<Vec<_>>()[0];
+ assert_eq!(5, objects.len());
- let tx_block_response = cluster
- .sign_and_execute_transaction(
- &cluster
- .test_transaction_builder()
- .await
- .move_call(package_id, "object_basics", "create", vec![
- 1u64.into(),
- CallArg::Pure(address.to_vec()),
- ])
- .build(),
- )
- .await;
+ let tx_block_response = publish_move_package(&cluster).await;
- let created_object_id = tx_block_response
- .object_changes
- .unwrap()
- .iter()
- .filter_map(|obj_change| match obj_change {
- ObjectChange::Created { object_id, .. } => Some(*object_id),
- _ => None,
- })
- .collect::<Vec<_>>()[0];
+ let package_id = tx_block_response
+ .object_changes
+ .unwrap()
+ .iter()
+ .filter_map(|obj_change| match obj_change {
+ ObjectChange::Published { package_id, .. } => Some(*package_id),
+ _ => None,
+ })
+ .collect::<Vec<_>>()[0];
+
+ let tx_block_response = cluster
+ .sign_and_execute_transaction(
+ &cluster
+ .test_transaction_builder()
+ .await
+ .move_call(package_id, "object_basics", "create", vec![
+ 1u64.into(),
+ CallArg::Pure(address.to_vec()),
+ ])
+ .build(),
+ )
+ .await;
- let objects = cluster
- .get_owned_objects(address, Some(IotaObjectDataOptions::full_content()))
- .await
- .unwrap();
+ let created_object_id = tx_block_response
+ .object_changes
+ .unwrap()
+ .iter()
+ .filter_map(|obj_change| match obj_change {
+ ObjectChange::Created { object_id, .. } => Some(*object_id),
+ _ => None,
+ })
+ .collect::<Vec<_>>()[0];
- let object_ids = objects
- .iter()
- .map(|a| a.object_id().unwrap())
- .collect::<Vec<_>>();
+ let objects = cluster
+ .get_owned_objects(address, Some(IotaObjectDataOptions::full_content()))
+ .await
+ .unwrap();
- assert_eq!(7, objects.len());
- assert!(object_ids.contains(&created_object_id));
+ let object_ids = objects
+ .iter()
+ .map(|a| a.object_id().unwrap())
+ .collect::<Vec<_>>();
- let created_object = http_client
- .get_object(created_object_id, None)
- .await
- .unwrap()
- .data
- .unwrap();
+ assert_eq!(7, objects.len());
+ assert!(object_ids.contains(&created_object_id));
- let arg = CallArg::Object(iota_types::transaction::ObjectArg::ImmOrOwnedObject((
- created_object.object_id,
- created_object.version,
- created_object.digest,
- )));
+ let created_object = http_client
+ .get_object(created_object_id, None)
+ .await
+ .unwrap()
+ .data
+ .unwrap();
- let tx_block_response = cluster
- .sign_and_execute_transaction(
- &cluster
- .test_transaction_builder()
- .await
- .move_call(package_id, "object_basics", "delete", vec![arg])
- .build(),
- )
- .await;
+ let arg = CallArg::Object(iota_types::transaction::ObjectArg::ImmOrOwnedObject((
+ created_object.object_id,
+ created_object.version,
+ created_object.digest,
+ )));
+
+ let tx_block_response = cluster
+ .sign_and_execute_transaction(
+ &cluster
+ .test_transaction_builder()
+ .await
+ .move_call(package_id, "object_basics", "delete", vec![arg])
+ .build(),
+ )
+ .await;
- assert_eq!(
- tx_block_response.effects.as_ref().unwrap().deleted().len(),
- 1
- );
+ assert_eq!(
+ tx_block_response.effects.as_ref().unwrap().deleted().len(),
+ 1
+ );
- let seq_num = SequenceNumber::from_u64(4);
- let rpc_past_obj = http_client
- .try_get_past_object(created_object_id, seq_num, None)
- .await
- .unwrap();
+ let seq_num = SequenceNumber::from_u64(4);
+ let rpc_past_obj = http_client
+ .try_get_past_object(created_object_id, seq_num, None)
+ .await
+ .unwrap();
- assert!(
- matches!(rpc_past_obj, IotaPastObjectResponse::ObjectDeleted(obj) if obj.object_id == created_object_id && obj.version == seq_num)
- );
+ assert!(
+ matches!(rpc_past_obj, IotaPastObjectResponse::ObjectDeleted(obj) if obj.object_id == created_object_id && obj.version == seq_num)
+ );
+ }
}
#[sim_test]
diff --git a/crates/iota-json-rpc-tests/tests/transaction_builder_api.rs b/crates/iota-json-rpc-tests/tests/transaction_builder_api.rs
index cf7dc69998d..558a8cfa92d 100644
--- a/crates/iota-json-rpc-tests/tests/transaction_builder_api.rs
+++ b/crates/iota-json-rpc-tests/tests/transaction_builder_api.rs
@@ -298,49 +298,53 @@ async fn test_pay_all_iota() -> Result<(), anyhow::Error> {
Ok(())
}
-#[sim_test]
-async fn test_publish() -> Result<(), anyhow::Error> {
- let cluster = TestClusterBuilder::new().build().await;
- let http_client = cluster.rpc_client();
- let address = cluster.get_address_0();
-
- let objects = http_client
- .get_owned_objects(
- address,
- Some(IotaObjectResponseQuery::new_with_options(
- IotaObjectDataOptions::new()
- .with_type()
- .with_owner()
- .with_previous_transaction(),
- )),
- None,
- None,
- )
- .await?;
- let gas = objects.data.first().unwrap().object().unwrap();
-
- let compiled_package =
- BuildConfig::new_for_testing().build(Path::new("../../examples/move/basics"))?;
- let compiled_modules_bytes =
- compiled_package.get_package_base64(/* with_unpublished_deps */ false);
- let dependencies = compiled_package.get_dependency_storage_package_ids();
-
- let transaction_bytes: TransactionBlockBytes = http_client
- .publish(
- address,
- compiled_modules_bytes,
- dependencies,
- Some(gas.object_id),
- 100_000_000.into(),
- )
- .await?;
+mod move_tests {
+ use super::*;
+
+ #[sim_test]
+ async fn test_publish() -> Result<(), anyhow::Error> {
+ let cluster = TestClusterBuilder::new().build().await;
+ let http_client = cluster.rpc_client();
+ let address = cluster.get_address_0();
+
+ let objects = http_client
+ .get_owned_objects(
+ address,
+ Some(IotaObjectResponseQuery::new_with_options(
+ IotaObjectDataOptions::new()
+ .with_type()
+ .with_owner()
+ .with_previous_transaction(),
+ )),
+ None,
+ None,
+ )
+ .await?;
+ let gas = objects.data.first().unwrap().object().unwrap();
+
+ let compiled_package =
+ BuildConfig::new_for_testing().build(Path::new("../../examples/move/basics"))?;
+ let compiled_modules_bytes =
+ compiled_package.get_package_base64(/* with_unpublished_deps */ false);
+ let dependencies = compiled_package.get_dependency_storage_package_ids();
+
+ let transaction_bytes: TransactionBlockBytes = http_client
+ .publish(
+ address,
+ compiled_modules_bytes,
+ dependencies,
+ Some(gas.object_id),
+ 100_000_000.into(),
+ )
+ .await?;
- let tx_response = execute_tx(&cluster, http_client, transaction_bytes)
- .await
- .unwrap();
+ let tx_response = execute_tx(&cluster, http_client, transaction_bytes)
+ .await
+ .unwrap();
- matches!(tx_response, IotaTransactionBlockResponse {effects, ..} if effects.as_ref().unwrap().created().len() == 6);
- Ok(())
+ matches!(tx_response, IotaTransactionBlockResponse {effects, ..} if effects.as_ref().unwrap().created().len() == 6);
+ Ok(())
+ }
}
#[sim_test]
diff --git a/crates/iota-json/src/tests.rs b/crates/iota-json/src/tests.rs
index 6a899b12ebd..b97880f0efa 100644
--- a/crates/iota-json/src/tests.rs
+++ b/crates/iota-json/src/tests.rs
@@ -423,91 +423,95 @@ fn test_basic_args_linter_pure_args_good() {
}
}
-#[test]
-fn test_basic_args_linter_top_level() {
- let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("../../examples/move/basics");
- let compiled_modules = BuildConfig::new_for_testing()
- .build(&path)
- .unwrap()
- .into_modules();
- let example_package = Object::new_package_for_testing(
- &compiled_modules,
- TransactionDigest::genesis_marker(),
- BuiltInFramework::genesis_move_packages(),
- )
- .unwrap();
- let package = example_package.data.try_as_package().unwrap();
-
- let module = Identifier::new("resolve_args").unwrap();
- let function = Identifier::new("foo").unwrap();
-
- // Function signature:
- // foo(
- // _foo: &mut Foo,
- // _bar: vector,
- // _name: vector,
- // _index: u64,
- // _flag: u8,
- // _recipient: address,
- // _ctx: &mut TxContext,
- // )
-
- let foo_id = ObjectID::random();
- let bar_id = ObjectID::random();
- let baz_id = ObjectID::random();
- let recipient_addr = IotaAddress::random_for_testing_only();
-
- let foo = json!(foo_id.to_canonical_string(/* with_prefix */ true));
- let bar = json!([
- bar_id.to_canonical_string(/* with_prefix */ true),
- baz_id.to_canonical_string(/* with_prefix */ true),
- ]);
-
- let name = json!("Name");
- let index = json!("12345678");
- let flag = json!(89);
- let recipient = json!(recipient_addr.to_string());
-
- let args: Vec<_> = [
- foo.clone(),
- bar.clone(),
- name.clone(),
- index.clone(),
- flag,
- recipient.clone(),
- ]
- .into_iter()
- .map(|q| IotaJsonValue::new(q.clone()).unwrap())
- .collect();
-
- let json_args: Vec<_> =
- resolve_move_function_args(package, module.clone(), function.clone(), &[], args)
+mod move_tests {
+ use super::*;
+
+ #[test]
+ fn test_basic_args_linter_top_level() {
+ let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("../../examples/move/basics");
+ let compiled_modules = BuildConfig::new_for_testing()
+ .build(&path)
.unwrap()
- .into_iter()
- .map(|(arg, _)| arg)
- .collect();
-
- use ResolvedCallArg as RCA;
- fn pure(t: &T) -> RCA {
- RCA::Pure(bcs::to_bytes(t).unwrap())
- }
-
- assert_eq!(json_args, vec![
- RCA::Object(foo_id),
- RCA::ObjVec(vec![bar_id, baz_id]),
- pure(&"Name"),
- pure(&12345678u64),
- pure(&89u8),
- pure(&recipient_addr),
- ],);
-
- // Flag is u8 so too large
- let args: Vec<_> = [foo, bar, name, index, json!(10000u64), recipient]
+ .into_modules();
+ let example_package = Object::new_package_for_testing(
+ &compiled_modules,
+ TransactionDigest::genesis_marker(),
+ BuiltInFramework::genesis_move_packages(),
+ )
+ .unwrap();
+ let package = example_package.data.try_as_package().unwrap();
+
+ let module = Identifier::new("resolve_args").unwrap();
+ let function = Identifier::new("foo").unwrap();
+
+ // Function signature:
+ // foo(
+ // _foo: &mut Foo,
+ // _bar: vector,
+ // _name: vector,
+ // _index: u64,
+ // _flag: u8,
+ // _recipient: address,
+ // _ctx: &mut TxContext,
+ // )
+
+ let foo_id = ObjectID::random();
+ let bar_id = ObjectID::random();
+ let baz_id = ObjectID::random();
+ let recipient_addr = IotaAddress::random_for_testing_only();
+
+ let foo = json!(foo_id.to_canonical_string(/* with_prefix */ true));
+ let bar = json!([
+ bar_id.to_canonical_string(/* with_prefix */ true),
+ baz_id.to_canonical_string(/* with_prefix */ true),
+ ]);
+
+ let name = json!("Name");
+ let index = json!("12345678");
+ let flag = json!(89);
+ let recipient = json!(recipient_addr.to_string());
+
+ let args: Vec<_> = [
+ foo.clone(),
+ bar.clone(),
+ name.clone(),
+ index.clone(),
+ flag,
+ recipient.clone(),
+ ]
.into_iter()
.map(|q| IotaJsonValue::new(q.clone()).unwrap())
.collect();
- assert!(resolve_move_function_args(package, module, function, &[], args,).is_err());
+ let json_args: Vec<_> =
+ resolve_move_function_args(package, module.clone(), function.clone(), &[], args)
+ .unwrap()
+ .into_iter()
+ .map(|(arg, _)| arg)
+ .collect();
+
+ use ResolvedCallArg as RCA;
+ fn pure(t: &T) -> RCA {
+ RCA::Pure(bcs::to_bytes(t).unwrap())
+ }
+
+ assert_eq!(json_args, vec![
+ RCA::Object(foo_id),
+ RCA::ObjVec(vec![bar_id, baz_id]),
+ pure(&"Name"),
+ pure(&12345678u64),
+ pure(&89u8),
+ pure(&recipient_addr),
+ ],);
+
+ // Flag is u8 so too large
+ let args: Vec<_> = [foo, bar, name, index, json!(10000u64), recipient]
+ .into_iter()
+ .map(|q| IotaJsonValue::new(q.clone()).unwrap())
+ .collect();
+
+ assert!(resolve_move_function_args(package, module, function, &[], args,).is_err());
+ }
}
#[test]
diff --git a/crates/iota-rosetta/src/unit_tests/balance_changing_tx_tests.rs b/crates/iota-rosetta/src/unit_tests/balance_changing_tx_tests.rs
index f6e3f1904f5..88c4cb16308 100644
--- a/crates/iota-rosetta/src/unit_tests/balance_changing_tx_tests.rs
+++ b/crates/iota-rosetta/src/unit_tests/balance_changing_tx_tests.rs
@@ -139,98 +139,102 @@ async fn test_transfer_object() {
.await;
}
-#[tokio::test]
-async fn test_publish_and_move_call() {
- let network = TestClusterBuilder::new().build().await;
- let client = network.wallet.get_client().await.unwrap();
- let keystore = network.wallet.config().keystore();
- let rgp = network.get_reference_gas_price().await;
-
- // Test publish
- let addresses = network.get_addresses();
- let sender = get_random_address(&addresses, vec![]);
- let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
- path.extend(["..", "..", "examples", "move", "coin"]);
- let compiled_package = BuildConfig::new_for_testing().build(&path).unwrap();
- let compiled_modules_bytes =
- compiled_package.get_package_bytes(/* with_unpublished_deps */ false);
- let dependencies = compiled_package.get_dependency_storage_package_ids();
+mod move_tests {
+ use super::*;
+
+ #[tokio::test]
+ async fn test_publish_and_move_call() {
+ let network = TestClusterBuilder::new().build().await;
+ let client = network.wallet.get_client().await.unwrap();
+ let keystore = network.wallet.config().keystore();
+ let rgp = network.get_reference_gas_price().await;
+
+ // Test publish
+ let addresses = network.get_addresses();
+ let sender = get_random_address(&addresses, vec![]);
+ let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ path.extend(["..", "..", "examples", "move", "coin"]);
+ let compiled_package = BuildConfig::new_for_testing().build(&path).unwrap();
+ let compiled_modules_bytes =
+ compiled_package.get_package_bytes(/* with_unpublished_deps */ false);
+ let dependencies = compiled_package.get_dependency_storage_package_ids();
+
+ let pt = {
+ let mut builder = ProgrammableTransactionBuilder::new();
+ builder.publish_immutable(compiled_modules_bytes, dependencies);
+ builder.finish()
+ };
+ let response = test_transaction(
+ &client,
+ keystore,
+ vec![],
+ sender,
+ pt,
+ vec![],
+ rgp * TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE,
+ rgp,
+ false,
+ )
+ .await;
+ let object_changes = response.object_changes.unwrap();
- let pt = {
- let mut builder = ProgrammableTransactionBuilder::new();
- builder.publish_immutable(compiled_modules_bytes, dependencies);
- builder.finish()
- };
- let response = test_transaction(
- &client,
- keystore,
- vec![],
- sender,
- pt,
- vec![],
- rgp * TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE,
- rgp,
- false,
- )
- .await;
- let object_changes = response.object_changes.unwrap();
+ // Test move call (reuse published module from above test)
+ let package = object_changes
+ .iter()
+ .find_map(|change| {
+ if let ObjectChange::Published { package_id, .. } = change {
+ Some(package_id)
+ } else {
+ None
+ }
+ })
+ .unwrap();
- // Test move call (reuse published module from above test)
- let package = object_changes
- .iter()
- .find_map(|change| {
- if let ObjectChange::Published { package_id, .. } = change {
- Some(package_id)
- } else {
- None
+ let treasury = find_module_object(&object_changes, |type_| {
+ if type_.name.as_str() != "TreasuryCap" {
+ return false;
}
- })
- .unwrap();
- let treasury = find_module_object(&object_changes, |type_| {
- if type_.name.as_str() != "TreasuryCap" {
- return false;
- }
+ let Some(TypeTag::Struct(otw)) = type_.type_params.first() else {
+ return false;
+ };
- let Some(TypeTag::Struct(otw)) = type_.type_params.first() else {
- return false;
+ otw.name.as_str() == "MY_COIN"
+ });
+
+ let treasury = treasury.clone().reference.to_object_ref();
+ let recipient = *addresses.choose(&mut OsRng).unwrap();
+ let pt = {
+ let mut builder = ProgrammableTransactionBuilder::new();
+ builder
+ .move_call(
+ *package,
+ Identifier::from_str("my_coin").unwrap(),
+ Identifier::from_str("mint").unwrap(),
+ vec![],
+ vec![
+ CallArg::Object(ObjectArg::ImmOrOwnedObject(treasury)),
+ CallArg::Pure(bcs::to_bytes(&10000u64).unwrap()),
+ CallArg::Pure(bcs::to_bytes(&recipient).unwrap()),
+ ],
+ )
+ .unwrap();
+ builder.finish()
};
- otw.name.as_str() == "MY_COIN"
- });
-
- let treasury = treasury.clone().reference.to_object_ref();
- let recipient = *addresses.choose(&mut OsRng).unwrap();
- let pt = {
- let mut builder = ProgrammableTransactionBuilder::new();
- builder
- .move_call(
- *package,
- Identifier::from_str("my_coin").unwrap(),
- Identifier::from_str("mint").unwrap(),
- vec![],
- vec![
- CallArg::Object(ObjectArg::ImmOrOwnedObject(treasury)),
- CallArg::Pure(bcs::to_bytes(&10000u64).unwrap()),
- CallArg::Pure(bcs::to_bytes(&recipient).unwrap()),
- ],
- )
- .unwrap();
- builder.finish()
- };
-
- test_transaction(
- &client,
- keystore,
- vec![],
- sender,
- pt,
- vec![],
- rgp * TEST_ONLY_GAS_UNIT_FOR_GENERIC,
- rgp,
- false,
- )
- .await;
+ test_transaction(
+ &client,
+ keystore,
+ vec![],
+ sender,
+ pt,
+ vec![],
+ rgp * TEST_ONLY_GAS_UNIT_FOR_GENERIC,
+ rgp,
+ false,
+ )
+ .await;
+ }
}
#[tokio::test]
diff --git a/docs/content/_snippets/iota-evm/oracles_contract_data.mdx b/docs/content/_snippets/iota-evm/oracles_contract_data.mdx
index dd533a6e252..ff1130652be 100644
--- a/docs/content/_snippets/iota-evm/oracles_contract_data.mdx
+++ b/docs/content/_snippets/iota-evm/oracles_contract_data.mdx
@@ -16,10 +16,10 @@ import { Networks } from '@site/src/components/constant';
| Contract Type | Contract Address |
|:----------------------:|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
-| Pyth contract | [https://explorer.evm.shimmer.network/address/0x290f23E4a034Db5237edCb5aA2D94Acb4DD19fD2](https://explorer.evm.shimmer.network/address/0x290f23E4a034Db5237edCb5aA2D94Acb4DD19fD2) |
-| Supra Pull Contract | [https://explorer.evm.shimmer.network/address/0xe41444462709484272F54371F3f53bBF900Ec49E](https://explorer.evm.shimmer.network/address/0xe41444462709484272F54371F3f53bBF900Ec49E) |
-| Supra Storage Contract | [https://explorer.evm.shimmer.network/address/0x3E5E89d14576cE9f20a8347aA682517fe65B4ACB](https://explorer.evm.shimmer.network/address/0x3E5E89d14576cE9f20a8347aA682517fe65B4ACB) |
-| Supra Push Contract | [https://explorer.evm.shimmer.network/address/0x3df842b27c997cEc63160E79CB4398c82645A1c3](https://explorer.evm.shimmer.network/address/0x3df842b27c997cEc63160E79CB4398c82645A1c3) |
+| Pyth contract | [<>{Networks.shimmer.evm.blockExplorerUrls}</>/address/0x290f23E4a034Db5237edCb5aA2D94Acb4DD19fD2](https://explorer.evm.shimmer.network/address/0x290f23E4a034Db5237edCb5aA2D94Acb4DD19fD2) |
+| Supra Pull Contract | [<>{Networks.shimmer.evm.blockExplorerUrls}</>/address/0xe41444462709484272F54371F3f53bBF900Ec49E](https://explorer.evm.shimmer.network/address/0xe41444462709484272F54371F3f53bBF900Ec49E) |
+| Supra Storage Contract | [<>{Networks.shimmer.evm.blockExplorerUrls}</>/address/0x3E5E89d14576cE9f20a8347aA682517fe65B4ACB](https://explorer.evm.shimmer.network/address/0x3E5E89d14576cE9f20a8347aA682517fe65B4ACB) |
+| Supra Push Contract | [<>{Networks.shimmer.evm.blockExplorerUrls}</>/address/0x3df842b27c997cEc63160E79CB4398c82645A1c3](https://explorer.evm.shimmer.network/address/0x3df842b27c997cEc63160E79CB4398c82645A1c3) |
diff --git a/docs/content/developer/advanced/custom-indexer.mdx b/docs/content/developer/advanced/custom-indexer.mdx
index 8dc697fd66f..e3605e52a6b 100644
--- a/docs/content/developer/advanced/custom-indexer.mdx
+++ b/docs/content/developer/advanced/custom-indexer.mdx
@@ -5,6 +5,8 @@ description: You can build custom indexers using the IOTA micro-data ingestion f
import Quiz from '@site/src/components/Quiz';
import questions from '/json/developer/advanced-topics/custom-indexer.json';
+import {Networks} from '@site/src/components/constant'
+import CodeBlock from '@theme/CodeBlock';
You can build custom indexers using the IOTA micro-data ingestion framework. To create an indexer, you subscribe to a checkpoint stream with full checkpoint content. This stream can be one of the publicly available streams from IOTA, one that you set up in your local environment, or a combination of the two.
@@ -31,10 +33,10 @@ Data ingestion for your indexer supports several checkpoint stream sources.
The most straightforward stream source is to subscribe to a remote store of checkpoint contents. IOTA provides the following buckets:
-- Testnet: `https://indexer.testnet.iota.cafe`
-- Devnet: `https://indexer.devnet.iota.cafe`
+- Testnet: {Networks.iota_move_testnet.indexerRpc}
+- Devnet: {Networks.iota_move_devnet.indexerRpc}
-The checkpoint files are stored in the following format: `https://indexer.testnet.iota.cafe/.chk`. You can download the checkpoint file by sending an HTTP GET request to the relevant URL. Try it yourself for checkpoint 1 at [https://indexer.testnet.iota.cafe/1.chk](https://indexer.testnet.iota.cafe/1.chk).
+The checkpoint files are stored in the following format:
{Networks.iota_move_testnet.indexerRpc+`/.chk`}
. You can download the checkpoint file by sending an HTTP GET request to the relevant URL. Try it yourself for checkpoint 1 at [{Networks.iota_move_testnet.indexerRpc}/1.chk](https://indexer.testnet.iota.cafe/1.chk).
```mermaid
flowchart LR
diff --git a/docs/content/developer/getting-started/get-coins.mdx b/docs/content/developer/getting-started/get-coins.mdx
index 0cc7704f423..ea87dc69d25 100644
--- a/docs/content/developer/getting-started/get-coins.mdx
+++ b/docs/content/developer/getting-started/get-coins.mdx
@@ -4,6 +4,7 @@ tags: [how-to, cli, typescript, sdk, faucet]
---
import Quiz from '@site/src/components/Quiz';
import questions from '/json/developer/getting-started/get-coins.json';
+import {Networks} from '@site/src/components/constant'
# Get Test Tokens
@@ -36,7 +37,7 @@ curl --location --request POST 'https://faucet.testnet.iota.cafe/gas' \
:::tip Test tokens on a local network
-If you're working with a local network, replace `'https://faucet.devnet.iota.cafe/gas'` with the appropriate value based on which package runs your network:
+If you're working with a local network, replace {Networks.iota_move_devnet.faucetUrl}
with the appropriate value based on which package runs your network:
- `iota-faucet`: `http://127.0.0.1:5003/gas`
- `iota start`: `http://127.0.0.1:9123/gas`
diff --git a/docs/content/operator/iota-full-node/source.mdx b/docs/content/operator/iota-full-node/source.mdx
index fc8f03947a2..6bb6e1f14ea 100644
--- a/docs/content/operator/iota-full-node/source.mdx
+++ b/docs/content/operator/iota-full-node/source.mdx
@@ -6,6 +6,7 @@ import Quiz from '@site/src/components/Quiz';
import questions from '/json/node-operators/iota-full-node/node-setup.json';
import WarningAdvanced from './../../_snippets/warning-advanced-instructions-node-setup.mdx'
import NodeHardwareRequirements from './../../_snippets/node-hardware-requirements.mdx'
+import {Networks} from '@site/src/components/constant'
@@ -150,7 +151,7 @@ At this point, your IOTA Full node is ready to connect to the IOTA network.
If your setup is successful, your IOTA Full node is now connected to the appropriate network.
-Your Full node serves the read endpoints of the IOTA JSON-RPC API at: `http://127.0.0.1:9000`.
+Your Full node serves the read endpoints of the IOTA JSON-RPC API at: {Networks.iota_localnet.jsonRpcUrl}
.
## Troubleshooting
@@ -216,7 +217,7 @@ Use the following steps to update your Full node:
./target/release/iota-node --config-path fullnode.yaml
```
-Your Full node restarts on: http://127.0.0.1:9000.
+Your Full node restarts on: {Networks.iota_localnet.jsonRpcUrl}
.
:::info
diff --git a/docs/content/references/ts-sdk/typescript/index.mdx b/docs/content/references/ts-sdk/typescript/index.mdx
index f1bc88e8908..ca0b3a75128 100644
--- a/docs/content/references/ts-sdk/typescript/index.mdx
+++ b/docs/content/references/ts-sdk/typescript/index.mdx
@@ -34,39 +34,6 @@ To create a local IOTA network, you can refer to [Local Development](/developer/
-## Migrate to version 0.38.0
-
-The IOTA TypeScript SDK was refactored beginning with version 0.38.0. If you are updating from an
-earlier version of the SDK, there are some changes you should consider when updating your code.
-
-### Module structure
-
-The IOTA TypeScript SDK is now divided into modular components. Before version 0.38.0, you imported
-the complete SDK module. Now, you upload the individual packages of the SDK module instead. See the
-[Module Packages section](#module-packages) for the list of packages.
-
-### Signing transactions
-
-Signing and sending transactions changes slightly with the deprecation of the `Signer`
-pattern. For an example of transaction signing, see the
-[IOTA Programmable Transaction Blocks Basics](./transaction-building/basics.mdx) topic.
-
-### Faucet requests
-
-The ability to request IOTA from a faucet is not part of `IotaClient` as it was with
-`JsonRpcProvider`. Instead, you must use the `requestIotaFromFaucetV0` method from
-`@iota/iota-sdk/faucet`. The `@iota/iota-sdk/faucet` import also provides a `getFaucetHost` method
-to retrieve the faucet URL for `localnet`, `testnet`, or `devnet` networks.
-
-```ts
-import { getFaucetHost, requestIotaFromFaucetV0 } from '@iota/iota-sdk/faucet';
-
-await requestIotaFromFaucetV0({
- host: getFaucetHost('devnet'),
- recipient: '',
-});
-```
-
## Module packages
The SDK contains a set of modular packages that you can use independently or together. Import just
diff --git a/sdk/dapp-kit/src/themes/lightTheme.ts b/sdk/dapp-kit/src/themes/lightTheme.ts
index d4ad346b400..26b302b73fb 100644
--- a/sdk/dapp-kit/src/themes/lightTheme.ts
+++ b/sdk/dapp-kit/src/themes/lightTheme.ts
@@ -55,8 +55,8 @@ export const lightTheme: ThemeVars = {
fontFamily:
'ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"',
fontStyle: 'normal',
- lineHeight: '1.3',
- letterSpacing: '1',
+ lineHeight: '24px',
+ letterSpacing: '0.1px',
},
spacing: {
xxsmall: '4px',