diff --git a/contracts/contracts/coordination/Coordinator.sol b/contracts/contracts/coordination/Coordinator.sol index e3d3e9218..3c9969b6e 100644 --- a/contracts/contracts/coordination/Coordinator.sol +++ b/contracts/contracts/coordination/Coordinator.sol @@ -48,28 +48,6 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable event FeeModelApproved(IFeeModel feeModel); event RitualExtended(uint32 indexed ritualId, uint32 endTimestamp); - event HandoverRequest( - uint32 indexed ritualId, - address indexed departingParticipant, - address indexed incomingParticipant - ); - event HandoverTranscriptPosted( - uint32 indexed ritualId, - address indexed departingParticipant, - address indexed incomingParticipant - ); - event BlindedSharePosted(uint32 indexed ritualId, address indexed departingParticipant); - event HandoverCanceled( - uint32 indexed ritualId, - address indexed departingParticipant, - address indexed incomingParticipant - ); - event HandoverFinalized( - uint32 indexed ritualId, - address indexed departingParticipant, - address indexed incomingParticipant - ); - enum RitualState { NON_INITIATED, DKG_AWAITING_TRANSCRIPTS, @@ -80,14 +58,6 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable EXPIRED } - enum HandoverState { - NON_INITIATED, - HANDOVER_AWAITING_TRANSCRIPT, - HANDOVER_AWAITING_BLINDED_SHARE, - HANDOVER_AWAITING_FINALIZATION, - HANDOVER_TIMEOUT - } - struct Participant { address provider; bool aggregated; @@ -96,14 +66,6 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable // Note: Adjust __postSentinelGap size if this struct's size changes } - struct Handover { - uint32 requestTimestamp; - address incomingProvider; - bytes transcript; - bytes decryptionRequestStaticKey; - bytes blindedShare; - } - struct Ritual { // NOTE: changing the order here affects nucypher/nucypher: CoordinatorAgent address initiator; @@ -130,13 +92,11 @@ contract Coordinator is 
Initializable, AccessControlDefaultAdminRulesUpgradeable } bytes32 public constant FEE_MODEL_MANAGER_ROLE = keccak256("FEE_MODEL_MANAGER_ROLE"); - bytes32 public constant HANDOVER_SUPERVISOR_ROLE = keccak256("HANDOVER_SUPERVISOR_ROLE"); ITACoChildApplication public immutable application; uint96 private immutable minAuthorization; // TODO use child app for checking eligibility uint32 public immutable dkgTimeout; - uint32 public immutable handoverTimeout; Ritual[] private ritualsStub; // former rituals, "internal" for testing only uint32 private timeoutStub; // former (dkg) timeout @@ -154,18 +114,23 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable mapping(uint256 index => Ritual ritual) public rituals; uint256 public numberOfRituals; - mapping(bytes32 handoverKey => Handover handover) public handovers; + bytes32 public stub5; // former handovers + address public handoverCoordinator; // Note: Adjust the __preSentinelGap size if more contract variables are added // Storage area for sentinel values - uint256[15] internal __preSentinelGap; + uint256[14] internal __preSentinelGap; Participant internal __sentinelParticipant; uint256[20] internal __postSentinelGap; - constructor(ITACoChildApplication _application, uint32 _dkgTimeout, uint32 _handoverTimeout) { + modifier onlyHandoverCoordinator() { + require(msg.sender == handoverCoordinator, "Caller must be the handover coordinator"); + _; + } + + constructor(ITACoChildApplication _application, uint32 _dkgTimeout) { application = _application; dkgTimeout = _dkgTimeout; - handoverTimeout = _handoverTimeout; minAuthorization = _application.minimumAuthorization(); // TODO use child app for checking eligibility _disableInitializers(); } @@ -178,6 +143,10 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable __AccessControlDefaultAdminRules_init(0, _admin); } + function initializeHandoverCoordinator(address _handoverCoordinator) external reinitializer(2) { + 
handoverCoordinator = _handoverCoordinator; + } + /// @dev for backward compatibility only function timeout() external view returns (uint32) { return dkgTimeout; @@ -187,6 +156,30 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable return rituals[ritualId].initiator; } + function getThreshold(uint32 ritualId) external view returns (uint16) { + return rituals[ritualId].threshold; + } + + function getAggregatedTranscript(uint32 ritualId) external view returns (bytes memory) { + return rituals[ritualId].aggregatedTranscript; + } + + function updateParticipant( + uint32 ritualId, + address provider, + address newProvider, + bool newAggregated, + bytes memory newTranscript, + bytes memory newDecryptionRequestStaticKey + ) external onlyHandoverCoordinator { + Ritual storage ritual = rituals[ritualId]; + Participant storage participant = getParticipant(ritual, provider); + participant.provider = newProvider; + participant.aggregated = newAggregated; + participant.transcript = newTranscript; + participant.decryptionRequestStaticKey = newDecryptionRequestStaticKey; + } + function getTimestamps( uint32 ritualId ) external view returns (uint32 initTimestamp, uint32 endTimestamp) { @@ -209,14 +202,6 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable return getRitualState(rituals[ritualId]); } - function getHandoverState( - uint32 ritualId, - address departingParticipant - ) external view returns (HandoverState) { - Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; - return getHandoverState(handover); - } - function isRitualActive(Ritual storage ritual) internal view returns (bool) { return getRitualState(ritual) == RitualState.ACTIVE; } @@ -259,30 +244,6 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable } } - function getHandoverKey( - uint32 ritualId, - address departingProvider - ) public view returns (bytes32) { - return 
keccak256(abi.encode(ritualId, departingProvider)); - } - - function getHandoverState(Handover storage handover) internal view returns (HandoverState) { - uint32 t0 = handover.requestTimestamp; - uint32 deadline = t0 + handoverTimeout; - if (t0 == 0) { - return HandoverState.NON_INITIATED; - } else if (block.timestamp > deadline) { - // Handover failed due to timeout - return HandoverState.HANDOVER_TIMEOUT; - } else if (handover.transcript.length == 0) { - return HandoverState.HANDOVER_AWAITING_TRANSCRIPT; - } else if (handover.blindedShare.length == 0) { - return HandoverState.HANDOVER_AWAITING_BLINDED_SHARE; - } else { - return HandoverState.HANDOVER_AWAITING_FINALIZATION; - } - } - function setProviderPublicKey(BLS12381.G2Point calldata publicKey) external { uint32 lastRitualId = uint32(numberOfRituals); address stakingProvider = application.operatorToStakingProvider(msg.sender); @@ -409,16 +370,6 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable return 40 + (dkgSize + 1) * BLS12381.G2_POINT_SIZE + threshold * BLS12381.G1_POINT_SIZE; } - /** - * Calculates position of blinded share for particular participant - * @param index Participant index - * @param threshold Threshold - * @dev See https://github.com/nucypher/nucypher-contracts/issues/400 - */ - function blindedSharePosition(uint256 index, uint16 threshold) public pure returns (uint256) { - return 32 + index * BLS12381.G2_POINT_SIZE + threshold * BLS12381.G1_POINT_SIZE; - } - // /** // * @dev This method is deprecated. Use `publishTranscript` instead. 
// */ @@ -539,181 +490,6 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable processReimbursement(initialGasLeft); } - function handoverRequest( - uint32 ritualId, - address departingParticipant, - address incomingParticipant - ) external onlyRole(HANDOVER_SUPERVISOR_ROLE) { - require(isRitualActive(ritualId), "Ritual is not active"); - require( - isParticipant(ritualId, departingParticipant), - "Departing node must be a participant" - ); - require( - !isParticipant(ritualId, incomingParticipant), - "Incoming node cannot be a participant" - ); - - Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; - HandoverState state = getHandoverState(handover); - - require( - state == HandoverState.NON_INITIATED || state == HandoverState.HANDOVER_TIMEOUT, - "Handover already requested" - ); - require(isProviderKeySet(incomingParticipant), "Incoming provider has not set public key"); - require( - application.authorizedStake(incomingParticipant) >= minAuthorization, - "Not enough authorization" - ); - handover.requestTimestamp = uint32(block.timestamp); - handover.incomingProvider = incomingParticipant; - delete handover.blindedShare; - delete handover.transcript; - delete handover.decryptionRequestStaticKey; - emit HandoverRequest(ritualId, departingParticipant, incomingParticipant); - } - - function postHandoverTranscript( - uint32 ritualId, - address departingParticipant, - bytes calldata transcript, - bytes calldata decryptionRequestStaticKey - ) external { - uint256 initialGasLeft = gasleft(); - require(isRitualActive(ritualId), "Ritual is not active"); - require(transcript.length > 0, "Parameters can't be empty"); - require( - decryptionRequestStaticKey.length == 42, - "Invalid length for decryption request static key" - ); - - Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; - require( - getHandoverState(handover) == HandoverState.HANDOVER_AWAITING_TRANSCRIPT, - "Not 
waiting for transcript" - ); - address provider = application.operatorToStakingProvider(msg.sender); - require(handover.incomingProvider == provider, "Wrong incoming provider"); - - handover.transcript = transcript; - handover.decryptionRequestStaticKey = decryptionRequestStaticKey; - emit HandoverTranscriptPosted(ritualId, departingParticipant, provider); - processReimbursement(initialGasLeft); - } - - function postBlindedShare(uint32 ritualId, bytes calldata blindedShare) external { - uint256 initialGasLeft = gasleft(); - require(isRitualActive(ritualId), "Ritual is not active"); - - address provider = application.operatorToStakingProvider(msg.sender); - Handover storage handover = handovers[getHandoverKey(ritualId, provider)]; - require( - getHandoverState(handover) == HandoverState.HANDOVER_AWAITING_BLINDED_SHARE, - "Not waiting for blinded share" - ); - require(blindedShare.length == BLS12381.G2_POINT_SIZE, "Wrong size of blinded share"); - - handover.blindedShare = blindedShare; - emit BlindedSharePosted(ritualId, provider); - processReimbursement(initialGasLeft); - } - - function cancelHandover( - uint32 ritualId, - address departingParticipant - ) external onlyRole(HANDOVER_SUPERVISOR_ROLE) { - Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; - address incomingParticipant = handover.incomingProvider; - - require( - getHandoverState(handover) != HandoverState.NON_INITIATED, - "Handover not requested" - ); - handover.requestTimestamp = 0; - handover.incomingProvider = address(0); - delete handover.blindedShare; - delete handover.transcript; - delete handover.decryptionRequestStaticKey; - - emit HandoverCanceled(ritualId, departingParticipant, incomingParticipant); - } - - function finalizeHandover( - uint32 ritualId, - address departingParticipant - ) external onlyRole(HANDOVER_SUPERVISOR_ROLE) { - require(isRitualActive(ritualId), "Ritual is not active"); - - Handover storage handover = handovers[getHandoverKey(ritualId, 
departingParticipant)]; - require( - getHandoverState(handover) == HandoverState.HANDOVER_AWAITING_FINALIZATION, - "Not waiting for finalization" - ); - address incomingParticipant = handover.incomingProvider; - - Ritual storage ritual = rituals[ritualId]; - (, Participant storage participant, uint256 participantIndex) = findParticipant( - ritual, - departingParticipant - ); - participant.provider = incomingParticipant; - participant.decryptionRequestStaticKey = handover.decryptionRequestStaticKey; - delete participant.transcript; - - uint256 startIndex = blindedSharePosition(participantIndex, ritual.threshold); - replaceStorageBytes(ritual.aggregatedTranscript, handover.blindedShare, startIndex); - bytes32 aggregatedTranscriptDigest = keccak256(ritual.aggregatedTranscript); - emit AggregationPosted(ritualId, incomingParticipant, aggregatedTranscriptDigest); - - handover.requestTimestamp = 0; - handover.incomingProvider = address(0); - delete handover.blindedShare; - delete handover.transcript; - delete handover.decryptionRequestStaticKey; - - emit HandoverFinalized(ritualId, departingParticipant, incomingParticipant); - application.release(departingParticipant); - } - - function replaceStorageBytes( - bytes storage _preBytes, - bytes memory _postBytes, - uint256 startIndex - ) internal { - assembly { - let mlength := mload(_postBytes) - - // get the keccak hash to get the contents of the array - mstore(0x0, _preBytes.slot) - // Start copying to the last used word of the stored array. 
- let sc := add(keccak256(0x0, 0x20), div(startIndex, 32)) - - // Copy over the first `submod` bytes of the new data - - let slengthmod := mod(startIndex, 32) - let submod := sub(32, slengthmod) - let mc := add(_postBytes, submod) - let end := add(_postBytes, mlength) - let mask := sub(exp(0x100, submod), 1) - - sstore(sc, add(and(sload(sc), not(mask)), and(mload(mc), mask))) - - for { - sc := add(sc, 1) - mc := add(mc, 0x20) - } lt(mc, end) { - sc := add(sc, 1) - mc := add(mc, 0x20) - } { - sstore(sc, mload(mc)) - } - - mask := sub(exp(0x100, sub(mc, end)), 1) - sstore(sc, add(and(sload(sc), mask), and(mload(mc), not(mask)))) - } - } - function getRitualIdFromPublicKey( BLS12381.G1Point memory dkgPublicKey ) external view returns (uint32 ritualId) { @@ -869,4 +645,54 @@ contract Coordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable // require(tokenBalance > 0, "Insufficient balance"); // token.safeTransfer(msg.sender, tokenBalance); // } + + function replaceAggregatedTranscriptBytes( + uint32 ritualId, + address incomingParticipant, + bytes memory newBytes, + uint256 startIndex + ) external onlyHandoverCoordinator { + Ritual storage ritual = rituals[ritualId]; + replaceStorageBytes(ritual.aggregatedTranscript, newBytes, startIndex); + bytes32 aggregatedTranscriptDigest = keccak256(ritual.aggregatedTranscript); + emit AggregationPosted(ritualId, incomingParticipant, aggregatedTranscriptDigest); + } + + function replaceStorageBytes( + bytes storage _preBytes, + bytes memory _postBytes, + uint256 startIndex + ) internal { + assembly { + let mlength := mload(_postBytes) + + // get the keccak hash to get the contents of the array + mstore(0x0, _preBytes.slot) + // Start copying to the last used word of the stored array. 
+ let sc := add(keccak256(0x0, 0x20), div(startIndex, 32)) + + // Copy over the first `submod` bytes of the new data + + let slengthmod := mod(startIndex, 32) + let submod := sub(32, slengthmod) + let mc := add(_postBytes, submod) + let end := add(_postBytes, mlength) + let mask := sub(exp(0x100, submod), 1) + + sstore(sc, add(and(sload(sc), not(mask)), and(mload(mc), mask))) + + for { + sc := add(sc, 1) + mc := add(mc, 0x20) + } lt(mc, end) { + sc := add(sc, 1) + mc := add(mc, 0x20) + } { + sstore(sc, mload(mc)) + } + + mask := sub(exp(0x100, sub(mc, end)), 1) + sstore(sc, add(and(sload(sc), mask), and(mload(mc), not(mask)))) + } + } } diff --git a/contracts/contracts/coordination/HandoverCoordinator.sol b/contracts/contracts/coordination/HandoverCoordinator.sol new file mode 100644 index 000000000..6757e5be7 --- /dev/null +++ b/contracts/contracts/coordination/HandoverCoordinator.sol @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +pragma solidity ^0.8.0; + +import "@openzeppelin-upgradeable/contracts/access/extensions/AccessControlDefaultAdminRulesUpgradeable.sol"; +import "@openzeppelin-upgradeable/contracts/proxy/utils/Initializable.sol"; +import "../../threshold/ITACoChildApplication.sol"; +import "./Coordinator.sol"; + +/** + * @title HandoverCoordinator + * @notice Coordination layer for Handover protocol + */ +contract HandoverCoordinator is Initializable, AccessControlDefaultAdminRulesUpgradeable { + event ReimbursementPoolSet(address indexed pool); + event HandoverRequest( + uint32 indexed ritualId, + address indexed departingParticipant, + address indexed incomingParticipant + ); + event HandoverTranscriptPosted( + uint32 indexed ritualId, + address indexed departingParticipant, + address indexed incomingParticipant + ); + event BlindedSharePosted(uint32 indexed ritualId, address indexed departingParticipant); + event HandoverCanceled( + uint32 indexed ritualId, + address indexed departingParticipant, + address indexed 
incomingParticipant + ); + event HandoverFinalized( + uint32 indexed ritualId, + address indexed departingParticipant, + address indexed incomingParticipant + ); + + enum HandoverState { + NON_INITIATED, + HANDOVER_AWAITING_TRANSCRIPT, + HANDOVER_AWAITING_BLINDED_SHARE, + HANDOVER_AWAITING_FINALIZATION, + HANDOVER_TIMEOUT + } + + struct Handover { + uint32 requestTimestamp; + address incomingProvider; + bytes transcript; + bytes decryptionRequestStaticKey; + bytes blindedShare; + } + + bytes32 public constant HANDOVER_SUPERVISOR_ROLE = keccak256("HANDOVER_SUPERVISOR_ROLE"); + + ITACoChildApplication public immutable application; + Coordinator public immutable coordinator; + uint32 public immutable handoverTimeout; + uint96 private immutable minAuthorization; // TODO use child app for checking eligibility + + IReimbursementPool internal reimbursementPool; + mapping(bytes32 handoverKey => Handover handover) public handovers; + // Note: Adjust the __preSentinelGap size if more contract variables are added + + uint256[20] internal __gap; + + constructor( + ITACoChildApplication _application, + Coordinator _coordinator, + uint32 _handoverTimeout + ) { + application = _application; + coordinator = _coordinator; + handoverTimeout = _handoverTimeout; + minAuthorization = _application.minimumAuthorization(); // TODO use child app for checking eligibility + _disableInitializers(); + } + + /** + * @notice Initialize function for using with OpenZeppelin proxy + */ + function initialize(address _admin) external initializer { + __AccessControlDefaultAdminRules_init(0, _admin); + } + + function setReimbursementPool(IReimbursementPool pool) external onlyRole(DEFAULT_ADMIN_ROLE) { + require( + address(pool) == address(0) || pool.isAuthorized(address(this)), + "Invalid ReimbursementPool" + ); + reimbursementPool = pool; + emit ReimbursementPoolSet(address(pool)); + } + + function processReimbursement(uint256 initialGasLeft) internal { + if (address(reimbursementPool) != address(0)) 
{ + // For calldataGasCost calculation, see https://github.com/nucypher/nucypher-contracts/issues/328 + uint256 calldataGasCost = (msg.data.length - 128) * 16 + 128 * 4; + uint256 gasUsed = initialGasLeft - gasleft() + calldataGasCost; + try reimbursementPool.refund(gasUsed, msg.sender) { + return; + } catch { + return; + } + } + } + + function getHandoverKey( + uint32 ritualId, + address departingProvider + ) public view returns (bytes32) { + return keccak256(abi.encode(ritualId, departingProvider)); + } + + function getHandoverState( + uint32 ritualId, + address departingParticipant + ) external view returns (HandoverState) { + Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; + return getHandoverState(handover); + } + + function getHandoverState(Handover storage handover) internal view returns (HandoverState) { + uint32 t0 = handover.requestTimestamp; + uint32 deadline = t0 + handoverTimeout; + if (t0 == 0) { + return HandoverState.NON_INITIATED; + } else if (block.timestamp > deadline) { + // Handover failed due to timeout + return HandoverState.HANDOVER_TIMEOUT; + } else if (handover.transcript.length == 0) { + return HandoverState.HANDOVER_AWAITING_TRANSCRIPT; + } else if (handover.blindedShare.length == 0) { + return HandoverState.HANDOVER_AWAITING_BLINDED_SHARE; + } else { + return HandoverState.HANDOVER_AWAITING_FINALIZATION; + } + } + + /** + * Calculates position of blinded share for particular participant + * @param index Participant index + * @param threshold Threshold + * @dev See https://github.com/nucypher/nucypher-contracts/issues/400 + */ + function blindedSharePosition(uint256 index, uint16 threshold) public pure returns (uint256) { + return 32 + index * BLS12381.G2_POINT_SIZE + threshold * BLS12381.G1_POINT_SIZE; + } + + function handoverRequest( + uint32 ritualId, + address departingParticipant, + address incomingParticipant + ) external onlyRole(HANDOVER_SUPERVISOR_ROLE) { + 
require(coordinator.isRitualActive(ritualId), "Ritual is not active"); + require( + coordinator.isParticipant(ritualId, departingParticipant), + "Departing node must be a participant" + ); + require( + !coordinator.isParticipant(ritualId, incomingParticipant), + "Incoming node cannot be a participant" + ); + + Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; + HandoverState state = getHandoverState(handover); + + require( + state == HandoverState.NON_INITIATED || state == HandoverState.HANDOVER_TIMEOUT, + "Handover already requested" + ); + require( + coordinator.isProviderKeySet(incomingParticipant), + "Incoming provider has not set public key" + ); + require( + application.authorizedStake(incomingParticipant) >= minAuthorization, + "Not enough authorization" + ); + handover.requestTimestamp = uint32(block.timestamp); + handover.incomingProvider = incomingParticipant; + delete handover.blindedShare; + delete handover.transcript; + delete handover.decryptionRequestStaticKey; + emit HandoverRequest(ritualId, departingParticipant, incomingParticipant); + } + + function postHandoverTranscript( + uint32 ritualId, + address departingParticipant, + bytes calldata transcript, + bytes calldata decryptionRequestStaticKey + ) external { + uint256 initialGasLeft = gasleft(); + require(coordinator.isRitualActive(ritualId), "Ritual is not active"); + require(transcript.length > 0, "Parameters can't be empty"); + require( + decryptionRequestStaticKey.length == 42, + "Invalid length for decryption request static key" + ); + + Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; + require( + getHandoverState(handover) == HandoverState.HANDOVER_AWAITING_TRANSCRIPT, + "Not waiting for transcript" + ); + address provider = application.operatorToStakingProvider(msg.sender); + require(handover.incomingProvider == provider, "Wrong incoming provider"); + + handover.transcript = transcript; + 
handover.decryptionRequestStaticKey = decryptionRequestStaticKey; + emit HandoverTranscriptPosted(ritualId, departingParticipant, provider); + processReimbursement(initialGasLeft); + } + + function postBlindedShare(uint32 ritualId, bytes calldata blindedShare) external { + uint256 initialGasLeft = gasleft(); + require(coordinator.isRitualActive(ritualId), "Ritual is not active"); + + address provider = application.operatorToStakingProvider(msg.sender); + Handover storage handover = handovers[getHandoverKey(ritualId, provider)]; + require( + getHandoverState(handover) == HandoverState.HANDOVER_AWAITING_BLINDED_SHARE, + "Not waiting for blinded share" + ); + require(blindedShare.length == BLS12381.G2_POINT_SIZE, "Wrong size of blinded share"); + + handover.blindedShare = blindedShare; + emit BlindedSharePosted(ritualId, provider); + processReimbursement(initialGasLeft); + } + + function cancelHandover( + uint32 ritualId, + address departingParticipant + ) external onlyRole(HANDOVER_SUPERVISOR_ROLE) { + Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; + address incomingParticipant = handover.incomingProvider; + + require( + getHandoverState(handover) != HandoverState.NON_INITIATED, + "Handover not requested" + ); + handover.requestTimestamp = 0; + handover.incomingProvider = address(0); + delete handover.blindedShare; + delete handover.transcript; + delete handover.decryptionRequestStaticKey; + + emit HandoverCanceled(ritualId, departingParticipant, incomingParticipant); + } + + function finalizeHandover( + uint32 ritualId, + address departingParticipant + ) external onlyRole(HANDOVER_SUPERVISOR_ROLE) { + require(coordinator.isRitualActive(ritualId), "Ritual is not active"); + + Handover storage handover = handovers[getHandoverKey(ritualId, departingParticipant)]; + require( + getHandoverState(handover) == HandoverState.HANDOVER_AWAITING_FINALIZATION, + "Not waiting for finalization" + ); + address incomingParticipant = 
handover.incomingProvider; + + Coordinator.Participant[] memory participants = coordinator.getParticipants(ritualId); + uint256 participantIndex = findParticipant(participants, departingParticipant); + coordinator.updateParticipant( + ritualId, + departingParticipant, + incomingParticipant, + true, + new bytes(0), + handover.decryptionRequestStaticKey + ); + + uint16 threshold = coordinator.getThreshold(ritualId); + uint256 startIndex = blindedSharePosition(participantIndex, threshold); + coordinator.replaceAggregatedTranscriptBytes( + ritualId, + incomingParticipant, + handover.blindedShare, + startIndex + ); + + handover.requestTimestamp = 0; + handover.incomingProvider = address(0); + delete handover.blindedShare; + delete handover.transcript; + delete handover.decryptionRequestStaticKey; + + emit HandoverFinalized(ritualId, departingParticipant, incomingParticipant); + application.release(departingParticipant); + } + + function findParticipant( + Coordinator.Participant[] memory participants, + address provider + ) internal view returns (uint256 index) { + for (uint256 i = 0; i < participants.length; i++) { + Coordinator.Participant memory participant = participants[i]; + if (participant.provider == provider) { + return i; + } + } + } +} diff --git a/deployment/constructor_params/ci/child.yml b/deployment/constructor_params/ci/child.yml index a08dbec31..e2c744fc1 100644 --- a/deployment/constructor_params/ci/child.yml +++ b/deployment/constructor_params/ci/child.yml @@ -30,6 +30,13 @@ contracts: constructor: _application: $TACoChildApplication _dkgTimeout: $ONE_HOUR_IN_SECONDS + - HandoverCoordinator: + proxy: + constructor: + _data: $encode:initialize,$deployer + constructor: + _application: $TACoChildApplication + _coordinator: $Coordinator _handoverTimeout: $ONE_DAY_IN_SECONDS - GlobalAllowList: constructor: diff --git a/deployment/constructor_params/lynx/child.yml b/deployment/constructor_params/lynx/child.yml index f085a02b5..41725d50d 100644 --- 
a/deployment/constructor_params/lynx/child.yml +++ b/deployment/constructor_params/lynx/child.yml @@ -8,10 +8,10 @@ artifacts: constants: ONE_HOUR_IN_SECONDS: 3600 - ONE_DAY_IN_SECONDS: 86400 FORTY_THOUSAND_TOKENS_IN_WEI_UNITS: 40000000000000000000000 TEN_MILLION_TOKENS_IN_WEI_UNITS: 10000000000000000000000000 # https://www.youtube.com/watch?v=EJR1H5tf5wE MAX_DKG_SIZE: 4 + HANDOVER_TIMEOUT_SECONDS: 900 # 15 minutes contracts: - MockPolygonChild @@ -30,9 +30,14 @@ contracts: constructor: _application: $TACoChildApplication _dkgTimeout: $ONE_HOUR_IN_SECONDS - _handoverTimeout: $ONE_DAY_IN_SECONDS - _currency: $LynxRitualToken - _feeRatePerSecond: 1 + - HandoverCoordinator: + proxy: + constructor: + _data: $encode:initialize,$deployer + constructor: + _application: $TACoChildApplication + _coordinator: $Coordinator + _handoverTimeout: $HANDOVER_TIMEOUT_SECONDS - GlobalAllowList: constructor: _coordinator: $Coordinator diff --git a/deployment/constructor_params/lynx/upgrade-coordinator.yml b/deployment/constructor_params/lynx/upgrade-coordinator.yml index d1af6f5ce..a3c27b14d 100644 --- a/deployment/constructor_params/lynx/upgrade-coordinator.yml +++ b/deployment/constructor_params/lynx/upgrade-coordinator.yml @@ -9,11 +9,9 @@ artifacts: constants: TACO_CHILD_APPLICATION: "0x42F30AEc1A36995eEFaf9536Eb62BD751F982D32" DKG_TIMEOUT_SECONDS: 3600 # 1 hour - HANDOVER_TIMEOUT_SECONDS: 900 # 15 minutes contracts: - Coordinator: constructor: _application: $TACO_CHILD_APPLICATION _dkgTimeout: $DKG_TIMEOUT_SECONDS - _handoverTimeout: $HANDOVER_TIMEOUT_SECONDS diff --git a/deployment/constructor_params/lynx/upgrade-handover-coordinator.yml b/deployment/constructor_params/lynx/upgrade-handover-coordinator.yml new file mode 100644 index 000000000..6b6f6d860 --- /dev/null +++ b/deployment/constructor_params/lynx/upgrade-handover-coordinator.yml @@ -0,0 +1,20 @@ +deployment: + name: lynx-upgrade-handover-coordinator + chain_id: 80002 + +artifacts: + dir: ./deployment/artifacts/ + 
filename: lynx-upgrade-handover-coordinator.json + +constants: + TACO_CHILD_APPLICATION: "0x42F30AEc1A36995eEFaf9536Eb62BD751F982D32" + COORDINATOR: "0xE9e94499bB0f67b9DBD75506ec1735486DE57770" + DKG_TIMEOUT_SECONDS: 3600 # 1 hour + HANDOVER_TIMEOUT_SECONDS: 900 # 15 minutes + +contracts: + - HandoverCoordinator: + constructor: + _application: $TACO_CHILD_APPLICATION + _coordinator: $COORDINATOR + _handoverTimeout: $HANDOVER_TIMEOUT_SECONDS diff --git a/deployment/constructor_params/mainnet/child.yml b/deployment/constructor_params/mainnet/child.yml index 5ed0cd5d3..e49d00a46 100644 --- a/deployment/constructor_params/mainnet/child.yml +++ b/deployment/constructor_params/mainnet/child.yml @@ -51,9 +51,14 @@ contracts: constructor: _application: $TACoChildApplication _dkgTimeout: $DKG_TIMEOUT_IN_SECONDS + - HandoverCoordinator: + proxy: + constructor: + _data: $encode:initialize,$deployer + constructor: + _application: $TACoChildApplication + _coordinator: $Coordinator + _handoverTimeout: $HANDOVER_TIMEOUT_IN_SECONDS - _currency: $DAI_ON_POLYGON - _feeRatePerSecond: $PRIVATE_BETA_FEE_RATE - GlobalAllowList: constructor: _coordinator: $Coordinator diff --git a/deployment/constructor_params/mainnet/deploy-handover-coordinator.yml b/deployment/constructor_params/mainnet/deploy-handover-coordinator.yml new file mode 100644 index 000000000..f11536942 --- /dev/null +++ b/deployment/constructor_params/mainnet/deploy-handover-coordinator.yml @@ -0,0 +1,24 @@ +deployment: + name: deploy-handover-coordinator + chain_id: 137 # Polygon Mainnet + +artifacts: + dir: ./deployment/artifacts/ + filename: mainnet-deploy-handover-coordinator.json + +constants: + # See deployment/artifacts/mainnet.json + TACO_CHILD_APPLICATION: "0xFa07aaB78062Fac4C36995bF28F6D677667973F5" + COORDINATOR: "0xE74259e3dafe30bAA8700238e324b47aC98FE755" + DKG_TIMEOUT_SECONDS: 10800 # 3 hours (existing value) + HANDOVER_TIMEOUT_IN_SECONDS: 86400 # One day in seconds + +contracts: + - HandoverCoordinator: + proxy: 
+ constructor: + _data: $encode:initialize,$deployer + constructor: + _application: $TACO_CHILD_APPLICATION + _coordinator: $COORDINATOR + _handoverTimeout: $HANDOVER_TIMEOUT_IN_SECONDS diff --git a/deployment/constructor_params/mainnet/redeploy-coordinator.yml b/deployment/constructor_params/mainnet/redeploy-coordinator.yml index c3eee400d..49eb9167b 100644 --- a/deployment/constructor_params/mainnet/redeploy-coordinator.yml +++ b/deployment/constructor_params/mainnet/redeploy-coordinator.yml @@ -10,11 +10,9 @@ constants: # See deployment/artifacts/mainnet.json TACO_CHILD_APPLICATION: "0xFa07aaB78062Fac4C36995bF28F6D677667973F5" DKG_TIMEOUT_SECONDS: 10800 # 3 hours (existing value) - HANDOVER_TIMEOUT_SECONDS: 3600 # 1 hour contracts: - Coordinator: constructor: _application: $TACO_CHILD_APPLICATION _dkgTimeout: $DKG_TIMEOUT_SECONDS - _handoverTimeout: $HANDOVER_TIMEOUT_SECONDS diff --git a/deployment/constructor_params/mainnet/redeploy-handover-coordinator.yml b/deployment/constructor_params/mainnet/redeploy-handover-coordinator.yml new file mode 100644 index 000000000..149f6659c --- /dev/null +++ b/deployment/constructor_params/mainnet/redeploy-handover-coordinator.yml @@ -0,0 +1,21 @@ +deployment: + name: redeploy-handover-coordinator + chain_id: 137 # Polygon Mainnet + +artifacts: + dir: ./deployment/artifacts/ + filename: mainnet-redeploy-handover-coordinator.json + +constants: + # See deployment/artifacts/mainnet.json + TACO_CHILD_APPLICATION: "0xFa07aaB78062Fac4C36995bF28F6D677667973F5" + COORDINATOR: "0xE74259e3dafe30bAA8700238e324b47aC98FE755" + DKG_TIMEOUT_SECONDS: 10800 # 3 hours (existing value) + HANDOVER_TIMEOUT_IN_SECONDS: 86400 # One day in seconds + +contracts: + - HandoverCoordinator: + constructor: + _application: $TACO_CHILD_APPLICATION + _coordinator: $COORDINATOR + _handoverTimeout: $HANDOVER_TIMEOUT_IN_SECONDS diff --git a/deployment/constructor_params/tapir/child.yml b/deployment/constructor_params/tapir/child.yml index 614b9985f..72d8f0b4d 
100644 --- a/deployment/constructor_params/tapir/child.yml +++ b/deployment/constructor_params/tapir/child.yml @@ -30,9 +30,14 @@ contracts: constructor: _application: $TACoChildApplication _dkgTimeout: $ONE_HOUR_IN_SECONDS + - HandoverCoordinator: + proxy: + constructor: + _data: $encode:initialize,$deployer + constructor: + _application: $TACoChildApplication + _coordinator: $Coordinator _handoverTimeout: $ONE_DAY_IN_SECONDS - _currency: $TapirRitualToken - _feeRatePerSecond: 1 - GlobalAllowList: constructor: _coordinator: $Coordinator diff --git a/deployment/constructor_params/tapir/upgrade-child.yml b/deployment/constructor_params/tapir/upgrade-child.yml index c120712b5..8f9d4d5f3 100644 --- a/deployment/constructor_params/tapir/upgrade-child.yml +++ b/deployment/constructor_params/tapir/upgrade-child.yml @@ -11,7 +11,6 @@ constants: TAPIR_RITUAL_TOKEN: "0xf91afFE7cf1d9c367Cb56eDd70C0941a4E8570d9" TACO_CHILD_APPLICATION_PROXY: "0x489287Ed5BdF7a35fEE411FBdCc47331093D0769" DKG_TIMEOUT_SECONDS: 3600 # 1 hour - HANDOVER_TIMEOUT_SECONDS: 900 # 15 minutes contracts: - MockPolygonChild @@ -23,4 +22,3 @@ contracts: constructor: _application: $TACO_CHILD_APPLICATION_PROXY _dkgTimeout: $DKG_TIMEOUT_SECONDS - _handoverTimeout: $HANDOVER_TIMEOUT_SECONDS diff --git a/deployment/constructor_params/tapir/upgrade-coordinator.yml b/deployment/constructor_params/tapir/upgrade-coordinator.yml index ae0e5ed22..8bbe488bf 100644 --- a/deployment/constructor_params/tapir/upgrade-coordinator.yml +++ b/deployment/constructor_params/tapir/upgrade-coordinator.yml @@ -9,11 +9,9 @@ artifacts: constants: TACO_CHILD_APPLICATION: "0x489287Ed5BdF7a35fEE411FBdCc47331093D0769" DKG_TIMEOUT_SECONDS: 3600 # 1 hour - HANDOVER_TIMEOUT_SECONDS: 900 # 15 minutes contracts: - Coordinator: constructor: _application: $TACO_CHILD_APPLICATION _dkgTimeout: $DKG_TIMEOUT_SECONDS - _handoverTimeout: $HANDOVER_TIMEOUT_SECONDS diff --git a/scripts/ci/deploy_child.py b/scripts/ci/deploy_child.py index 
38b39c80d..f58f7ceba 100644 --- a/scripts/ci/deploy_child.py +++ b/scripts/ci/deploy_child.py @@ -20,7 +20,7 @@ def create_upgrade_coordinator_yaml( - output_file: Path, taco_child_application_address: str, dkg_timeout: int, handover_timeout: int + output_file: Path, taco_child_application_address: str, dkg_timeout: int ) -> None: """ Creates a YAML file for the upgrade process. @@ -38,7 +38,6 @@ def create_upgrade_coordinator_yaml( constructor: _application: "{taco_child_application_address}" _dkgTimeout: {dkg_timeout} - _handoverTimeout: {handover_timeout} """ with open(output_file, "w") as file: file.write(yaml_text) @@ -65,6 +64,8 @@ def main(): coordinator = deployer.deploy(project.Coordinator) + handover_coordinator = deployer.deploy(project.HandoverCoordinator) + global_allow_list = deployer.deploy(project.GlobalAllowList) deployments = [ @@ -72,6 +73,7 @@ def main(): taco_child_application, ritual_token, coordinator, + handover_coordinator, global_allow_list, ] @@ -82,7 +84,6 @@ def main(): UPGRADE_PARAMS_FILEPATH, str(taco_child_application.address), coordinator.dkgTimeout(), - coordinator.handoverTimeout(), ) with networks.ethereum.local.use_provider("test"): diff --git a/scripts/finalize_handover.py b/scripts/finalize_handover.py index 620fe61e2..cd7edf4ca 100644 --- a/scripts/finalize_handover.py +++ b/scripts/finalize_handover.py @@ -95,7 +95,7 @@ def cli( click.echo(f"Connected to {network.name} network.") # Get the contracts from the registry - coordinator_contract = registry.get_contract(domain=domain, contract_name="Coordinator") + coordinator_contract = registry.get_contract(domain=domain, contract_name="HandoverCoordinator") # Validate the handover data click.echo( diff --git a/scripts/lynx/upgrade_handover_coordinator.py b/scripts/lynx/upgrade_handover_coordinator.py new file mode 100644 index 000000000..403912793 --- /dev/null +++ b/scripts/lynx/upgrade_handover_coordinator.py @@ -0,0 +1,36 @@ +#!/usr/bin/python3 + +from ape import project + 
+from deployment.constants import ARTIFACTS_DIR, CONSTRUCTOR_PARAMS_DIR +from deployment.params import Deployer +from deployment.registry import contracts_from_registry, merge_registries + +VERIFY = False +CONSTRUCTOR_PARAMS_FILEPATH = CONSTRUCTOR_PARAMS_DIR / "lynx" / "upgrade-handover-coordinator.yml" +LYNX_REGISTRY = ARTIFACTS_DIR / "lynx.json" + + +def main(): + """ + This script upgrades HandoverCoordinator on Lynx/Amoy. + """ + + deployer = Deployer.from_yaml(filepath=CONSTRUCTOR_PARAMS_FILEPATH, verify=VERIFY) + instances = contracts_from_registry(filepath=ARTIFACTS_DIR / "lynx.json", chain_id=80002) + + coordinator = deployer.upgrade( + project.HandoverCoordinator, + instances[project.HandoverCoordinator.contract_type.name].address, + ) + + deployments = [ + coordinator, + ] + + deployer.finalize(deployments=deployments) + merge_registries( + registry_1_filepath=LYNX_REGISTRY, + registry_2_filepath=deployer.registry_filepath, + output_filepath=LYNX_REGISTRY, + ) diff --git a/scripts/mainnet/deploy_child.py b/scripts/mainnet/deploy_child.py index a60201775..7b9f15e39 100644 --- a/scripts/mainnet/deploy_child.py +++ b/scripts/mainnet/deploy_child.py @@ -10,7 +10,6 @@ def main(): - deployer = Deployer.from_yaml(filepath=CONSTRUCTOR_PARAMS_FILEPATH, verify=VERIFY) polygon_child = deployer.deploy(project.PolygonChild) @@ -23,27 +22,26 @@ def main(): deployer.transact(taco_child_application.initialize, coordinator.address) + handover_coordinator = deployer.deploy(project.HandoverCoordinator) + + deployer.transact(coordinator.initializeHandoverCoordinator, handover_coordinator.address) + # Grant TREASURY_ROLE to Treasury Guild Multisig on Polygon (0xc3Bf49eBA094AF346830dF4dbB42a07dE378EeB6) TREASURY_ROLE = coordinator.TREASURY_ROLE() deployer.transact( - coordinator.grantRole, - TREASURY_ROLE, - deployer.constants.TREASURY_GUILD_ON_POLYGON + coordinator.grantRole, TREASURY_ROLE, deployer.constants.TREASURY_GUILD_ON_POLYGON ) # Grant INITIATOR_ROLE to Integrations 
Guild INITIATOR_ROLE = coordinator.INITIATOR_ROLE() deployer.transact( - coordinator.grantRole, - INITIATOR_ROLE, - deployer.constants.INTEGRATIONS_GUILD_ON_POLYGON - ) + coordinator.grantRole, INITIATOR_ROLE, deployer.constants.INTEGRATIONS_GUILD_ON_POLYGON + ) # TODO: BetaProgramInitiator will be deployed separately, so council will grant the role later - + # Change Coordinator admin to Council on Polygon deployer.transact( - coordinator.beginDefaultAdminTransfer, - deployer.constants.THRESHOLD_COUNCIL_ON_POLYGON + coordinator.beginDefaultAdminTransfer, deployer.constants.THRESHOLD_COUNCIL_ON_POLYGON ) # This requires the Council accepting the transfer by calling acceptDefaultAdminTransfer() @@ -53,6 +51,7 @@ def main(): polygon_child, taco_child_application, coordinator, + handover_coordinator, global_allow_list, ] diff --git a/scripts/mainnet/deploy_handover_coordinator.py b/scripts/mainnet/deploy_handover_coordinator.py new file mode 100644 index 000000000..cd3da34d5 --- /dev/null +++ b/scripts/mainnet/deploy_handover_coordinator.py @@ -0,0 +1,22 @@ +#!/usr/bin/python3 + +from ape import project + +from deployment.constants import CONSTRUCTOR_PARAMS_DIR +from deployment.params import Deployer + +VERIFY = False +CONSTRUCTOR_PARAMS_FILEPATH = CONSTRUCTOR_PARAMS_DIR / "mainnet" / "deploy-handover-coordinator.yml" + + +def main(): + deployer = Deployer.from_yaml(filepath=CONSTRUCTOR_PARAMS_FILEPATH, verify=VERIFY) + + # NuCo Multisig owns contract so it must do the proxy upgrade + coordinator_implementation = deployer.deploy(project.HandoverCoordinator) + + deployments = [ + coordinator_implementation, + ] + + deployer.finalize(deployments=deployments) diff --git a/scripts/mainnet/redeploy_handover_coordinator.py b/scripts/mainnet/redeploy_handover_coordinator.py new file mode 100644 index 000000000..8c976c5c9 --- /dev/null +++ b/scripts/mainnet/redeploy_handover_coordinator.py @@ -0,0 +1,24 @@ +#!/usr/bin/python3 + +from ape import project + +from 
deployment.constants import CONSTRUCTOR_PARAMS_DIR +from deployment.params import Deployer + +VERIFY = False +CONSTRUCTOR_PARAMS_FILEPATH = ( + CONSTRUCTOR_PARAMS_DIR / "mainnet" / "redeploy-handover-coordinator.yml" +) + + +def main(): + deployer = Deployer.from_yaml(filepath=CONSTRUCTOR_PARAMS_FILEPATH, verify=VERIFY) + + # NuCo Multisig owns contract so it must do the proxy upgrade + coordinator_implementation = deployer.deploy(project.HandoverCoordinator) + + deployments = [ + coordinator_implementation, + ] + + deployer.finalize(deployments=deployments) diff --git a/scripts/request_handover.py b/scripts/request_handover.py index f2786b939..a6b72f260 100644 --- a/scripts/request_handover.py +++ b/scripts/request_handover.py @@ -56,7 +56,7 @@ def cli( click.echo(f"Connected to {network.name} network.") # Get the contracts from the registry - coordinator_contract = registry.get_contract(domain=domain, contract_name="Coordinator") + coordinator_contract = registry.get_contract(domain=domain, contract_name="HandoverCoordinator") # Issue handover request click.echo( diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py index 84276179e..c4cf0ca79 100644 --- a/tests/test_coordinator.py +++ b/tests/test_coordinator.py @@ -2,26 +2,17 @@ import ape import pytest -from ape.utils import ZERO_ADDRESS from eth_account import Account from hexbytes import HexBytes from web3 import Web3 -from tests.conftest import ( - G1_SIZE, - G2_SIZE, - HandoverState, - RitualState, - gen_public_key, - generate_transcript, -) +from tests.conftest import G1_SIZE, G2_SIZE, RitualState, gen_public_key, generate_transcript TIMEOUT = 1000 MAX_DKG_SIZE = 31 FEE_RATE = 42 ERC20_SUPPLY = 10**24 DURATION = 48 * 60 * 60 -HANDOVER_TIMEOUT = 2000 @pytest.fixture(scope="module") @@ -71,7 +62,6 @@ def coordinator(project, deployer, application, oz_dependency): contract = project.Coordinator.deploy( application.address, TIMEOUT, - HANDOVER_TIMEOUT, sender=deployer, ) @@ -105,7 +95,6 @@ def 
global_allow_list(project, deployer, coordinator): def test_initial_parameters(coordinator): assert coordinator.maxDkgSize() == MAX_DKG_SIZE assert coordinator.dkgTimeout() == TIMEOUT - assert coordinator.handoverTimeout() == HANDOVER_TIMEOUT assert coordinator.numberOfRituals() == 0 @@ -627,42 +616,8 @@ def test_post_aggregation_fails( # coordinator.withdrawAllTokens(erc20.address, sender=treasury) -def activate_ritual(nodes, coordinator, ritualID): - size = len(nodes) - threshold = coordinator.getThresholdForRitualSize(size) - transcript = generate_transcript(size, threshold) - - for node in nodes: - coordinator.publishTranscript(ritualID, transcript, sender=node) - - aggregated = transcript # has the same size as transcript - decryption_request_static_keys = [os.urandom(42) for _ in nodes] - dkg_public_key = (os.urandom(32), os.urandom(16)) - for i, node in enumerate(nodes): - coordinator.postAggregation( - ritualID, aggregated, dkg_public_key, decryption_request_static_keys[i], sender=node - ) - return threshold, aggregated - - -def setup_node(node, coordinator, application, deployer): - application.updateOperator(node, node, sender=deployer) - application.updateAuthorization(node, 42, sender=deployer) - public_key = gen_public_key() - coordinator.setProviderPublicKey(public_key, sender=node) - - -def test_handover_request( - coordinator, - nodes, - initiator, - erc20, - fee_model, - accounts, - deployer, - global_allow_list, - application, - chain, +def test_update_participant( + coordinator, nodes, initiator, erc20, fee_model, deployer, global_allow_list ): initiate_ritual( coordinator=coordinator, @@ -673,349 +628,42 @@ def test_handover_request( allow_logic=global_allow_list, ) - ritualID = 0 - departing_node = nodes[10] - incoming_node = accounts[MAX_DKG_SIZE + 1] - handover_supervisor = accounts[MAX_DKG_SIZE] - - coordinator.grantRole( - coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer - ) - - with ape.reverts(): - 
coordinator.handoverRequest(ritualID, departing_node, incoming_node, sender=deployer) - - with ape.reverts("Ritual is not active"): - coordinator.handoverRequest( - ritualID, departing_node, incoming_node, sender=handover_supervisor - ) - - activate_ritual(nodes, coordinator, ritualID) - - handover_key = coordinator.getHandoverKey(ritualID, departing_node) - handover = coordinator.handovers(handover_key) - assert handover.requestTimestamp == 0 - assert handover.incomingProvider == ZERO_ADDRESS - assert len(handover.blindedShare) == 0 - assert len(handover.transcript) == 0 - assert len(handover.decryptionRequestStaticKey) == 0 - - assert coordinator.getHandoverState(ritualID, departing_node) == HandoverState.NON_INITIATED - - with ape.reverts("Departing node must be a participant"): - coordinator.handoverRequest( - ritualID, handover_supervisor, incoming_node, sender=handover_supervisor - ) - with ape.reverts("Incoming node cannot be a participant"): - coordinator.handoverRequest(ritualID, departing_node, nodes[0], sender=handover_supervisor) - with ape.reverts("Incoming provider has not set public key"): - coordinator.handoverRequest( - ritualID, departing_node, incoming_node, sender=handover_supervisor - ) - - setup_node(incoming_node, coordinator, application, deployer) - - tx = coordinator.handoverRequest( - ritualID, departing_node, incoming_node, sender=handover_supervisor - ) - assert ( - coordinator.getHandoverState(ritualID, departing_node) - == HandoverState.HANDOVER_AWAITING_TRANSCRIPT - ) - - timestamp = chain.pending_timestamp - 1 - handover = coordinator.handovers(handover_key) - assert handover.requestTimestamp == timestamp - assert handover.incomingProvider == incoming_node - assert len(handover.blindedShare) == 0 - assert len(handover.transcript) == 0 - assert len(handover.decryptionRequestStaticKey) == 0 - - events = [event for event in tx.events if event.event_name == "HandoverRequest"] - assert events == [ - coordinator.HandoverRequest( - 
ritualId=ritualID, - departingParticipant=departing_node, - incomingParticipant=incoming_node, - ) - ] - - with ape.reverts("Handover already requested"): - coordinator.handoverRequest( - ritualID, departing_node, incoming_node, sender=handover_supervisor - ) - coordinator.postHandoverTranscript( - ritualID, departing_node, os.urandom(42), os.urandom(42), sender=incoming_node - ) - - with ape.reverts("Handover already requested"): - coordinator.handoverRequest( - ritualID, departing_node, incoming_node, sender=handover_supervisor - ) - - coordinator.postBlindedShare(ritualID, os.urandom(G2_SIZE), sender=departing_node) - assert ( - coordinator.getHandoverState(ritualID, departing_node) - == HandoverState.HANDOVER_AWAITING_FINALIZATION - ) - - with ape.reverts("Handover already requested"): - coordinator.handoverRequest( - ritualID, departing_node, incoming_node, sender=handover_supervisor - ) - - chain.pending_timestamp += HANDOVER_TIMEOUT - assert coordinator.getHandoverState(ritualID, departing_node) == HandoverState.HANDOVER_TIMEOUT - - incoming_node = accounts[MAX_DKG_SIZE + 2] - setup_node(incoming_node, coordinator, application, deployer) - - tx = coordinator.handoverRequest( - ritualID, departing_node, incoming_node, sender=handover_supervisor - ) - assert ( - coordinator.getHandoverState(ritualID, departing_node) - == HandoverState.HANDOVER_AWAITING_TRANSCRIPT - ) - - timestamp = chain.pending_timestamp - 1 - handover = coordinator.handovers(handover_key) - assert handover.requestTimestamp == timestamp - assert handover.incomingProvider == incoming_node - assert len(handover.blindedShare) == 0 - assert len(handover.transcript) == 0 - assert len(handover.decryptionRequestStaticKey) == 0 - - events = [event for event in tx.events if event.event_name == "HandoverRequest"] - assert events == [ - coordinator.HandoverRequest( - ritualId=ritualID, - departingParticipant=departing_node, - incomingParticipant=incoming_node, - ) - ] - - -def 
test_post_handover_transcript( - coordinator, - nodes, - initiator, - erc20, - fee_model, - accounts, - deployer, - global_allow_list, - application, - chain, -): - initiate_ritual( - coordinator=coordinator, - fee_model=fee_model, - erc20=erc20, - authority=initiator, - nodes=nodes, - allow_logic=global_allow_list, - ) - - ritualID = 0 - departing_node = nodes[10] - incoming_node = accounts[MAX_DKG_SIZE + 1] - handover_supervisor = accounts[MAX_DKG_SIZE] + coordinator.initializeHandoverCoordinator(deployer.address, sender=deployer) + ritual_id = 0 + size = len(nodes) + threshold = coordinator.getThresholdForRitualSize(size) + transcript = generate_transcript(size, threshold) decryption_request_static_key = os.urandom(42) - handover_transcript = os.urandom(42) - - with ape.reverts("Ritual is not active"): - coordinator.postHandoverTranscript( - ritualID, - departing_node, - handover_transcript, - decryption_request_static_key, - sender=incoming_node, - ) - - coordinator.grantRole( - coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer - ) - - activate_ritual(nodes, coordinator, ritualID) - setup_node(incoming_node, coordinator, application, deployer) - - with ape.reverts("Parameters can't be empty"): - coordinator.postHandoverTranscript( - ritualID, departing_node, bytes(), decryption_request_static_key, sender=incoming_node - ) - - with ape.reverts("Invalid length for decryption request static key"): - coordinator.postHandoverTranscript( - ritualID, departing_node, handover_transcript, os.urandom(41), sender=incoming_node - ) - - with ape.reverts("Not waiting for transcript"): - coordinator.postHandoverTranscript( - ritualID, - departing_node, - handover_transcript, - decryption_request_static_key, - sender=incoming_node, - ) - - coordinator.handoverRequest(ritualID, departing_node, incoming_node, sender=handover_supervisor) - with ape.reverts("Wrong incoming provider"): - coordinator.postHandoverTranscript( - ritualID, - departing_node, - 
handover_transcript, + with ape.reverts("Caller must be the handover coordinator"): + coordinator.updateParticipant( + ritual_id, + nodes[0], + deployer.address, + True, + transcript, decryption_request_static_key, - sender=departing_node, + sender=initiator, ) - assert ( - coordinator.getHandoverState(ritualID, departing_node) - == HandoverState.HANDOVER_AWAITING_TRANSCRIPT - ) - tx = coordinator.postHandoverTranscript( - ritualID, - departing_node, - handover_transcript, + coordinator.updateParticipant( + ritual_id, + nodes[0], + deployer.address, + True, + transcript, decryption_request_static_key, - sender=incoming_node, - ) - assert ( - coordinator.getHandoverState(ritualID, departing_node) - == HandoverState.HANDOVER_AWAITING_BLINDED_SHARE - ) - handover_key = coordinator.getHandoverKey(ritualID, departing_node) - handover = coordinator.handovers(handover_key) - assert handover.incomingProvider == incoming_node - assert handover.transcript == handover_transcript - assert handover.decryptionRequestStaticKey == decryption_request_static_key - - events = [event for event in tx.events if event.event_name == "HandoverTranscriptPosted"] - assert events == [ - coordinator.HandoverTranscriptPosted( - ritualId=ritualID, - departingParticipant=departing_node, - incomingParticipant=incoming_node, - ) - ] - - with ape.reverts("Not waiting for transcript"): - coordinator.postHandoverTranscript( - ritualID, - departing_node, - handover_transcript, - decryption_request_static_key, - sender=incoming_node, - ) - - chain.pending_timestamp += HANDOVER_TIMEOUT - assert coordinator.getHandoverState(ritualID, departing_node) == HandoverState.HANDOVER_TIMEOUT - - with ape.reverts("Not waiting for transcript"): - coordinator.postHandoverTranscript( - ritualID, - departing_node, - handover_transcript, - decryption_request_static_key, - sender=incoming_node, - ) - - -def test_post_blinded_share( - coordinator, - nodes, - initiator, - erc20, - fee_model, - accounts, - deployer, - 
global_allow_list, - application, - chain, -): - initiate_ritual( - coordinator=coordinator, - fee_model=fee_model, - erc20=erc20, - authority=initiator, - nodes=nodes, - allow_logic=global_allow_list, - ) - - ritualID = 0 - departing_node = nodes[10] - incoming_node = accounts[MAX_DKG_SIZE + 1] - handover_supervisor = accounts[MAX_DKG_SIZE] - blinded_share = os.urandom(G2_SIZE) - - with ape.reverts("Ritual is not active"): - coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) - - coordinator.grantRole( - coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer - ) - - activate_ritual(nodes, coordinator, ritualID) - setup_node(incoming_node, coordinator, application, deployer) - - with ape.reverts("Not waiting for blinded share"): - coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) - - coordinator.handoverRequest(ritualID, departing_node, incoming_node, sender=handover_supervisor) - - with ape.reverts("Not waiting for blinded share"): - coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) - - coordinator.postHandoverTranscript( - ritualID, departing_node, os.urandom(42), os.urandom(42), sender=incoming_node + sender=deployer, ) + p = coordinator.getParticipantFromProvider(ritual_id, deployer.address) + assert p.provider == deployer.address + assert p.aggregated is True + assert p.transcript == transcript + assert p.decryptionRequestStaticKey == decryption_request_static_key - with ape.reverts("Wrong size of blinded share"): - coordinator.postBlindedShare(ritualID, os.urandom(16), sender=departing_node) - assert ( - coordinator.getHandoverState(ritualID, departing_node) - == HandoverState.HANDOVER_AWAITING_BLINDED_SHARE - ) - tx = coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) - assert ( - coordinator.getHandoverState(ritualID, departing_node) - == HandoverState.HANDOVER_AWAITING_FINALIZATION - ) - handover_key = 
coordinator.getHandoverKey(ritualID, departing_node) - handover = coordinator.handovers(handover_key) - assert handover.incomingProvider == incoming_node - assert handover.blindedShare == blinded_share - assert len(handover.transcript) != 0 - assert len(handover.decryptionRequestStaticKey) != 0 - - events = [event for event in tx.events if event.event_name == "BlindedSharePosted"] - assert events == [ - coordinator.BlindedSharePosted(ritualId=ritualID, departingParticipant=departing_node) - ] - - with ape.reverts("Not waiting for blinded share"): - coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) - - chain.pending_timestamp += HANDOVER_TIMEOUT - assert coordinator.getHandoverState(ritualID, departing_node) == HandoverState.HANDOVER_TIMEOUT - - with ape.reverts("Not waiting for blinded share"): - coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) - - -def test_cancel_handover( - coordinator, - nodes, - initiator, - erc20, - fee_model, - accounts, - deployer, - global_allow_list, - application, - chain, +def test_replace_aggregated_transcript_bytes( + coordinator, nodes, initiator, erc20, fee_model, deployer, global_allow_list ): initiate_ritual( coordinator=coordinator, @@ -1026,195 +674,35 @@ def test_cancel_handover( allow_logic=global_allow_list, ) - ritualID = 0 - departing_node = nodes[10] - incoming_node = accounts[MAX_DKG_SIZE + 1] - handover_supervisor = accounts[MAX_DKG_SIZE] + coordinator.initializeHandoverCoordinator(deployer.address, sender=deployer) + ritual_id = 0 + size = len(nodes) + threshold = coordinator.getThresholdForRitualSize(size) + transcript = generate_transcript(size, threshold) blinded_share = os.urandom(G2_SIZE) - with ape.reverts(): - coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) - - coordinator.grantRole( - coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer - ) - - with ape.reverts("Handover not requested"): - 
coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) - - activate_ritual(nodes, coordinator, ritualID) - setup_node(incoming_node, coordinator, application, deployer) - - with ape.reverts("Handover not requested"): - coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) - - coordinator.handoverRequest(ritualID, departing_node, incoming_node, sender=handover_supervisor) - - tx = coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) - assert coordinator.getHandoverState(ritualID, departing_node) == HandoverState.NON_INITIATED - - handover_key = coordinator.getHandoverKey(ritualID, departing_node) - handover = coordinator.handovers(handover_key) - assert handover.requestTimestamp == 0 - assert handover.incomingProvider == ZERO_ADDRESS - assert len(handover.blindedShare) == 0 - assert len(handover.transcript) == 0 - assert len(handover.decryptionRequestStaticKey) == 0 + for node in nodes: + coordinator.publishTranscript(ritual_id, transcript, sender=node) - events = [event for event in tx.events if event.event_name == "HandoverCanceled"] - assert events == [ - coordinator.HandoverCanceled( - ritualId=ritualID, - departingParticipant=departing_node, - incomingParticipant=incoming_node, + aggregated = transcript # has the same size as transcript + decryption_request_static_keys = [os.urandom(42) for _ in nodes] + dkg_public_key = (os.urandom(32), os.urandom(16)) + for i, node in enumerate(nodes): + coordinator.postAggregation( + ritual_id, aggregated, dkg_public_key, decryption_request_static_keys[i], sender=node ) - ] - coordinator.handoverRequest(ritualID, departing_node, incoming_node, sender=handover_supervisor) - coordinator.postHandoverTranscript( - ritualID, departing_node, os.urandom(42), os.urandom(42), sender=incoming_node - ) - coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) - assert coordinator.getHandoverState(ritualID, departing_node) == 
HandoverState.NON_INITIATED - - coordinator.handoverRequest(ritualID, departing_node, incoming_node, sender=handover_supervisor) - coordinator.postHandoverTranscript( - ritualID, departing_node, os.urandom(42), os.urandom(42), sender=incoming_node - ) - coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) - - tx = coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) - assert coordinator.getHandoverState(ritualID, departing_node) == HandoverState.NON_INITIATED - - handover_key = coordinator.getHandoverKey(ritualID, departing_node) - handover = coordinator.handovers(handover_key) - assert handover.requestTimestamp == 0 - assert handover.incomingProvider == ZERO_ADDRESS - assert len(handover.blindedShare) == 0 - assert len(handover.transcript) == 0 - assert len(handover.decryptionRequestStaticKey) == 0 - - events = [event for event in tx.events if event.event_name == "HandoverCanceled"] - assert events == [ - coordinator.HandoverCanceled( - ritualId=ritualID, - departingParticipant=departing_node, - incomingParticipant=incoming_node, + participant_index = 1 + index = 32 + participant_index * G2_SIZE + threshold * G1_SIZE + with ape.reverts("Caller must be the handover coordinator"): + coordinator.replaceAggregatedTranscriptBytes( + ritual_id, nodes[1], blinded_share, index, sender=initiator ) - ] - - coordinator.handoverRequest(ritualID, departing_node, incoming_node, sender=handover_supervisor) - chain.pending_timestamp += HANDOVER_TIMEOUT + 1 - assert coordinator.getHandoverState(ritualID, departing_node) == HandoverState.HANDOVER_TIMEOUT - coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) - assert coordinator.getHandoverState(ritualID, departing_node) == HandoverState.NON_INITIATED - - -@pytest.mark.parametrize("participant_index", range(0, MAX_DKG_SIZE, 2)) -def test_finalize_handover( - coordinator, - nodes, - initiator, - erc20, - fee_model, - accounts, - deployer, - 
global_allow_list, - application, - participant_index, -): - initiate_ritual( - coordinator=coordinator, - fee_model=fee_model, - erc20=erc20, - authority=initiator, - nodes=nodes, - allow_logic=global_allow_list, - ) - - ritualID = 0 - departing_node = nodes[participant_index] - incoming_node = accounts[MAX_DKG_SIZE + 1] - handover_supervisor = accounts[MAX_DKG_SIZE] - blinded_share = os.urandom(G2_SIZE) - - with ape.reverts(): - coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) - - coordinator.grantRole( - coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer - ) - - threshold, aggregated = activate_ritual(nodes, coordinator, ritualID) - setup_node(incoming_node, coordinator, application, deployer) - - with ape.reverts("Not waiting for finalization"): - coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) - - coordinator.handoverRequest(ritualID, departing_node, incoming_node, sender=handover_supervisor) - - with ape.reverts("Not waiting for finalization"): - coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) - decryption_request_static_key = os.urandom(42) - coordinator.postHandoverTranscript( - ritualID, - departing_node, - os.urandom(42), - decryption_request_static_key, - sender=incoming_node, - ) - - with ape.reverts("Not waiting for finalization"): - coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) - coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) - assert ( - coordinator.getHandoverState(ritualID, departing_node) - == HandoverState.HANDOVER_AWAITING_FINALIZATION + coordinator.replaceAggregatedTranscriptBytes( + ritual_id, nodes[1], blinded_share, index, sender=deployer ) - - assert not application.stakingProviderReleased(departing_node) - tx = coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) - assert coordinator.getHandoverState(ritualID, 
departing_node) == HandoverState.NON_INITIATED - assert application.stakingProviderReleased(departing_node) - - events = [event for event in tx.events if event.event_name == "HandoverFinalized"] - assert events == [ - coordinator.HandoverFinalized( - ritualId=ritualID, - departingParticipant=departing_node, - incomingParticipant=incoming_node, - ) - ] - - handover_key = coordinator.getHandoverKey(ritualID, departing_node) - handover = coordinator.handovers(handover_key) - assert handover.requestTimestamp == 0 - assert handover.incomingProvider == ZERO_ADDRESS - assert len(handover.blindedShare) == 0 - assert len(handover.transcript) == 0 - assert len(handover.decryptionRequestStaticKey) == 0 - - with ape.reverts("Participant not part of ritual"): - coordinator.getParticipantFromProvider(ritualID, departing_node) - - p = coordinator.getParticipantFromProvider(ritualID, incoming_node) - assert p.provider == incoming_node - assert p.aggregated is True - assert len(p.transcript) == 0 - assert p.decryptionRequestStaticKey == decryption_request_static_key - - index = 32 + participant_index * G2_SIZE + threshold * G1_SIZE aggregated = bytearray(aggregated) aggregated[index : index + G2_SIZE] = blinded_share aggregated = bytes(aggregated) - assert coordinator.rituals(ritualID).aggregatedTranscript == aggregated - - events = [event for event in tx.events if event.event_name == "AggregationPosted"] - assert events == [ - coordinator.AggregationPosted( - ritualId=ritualID, - node=incoming_node, - aggregatedTranscriptDigest=Web3.keccak(aggregated), - ) - ] + assert coordinator.rituals(ritual_id).aggregatedTranscript == aggregated diff --git a/tests/test_global_allow_list.py b/tests/test_global_allow_list.py index 0d7eabd98..9afa3376b 100644 --- a/tests/test_global_allow_list.py +++ b/tests/test_global_allow_list.py @@ -61,7 +61,6 @@ def coordinator(project, deployer, application, oz_dependency): contract = project.Coordinator.deploy( application.address, TIMEOUT, - 0, 
sender=deployer, ) diff --git a/tests/test_handover_coordinator.py b/tests/test_handover_coordinator.py new file mode 100644 index 000000000..b04f02ead --- /dev/null +++ b/tests/test_handover_coordinator.py @@ -0,0 +1,780 @@ +import os + +import ape +import pytest +from ape.utils import ZERO_ADDRESS +from web3 import Web3 + +from tests.conftest import G1_SIZE, G2_SIZE, HandoverState, gen_public_key, generate_transcript + +TIMEOUT = 1000 +MAX_DKG_SIZE = 31 +FEE_RATE = 42 +ERC20_SUPPLY = 10**24 +DURATION = 48 * 60 * 60 +HANDOVER_TIMEOUT = 2000 + + +@pytest.fixture(scope="module") +def nodes(accounts): + return sorted(accounts[:MAX_DKG_SIZE], key=lambda x: x.address.lower()) + + +@pytest.fixture(scope="module") +def initiator(accounts): + initiator_index = MAX_DKG_SIZE + 1 + assert len(accounts) >= initiator_index + return accounts[initiator_index] + + +@pytest.fixture(scope="module") +def deployer(accounts): + deployer_index = MAX_DKG_SIZE + 2 + assert len(accounts) >= deployer_index + return accounts[deployer_index] + + +@pytest.fixture(scope="module") +def treasury(accounts): + treasury_index = MAX_DKG_SIZE + 3 + assert len(accounts) >= treasury_index + return accounts[treasury_index] + + +@pytest.fixture() +def application(project, deployer, nodes): + contract = project.ChildApplicationForCoordinatorMock.deploy(sender=deployer) + for n in nodes: + contract.updateOperator(n, n, sender=deployer) + contract.updateAuthorization(n, 42, sender=deployer) + return contract + + +@pytest.fixture() +def erc20(project, initiator): + token = project.TestToken.deploy(ERC20_SUPPLY, sender=initiator) + return token + + +@pytest.fixture() +def coordinator(project, deployer, application, oz_dependency): + admin = deployer + contract = project.Coordinator.deploy( + application.address, + TIMEOUT, + sender=deployer, + ) + + encoded_initializer_function = contract.initialize.encode_input(MAX_DKG_SIZE, admin) + proxy = oz_dependency.TransparentUpgradeableProxy.deploy( + 
contract.address, + deployer, + encoded_initializer_function, + sender=deployer, + ) + proxy_contract = project.Coordinator.at(proxy.address) + return proxy_contract + + +@pytest.fixture() +def handover_coordinator(project, deployer, application, coordinator, oz_dependency): + admin = deployer + contract = project.HandoverCoordinator.deploy( + application.address, + coordinator.address, + HANDOVER_TIMEOUT, + sender=deployer, + ) + + encoded_initializer_function = contract.initialize.encode_input(admin) + proxy = oz_dependency.TransparentUpgradeableProxy.deploy( + contract.address, + deployer, + encoded_initializer_function, + sender=deployer, + ) + proxy_contract = project.HandoverCoordinator.at(proxy.address) + coordinator.initializeHandoverCoordinator(proxy.address, sender=deployer) + return proxy_contract + + +@pytest.fixture() +def fee_model(project, deployer, coordinator, erc20, treasury): + contract = project.FlatRateFeeModel.deploy( + coordinator.address, erc20.address, FEE_RATE, sender=deployer + ) + coordinator.grantRole(coordinator.FEE_MODEL_MANAGER_ROLE(), treasury, sender=deployer) + coordinator.approveFeeModel(contract.address, sender=treasury) + coordinator.grantRole(coordinator.FEE_MODEL_MANAGER_ROLE(), treasury, sender=deployer) + return contract + + +@pytest.fixture() +def global_allow_list(project, deployer, coordinator): + contract = project.GlobalAllowList.deploy(coordinator.address, sender=deployer) + return contract + + +def test_initial_parameters(handover_coordinator): + assert handover_coordinator.handoverTimeout() == HANDOVER_TIMEOUT + + +def initiate_ritual(coordinator, fee_model, erc20, authority, nodes, allow_logic): + for node in nodes: + public_key = gen_public_key() + coordinator.setProviderPublicKey(public_key, sender=node) + + cost = fee_model.getRitualCost(len(nodes), DURATION) + erc20.approve(fee_model.address, cost, sender=authority) + tx = coordinator.initiateRitual( + fee_model, nodes, authority, DURATION, allow_logic.address, 
sender=authority + ) + return authority, tx + + +def activate_ritual(nodes, coordinator, ritualID): + size = len(nodes) + threshold = coordinator.getThresholdForRitualSize(size) + transcript = generate_transcript(size, threshold) + + for node in nodes: + coordinator.publishTranscript(ritualID, transcript, sender=node) + + aggregated = transcript # has the same size as transcript + decryption_request_static_keys = [os.urandom(42) for _ in nodes] + dkg_public_key = (os.urandom(32), os.urandom(16)) + for i, node in enumerate(nodes): + coordinator.postAggregation( + ritualID, aggregated, dkg_public_key, decryption_request_static_keys[i], sender=node + ) + return threshold, aggregated + + +def setup_node(node, coordinator, application, deployer): + application.updateOperator(node, node, sender=deployer) + application.updateAuthorization(node, 42, sender=deployer) + public_key = gen_public_key() + coordinator.setProviderPublicKey(public_key, sender=node) + + +def test_handover_request( + coordinator, + handover_coordinator, + nodes, + initiator, + erc20, + fee_model, + accounts, + deployer, + global_allow_list, + application, + chain, +): + initiate_ritual( + coordinator=coordinator, + fee_model=fee_model, + erc20=erc20, + authority=initiator, + nodes=nodes, + allow_logic=global_allow_list, + ) + + ritualID = 0 + departing_node = nodes[10] + incoming_node = accounts[MAX_DKG_SIZE + 1] + handover_supervisor = accounts[MAX_DKG_SIZE] + + handover_coordinator.grantRole( + handover_coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer + ) + + with ape.reverts(): + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=deployer + ) + + with ape.reverts("Ritual is not active"): + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + + activate_ritual(nodes, coordinator, ritualID) + + handover_key = handover_coordinator.getHandoverKey(ritualID, departing_node) + 
handover = handover_coordinator.handovers(handover_key) + assert handover.requestTimestamp == 0 + assert handover.incomingProvider == ZERO_ADDRESS + assert len(handover.blindedShare) == 0 + assert len(handover.transcript) == 0 + assert len(handover.decryptionRequestStaticKey) == 0 + + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.NON_INITIATED + ) + + with ape.reverts("Departing node must be a participant"): + handover_coordinator.handoverRequest( + ritualID, handover_supervisor, incoming_node, sender=handover_supervisor + ) + with ape.reverts("Incoming node cannot be a participant"): + handover_coordinator.handoverRequest( + ritualID, departing_node, nodes[0], sender=handover_supervisor + ) + with ape.reverts("Incoming provider has not set public key"): + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + + setup_node(incoming_node, coordinator, application, deployer) + + tx = handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_AWAITING_TRANSCRIPT + ) + + timestamp = chain.pending_timestamp - 1 + handover = handover_coordinator.handovers(handover_key) + assert handover.requestTimestamp == timestamp + assert handover.incomingProvider == incoming_node + assert len(handover.blindedShare) == 0 + assert len(handover.transcript) == 0 + assert len(handover.decryptionRequestStaticKey) == 0 + + events = [event for event in tx.events if event.event_name == "HandoverRequest"] + assert events == [ + handover_coordinator.HandoverRequest( + ritualId=ritualID, + departingParticipant=departing_node, + incomingParticipant=incoming_node, + ) + ] + + with ape.reverts("Handover already requested"): + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + 
handover_coordinator.postHandoverTranscript( + ritualID, departing_node, os.urandom(42), os.urandom(42), sender=incoming_node + ) + + with ape.reverts("Handover already requested"): + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + + handover_coordinator.postBlindedShare(ritualID, os.urandom(G2_SIZE), sender=departing_node) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_AWAITING_FINALIZATION + ) + + with ape.reverts("Handover already requested"): + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + + chain.pending_timestamp += HANDOVER_TIMEOUT + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_TIMEOUT + ) + + incoming_node = accounts[MAX_DKG_SIZE + 2] + setup_node(incoming_node, coordinator, application, deployer) + + tx = handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_AWAITING_TRANSCRIPT + ) + + timestamp = chain.pending_timestamp - 1 + handover = handover_coordinator.handovers(handover_key) + assert handover.requestTimestamp == timestamp + assert handover.incomingProvider == incoming_node + assert len(handover.blindedShare) == 0 + assert len(handover.transcript) == 0 + assert len(handover.decryptionRequestStaticKey) == 0 + + events = [event for event in tx.events if event.event_name == "HandoverRequest"] + assert events == [ + handover_coordinator.HandoverRequest( + ritualId=ritualID, + departingParticipant=departing_node, + incomingParticipant=incoming_node, + ) + ] + + +def test_post_handover_transcript( + coordinator, + handover_coordinator, + nodes, + initiator, + erc20, + fee_model, + accounts, + deployer, + global_allow_list, + application, + chain, +): 
+ initiate_ritual( + coordinator=coordinator, + fee_model=fee_model, + erc20=erc20, + authority=initiator, + nodes=nodes, + allow_logic=global_allow_list, + ) + + ritualID = 0 + departing_node = nodes[10] + incoming_node = accounts[MAX_DKG_SIZE + 1] + handover_supervisor = accounts[MAX_DKG_SIZE] + decryption_request_static_key = os.urandom(42) + handover_transcript = os.urandom(42) + + with ape.reverts("Ritual is not active"): + handover_coordinator.postHandoverTranscript( + ritualID, + departing_node, + handover_transcript, + decryption_request_static_key, + sender=incoming_node, + ) + + handover_coordinator.grantRole( + handover_coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer + ) + + activate_ritual(nodes, coordinator, ritualID) + setup_node(incoming_node, coordinator, application, deployer) + + with ape.reverts("Parameters can't be empty"): + handover_coordinator.postHandoverTranscript( + ritualID, departing_node, bytes(), decryption_request_static_key, sender=incoming_node + ) + + with ape.reverts("Invalid length for decryption request static key"): + handover_coordinator.postHandoverTranscript( + ritualID, departing_node, handover_transcript, os.urandom(41), sender=incoming_node + ) + + with ape.reverts("Not waiting for transcript"): + handover_coordinator.postHandoverTranscript( + ritualID, + departing_node, + handover_transcript, + decryption_request_static_key, + sender=incoming_node, + ) + + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + + with ape.reverts("Wrong incoming provider"): + handover_coordinator.postHandoverTranscript( + ritualID, + departing_node, + handover_transcript, + decryption_request_static_key, + sender=departing_node, + ) + + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_AWAITING_TRANSCRIPT + ) + tx = handover_coordinator.postHandoverTranscript( + ritualID, + departing_node, + 
handover_transcript, + decryption_request_static_key, + sender=incoming_node, + ) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_AWAITING_BLINDED_SHARE + ) + handover_key = handover_coordinator.getHandoverKey(ritualID, departing_node) + handover = handover_coordinator.handovers(handover_key) + assert handover.incomingProvider == incoming_node + assert handover.transcript == handover_transcript + assert handover.decryptionRequestStaticKey == decryption_request_static_key + + events = [event for event in tx.events if event.event_name == "HandoverTranscriptPosted"] + assert events == [ + handover_coordinator.HandoverTranscriptPosted( + ritualId=ritualID, + departingParticipant=departing_node, + incomingParticipant=incoming_node, + ) + ] + + with ape.reverts("Not waiting for transcript"): + handover_coordinator.postHandoverTranscript( + ritualID, + departing_node, + handover_transcript, + decryption_request_static_key, + sender=incoming_node, + ) + + chain.pending_timestamp += HANDOVER_TIMEOUT + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_TIMEOUT + ) + + with ape.reverts("Not waiting for transcript"): + handover_coordinator.postHandoverTranscript( + ritualID, + departing_node, + handover_transcript, + decryption_request_static_key, + sender=incoming_node, + ) + + +def test_post_blinded_share( + coordinator, + handover_coordinator, + nodes, + initiator, + erc20, + fee_model, + accounts, + deployer, + global_allow_list, + application, + chain, +): + initiate_ritual( + coordinator=coordinator, + fee_model=fee_model, + erc20=erc20, + authority=initiator, + nodes=nodes, + allow_logic=global_allow_list, + ) + + ritualID = 0 + departing_node = nodes[10] + incoming_node = accounts[MAX_DKG_SIZE + 1] + handover_supervisor = accounts[MAX_DKG_SIZE] + blinded_share = os.urandom(G2_SIZE) + + with ape.reverts("Ritual is not active"): + 
handover_coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) + + handover_coordinator.grantRole( + handover_coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer + ) + + activate_ritual(nodes, coordinator, ritualID) + setup_node(incoming_node, coordinator, application, deployer) + + with ape.reverts("Not waiting for blinded share"): + handover_coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) + + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + + with ape.reverts("Not waiting for blinded share"): + handover_coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) + + handover_coordinator.postHandoverTranscript( + ritualID, departing_node, os.urandom(42), os.urandom(42), sender=incoming_node + ) + + with ape.reverts("Wrong size of blinded share"): + handover_coordinator.postBlindedShare(ritualID, os.urandom(16), sender=departing_node) + + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_AWAITING_BLINDED_SHARE + ) + tx = handover_coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_AWAITING_FINALIZATION + ) + handover_key = handover_coordinator.getHandoverKey(ritualID, departing_node) + handover = handover_coordinator.handovers(handover_key) + assert handover.incomingProvider == incoming_node + assert handover.blindedShare == blinded_share + assert len(handover.transcript) != 0 + assert len(handover.decryptionRequestStaticKey) != 0 + + events = [event for event in tx.events if event.event_name == "BlindedSharePosted"] + assert events == [ + handover_coordinator.BlindedSharePosted( + ritualId=ritualID, departingParticipant=departing_node + ) + ] + + with ape.reverts("Not waiting for blinded share"): + 
handover_coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) + + chain.pending_timestamp += HANDOVER_TIMEOUT + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_TIMEOUT + ) + + with ape.reverts("Not waiting for blinded share"): + handover_coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) + + +def test_cancel_handover( + coordinator, + handover_coordinator, + nodes, + initiator, + erc20, + fee_model, + accounts, + deployer, + global_allow_list, + application, + chain, +): + initiate_ritual( + coordinator=coordinator, + fee_model=fee_model, + erc20=erc20, + authority=initiator, + nodes=nodes, + allow_logic=global_allow_list, + ) + + ritualID = 0 + departing_node = nodes[10] + incoming_node = accounts[MAX_DKG_SIZE + 1] + handover_supervisor = accounts[MAX_DKG_SIZE] + blinded_share = os.urandom(G2_SIZE) + + with ape.reverts(): + handover_coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) + + handover_coordinator.grantRole( + handover_coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer + ) + + with ape.reverts("Handover not requested"): + handover_coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) + + activate_ritual(nodes, coordinator, ritualID) + setup_node(incoming_node, coordinator, application, deployer) + + with ape.reverts("Handover not requested"): + handover_coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) + + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + + tx = handover_coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.NON_INITIATED + ) + + handover_key = handover_coordinator.getHandoverKey(ritualID, departing_node) + handover = 
handover_coordinator.handovers(handover_key) + assert handover.requestTimestamp == 0 + assert handover.incomingProvider == ZERO_ADDRESS + assert len(handover.blindedShare) == 0 + assert len(handover.transcript) == 0 + assert len(handover.decryptionRequestStaticKey) == 0 + + events = [event for event in tx.events if event.event_name == "HandoverCanceled"] + assert events == [ + handover_coordinator.HandoverCanceled( + ritualId=ritualID, + departingParticipant=departing_node, + incomingParticipant=incoming_node, + ) + ] + + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + handover_coordinator.postHandoverTranscript( + ritualID, departing_node, os.urandom(42), os.urandom(42), sender=incoming_node + ) + handover_coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.NON_INITIATED + ) + + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + handover_coordinator.postHandoverTranscript( + ritualID, departing_node, os.urandom(42), os.urandom(42), sender=incoming_node + ) + handover_coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) + + tx = handover_coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.NON_INITIATED + ) + + handover_key = handover_coordinator.getHandoverKey(ritualID, departing_node) + handover = handover_coordinator.handovers(handover_key) + assert handover.requestTimestamp == 0 + assert handover.incomingProvider == ZERO_ADDRESS + assert len(handover.blindedShare) == 0 + assert len(handover.transcript) == 0 + assert len(handover.decryptionRequestStaticKey) == 0 + + events = [event for event in tx.events if event.event_name == "HandoverCanceled"] + assert events == [ 
+ handover_coordinator.HandoverCanceled( + ritualId=ritualID, + departingParticipant=departing_node, + incomingParticipant=incoming_node, + ) + ] + + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + chain.pending_timestamp += HANDOVER_TIMEOUT + 1 + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_TIMEOUT + ) + handover_coordinator.cancelHandover(ritualID, departing_node, sender=handover_supervisor) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.NON_INITIATED + ) + + +@pytest.mark.parametrize("participant_index", range(0, MAX_DKG_SIZE, 2)) +def test_finalize_handover( + coordinator, + handover_coordinator, + nodes, + initiator, + erc20, + fee_model, + accounts, + deployer, + global_allow_list, + application, + participant_index, +): + initiate_ritual( + coordinator=coordinator, + fee_model=fee_model, + erc20=erc20, + authority=initiator, + nodes=nodes, + allow_logic=global_allow_list, + ) + + ritualID = 0 + departing_node = nodes[participant_index] + incoming_node = accounts[MAX_DKG_SIZE + 1] + handover_supervisor = accounts[MAX_DKG_SIZE] + blinded_share = os.urandom(G2_SIZE) + + with ape.reverts(): + handover_coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) + + handover_coordinator.grantRole( + handover_coordinator.HANDOVER_SUPERVISOR_ROLE(), handover_supervisor, sender=deployer + ) + + threshold, aggregated = activate_ritual(nodes, coordinator, ritualID) + setup_node(incoming_node, coordinator, application, deployer) + + with ape.reverts("Not waiting for finalization"): + handover_coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) + + handover_coordinator.handoverRequest( + ritualID, departing_node, incoming_node, sender=handover_supervisor + ) + + with ape.reverts("Not waiting for finalization"): + 
handover_coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) + decryption_request_static_key = os.urandom(42) + handover_coordinator.postHandoverTranscript( + ritualID, + departing_node, + os.urandom(42), + decryption_request_static_key, + sender=incoming_node, + ) + + with ape.reverts("Not waiting for finalization"): + handover_coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) + + handover_coordinator.postBlindedShare(ritualID, blinded_share, sender=departing_node) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.HANDOVER_AWAITING_FINALIZATION + ) + + assert not application.stakingProviderReleased(departing_node) + tx = handover_coordinator.finalizeHandover(ritualID, departing_node, sender=handover_supervisor) + assert ( + handover_coordinator.getHandoverState(ritualID, departing_node) + == HandoverState.NON_INITIATED + ) + assert application.stakingProviderReleased(departing_node) + + events = [event for event in tx.events if event.event_name == "HandoverFinalized"] + assert events == [ + handover_coordinator.HandoverFinalized( + ritualId=ritualID, + departingParticipant=departing_node, + incomingParticipant=incoming_node, + ) + ] + + handover_key = handover_coordinator.getHandoverKey(ritualID, departing_node) + handover = handover_coordinator.handovers(handover_key) + assert handover.requestTimestamp == 0 + assert handover.incomingProvider == ZERO_ADDRESS + assert len(handover.blindedShare) == 0 + assert len(handover.transcript) == 0 + assert len(handover.decryptionRequestStaticKey) == 0 + + with ape.reverts("Participant not part of ritual"): + coordinator.getParticipantFromProvider(ritualID, departing_node) + + p = coordinator.getParticipantFromProvider(ritualID, incoming_node) + assert p.provider == incoming_node + assert p.aggregated is True + assert len(p.transcript) == 0 + assert p.decryptionRequestStaticKey == decryption_request_static_key + + 
index = 32 + participant_index * G2_SIZE + threshold * G1_SIZE + aggregated = bytearray(aggregated) + aggregated[index : index + G2_SIZE] = blinded_share + aggregated = bytes(aggregated) + assert coordinator.rituals(ritualID).aggregatedTranscript == aggregated + + events = [event for event in tx.events if event.event_name == "AggregationPosted"] + assert events == [ + coordinator.AggregationPosted( + ritualId=ritualID, + node=incoming_node, + aggregatedTranscriptDigest=Web3.keccak(aggregated), + ) + ]