From ee2eb2c805cac9569361a2fddbaaeefc122a05d1 Mon Sep 17 00:00:00 2001
From: Uncle Fatso
Date: Tue, 28 Oct 2025 21:29:50 +0300
Subject: [PATCH] verifier added

Signed-off-by: Uncle Fatso
---
 src/Verifier.sol                      | 304 ++++++++++++++++++++++++++
 src/libraries/GhostEllipticCurves.sol | 300 +++++++++++++++++++++++++
 2 files changed, 604 insertions(+)
 create mode 100644 src/Verifier.sol
 create mode 100644 src/libraries/GhostEllipticCurves.sol

diff --git a/src/Verifier.sol b/src/Verifier.sol
new file mode 100644
index 0000000..f33a214
--- /dev/null
+++ b/src/Verifier.sol
@@ -0,0 +1,304 @@
+pragma solidity ^0.8.0;
+
+import {GhostEllipticCurves} from "./libraries/GhostEllipticCurves.sol";
+
+// Base contract that verifies an aggregated Schnorr-style signature over secp256k1,
+// using the ecrecover precompile as a cheap elliptic-curve multiply-and-add check.
+abstract contract GhostVerifier {
+    uint256 public pubkeyX;     // x coordinate of the aggregated public key
+    uint256 public pubkeyY;     // y coordinate of the aggregated public key
+    uint256 public maxLost;     // max signers allowed to be missing while keeping a 2/3+1 quorum
+    bytes32 public signersHash; // sha256 commitment to the signer-set proof blob
+
+    function _setPubkey(
+        uint256 _pubkeyX,
+        uint256 _pubkeyY,
+        bytes32 _signersHash,
+        uint256 _totalSigners
+    ) internal {
+        require(GhostEllipticCurves.isOnCurve(_pubkeyX, _pubkeyY)); // is pubkey on curve
+        pubkeyX = _pubkeyX;
+        pubkeyY = _pubkeyY;
+        signersHash = _signersHash;
+        maxLost = _totalSigners - (_totalSigners * 2 / 3 + 1); // reverts (0.8 checked math) if _totalSigners == 0
+    }
+
+    // Verifies signature scalar `s` over message `m` against the stored aggregated key.
+    // NOTE(review): the declared return value `res` is never assigned, so callers always
+    // receive 0 — confirm whether the return slot is intentional.
+    function _verify(
+        uint256 s, bytes32 m,
+        bytes calldata nonces,
+        bytes calldata proof,
+        bytes calldata missedIndexes
+    ) internal view returns (uint256 res) {
+        uint256 px = pubkeyX;
+        uint256 py = pubkeyY;
+        uint256 e;
+        uint256 rx;
+        uint256 ry;
+
+        {
+            uint256[] memory rNonces = new uint256[](4);
+            uint256[] memory coefficients = new uint256[](2);
+
+            _reconstructNonces(rNonces, nonces);
+            _computeCoefficients(px, m, rNonces, coefficients);
+
+            (rx, ry) = _aggregateNonce(rNonces, coefficients);
+            e = _computeChallenge(bytes32(rx), bytes32(px), m);
+            (rx, ry) = _restoreAdaptiveNonce(nonces, coefficients);
+        }
+
+        {
+            require(signersHash == sha256(proof)); // check proof correctness
+            (px, py) = _aggregatePubkey(px, py, proof, missedIndexes);
+        }
+
+        // ecrecover-as-ecmul trick: ecrecover(-s*x mod N, v, x, -e*x mod N) returns
+        // the address of the point s*G + e*P, which must equal the nonce commitment.
+        unchecked {
+            s = GhostEllipticCurves.N - mulmod(s, px, GhostEllipticCurves.N);
+            e = GhostEllipticCurves.N - mulmod(e, px, GhostEllipticCurves.N);
+        }
+
+        uint8 parity = py % 2 == 0 ? 27 : 28; // v encodes the parity of the pubkey's y coordinate
+        require(ecrecover(bytes32(s), parity, bytes32(px), bytes32(e)) == _nonceCommitment(rx, ry));
+    }
+
+    // Decodes two nonce pairs from calldata and folds each (main, extra) pair into a
+    // single affine point written into rNonces as [x0, y0, x1, y1].
+    function _reconstructNonces(
+        uint256[] memory rNonces,
+        bytes calldata nonces
+    ) internal pure {
+        require(nonces.length == 256); // nonces length check: 2 pairs x 4 coordinates x 32 bytes
+
+        uint256 i;
+        for (; i < 2;) {
+            uint256 rmx;
+            uint256 rmy;
+            uint256 rex;
+            uint256 rey;
+
+            assembly {
+                let base := add(nonces.offset, mul(i, 128))
+                rmx := calldataload(base)
+                rmy := calldataload(add(base, 32))
+                rex := calldataload(add(base, 64))
+                rey := calldataload(add(base, 96))
+            }
+
+            if (rex != 0 && rey != 0) {
+                (rmx, rmy, rex) = GhostEllipticCurves.projectiveAddMixed(
+                    rmx, rmy, 1, rex, rey
+                ); // rex is reused to hold the projective Z coordinate
+                (rmx, rmy) = GhostEllipticCurves.toAffine(rmx, rmy, rex);
+            }
+            require(GhostEllipticCurves.isOnCurve(rmx, rmy)); // is missing nonce on curve
+
+            rNonces[i*2] = rmx;
+            rNonces[i*2 + 1] = rmy;
+
+            unchecked { ++i; }
+        }
+    }
+
+    // Derives the two tagged-hash binding coefficients from the nonce x-coordinates,
+    // the aggregated key's x and the message.
+    function _computeCoefficients(
+        uint256 px, bytes32 m,
+        uint256[] memory rNonces,
+        uint256[] memory coefficients
+    ) internal pure {
+        uint256 r1x = rNonces[0]; // gas savings
+        uint256 r2x = rNonces[2]; // gas savings
+
+        coefficients[0] = _computeCoefficientB(r1x, r2x, px, m);
+        coefficients[1] = _computeCoefficientD(r1x, r2x, px, m);
+    }
+
+    // R = b*R1 + d*R2, returned in affine coordinates.
+    function _aggregateNonce(
+        uint256[] memory rNonces,
+        uint256[] memory coefficients
+    ) internal pure returns (uint256 rx, uint256 ry) {
+        uint256 rz;
+
+        (rx, ry, rz) = GhostEllipticCurves.mulAddAffinePair(
+            rNonces[0], rNonces[1], coefficients[0],
+            rNonces[2], rNonces[3], coefficients[1]
+        );
+        (rx, ry) = GhostEllipticCurves.toAffine(rx, ry, rz);
+        require(GhostEllipticCurves.isOnCurve(rx, ry)); // is aggnonce on curve
+    }
+
+    // Rebuilds the nonce commitment point from the raw calldata nonces (words 0-1 and 4-5)
+    // using the same binding coefficients as _aggregateNonce.
+    function _restoreAdaptiveNonce(
+        bytes calldata nonces,
+        uint256[] memory coefficients
+    ) internal pure returns (uint256, uint256) {
+        uint256 r1x;
+        uint256 r1y;
+        uint256 r2x;
+        uint256 r2y;
+
+        assembly {
+            let base := nonces.offset
+            r1x := calldataload(add(base, 0))
+            r1y := calldataload(add(base, 32))
+            r2x := calldataload(add(base, 128))
+            r2y := calldataload(add(base, 160))
+        }
+
+        (r1x, r1y, r2x) = GhostEllipticCurves.mulAddAffinePair(
+            r1x, r1y, coefficients[0],
+            r2x, r2y, coefficients[1]
+        ); // r2x is reused to hold the projective Z coordinate
+        (r1x, r1y) = GhostEllipticCurves.toAffine(r1x, r1y, r2x);
+        require(GhostEllipticCurves.isOnCurve(r1x, r1y)); // is restored nonce on curve
+
+        return (r1x, r1y);
+    }
+
+    // Fills `ais` bottom-up; l/r look like children of node i in an implicit 0-based
+    // binary tree (l = 2i+1, r = 2i+2) — TODO confirm the tree layout against the prover.
+    function _computeAggregationCoefficients(
+        uint16 length,
+        bytes32[] memory ais,
+        bytes calldata proof
+    ) internal pure {
+        uint16 i = length;
+        for (; i > 0;) {
+            unchecked { --i; }
+
+            uint256 pix;
+            uint16 l;
+            uint16 r;
+
+            assembly {
+                let base := add(proof.offset, mul(i, 128))
+                pix := calldataload(base)
+
+                l := add(shl(1, i), 1)
+                r := add(l, 1)
+            }
+
+            ais[i] = bytes32(_computeCoefficientKeyAgg(
+                l < length ? ais[l] : bytes32(0x0), // out-of-range child hashes as zero
+                r < length ? ais[r] : bytes32(0x0),
+                bytes32(pix)
+            ));
+        }
+    }
+
+    // Recomputes ai*P0 + P1 + P2 from the first three proof entries and reverts unless the
+    // result matches both (xx, yy) and the point stored at proof words 2-3.
+    // NOTE(review): the declared return value `res` is never assigned — confirm intended.
+    function _checkAggregationCorrectness(
+        uint256 xx, uint256 yy, uint256 ai,
+        bytes calldata proof
+    ) internal pure returns (uint256 res) {
+        uint256 px;
+        uint256 py;
+        uint256 pz;
+
+        uint16 i;
+        for (; i < 3;) {
+            uint256 x;
+            uint256 y;
+
+            assembly {
+                let base := add(proof.offset, mul(i, 128))
+                let j := mul(gt(i, 0), 64) // entries after the first are offset by 64 bytes
+                x := calldataload(add(base, j))
+                y := calldataload(add(base, add(32, j)))
+            }
+
+            if (i == 0) {
+                (px, py, pz) = GhostEllipticCurves.mulAddAffineSingle(x, y, ai);
+            } else {
+                (px, py, pz) = GhostEllipticCurves.projectiveAddMixed(px, py, pz, x, y);
+            }
+
+            unchecked { ++i; }
+        }
+
+        (px, py) = GhostEllipticCurves.toAffine(px, py, pz);
+        uint256 hix;
+        assembly {
+            let base := proof.offset
+            hix := calldataload(add(base, 64))
+            let hiy := calldataload(add(base, 96))
+
+            if iszero(and(eq(xx, px), eq(xx, hix))) {
+                revert(0, 0)
+            }
+
+            if iszero(and(eq(yy, py), eq(yy, hiy))) {
+                revert(0, 0)
+            }
+        }
+    }
+
+    // Subtracts the keys of missed signers (flagged in missedIndexes) from the aggregated
+    // key (px, py) and enforces the maxLost threshold. proof holds 64-byte (x, y) entries.
+    function _aggregatePubkey(
+        uint256 px, uint256 py,
+        bytes calldata proof,
+        bytes calldata missedIndexes
+    ) internal view returns (uint256, uint256) {
+        uint256 pz = 1;
+        uint16 length = uint16(proof.length);
+        length = (length & 63) == 0 ? (length >> 6) : 0; // entry count; 0 unless length is a multiple of 64
+
+        uint16 i = length;
+        uint16 lost;
+
+        for (; i > 0;) {
+            unchecked { --i; }
+
+            uint256 hix;
+            uint256 hiy;
+            bool isMissed;
+
+            assembly {
+                isMissed := byte(0, calldataload(add(missedIndexes.offset, i)))
+            }
+
+            if (isMissed) {
+                assembly {
+                    let base := add(proof.offset, mul(i, 64))
+                    hix := calldataload(base)
+                    hiy := calldataload(add(base, 32))
+                }
+
+                (px, py, pz) = GhostEllipticCurves.projectiveAddMixed(
+                    px, py, pz, hix, GhostEllipticCurves.P - hiy // P - hiy negates the point: subtract the missed key
+                );
+                unchecked { ++lost; }
+            }
+        }
+
+        (px, py) = GhostEllipticCurves.toAffine(px, py, pz);
+        require(GhostEllipticCurves.isOnCurve(px, py)); // is point (Hagg - Hmissing) on curve
+        require(maxLost >= lost); // is enough threshold
+
+        return (px, py);
+    }
+
+    // Ethereum-address-style commitment to a point, comparable with ecrecover's output.
+    function _nonceCommitment(uint256 px, uint256 py) internal pure returns (address) {
+        bytes32 h = keccak256(abi.encodePacked(bytes32(px), bytes32(py)));
+        return address(uint160(uint256(h)));
+    }
+
+    function _computeCoefficientKeyAgg(bytes32 l, bytes32 r, bytes32 s) internal pure returns (uint256) {
+        // Below line is the same as: bytes32 tag = sha256("EXODUS/KeyAggCoef");
+        // Double check it with help of `https://emn178.github.io/online-tools/sha256.html`
+        bytes32 tag = 0x172d284f34ce926b36667a8a8617b9cd810cc61b43d644e7b770d5859a3a3d43;
+
+        bytes32 sHash = sha256(abi.encode(tag, tag, s));
+        return uint256(sha256(abi.encodePacked(tag, tag, l, r, sHash))) % GhostEllipticCurves.N;
+    }
+
+    function _computeCoefficientB(uint256 r1x, uint256 r2x, uint256 px, bytes32 m) internal pure returns (uint256) {
+        // Below line is the same as: bytes32 tag = sha256("EXODUS/nonceB");
+        // Double check it with help of `https://emn178.github.io/online-tools/sha256.html`
+        bytes32 tag = 0x9163366544d6028e0142472531d58dba1006e5a0a528d94030f178ec4ddc5f8e;
+        return uint256(sha256(abi.encodePacked(tag, tag, r1x, r2x, px, m))) % GhostEllipticCurves.N;
+    }
+
+    function _computeCoefficientD(uint256 r1x, uint256 r2x, uint256 px, bytes32 m) internal pure returns (uint256) {
+        // Below line is the same as: bytes32 tag = sha256("EXODUS/nonceD");
+        // Double check it with help of `https://emn178.github.io/online-tools/sha256.html`
+        bytes32 tag = 0x25b86693490e759343e5fb3e272667c19273dea862f4ef52f7656cfb1563e9f6;
+        return uint256(sha256(abi.encodePacked(tag, tag, m, px, r1x, r2x))) % GhostEllipticCurves.N;
+    }
+
+    function _computeChallenge(bytes32 rx, bytes32 px, bytes32 m) internal pure returns (uint256) {
+        // Below line is the same as: bytes32 tag = sha256("EXODUS/challenge");
+        // Double check it with help of `https://emn178.github.io/online-tools/sha256.html`
+        bytes32 tag = 0xf0bf915ac954f4aa752d09a0b6a57c577bf0bcca4661ee26c7e613466cf5f9a1;
+        return uint256(sha256(abi.encodePacked(tag, tag, rx, px, m))) % GhostEllipticCurves.N;
+    }
+}
diff --git a/src/libraries/GhostEllipticCurves.sol b/src/libraries/GhostEllipticCurves.sol
new file mode 100644
index 0000000..a2d9070
--- /dev/null
+++ b/src/libraries/GhostEllipticCurves.sol
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: MIT
+
+pragma solidity ^0.8.0;
+
+// Vectors for secp256k1 are difficult to find.
These are the vectors from:
+// https://web.archive.org/web/20190724010836/https://chuckbatson.wordpress.com/2014/11/26/secp256k1-test-vectors
+
+library GhostEllipticCurves {
+    // Constants are taken from https://en.bitcoin.it/wiki/Secp256k1
+    uint256 internal constant B = 7;
+    uint256 internal constant P = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F;
+    uint256 internal constant N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
+
+    // Returns the bit length of max(k1, k2); returns 1 even when both scalars are 0.
+    function findMaxBitLength(uint256 k1, uint256 k2) internal pure returns (uint256 bits) {
+        assembly {
+            // Find maximum of the two scalars
+            let max := k1
+            if gt(k2, max) { max := k2 }
+
+            // if (v >> 128 != 0) { v >>= 128; bits += 128; }
+            if gt(max, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) {
+                max := shr(128, max)
+                bits := 128
+            }
+            // if (v >> 64 != 0) { v >>= 64; bits += 64; }
+            if gt(max, 0xFFFFFFFFFFFFFFFF) {
+                max := shr(64, max)
+                bits := add(bits, 64)
+            }
+            // if (v >> 32 != 0) { v >>= 32; bits += 32; }
+            if gt(max, 0xFFFFFFFF) {
+                max := shr(32, max)
+                bits := add(bits, 32)
+            }
+            // if (v >> 16 != 0) { v >>= 16; bits += 16; }
+            if gt(max, 0xFFFF) {
+                max := shr(16, max)
+                bits := add(bits, 16)
+            }
+            // if (v >> 8 != 0) { v >>= 8; bits += 8; }
+            if gt(max, 0xFF) {
+                max := shr(8, max)
+                bits := add(bits, 8)
+            }
+            // if (v >> 4 != 0) { v >>= 4; bits += 4; }
+            if gt(max, 0xF) {
+                max := shr(4, max)
+                bits := add(bits, 4)
+            }
+            // if (v >> 2 != 0) { v >>= 2; bits += 2; }
+            if gt(max, 0x3) {
+                max := shr(2, max)
+                bits := add(bits, 2)
+            }
+            // if (v >> 1 != 0) { /* v >>= 1; */ bits += 1; }
+            if gt(max, 0x1) {
+                bits := add(bits, 1)
+            }
+            bits := add(bits, 1) // convert highest-set-bit index to a bit count
+        }
+    }
+
+    // Computes k1*(x1, y1) + k2*(x2, y2) in projective coordinates.
+    function mulAddAffinePair(
+        uint256 x1, uint256 y1, uint256 k1,
+        uint256 x2, uint256 y2, uint256 k2
+    ) internal pure returns (uint256 x3, uint256 y3, uint256 z3) {
+        // We implement the Straus-Shamir trick described in
+        // Trading Inversions for Multiplications in Elliptic Curve Cryptography.
+        // (https://eprint.iacr.org/2003/257.pdf Page 7).
+
+        uint256 bits = findMaxBitLength(k1, k2);
+
+        uint256[4] memory precomputedXs;
+        uint256[4] memory precomputedYs;
+        uint256[4] memory precomputedZs;
+
+        precomputedXs[1] = x2; precomputedYs[1] = y2; precomputedZs[1] = 1; // 01: P2
+        precomputedXs[2] = x1; precomputedYs[2] = y1; precomputedZs[2] = 1; // 10: P1
+        (precomputedXs[3], precomputedYs[3], precomputedZs[3]) = projectiveAdd(x1, y1, 1, x2, y2, 1); // 11: P1+P2
+
+        y3 = 1; // accumulator starts at the point at infinity (0 : 1 : 0)
+
+        for (; bits > 0;) {
+            unchecked { --bits; }
+
+            (x3, y3, z3) = projectiveDouble(x3, y3, z3);
+
+            // mask = (bit of k1 << 1) | bit of k2, selecting the precomputed point
+            uint8 mask;
+            assembly {
+                mask := or(
+                    shl(1, and(shr(bits, k1), 1)),
+                    and(shr(bits, k2), 1)
+                )
+            }
+
+            if (mask == 0) {
+                continue;
+            }
+
+            if (mask == 3) {
+                // P1+P2 is stored projectively (its Z may differ from 1), so use the full add
+                (x3, y3, z3) = projectiveAdd(
+                    x3, y3, z3, precomputedXs[mask], precomputedYs[mask], precomputedZs[mask]
+                );
+            } else {
+                (x3, y3, z3) = projectiveAddMixed(
+                    x3, y3, z3, precomputedXs[mask], precomputedYs[mask]
+                );
+            }
+        }
+    }
+
+    // Computes k*(x, y) in projective coordinates.
+    function mulAddAffineSingle(
+        uint256 x, uint256 y, uint256 k
+    ) internal pure returns (uint256 x_, uint256 y_, uint256 z_) {
+        // Plain left-to-right double-and-add: the single-scalar degenerate case of the
+        // Straus-Shamir loop in mulAddAffinePair (https://eprint.iacr.org/2003/257.pdf Page 7).
+
+        y_ = 1; // accumulator starts at the point at infinity (0 : 1 : 0)
+        uint256 bits = findMaxBitLength(k, 0);
+
+        for (; bits > 0;) {
+            unchecked { --bits; }
+
+            (x_, y_, z_) = projectiveDouble(x_, y_, z_);
+
+            uint8 mask;
+            assembly {
+                mask := and(shr(bits, k), 1)
+            }
+
+            if (mask != 0) {
+                (x_, y_, z_) = projectiveAddMixed(x_, y_, z_, x, y);
+            }
+        }
+    }
+
+    function projectiveDouble(
+        uint256 X,
+        uint256 Y,
+        uint256 Z
+    ) internal pure returns (uint256 X3, uint256 Y3, uint256 Z3) {
+        // We implement the complete addition formula from Renes-Costello-Batina 2015
+        // (https://eprint.iacr.org/2015/1060 Algorithm 9).
+
+        // X3 = 2XY (Y^2 − 9bZ^2),
+        // Y3 = (Y^2 − 9bZ^2)(Y^2 + 3bZ^2) + 24bY^2Z^2,
+        // Z3 = 8Y^3Z
+
+        uint256 t0 = mulmod(Y, Y, P);                   // 1. t0 ← Y · Y => (Y²)
+        Z3 = mulmod(8, t0, P);                          // 2. Z3 ← 8 · t0 => (8Y²)
+        uint256 t1 = mulmod(Y, Z, P);                   // 3. t1 ← Y · Z => (YZ)
+        uint256 t2 = mulmod(Z, Z, P);                   // 4. t2 ← Z · Z => (Z²)
+        t2 = mulmod(21, t2, P);                         // 5. t2 ← b3 · t2 => (3bZ²)
+        X3 = mulmod(t2, Z3, P);                         // 6. X3 ← t2 · Z3 => (3bZ²8Y² = 24bY²Z²)
+        Y3 = addmod(t0, t2, P);                         // 7. Y3 ← t0 + t2 => (Y² + 3bZ²)
+        Z3 = mulmod(t1, Z3, P);                         // 8. Z3 ← t1 · Z3 => (YZ · 8Y² = 8Y³Z)
+        t1 = addmod(t0, P - mulmod(3, t2, P), P);       // 9. t1 ← t0 - (3 · t2) => (Y² - 9bZ²)
+        Y3 = addmod(X3, mulmod(t1, Y3, P), P);          // 10. Y3 ← X3 + (t1 · Y3) => ((Y² - 9bZ²)·(Y² + 3bZ²) + 24bY²Z²)
+        X3 = mulmod(t1, mulmod(X, Y, P), P);            // 11. X3 ← t1 · (X · Y) => ((Y² - 9bZ²) · XY)
+        X3 = addmod(X3, X3, P);                         // 12. X3 ← X3 + X3 => ((Y² - 9bZ²) · 2XY)
+    }
+
+    function projectiveAddMixed(
+        uint256 X1,
+        uint256 Y1,
+        uint256 Z1,
+        uint256 X2,
+        uint256 Y2
+    ) internal pure returns (uint256 X3, uint256 Y3, uint256 Z3) {
+        // We implement the complete addition formula from Renes-Costello-Batina 2015
+        // (https://eprint.iacr.org/2015/1060 Algorithm 8). The second point is affine (Z2 = 1).
+
+        // X3 = (X1Y2 + X2Y1)(Y1Y2 − 3bZ1) − 3b(Y1 + Y2Z1)(X1 + X2Z1),
+        // Y3 = (Y1Y2 + 3bZ1)(Y1Y2 − 3bZ1) + 9bX1X2(X1 + X2Z1),
+        // Z3 = (Y1 + Y2Z1)(Y1Y2 + 3bZ1) + 3X1X2(X1Y2 + X2Y1),
+
+        assembly {
+            let t0 := mulmod(X1, X2, P)                 // 1. t0 ← X1 · X2 => (X1·X2)
+            let t1 := mulmod(Y1, Y2, P)                 // 2. t1 ← Y1 · Y2 => (Y1·Y2)
+            let t3 := mulmod(X2, Y1, P)                 // 3. t3 ← X2 · Y1 => (X2·Y1)
+            let t4 := mulmod(X1, Y2, P)                 // 4. t4 ← X1 · Y2 => (X1·Y2)
+            t3 := addmod(t3, t4, P)                     // 5. t3 ← t3 + t4 => (X2·Y1 + X1·Y2)
+            t4 := mulmod(Y2, Z1, P)                     // 6. t4 ← Y2 · Z1 => (Y2·Z1)
+            t4 := addmod(t4, Y1, P)                     // 7. t4 ← t4 + Y1 => (Y2·Z1 + Y1)
+            Y3 := mulmod(X2, Z1, P)                     // 8. Y3 ← X2 · Z1 => (X2·Z1)
+            Y3 := addmod(Y3, X1, P)                     // 9. Y3 ← Y3 + X1 => (X2·Z1 + X1)
+            t0 := mulmod(3, t0, P)                      // 10. t0 ← 3 · t0 => (3·(X1·X2))
+            let t2 := mulmod(21, Z1, P)                 // 11. t2 ← b3 · Z1 => (b3·Z1)
+            Z3 := addmod(t1, t2, P)                     // 12. Z3 ← t1 + t2 => (Y1·Y2 + b·3·Z1)
+            t1 := addmod(t1, sub(P, t2), P)             // 13. t1 ← t1 − t2 => (Y1·Y2 - b·3·Z1)
+            Y3 := mulmod(21, Y3, P)                     // 14. Y3 ← b3 · Y3 => 3·b·(X2·Z1 + X1)
+            X3 := mulmod(t4, Y3, P)                     // 15. X3 ← t4 · Y3 => (Y2·Z1 + Y1)·b·3·(X2·Z1 + X1)
+            t2 := mulmod(t3, t1, P)                     // 16. t2 ← t3 · t1 => ((X2·Y1 + X1·Y2)·(Y1·Y2 - b3·Z1))
+            X3 := addmod(t2, sub(P, X3), P)             // 17. X3 ← t2 − X3 => ((X2·Y1 + X1·Y2)·(Y1·Y2 - b3·Z1) - 3·B·(Y2·Z1 + Y1)·(X2·Z1 + X1))
+            Y3 := mulmod(Y3, t0, P)                     // 18. Y3 ← Y3 · t0 => (9·b·(X2·Z1 + X1)·X1·X2)
+            t1 := mulmod(t1, Z3, P)                     // 19. t1 ← t1 · Z3 => (Y1·Y2 - b·3·Z1)·(Y1·Y2 + b·3·Z1)
+            Y3 := addmod(t1, Y3, P)                     // 20. Y3 ← t1 + Y3 => ((Y1·Y2 - b·3·Z1)·(Y1·Y2 + 3·b·Z1) + 9·b·(X2·Z1 + X1)·X1·X2)
+            t0 := mulmod(t0, t3, P)                     // 21. t0 ← t0 · t3 => (3·X1·X2·(X2·Y1 + X1·Y2))
+            Z3 := mulmod(Z3, t4, P)                     // 22. Z3 ← Z3 · t4 => (Y1·Y2 + b·3·Z1)·(Y2·Z1 + Y1)
+            Z3 := addmod(Z3, t0, P)                     // 23. Z3 ← Z3 + t0 => ((Y1·Y2 + b·3·Z1)·(Y2·Z1 + Y1) + 3·X1·X2·(X2·Y1 + X1·Y2))
+        }
+    }
+
+    function projectiveAdd(
+        uint256 X1,
+        uint256 Y1,
+        uint256 Z1,
+        uint256 X2,
+        uint256 Y2,
+        uint256 Z2
+    ) internal pure returns (uint256 X3, uint256 Y3, uint256 Z3) {
+        // We implement the complete addition formula from Renes-Costello-Batina 2015
+        // (https://eprint.iacr.org/2015/1060 Algorithm 7).
+
+        // X3 = (X1Y2 + X2Y1)(Y1Y2 − 3bZ1Z2) − 3b(Y1Z2 + Y2Z1)(X1Z2 + X2Z1),
+        // Y3 = (Y1Y2 + 3bZ1Z2)(Y1Y2 − 3bZ1Z2) + 9bX1X2(X1Z2 + X2Z1),
+        // Z3 = (Y1Z2 + Y2Z1)(Y1Y2 + 3bZ1Z2) + 3X1X2(X1Y2 + X2Y1),
+
+        uint256 t0 = mulmod(X1, X2, P);                 // 1. t0 ← X1 · X2 => (X1·X2)
+        uint256 t1 = mulmod(Y1, Y2, P);                 // 2. t1 ← Y1 · Y2 => (Y1·Y2)
+        uint256 t2 = mulmod(Z1, Z2, P);                 // 3. t2 ← Z1 · Z2 => (Z1·Z2)
+        uint256 t3 = addmod(X1, Y1, P);                 // 4. t3 ← X1 + Y1 => (X1 + Y1)
+        uint256 t4 = addmod(X2, Y2, P);                 // 5. t4 ← X2 + Y2 => (X2 + Y2)
+        t3 = mulmod(t3, t4, P);                         // 6. t3 ← t3 · t4 => ((X1 + Y1) · (X2 + Y2))
+        t4 = addmod(t0, t1, P);                         // 7. t4 ← t0 + t1 => (X1·X2 + Y1·Y2)
+        t3 = addmod(t3, P - t4, P);                     // 8. t3 ← t3 - t4 => ((X1 + Y1)·(X2 + Y2) - X1·X2 - Y1·Y2)
+        t4 = addmod(Y1, Z1, P);                         // 9. t4 ← Y1 + Z1 => (Y1 + Z1)
+        X3 = addmod(Y2, Z2, P);                         // 10. X3 ← Y2 + Z2 => (Y2 + Z2)
+        t4 = mulmod(t4, X3, P);                         // 11. t4 ← t4 · X3 => ((Y1 + Z1) · (Y2 + Z2))
+        X3 = addmod(t1, t2, P);                         // 12. X3 ← t1 + t2 => (Y1·Y2 + Z1·Z2)
+        t4 = addmod(t4, P - X3, P);                     // 13. t4 ← t4 - X3 => ((Y1 + Z1)·(Y2 + Z2) - Y1·Y2 - Z1·Z2)
+        X3 = addmod(X1, Z1, P);                         // 14. X3 ← X1 + Z1 => (X1 + Z1)
+        Y3 = addmod(X2, Z2, P);                         // 15. Y3 ← X2 + Z2 => (X2 + Z2)
+        X3 = mulmod(X3, Y3, P);                         // 16. X3 ← X3 · Y3 => ((X1 + Z1) · (X2 + Z2))
+        Y3 = addmod(t0, t2, P);                         // 17. Y3 ← t0 + t2 => (X1·X2 + Z1·Z2)
+        Y3 = addmod(X3, P - Y3, P);                     // 18. Y3 ← X3 - Y3 => ((X1 + Z1)·(X2 + Z2) - X1·X2 - Z1·Z2)
+        X3 = addmod(t0, t0, P);                         // 19. X3 ← t0 + t0 => (2·X1·X2)
+        t0 = addmod(X3, t0, P);                         // 20. t0 ← X3 + t0 => (3·X1·X2)
+        t2 = mulmod(21, t2, P);                         // 21. t2 ← B3 · t2 => 3b · Z1·Z2
+        Z3 = addmod(t1, t2, P);                         // 22. Z3 ← t1 + t2 => Y1·Y2 + 3·b·Z1·Z2
+        t1 = addmod(t1, P - t2, P);                     // 23. t1 ← t1 - t2 => Y1·Y2 - 3·b·Z1·Z2
+        Y3 = mulmod(21, Y3, P);                         // 24. Y3 ← B3 · Y3 => 3b · ((X1+Z1)(X2+Z2) - X1·X2 - Z1·Z2)
+        X3 = mulmod(t4, Y3, P);                         // 25. X3 ← t4 · Y3 => 3b·((Y1+Z1)(Y2+Z2)-Y1·Y2-Z1·Z2) · ((X1+Z1)(X2+Z2)-X1·X2-Z1·Z2)
+        t2 = mulmod(t3, t1, P);                         // 26. t2 ← t3 · t1 => ((X1+Y1)(X2+Y2)-X1·X2-Y1·Y2) · (Y1·Y2 - 3·b·Z1·Z2)
+        X3 = addmod(t2, P - X3, P);                     // 27. X3 ← t2 - X3 => (X1Y2+X2Y1)(Y1·Y2-3·b·Z1·Z2) - 3b(Y1·Z2+Y2·Z1)(X1·Z2+X2·Z1)
+        Y3 = mulmod(Y3, t0, P);                         // 28. Y3 ← Y3 · t0 => 3·b·((X1+Z1)(X2+Z2) - X1·X2 - Z1·Z2) · 3·X1·X2 = 9·b·X1·X2·(X1·Z2+X2·Z1)
+        t1 = mulmod(t1, Z3, P);                         // 29. t1 ← t1 · Z3 => (Y1·Y2-3·b·Z1·Z2) · (Y1·Y2+3·b·Z1·Z2)
+        Y3 = addmod(t1, Y3, P);                         // 30. Y3 ← t1 + Y3 => (Y1Y2+3·b·Z1·Z2)(Y1·Y2-3·b·Z1·Z2) + 9b·X1·X2·(X1·Z2+X2·Z1)
+        t0 = mulmod(t0, t3, P);                         // 31. t0 ← t0 · t3 => (3·X1·X2) · ((X1+Y1)(X2+Y2)-X1·X2-Y1·Y2)
+        Z3 = mulmod(Z3, t4, P);                         // 32. Z3 ← Z3 · t4 => (Y1·Y2+3·b·Z1·Z2) · ((Y1+Z1)(Y2+Z2)-Y1·Y2-Z1·Z2)
+        Z3 = addmod(Z3, t0, P);                         // 33. Z3 ← Z3 + t0 => (Y1·Z2+Y2·Z1)·(Y1·Y2+3·b·Z1·Z2) + 3·X1·X2·(X1·Y2+X2·Y1)
+    }
+
+    // Checks y² == x³ + 7 (mod P); the point at infinity is reported as not on curve.
+    function isOnCurve(uint256 x, uint256 y) internal pure returns (bool) {
+        uint lhs = mulmod(y, y, P);                     // y^2
+        uint rhs = mulmod(mulmod(x, x, P), x, P);       // x^3
+        rhs = addmod(rhs, B, P);                        // x^3 + 7
+        return lhs == rhs;
+    }
+
+    // Converts a projective point (x : y : z) to affine (x/z, y/z); z == 0 yields (0, 0).
+    function toAffine(
+        uint256 x,
+        uint256 y,
+        uint256 z
+    ) internal pure returns (uint256, uint256) {
+        uint256 t0 = modInverse(z);
+        x = mulmod(x, t0, P);
+        y = mulmod(y, t0, P);
+        return (x, y);
+    }
+
+    // Modular inverse of r1 mod P; r1 == 0 falls through the loop and returns 0
+    // (P is prime, so every nonzero residue is invertible).
+    function modInverse(uint256 r1) internal pure returns (uint256 t0) {
+        // Extended Euclidean algorithm (iterative, assembly)
+        // Typically lower average gas than the modexp precompile for single inversions,
+        // but execution time (and gas) depends on input values — not constant-time.
+        assembly {
+            let t1 := 1
+            let r0 := P
+
+            for {} r1 {} {
+                let q := div(r0, r1)
+
+                // t0, t1 = t1, t0 - q * t1
+                let t1_new := sub(t0, mul(q, t1))
+                t0 := t1
+                t1 := t1_new
+
+                // r0, r1 = r1, r0 - q * r1
+                let r1_new := sub(r0, mul(q, r1))
+                r0 := r1
+                r1 := r1_new
+            }
+
+            // Bézout coefficient may be "negative" in two's complement; normalize into [0, P)
+            if slt(t0, 0) {
+                t0 := add(t0, P)
+            }
+        }
+    }
+}