From 483bd5bd918ebbc1d5e008ce892d7a0023b50ed1 Mon Sep 17 00:00:00 2001 From: Chiro Hiro Date: Wed, 4 Oct 2023 14:39:04 +0700 Subject: [PATCH 1/8] Update arkworks to 0.4.2 (only up to kimchi) (p-s develop/mina compatible) Update arkworks to 0.4.2 for pallas and vesta Update test cases to new version of test suite 0.4.2 Upgrade utils to arkworks 0.4.2 Upgrade poseidon to arkworks 0.4.2 Upgrade export test vectors of poseidon to arkworks 0.4.2 Upgrade groupmap to arkworks 0.4.2 Upgrade hasher to arkworks 0.4.2 Upgrade signer to arkworks 0.4.2 Upgrade turshi to arkworks 0.4.2 Convert poly-comm to arkworks 0.4.2 Upgrade arkworks for `kimchi` and other libraries/tools Fixup compilation errors in OCaml conversion helpers Adjust serde_as regression test to 0.4.2 Use compressed serialization Fix erroneous implicit Affine->Proj conversions Fix from_address bug Fix ocaml printing: use hex instead of integer --- Cargo.lock | 200 +--- Cargo.toml | 14 +- book/src/specs/kimchi.md | 8 +- circuit-construction/Cargo.toml | 47 + circuit-construction/src/constants.rs | 44 + circuit-construction/src/lib.rs | 33 + circuit-construction/src/prover.rs | 136 +++ .../src/tests/example_proof.rs | 103 ++ circuit-construction/src/writer.rs | 1007 +++++++++++++++++ curves/Cargo.toml | 3 +- curves/src/pasta/curves/pallas.rs | 91 +- curves/src/pasta/curves/tests.rs | 6 + curves/src/pasta/curves/vesta.rs | 83 +- curves/src/pasta/fields/fft.rs | 69 ++ curves/src/pasta/fields/fp.rs | 33 +- curves/src/pasta/fields/fq.rs | 31 +- curves/src/pasta/fields/mod.rs | 40 + curves/tests/pasta_curves.rs | 38 +- curves/tests/pasta_fields.rs | 27 +- groupmap/src/lib.rs | 22 +- groupmap/tests/groupmap.rs | 4 +- hasher/src/roinput.rs | 10 +- kimchi/Cargo.toml | 1 + kimchi/src/circuits/constraints.rs | 15 +- .../circuits/domain_constant_evaluation.rs | 4 +- kimchi/src/circuits/expr.rs | 4 +- kimchi/src/circuits/gate.rs | 23 +- kimchi/src/circuits/lookup/index.rs | 4 +- kimchi/src/circuits/polynomials/and.rs | 4 +- 
.../circuits/polynomials/endomul_scalar.rs | 6 +- .../polynomials/foreign_field_add/gadget.rs | 4 +- .../polynomials/foreign_field_common.rs | 4 +- .../polynomials/foreign_field_mul/gadget.rs | 4 +- .../src/circuits/polynomials/keccak/gadget.rs | 4 +- .../src/circuits/polynomials/permutation.rs | 6 +- kimchi/src/circuits/polynomials/poseidon.rs | 4 +- .../polynomials/range_check/gadget.rs | 4 +- kimchi/src/circuits/polynomials/rot.rs | 4 +- kimchi/src/circuits/polynomials/turshi.rs | 4 +- kimchi/src/circuits/polynomials/xor.rs | 4 +- kimchi/src/circuits/wires.rs | 24 +- kimchi/src/circuits/witness/mod.rs | 4 +- kimchi/src/curve.rs | 52 +- kimchi/src/linearization.rs | 8 +- kimchi/src/precomputed_srs.rs | 6 +- kimchi/src/proof.rs | 16 +- kimchi/src/prover.rs | 18 +- kimchi/src/prover_index.rs | 8 +- kimchi/src/snarky/api.rs | 12 +- kimchi/src/snarky/constants.rs | 2 +- kimchi/src/snarky/folding.rs | 12 +- kimchi/src/snarky/range_checks.rs | 4 +- kimchi/src/tests/and.rs | 6 +- kimchi/src/tests/ec.rs | 40 +- kimchi/src/tests/endomul.rs | 33 +- kimchi/src/tests/endomul_scalar.rs | 4 +- kimchi/src/tests/foreign_field_add.rs | 14 +- kimchi/src/tests/foreign_field_mul.rs | 6 +- kimchi/src/tests/framework.rs | 1 + kimchi/src/tests/generic.rs | 4 +- kimchi/src/tests/not.rs | 6 +- kimchi/src/tests/range_check.rs | 4 +- kimchi/src/tests/recursion.rs | 2 +- kimchi/src/tests/rot.rs | 4 +- kimchi/src/tests/serde.rs | 6 +- kimchi/src/tests/varbasemul.rs | 20 +- kimchi/src/tests/xor.rs | 4 +- kimchi/src/verifier.rs | 4 +- poly-commitment/src/chunked.rs | 7 +- poly-commitment/src/combine.rs | 134 ++- poly-commitment/src/commitment.rs | 112 +- poly-commitment/src/evaluation_proof.rs | 41 +- poly-commitment/src/kzg.rs | 85 +- poly-commitment/src/lib.rs | 12 +- poly-commitment/src/srs.rs | 21 +- poly-commitment/tests/batch_15_wires.rs | 2 +- poly-commitment/tests/commitment.rs | 2 +- poly-commitment/tests/ipa_commitment.rs | 5 +- poly-commitment/tests/kzg.rs | 21 +- 
poseidon/Cargo.toml | 4 +- poseidon/export_test_vectors/Cargo.toml | 4 +- poseidon/export_test_vectors/src/vectors.rs | 11 +- poseidon/src/poseidon.rs | 3 +- poseidon/src/sponge.rs | 47 +- signer/src/lib.rs | 6 +- signer/src/pubkey.rs | 29 +- signer/src/schnorr.rs | 25 +- turshi/tests/helper.rs | 4 +- utils/Cargo.toml | 3 +- utils/src/array.rs | 4 +- utils/src/dense_polynomial.rs | 4 +- utils/src/field_helpers.rs | 16 +- utils/src/serialization.rs | 13 +- utils/tests/chunked_polynomials.rs | 2 +- utils/tests/dense_polynomial.rs | 2 +- utils/tests/field_helpers.rs | 11 +- utils/tests/foreign_field.rs | 4 +- 97 files changed, 2220 insertions(+), 890 deletions(-) create mode 100644 circuit-construction/Cargo.toml create mode 100644 circuit-construction/src/constants.rs create mode 100644 circuit-construction/src/lib.rs create mode 100644 circuit-construction/src/prover.rs create mode 100644 circuit-construction/src/tests/example_proof.rs create mode 100644 circuit-construction/src/writer.rs create mode 100644 curves/src/pasta/curves/tests.rs create mode 100644 curves/src/pasta/fields/fft.rs diff --git a/Cargo.lock b/Cargo.lock index 861e68feb4..89bb61363e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,17 +23,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "ahash" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" -dependencies = [ - "getrandom", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.7" @@ -135,21 +124,29 @@ dependencies = [ [[package]] name = "ark-algebra-test-templates" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eef0b339ebc113d9bd3fb7cd666baf2cfe4e1024e0fac23e072d46598bbd0cd" +checksum = 
"400bd3a79c741b1832f1416d4373ae077ef82ca14a8b4cee1248a2f11c8b9172" dependencies = [ "ark-ec", "ark-ff", "ark-serialize", "ark-std", + "hex", + "num-bigint", + "num-integer", + "num-traits", + "serde", + "serde_derive", + "serde_json", + "sha2", ] [[package]] name = "ark-bn254" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea691771ebbb28aea556c044e2e5c5227398d840cee0c34d4d20fa8eb2689e8c" +checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" dependencies = [ "ark-ec", "ark-ff", @@ -158,14 +155,17 @@ dependencies = [ [[package]] name = "ark-ec" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea978406c4b1ca13c2db2373b05cc55429c3575b8b21f1b9ee859aa5b03dd42" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" dependencies = [ "ark-ff", + "ark-poly", "ark-serialize", "ark-std", "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", "num-traits", "rayon", "zeroize", @@ -173,15 +173,17 @@ dependencies = [ [[package]] name = "ark-ff" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" dependencies = [ "ark-ff-asm", "ark-ff-macros", "ark-serialize", "ark-std", "derivative", + "digest", + "itertools 0.10.5", "num-bigint", "num-traits", "paste", @@ -192,9 +194,9 @@ dependencies = [ [[package]] name = "ark-ff-asm" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ "quote", "syn 1.0.109", @@ -202,46 +204,48 @@ dependencies = [ [[package]] name = "ark-ff-macros" 
-version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", + "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "ark-poly" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0f78f47537c2f15706db7e98fe64cc1711dbf9def81218194e17239e53e5aa" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" dependencies = [ "ark-ff", "ark-serialize", "ark-std", "derivative", - "hashbrown 0.11.2", + "hashbrown 0.13.2", "rayon", ] [[package]] name = "ark-serialize" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" dependencies = [ "ark-serialize-derive", "ark-std", - "digest 0.9.0", + "digest", + "num-bigint", ] [[package]] name = "ark-serialize-derive" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd4e5f0bf8285d5ed538d27fab7411f3e297908fd93c62195de8bee3f199e82" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ "proc-macro2", "quote", @@ -250,9 +254,9 @@ dependencies = [ [[package]] name = "ark-std" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", "rand", @@ -436,7 +440,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -550,7 +554,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", - "half 2.3.1", + "half", ] [[package]] @@ -742,32 +746,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "criterion" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" -dependencies = [ - "atty", - "cast", - "clap 2.34.0", - "criterion-plot 0.4.5", - "csv", - "itertools 0.10.5", - "lazy_static", - "num-traits", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_cbor", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - [[package]] name = "criterion" version = "0.5.1" @@ -778,7 +756,7 @@ dependencies = [ "cast", "ciborium", "clap 4.4.18", - "criterion-plot 0.5.0", + "criterion-plot", "is-terminal", "itertools 0.10.5", "num-traits", @@ -794,16 +772,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "criterion-plot" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" -dependencies = [ - "cast", - "itertools 0.10.5", -] - [[package]] name = "criterion-plot" version = "0.5.0" @@ -855,27 +823,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "csv" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" -dependencies = [ - "csv-core", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" 
-dependencies = [ - "memchr", -] - [[package]] name = "cty" version = "0.2.2" @@ -979,15 +926,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "digest" version = "0.10.7" @@ -1226,12 +1164,6 @@ dependencies = [ "rand", ] -[[package]] -name = "half" -version = "1.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" - [[package]] name = "half" version = "2.3.1" @@ -1242,15 +1174,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash 0.7.7", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -1263,7 +1186,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.7", + "ahash", ] [[package]] @@ -1515,7 +1438,7 @@ dependencies = [ "ark-serialize", "blake2", "colored", - "criterion 0.5.1", + "criterion", "groupmap", "hex", "iai", @@ -1745,6 +1668,7 @@ dependencies = [ "ark-algebra-test-templates", "ark-ec", "ark-ff", + "ark-serialize", "ark-std", "num-bigint", "rand", @@ -1771,7 +1695,7 @@ dependencies = [ "ark-ff", "ark-poly", "ark-serialize", - "criterion 0.3.6", + "criterion", "hex", "mina-curves", "o1-utils", @@ -1866,7 +1790,7 @@ name = "mvpoly" version = "0.1.0" dependencies = [ "ark-ff", - "criterion 0.5.1", + "criterion", "kimchi", "log", "mina-curves", @@ -2562,9 +2486,9 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = 
"rustc_version" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] @@ -2641,21 +2565,9 @@ dependencies = [ [[package]] name = "semver" -version = "0.11.0" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" @@ -2666,16 +2578,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half 1.8.3", - "serde", -] - [[package]] name = "serde_derive" version = "1.0.196" @@ -2757,7 +2659,7 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -2766,7 +2668,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.7", + "digest", "keccak", ] diff --git a/Cargo.toml b/Cargo.toml index 0d5f84a2b3..356f7d5630 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,13 +24,13 @@ members = [ resolver = "2" [workspace.dependencies] -ark-algebra-test-templates = "0.3.0" -ark-bn254 = { version = "0.3.0" } -ark-ec = 
{ version = "0.3.0", features = ["parallel"] } -ark-ff = { version = "0.3.0", features = ["parallel", "asm"] } -ark-poly = { version = "0.3.0", features = ["parallel"] } -ark-serialize = "0.3.0" -ark-std = "0.3.0" +ark-algebra-test-templates = "0.4.2" +ark-bn254 = { version = "0.4.0" } +ark-ec = { version = "0.4.2", features = ["parallel"] } +ark-ff = { version = "0.4.2", features = ["parallel", "asm"] } +ark-poly = { version = "0.4.2", features = ["parallel"] } +ark-serialize = "0.4.2" +ark-std = "0.4.0" bcs = "0.1.3" base64 = "0.21.5" bitvec = "1.0.0" diff --git a/book/src/specs/kimchi.md b/book/src/specs/kimchi.md index 32116cc81c..08f73bf9e8 100644 --- a/book/src/specs/kimchi.md +++ b/book/src/specs/kimchi.md @@ -2037,7 +2037,7 @@ pub struct ProofEvaluations { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct LookupCommitments { +pub struct LookupCommitments { /// Commitments to the sorted lookup table polynomial (may have chunks) pub sorted: Vec>, /// Commitment to the lookup aggregation polynomial @@ -2050,7 +2050,7 @@ pub struct LookupCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverCommitments { +pub struct ProverCommitments { /// The commitments to the witness (execution trace) pub w_comm: [PolyComm; COLUMNS], /// The commitment to the permutation polynomial @@ -2065,7 +2065,7 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof pub commitments: ProverCommitments, @@ -2093,7 +2093,7 @@ pub struct ProverProof { #[serde(bound = "G: ark_serialize::CanonicalDeserialize 
+ ark_serialize::CanonicalSerialize")] pub struct RecursionChallenge where - G: AffineCurve, + G: AffineRepr, { /// Vector of scalar field elements #[serde_as(as = "Vec")] diff --git a/circuit-construction/Cargo.toml b/circuit-construction/Cargo.toml new file mode 100644 index 0000000000..3e60cb706c --- /dev/null +++ b/circuit-construction/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "circuit-construction" +version = "0.1.0" +description = "A simple circuit writer for kimchi" +repository = "https://github.com/o1-labs/proof-systems" +edition = "2021" +license = "Apache-2.0" +homepage = "https://o1-labs.github.io/proof-systems/" +documentation = "https://o1-labs.github.io/proof-systems/rustdoc/" +readme = "../README.md" + +[lib] +path = "src/lib.rs" +bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options) + +[dependencies] +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-poly = { version = "0.4.2", features = [ "parallel" ] } +ark-serialize = "0.4.2" +blake2 = "0.10.0" +num-derive = "0.3" +num-traits = "0.2" +itertools = "0.10.3" +rand = "0.8.0" +rand_core = "0.6.3" +rayon = "1.5.0" +rmp-serde = "1.0.0" +serde = "1.0.130" +serde_with = "1.10.0" +thiserror = "1.0.30" + +poly-commitment = { path = "../poly-commitment", version = "0.1.0" } +groupmap = { path = "../groupmap", version = "0.1.0" } +mina-curves = { path = "../curves", version = "0.1.0" } +o1-utils = { path = "../utils", version = "0.1.0" } +mina-poseidon = { path = "../poseidon", version = "0.1.0" } +kimchi = { path = "../kimchi", version = "0.1.0" } + +[dev-dependencies] +proptest = "1.0.0" +proptest-derive = "0.3.0" +colored = "2.0.0" + +# benchmarks +criterion = "0.3" +iai = "0.1" diff --git a/circuit-construction/src/constants.rs b/circuit-construction/src/constants.rs new file mode 100644 index 
0000000000..803f18a9c0 --- /dev/null +++ b/circuit-construction/src/constants.rs @@ -0,0 +1,44 @@ +use ark_ec::AffineRepr; +use ark_ff::Field; +use kimchi::curve::KimchiCurve; +use mina_curves::pasta::{Fp, Fq, Pallas as PallasAffine, Vesta as VestaAffine}; +use mina_poseidon::poseidon::ArithmeticSpongeParams; +use poly_commitment::{commitment::CommitmentCurve, srs::endos}; + +/// The type of possible constants in the circuit +#[derive(Clone)] +pub struct Constants { + pub poseidon: &'static ArithmeticSpongeParams, + pub endo: F, + pub base: (F, F), +} + +/// Constants for the base field of Pallas +/// /// +/// # Panics +/// +/// Will panic if `PallasAffine::generator()` returns None. +pub fn fp_constants() -> Constants { + let (endo_q, _endo_r) = endos::(); + let base = PallasAffine::generator().to_coordinates().unwrap(); + Constants { + poseidon: VestaAffine::sponge_params(), + endo: endo_q, + base, + } +} + +/// Constants for the base field of Vesta +/// +/// # Panics +/// +/// Will panic if `VestaAffine::generator()` returns None. 
+pub fn fq_constants() -> Constants { + let (endo_q, _endo_r) = endos::(); + let base = VestaAffine::generator().to_coordinates().unwrap(); + Constants { + poseidon: PallasAffine::sponge_params(), + endo: endo_q, + base, + } +} diff --git a/circuit-construction/src/lib.rs b/circuit-construction/src/lib.rs new file mode 100644 index 0000000000..27618ed679 --- /dev/null +++ b/circuit-construction/src/lib.rs @@ -0,0 +1,33 @@ +#![doc = include_str!("../../README.md")] + +/// Definition of possible constants in circuits +pub mod constants; +/// This contains the prover functions, ranging from curves definitions to prover index and proof generation +pub mod prover; +/// This is the actual writer with all of the available functions to set up a circuit and its corresponding constraint system +pub mod writer; + +#[cfg(test)] +mod tests; + +/// This contains the Kimchi dependencies being used +pub mod prologue { + pub use super::constants::{fp_constants, fq_constants, Constants}; + pub use super::prover::{generate_prover_index, prove, CoordinateCurve}; + pub use super::writer::{Cs, Var}; + pub use ark_ec::{AffineRepr, CurveGroup}; + pub use ark_ff::{FftField, PrimeField, UniformRand}; + pub use ark_poly::{EvaluationDomain, Radix2EvaluationDomain}; + pub use groupmap::GroupMap; + pub use kimchi::verifier::verify; + pub use mina_curves::pasta::{ + Fp, Pallas as PallasAffine, Vesta as VestaAffine, VestaParameters, + }; + pub use mina_poseidon::{ + constants::*, + poseidon::{ArithmeticSponge, Sponge}, + sponge::{DefaultFqSponge, DefaultFrSponge}, + }; + pub use poly_commitment::{commitment::CommitmentCurve, srs::SRS}; + pub use std::sync::Arc; +} diff --git a/circuit-construction/src/prover.rs b/circuit-construction/src/prover.rs new file mode 100644 index 0000000000..2841c8dfbd --- /dev/null +++ b/circuit-construction/src/prover.rs @@ -0,0 +1,136 @@ +use crate::writer::{Cs, GateSpec, System, Var, WitnessGenerator}; +use ark_ec::AffineRepr; +use ark_ff::{One, PrimeField, Zero}; 
+use kimchi::{ + circuits::{constraints::ConstraintSystem, gate::GateType, wires::COLUMNS}, + curve::KimchiCurve, + plonk_sponge::FrSponge, + proof::ProverProof, + prover_index::ProverIndex, +}; +use mina_poseidon::FqSponge; +use poly_commitment::{ + commitment::{CommitmentCurve, PolyComm}, + srs::{endos, SRS}, +}; +use std::array; + +/// Given an index, a group map, custom blinders for the witness, a public input vector, and a circuit `main`, it creates a proof. +/// +/// # Panics +/// +/// Will panic if recursive proof creation returns `ProverError`. +pub fn prove( + index: &ProverIndex, + group_map: &G::Map, + blinders: Option<[Option; COLUMNS]>, + public_input: &[G::ScalarField], + mut main: H, +) -> ProverProof +where + H: FnMut(&mut WitnessGenerator, Vec>), + G::BaseField: PrimeField, + G: KimchiCurve, + EFqSponge: Clone + FqSponge, + EFrSponge: FrSponge, +{ + // create the witness generator + let mut gen: WitnessGenerator = WitnessGenerator::new(public_input); + + // run the witness generation + let public_vars = public_input + .iter() + .map(|x| Var { + index: 0, + value: Some(*x), + }) + .collect(); + main(&mut gen, public_vars); + + // get the witness columns + gen.curr_gate_count(); + let columns = gen.columns(); + + // custom blinders for the witness commitment + let blinders: [Option>; COLUMNS] = match blinders { + None => array::from_fn(|_| None), + Some(bs) => array::from_fn(|i| { + bs[i].map(|b| PolyComm { + unshifted: vec![b], + shifted: None, + }) + }), + }; + + // create the proof + ProverProof::create_recursive::( + group_map, + columns, + &[], + index, + vec![], + Some(blinders), + ) + .unwrap() +} + +/// Creates the prover index on input an `srs`, used `constants`, parameters for Poseidon, number of public inputs, and a specific circuit +/// +/// # Panics +/// +/// Will panic if `constraint_system` is not built with `public` input. 
+pub fn generate_prover_index( + srs: std::sync::Arc>, + public: usize, + main: Circuit, +) -> ProverIndex +where + Circuit: FnOnce(&mut System, Vec>), + Curve: KimchiCurve, +{ + let mut system: System = System::default(); + let z = Curve::ScalarField::zero(); + + // create public input variables + let public_input_row = vec![Curve::ScalarField::one(), z, z, z, z, z, z, z, z, z]; + let public_input: Vec<_> = (0..public) + .map(|_| { + let v = system.var(|| panic!("fail")); + + system.gate(GateSpec { + typ: GateType::Generic, + row: vec![Some(v)], + coeffs: public_input_row.clone(), + }); + v + }) + .collect(); + + main(&mut system, public_input); + + let gates = system.gates(); + + // Other base field = self scalar field + let (endo_q, _endo_r) = endos::(); + //let (endo_q, _endo_r) = Curve::endos(); + + let constraint_system = ConstraintSystem::::create(gates) + .public(public) + .build() + // TODO: return a Result instead of panicking + .expect("couldn't construct constraint system"); + + ProverIndex::::create(constraint_system, endo_q, srs) +} + +/// Handling coordinates in an affine curve +pub trait CoordinateCurve: AffineRepr { + /// Returns the coordinates in the curve as two points of the base field + fn to_coords(&self) -> Option<(Self::BaseField, Self::BaseField)>; +} + +impl CoordinateCurve for G { + fn to_coords(&self) -> Option<(Self::BaseField, Self::BaseField)> { + CommitmentCurve::to_coordinates(self) + } +} diff --git a/circuit-construction/src/tests/example_proof.rs b/circuit-construction/src/tests/example_proof.rs new file mode 100644 index 0000000000..54fa787bb2 --- /dev/null +++ b/circuit-construction/src/tests/example_proof.rs @@ -0,0 +1,103 @@ +use crate::prologue::*; +use kimchi::curve::KimchiCurve; +use std::ops::Mul; + +type SpongeQ = DefaultFqSponge; +type SpongeR = DefaultFrSponge; + +pub struct Witness { + pub s: G::ScalarField, + pub preimage: G::BaseField, +} + +// Prove knowledge of discrete log and poseidon preimage of a hash +pub fn 
circuit< + F: PrimeField + FftField, + G: AffineRepr + CoordinateCurve, + Sys: Cs, +>( + constants: &Constants, + // The witness + witness: Option<&Witness>, + sys: &mut Sys, + public_input: Vec>, +) { + let zero = sys.constant(F::zero()); + + let constant_curve_pt = |sys: &mut Sys, (x, y)| { + let x = sys.constant(x); + let y = sys.constant(y); + (x, y) + }; + + let base = constant_curve_pt(sys, G::generator().to_coords().unwrap()); + let scalar = sys.scalar(G::ScalarField::MODULUS_BIT_SIZE as usize, || { + witness.as_ref().unwrap().s + }); + let actual = sys.scalar_mul(zero, base, scalar); + + let preimage = sys.var(|| witness.as_ref().unwrap().preimage); + let actual_hash = sys.poseidon(constants, vec![preimage, zero, zero])[0]; + + sys.assert_eq(actual.0, public_input[0]); + sys.assert_eq(actual.1, public_input[1]); + sys.assert_eq(actual_hash, public_input[2]); +} + +const PUBLIC_INPUT_LENGTH: usize = 3; + +#[test] +fn test_example_circuit() { + use mina_curves::pasta::Pallas; + use mina_curves::pasta::Vesta; + // create SRS + let srs = { + let mut srs = SRS::::create(1 << 7); // 2^7 = 128 + srs.add_lagrange_basis(Radix2EvaluationDomain::new(srs.g.len()).unwrap()); + Arc::new(srs) + }; + + let proof_system_constants = fp_constants(); + + // generate circuit and index + let prover_index = generate_prover_index::<_, _>(srs, PUBLIC_INPUT_LENGTH, |sys, p| { + circuit::<_, Pallas, _>(&proof_system_constants, None, sys, p) + }); + + let group_map = ::Map::setup(); + + let mut rng = rand::thread_rng(); + + // create witness + let private_key = ::ScalarField::rand(&mut rng); + let preimage = ::BaseField::rand(&mut rng); + + let witness = Witness { + s: private_key, + preimage, + }; + + // create public input + let public_key = Pallas::generator().mul(private_key).into_affine(); + let hash = { + let mut s: ArithmeticSponge<_, PlonkSpongeConstantsKimchi> = + ArithmeticSponge::new(Vesta::sponge_params()); + s.absorb(&[preimage]); + s.squeeze() + }; + + // generate proof 
+ let public_input = vec![public_key.x, public_key.y, hash]; + let proof = prove::( + &prover_index, + &group_map, + None, + &public_input, + |sys, p| circuit::(&proof_system_constants, Some(&witness), sys, p), + ); + + // verify proof + let verifier_index = prover_index.verifier_index(); + + verify::<_, SpongeQ, SpongeR>(&group_map, &verifier_index, &proof, &public_input).unwrap(); +} diff --git a/circuit-construction/src/writer.rs b/circuit-construction/src/writer.rs new file mode 100644 index 0000000000..0caec23c1c --- /dev/null +++ b/circuit-construction/src/writer.rs @@ -0,0 +1,1007 @@ +use ark_ff::{BigInteger, FftField, PrimeField}; +use kimchi::circuits::{ + gate::{CircuitGate, GateType}, + polynomials::generic::{ + DOUBLE_GENERIC_COEFFS, DOUBLE_GENERIC_REGISTERS, GENERIC_COEFFS, GENERIC_REGISTERS, + }, + wires::{Wire, COLUMNS}, +}; +use mina_poseidon::{ + constants::{PlonkSpongeConstantsKimchi, SpongeConstants}, + permutation::full_round, +}; +use std::array; +use std::collections::HashMap; + +use crate::constants::Constants; + +/// A variable in our circuit. +/// Variables are assigned with an index to differentiate from each other. +/// Optionally, they can eventually take as value a field element. +#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)] +pub struct Var { + pub index: usize, + pub value: Option, +} + +impl Var { + /// Returns the value inside a variable [Var]. + /// + /// # Panics + /// + /// Will panic if it is `None`. + pub fn val(&self) -> F { + self.value.unwrap() + } +} + +/// A variable that corresponds to scalar that is shifted by a certain amount. +pub struct ShiftedScalar(Var); + +/// Specifies a gate within a circuit. +/// A gate will have a type, +/// will refer to a row of variables, +/// and will have associated vector of coefficients. 
+pub struct GateSpec { + pub typ: GateType, + pub row: Vec>>, + pub coeffs: Vec, +} + +impl GateSpec { + pub fn get_var_val_or(&self, col: usize, default: F) -> F { + match self.row.get(col) { + Some(Some(var)) => var.val(), + _ => default, + } + } + + pub fn get_var_idx(&self, col: usize) -> Option { + match self.row.get(col) { + Some(Some(var)) => Some(var.index), + _ => None, + } + } +} + +/// A set of gates within the circuit. +/// It carries the index for the next available variable, +/// and the vector of [`GateSpec`] created so far. +/// It also keeps track of the queue of generic gates and cached constants. +#[derive(Default)] +pub struct System { + pub next_variable: usize, + pub generic_gate_queue: Vec>, + // pub equivalence_classes: HashMap>, + pub gates: Vec>, + pub cached_constants: HashMap>, +} + +/// Carries a vector of rows corresponding to the witness, a queue of generic gates, and stores the cached constants +#[derive(Default)] +pub struct WitnessGenerator +where + F: PrimeField, +{ + pub generic_gate_queue: Vec>, + pub rows: Vec>, + pub cached_constants: HashMap>, +} + +impl WitnessGenerator +where + F: PrimeField, +{ + /// Given a list of public inputs, creates the witness generator. + pub fn new(public_inputs: &[F]) -> Self { + let mut gen = Self::default(); + + for input in public_inputs { + let row = array::from_fn(|i| if i == 0 { *input } else { F::zero() }); + gen.rows.push(row); + } + + gen + } +} + +/// A row is an array of [COLUMNS] elements +type Row = [V; COLUMNS]; + +/// This trait includes all the operations that can be executed +/// by the elements in the circuits. +/// It allows for different behaviours depending on the struct for +/// which it is implemented for. +/// In particular, the circuit mode and the witness generation mode. +pub trait Cs { + /// In cases where you want to create a free variable in the circuit, + /// as in the variable is not constrained _yet_ + /// and can be anything that the prover wants. 
+ /// For example, division can be implemented as: + /// + /// ```ignore + /// let a = sys.constant(5u32.into()); + /// let b = sys.constant(10u32.into()); + /// let c = sys.var(|| { + /// b.value * a.value.inverse().unwrap() + /// }); + /// sys.assert_eq(a * c, b); + /// ``` + /// + fn var(&mut self, g: G) -> Var + where + G: FnOnce() -> F; + + /// Returns the number of gates that the current [Self] contains. + fn curr_gate_count(&self) -> usize; + + /// Returns a variable containing a field element as value that is + /// computed as the equivalent `BigInteger` number returned by + /// function `g`, only if the length is a multiple of 4. + fn endo_scalar(&mut self, length: usize, g: G) -> Var + where + G: FnOnce() -> N, + { + assert_eq!(length % 4, 0); + + self.var(|| { + let y = g(); + let bits = y.to_bits_le(); + F::from_bigint(F::BigInt::from_bits_le(&bits)).unwrap() + }) + } + + /// This function creates a [`ShiftedScalar`] variable from a field element that is + /// returned by function `g()`, and a length that should be a multiple of 5. + fn scalar(&mut self, length: usize, g: G) -> ShiftedScalar + where + G: FnOnce() -> Fr, + { + assert_eq!(length % 5, 0); + + let v = self.var(|| { + // TODO: No need to recompute this each time. + let two = Fr::from(2u64); + let shift = Fr::one() + two.pow([length as u64]); + + let x = g(); + // x = 2 y + shift + // y = (x - shift) / 2 + // TODO: Could cache value of 1/2 to avoid division + let y = (x - shift) / two; + let bits = y.into_bigint().to_bits_le(); + F::from_bigint(F::BigInt::from_bits_le(&bits)).unwrap() + }); + ShiftedScalar(v) + } + + /// In circuit mode, adds a gate to the circuit. + /// In witness generation mode, adds the corresponding row to the witness. + fn gate(&mut self, g: GateSpec); + + /// Creates a `Generic` gate that constrains if two variables are equal. 
+ /// This is done by setting `x1` in the left wire and `x2` in the right wire + /// with left coefficient `1` and right coefficient `-1`, so that `x1 - x2 = 0`. + // TODO: Optimize to use permutation argument. + fn assert_eq(&mut self, x1: Var, x2: Var) { + // | 0 | 1 | 2 | ... + // | x1 | x2 | 0 | ... + let vars = [Some(x1), Some(x2), None]; + + // constrain `x1 - x2 = 0` + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = F::one(); + coeffs[1] = -F::one(); + + self.generic(coeffs, vars); + } + + /// Checks if a constant `x` is already in the cached constants of `self` and returns it. + /// Otherwise, it creates a variable for it and caches it. + fn cached_constants(&mut self, x: F) -> Var; + + /// Creates a `Generic` gate to include a constant in the circuit, and returns the variable containing it. + /// It sets the left wire to be the variable containing the constant `x` and the rest to zero. + /// Then the left coefficient is set to one and the coefficient for constants is set to `-x`. + /// This way, the constraint `1 * x - x = 0` holds. + fn constant(&mut self, x: F) -> Var { + let v = self.cached_constants(x); + + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = F::one(); + coeffs[GENERIC_REGISTERS + 1] = -x; + + let vars = [Some(v), None, None]; + + self.generic(coeffs, vars); + + v + } + + /// Stores a generic gate until it can combine two of them + /// into a double generic gate. + fn generic_queue(&mut self, gate: GateSpec) -> Option>; + + /// Adds a generic gate. + /// + /// Warning: this assumes that some finalization occurs to flush + /// any queued generic gate. 
+ fn generic(&mut self, coeffs: [F; GENERIC_COEFFS], vars: [Option>; GENERIC_REGISTERS]) { + let gate = GateSpec { + typ: GateType::Generic, + row: vars.to_vec(), + coeffs: coeffs.to_vec(), + }; + // we queue the single generic gate until we have two of them + if let Some(double_generic_gate) = self.generic_queue(gate) { + self.gate(double_generic_gate); + } + } + + /// Creates a `Generic` gate to constrain that a variable `v` is scaled by an `x` amount and returns it. + /// First, it creates a new variable with a scaled value (meaning, the value in `v` times `x`). + /// Then, it creates a row that sets the left wire to be `v` and the right wire to be the scaled variable. + /// Finally, it sets the left coefficient to `x` and the right coefficient to `-1`. + /// That way, the constraint `x * v - 1 * xv = 0` is created. + fn scale(&mut self, x: F, v: Var) -> Var { + let xv = self.var(|| v.val() * x); + + let vars = [Some(v), Some(xv), None]; + + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = x; + coeffs[1] = -F::one(); + + self.generic(coeffs, vars); + + xv + } + + /// Performs curve point addition. + /// It creates the corresponding `CompleteAdd` gate for the points `(x1, y1)` and `(x2,y2)` + /// and returns the third point resulting from the addition as a tuple of variables. 
+ fn add_group( + &mut self, + zero: Var, + (x1, y1): (Var, Var), + (x2, y2): (Var, Var), + ) -> (Var, Var) { + let mut same_x_bool = false; + let same_x = self.var(|| { + let same_x = x1.val() == x2.val(); + same_x_bool = same_x; + F::from(u64::from(same_x)) + }); + + let inf = zero; + let x21_inv = self.var(|| { + if x1.val() == x2.val() { + F::zero() + } else { + (x2.val() - x1.val()).inverse().unwrap() + } + }); + + let s = self.var(|| { + if same_x_bool { + let x1_squared = x1.val().square(); + (x1_squared.double() + x1_squared).div(y1.val().double()) + } else { + (y2.val() - y1.val()) * x21_inv.val() + } + }); + + let inf_z = self.var(|| { + if y1.val() == y2.val() { + F::zero() + } else if same_x_bool { + (y2.val() - y1.val()).inverse().unwrap() + } else { + F::zero() + } + }); + + let x3 = self.var(|| s.val().square() - (x1.val() + x2.val())); + + let y3 = self.var(|| s.val() * (x1.val() - x3.val()) - y1.val()); + + self.gate(GateSpec { + typ: GateType::CompleteAdd, + row: vec![ + Some(x1), + Some(y1), + Some(x2), + Some(y2), + Some(x3), + Some(y3), + Some(inf), + Some(same_x), + Some(s), + Some(inf_z), + Some(x21_inv), + ], + coeffs: vec![], + }); + (x3, y3) + } + + /// Doubles one curve point `(x1, y1)`, using internally the `add_group()` function. + /// It creates a `CompleteAdd` gate for this point addition (with itself). + /// Returns a tuple of variables corresponding to the doubled point. + fn double(&mut self, zero: Var, (x1, y1): (Var, Var)) -> (Var, Var) { + self.add_group(zero, (x1, y1), (x1, y1)) + } + + /// Creates a `CompleteAdd` gate that checks whether a third point `(x3, y3)` is the addition + /// of the two first points `(x1, y1)` and `(x2, y2)`. + /// The difference between this function and `add_group()` is that in `assert_add_group` the + /// third point is given, whereas in the other one it is computed with the formula. 
+ fn assert_add_group(
+ &mut self,
+ zero: Var,
+ (x1, y1): (Var, Var),
+ (x2, y2): (Var, Var),
+ (x3, y3): (Var, Var),
+ ) {
+ let mut same_x_bool = false;
+ let same_x = self.var(|| {
+ let same_x = x1.val() == x2.val();
+ same_x_bool = same_x;
+ F::from(u64::from(same_x))
+ });
+
+ let inf = zero;
+ let x21_inv = self.var(|| {
+ if x1.val() == x2.val() {
+ F::zero()
+ } else {
+ (x2.val() - x1.val()).inverse().unwrap()
+ }
+ });
+
+ let s = self.var(|| {
+ if same_x_bool {
+ let x1_squared = x1.val().square();
+ (x1_squared.double() + x1_squared).div(y1.val().double())
+ } else {
+ (y2.val() - y1.val()) * x21_inv.val()
+ }
+ });
+
+ let inf_z = self.var(|| {
+ if y1.val() == y2.val() {
+ F::zero()
+ } else if same_x_bool {
+ (y2.val() - y1.val()).inverse().unwrap()
+ } else {
+ F::zero()
+ }
+ });
+
+ self.gate(GateSpec {
+ typ: GateType::CompleteAdd,
+ row: vec![
+ Some(x1),
+ Some(y1),
+ Some(x2),
+ Some(y2),
+ Some(x3),
+ Some(y3),
+ Some(inf),
+ Some(same_x),
+ Some(s),
+ Some(inf_z),
+ Some(x21_inv),
+ ],
+ coeffs: vec![],
+ });
+ }
+
+ /// This function is used to include conditionals in circuits.
+ /// It creates three `Generic` gates to simulate the logic of the conditional.
+ /// It receives as input:
+ /// - `b`: the branch
+ /// - `t`: the true
+ /// - `f`: the false
+ /// And simulates the following equation: `res = b * ( t - f ) + f`
+ /// ( when the condition is false, `res = f` )
+ /// ( when the condition is true, `res = t` )
+ /// This is constrained using three `Generic` gates
+ /// 1. Constrain `delta = t - f`
+ /// 2. Constrain `res1 = b * delta`
+ /// 3. Constrain `res = res1 + f`
+ /// For (1):
+ /// - Creates a row with left wire `t`, right wire `f`, and output wire `delta`
+ /// - Assigns `1` to the left coefficient, `-1` to the right coefficient, and `-1` to the output coefficient. 
+ /// - That way, it creates a first gate constraining: `1 * t - 1 * f - delta = 0`
+ /// For (2):
+ /// - Creates a row with left wire `b`, right wire `delta`, and output wire `res1`.
+ /// - Assigns `-1` to the output coefficient, and `1` to the multiplication coefficient.
+ /// - That way, it creates a second gate constraining: `-1 * res1 + 1 * b * delta = 0`
+ /// For (3):
+ /// - Creates a row with left wire `res1`, right wire `f`, and output wire `res`.
+ /// - Assigns `1` to the left coefficient, `1` to the right coefficient, and `-1` to the output coefficient.
+ /// - That way, it creates a third gate constraining: `1 * res1 + 1 * f - 1 * res = 0`
+ fn cond_select(&mut self, b: Var, t: Var, f: Var) -> Var {
+ // Could be more efficient. Currently uses three constraints :(
+ // delta = t - f
+ // res1 = b * delta
+ // res = res1 + f
+
+ let delta = self.var(|| t.val() - f.val());
+ let res1 = self.var(|| b.val() * delta.val());
+ let res = self.var(|| f.val() + res1.val());
+
+ let row1 = [Some(t), Some(f), Some(delta)];
+ let mut c1 = [F::zero(); GENERIC_COEFFS];
+ c1[0] = F::one();
+ c1[1] = -F::one();
+ c1[2] = -F::one();
+
+ self.generic(c1, row1);
+
+ let row2 = [Some(b), Some(delta), Some(res1)];
+
+ let mut c2 = [F::zero(); GENERIC_COEFFS];
+ c2[0] = F::zero();
+ c2[1] = F::zero();
+ c2[2] = -F::one();
+ c2[3] = F::one();
+
+ self.generic(c2, row2);
+
+ let row3 = [Some(res1), Some(f), Some(res)];
+ let mut c3 = [F::zero(); GENERIC_COEFFS];
+ c3[0] = F::one();
+ c3[1] = F::one();
+ c3[2] = -F::one();
+
+ self.generic(c3, row3);
+
+ res
+ }
+
+ /// Performs a scalar multiplication between a [`ShiftedScalar`] and a point `(xt, yt)`.
+ /// This function creates 51 pairs of rows. 
+ fn scalar_mul( + &mut self, + zero: Var, + (xt, yt): (Var, Var), + scalar: ShiftedScalar, + ) -> (Var, Var) { + let num_bits = 255; + let num_row_pairs = num_bits / 5; + let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![]); + + let acc0 = self.add_group(zero, (xt, yt), (xt, yt)); + + let _ = self.var(|| { + witness = array::from_fn(|_| vec![F::zero(); 2 * num_row_pairs]); + // Creates a vector of bits from the value inside the scalar, with the most significant bit upfront + let bits_msb: Vec = scalar + .0 + .val() + .into_bigint() + .to_bits_le() + .iter() + .take(num_bits) + .copied() + .rev() + .collect(); + // Creates a witness for the VarBaseMul gate. + kimchi::circuits::polynomials::varbasemul::witness( + &mut witness, + 0, + (xt.val(), yt.val()), + &bits_msb, + (acc0.0.val(), acc0.1.val()), + ); + F::zero() + }); + + // For each of the pairs, it generates a VarBaseMul and a Zero gate. + let mut res = None; + for i in 0..num_row_pairs { + let mut row1: [_; COLUMNS] = array::from_fn(|j| self.var(|| witness[j][2 * i])); + let row2: [_; COLUMNS] = array::from_fn(|j| self.var(|| witness[j][2 * i + 1])); + + row1[0] = xt; + row1[1] = yt; + if i == 0 { + row1[2] = acc0.0; + row1[3] = acc0.1; + row1[4] = zero; + } + if i == num_row_pairs - 1 { + row1[5] = scalar.0; + res = Some((row2[0], row2[1])); + } + + self.gate(GateSpec { + row: row1.into_iter().map(Some).collect(), + typ: GateType::VarBaseMul, + coeffs: vec![], + }); + + self.gate(GateSpec { + row: row2.into_iter().map(Some).collect(), + typ: GateType::Zero, + coeffs: vec![], + }); + } + + res.unwrap() + } + + /// Creates an endoscalar multiplication gadget with `length_in_bits/4 + 1` gates. + /// For each row, it adds one `EndoMul` gate. The gadget is finalized with a `Zero` gate. + /// + /// | row | `GateType` | + /// | --- | ---------- | + /// | i | `EndoMul` | + /// | i+1 | `EndoMul` | + /// | ... | ... 
| + /// | r | `EndoMul` | + /// | r+1 | `Zero` | + /// + fn endo( + &mut self, + zero: Var, + constants: &Constants, + (xt, yt): (Var, Var), + scalar: Var, + length_in_bits: usize, + ) -> (Var, Var) { + let bits_per_row = 4; + let rows = length_in_bits / 4; + assert_eq!(0, length_in_bits % 4); + + let mut bits_ = vec![]; + let bits: Vec<_> = (0..length_in_bits) + .map(|i| { + self.var(|| { + if bits_.is_empty() { + bits_ = scalar + .val() + .into_bigint() + .to_bits_le() + .iter() + .take(length_in_bits) + .copied() + .rev() + .collect(); + } + F::from(u64::from(bits_[i])) + }) + }) + .collect(); + + let one = F::one(); + + let endo = constants.endo; + let mut acc = { + let phip = (self.scale(endo, xt), yt); + let phip_p = self.add_group(zero, phip, (xt, yt)); + self.double(zero, phip_p) + }; + + let mut n_acc = zero; + + // TODO: Could be more efficient + for i in 0..rows { + let b1 = bits[i * bits_per_row]; + let b2 = bits[i * bits_per_row + 1]; + let b3 = bits[i * bits_per_row + 2]; + let b4 = bits[i * bits_per_row + 3]; + + let (xp, yp) = acc; + + let xq1 = self.var(|| (one + (endo - one) * b1.val()) * xt.val()); + let yq1 = self.var(|| (b2.val().double() - one) * yt.val()); + + let s1 = self.var(|| (yq1.val() - yp.val()) / (xq1.val() - xp.val())); + let s1_squared = self.var(|| s1.val().square()); + // (2*xp – s1^2 + xq) * ((xp – xr) * s1 + yr + yp) = (xp – xr) * 2*yp + // => 2 yp / (2*xp – s1^2 + xq) = s1 + (yr + yp) / (xp – xr) + // => 2 yp / (2*xp – s1^2 + xq) - s1 = (yr + yp) / (xp – xr) + // + // s2 := 2 yp / (2*xp – s1^2 + xq) - s1 + // + // (yr + yp)^2 = (xp – xr)^2 * (s1^2 – xq1 + xr) + // => (s1^2 – xq1 + xr) = (yr + yp)^2 / (xp – xr)^2 + // + // => xr = s2^2 - s1^2 + xq + // => yr = s2 * (xp - xr) - yp + let s2 = self.var(|| { + yp.val().double() / (xp.val().double() + xq1.val() - s1_squared.val()) - s1.val() + }); + + // (xr, yr) + let xr = self.var(|| xq1.val() + s2.val().square() - s1_squared.val()); + let yr = self.var(|| (xp.val() - xr.val()) * 
s2.val() - yp.val()); + + let xq2 = self.var(|| (one + (endo - one) * b3.val()) * xt.val()); + let yq2 = self.var(|| (b4.val().double() - one) * yt.val()); + let s3 = self.var(|| (yq2.val() - yr.val()) / (xq2.val() - xr.val())); + let s3_squared = self.var(|| s3.val().square()); + let s4 = self.var(|| { + yr.val().double() / (xr.val().double() + xq2.val() - s3_squared.val()) - s3.val() + }); + + let xs = self.var(|| xq2.val() + s4.val().square() - s3_squared.val()); + let ys = self.var(|| (xr.val() - xs.val()) * s4.val() - yr.val()); + + self.gate(GateSpec { + typ: GateType::EndoMul, + row: vec![ + Some(xt), + Some(yt), + None, + None, + Some(xp), + Some(yp), + Some(n_acc), + Some(xr), + Some(yr), + Some(s1), + Some(s3), + Some(b1), + Some(b2), + Some(b3), + Some(b4), + ], + coeffs: vec![], + }); + + acc = (xs, ys); + + n_acc = self.var(|| { + let mut n_acc = n_acc.val(); + n_acc.double_in_place(); + n_acc += b1.val(); + n_acc.double_in_place(); + n_acc += b2.val(); + n_acc.double_in_place(); + n_acc += b3.val(); + n_acc.double_in_place(); + n_acc += b4.val(); + n_acc + }); + } + + // TODO: use a generic gate with zero coeffs + self.gate(GateSpec { + typ: GateType::Zero, + row: vec![ + None, + None, + None, + None, + Some(acc.0), + Some(acc.1), + Some(scalar), + None, + None, + None, + None, + None, + None, + None, + None, + ], + coeffs: vec![], + }); + acc + } + + /// Checks that a string of bits (with LSB first) correspond to the value inside variable `x`. + /// It splits the bitstring across rows, where each row takes care of 8 crumbs of 2 bits each. 
+ /// + fn assert_pack(&mut self, zero: Var, x: Var, bits_lsb: &[Var]) { + let crumbs_per_row = 8; + let bits_per_row = 2 * crumbs_per_row; + assert_eq!(bits_lsb.len() % bits_per_row, 0); + let num_rows = bits_lsb.len() / bits_per_row; + + // Reverse string of bits to have MSB first in the vector + let bits_msb: Vec<_> = bits_lsb.iter().rev().collect(); + + let mut a = self.var(|| F::from(2u64)); + let mut b = self.var(|| F::from(2u64)); + let mut n = zero; + + let one = F::one(); + let neg_one = -one; + + // For each of the chunks, get the corresponding bits + for (i, row_bits) in bits_msb[..].chunks(bits_per_row).enumerate() { + let mut row: [Var; COLUMNS] = array::from_fn(|_| self.var(|| F::zero())); + row[0] = n; + row[2] = a; + row[3] = b; + + // For this row, get crumbs of 2 bits each + for (j, crumb_bits) in row_bits.chunks(2).enumerate() { + // Remember the MSB of each crumb is in the 0 index + let b0 = crumb_bits[1]; // less valued + let b1 = crumb_bits[0]; // more valued + + // Value of the 2-bit crumb in MSB + let crumb = self.var(|| b0.val() + b1.val().double()); + // Stores the 8 of them in positions [6..13] of the row + row[6 + j] = crumb; + + a = self.var(|| { + let x = a.val().double(); + if b1.val().is_zero() { + x + } else { + x + if b0.val().is_one() { one } else { neg_one } + } + }); + + b = self.var(|| { + let x = b.val().double(); + if b1.val().is_zero() { + x + if b0.val().is_one() { one } else { neg_one } + } else { + x + } + }); + + // Accumulated chunk value + n = self.var(|| n.val().double().double() + crumb.val()); + } + + // In final row, this is the input value, otherwise the accumulated value + row[1] = if i == num_rows - 1 { x } else { n }; + row[4] = a; + row[5] = b; + + row[14] = self.var(|| F::zero()); + } + } + + /// Creates a Poseidon gadget for given constants and a given input. + /// It generates a number of `Poseidon` gates followed by a final `Zero` gate. 
+ fn poseidon(&mut self, constants: &Constants, input: Vec>) -> Vec> { + use kimchi::circuits::polynomials::poseidon::{POS_ROWS_PER_HASH, ROUNDS_PER_ROW}; + + let params = constants.poseidon; + let rc = ¶ms.round_constants; + let width = PlonkSpongeConstantsKimchi::SPONGE_WIDTH; + + let mut states = vec![input]; + + for row in 0..POS_ROWS_PER_HASH { + let offset = row * ROUNDS_PER_ROW; + + for i in 0..ROUNDS_PER_ROW { + let mut s: Option> = None; + states.push( + (0..3) + .map(|col| { + self.var(|| { + match &s { + Some(s) => s[col], + None => { + // Do one full round on the previous value + let mut acc = states[states.len() - 1] + .iter() + .map(|x| x.val()) + .collect(); + full_round::( + params, + &mut acc, + offset + i, + ); + let res = acc[col]; + s = Some(acc); + res + } + } + }) + }) + .collect(), + ); + } + + self.gate(GateSpec { + typ: kimchi::circuits::gate::GateType::Poseidon, + coeffs: (0..COLUMNS) + .map(|i| rc[offset + (i / width)][i % width]) + .collect(), + row: vec![ + Some(states[offset][0]), + Some(states[offset][1]), + Some(states[offset][2]), + Some(states[offset + 4][0]), + Some(states[offset + 4][1]), + Some(states[offset + 4][2]), + Some(states[offset + 1][0]), + Some(states[offset + 1][1]), + Some(states[offset + 1][2]), + Some(states[offset + 2][0]), + Some(states[offset + 2][1]), + Some(states[offset + 2][2]), + Some(states[offset + 3][0]), + Some(states[offset + 3][1]), + Some(states[offset + 3][2]), + ], + }); + } + + let final_state = &states[states.len() - 1]; + let final_row = vec![ + Some(final_state[0]), + Some(final_state[1]), + Some(final_state[2]), + ]; + self.gate(GateSpec { + typ: kimchi::circuits::gate::GateType::Zero, + coeffs: vec![], + row: final_row, + }); + + states.pop().unwrap() + } +} + +impl Cs for WitnessGenerator { + /// Creates a variable with value given by a function `g` with index `0` + fn var(&mut self, g: G) -> Var + where + G: FnOnce() -> F, + { + Var { + index: 0, + value: Some(g()), + } + } + + /// Returns 
the number of rows. + fn curr_gate_count(&self) -> usize { + self.rows.len() + } + + /// Pushes a new row corresponding to the values in the row of gate `g`. + fn gate(&mut self, g: GateSpec) { + assert!(g.row.len() <= COLUMNS); + + let row: [F; COLUMNS] = array::from_fn(|col| g.get_var_val_or(col, F::zero())); + self.rows.push(row); + } + + fn generic_queue(&mut self, gate: GateSpec) -> Option> { + if let Some(mut other) = self.generic_gate_queue.pop() { + other.row.extend(&gate.row); + assert_eq!(other.row.len(), DOUBLE_GENERIC_REGISTERS); + Some(other) + } else { + self.generic_gate_queue.push(gate); + None + } + } + + fn cached_constants(&mut self, x: F) -> Var { + match self.cached_constants.get(&x) { + Some(var) => *var, + None => { + let var = self.var(|| x); + self.cached_constants.insert(x, var); + var + } + } + } +} + +impl WitnessGenerator { + /// Returns the columns of the witness. + pub fn columns(&mut self) -> [Vec; COLUMNS] { + // flush any queued generic gate + if let Some(gate) = self.generic_gate_queue.pop() { + self.gate(gate); + } + + // transpose + array::from_fn(|col| self.rows.iter().map(|row| row[col]).collect()) + } +} + +impl Cs for System { + fn var(&mut self, _: V) -> Var { + let v = self.next_variable; + self.next_variable += 1; + Var { + index: v, + value: None, + } + } + + /// Outputs the number of gates in the circuit + fn curr_gate_count(&self) -> usize { + self.gates.len() + } + + fn gate(&mut self, g: GateSpec) { + self.gates.push(g); + } + + fn generic_queue(&mut self, gate: GateSpec) -> Option> { + if let Some(mut other) = self.generic_gate_queue.pop() { + other.row.extend(&gate.row); + assert_eq!(other.row.len(), DOUBLE_GENERIC_REGISTERS); + other.coeffs.extend(&gate.coeffs); + assert_eq!(other.coeffs.len(), DOUBLE_GENERIC_COEFFS); + Some(other) + } else { + self.generic_gate_queue.push(gate); + None + } + } + + fn cached_constants(&mut self, x: F) -> Var { + match self.cached_constants.get(&x) { + Some(var) => *var, + None => 
{ + let var = self.var(|| x); + self.cached_constants.insert(x, var); + var + } + } + } +} + +impl System { + /// Compiles our intermediate representation into a circuit. + /// + /// # Panics + /// + /// Will not panic ever since it is permutation inside gates + pub fn gates(&mut self) -> Vec> { + let mut first_cell: HashMap = HashMap::new(); + let mut most_recent_cell: HashMap = HashMap::new(); + let mut gates = vec![]; + + // flush any queued generic gate + if let Some(gate) = self.generic_gate_queue.pop() { + self.gate(gate); + } + + // convert GateSpec into CircuitGate + for (row, gate) in self.gates.iter().enumerate() { + // while tracking the wiring + let wires = array::from_fn(|col| { + let curr = Wire { row, col }; + + if let Some(index) = gate.get_var_idx(col) { + // wire this cell to the previous one + match most_recent_cell.insert(index, curr) { + Some(w) => w, + // unless it is the first cell, + // in which case we just save it for the very end + // (to complete the cycle) + None => { + first_cell.insert(index, curr); + curr + } + } + } else { + // if no var to be found, it's a cell wired to itself + curr + } + }); + + let g = CircuitGate::new(gate.typ, wires, gate.coeffs.clone()); + gates.push(g); + } + + // finish the permutation cycle + for (var, first) in &first_cell { + let last = *most_recent_cell.get(var).unwrap(); + gates[first.row].wires[first.col] = last; + } + + gates + } +} diff --git a/curves/Cargo.toml b/curves/Cargo.toml index 9e5bbceadd..8e31715760 100644 --- a/curves/Cargo.toml +++ b/curves/Cargo.toml @@ -17,4 +17,5 @@ num-bigint.workspace = true [dev-dependencies] rand.workspace = true ark-algebra-test-templates.workspace = true -ark-std.workspace = true \ No newline at end of file +ark-std.workspace = true +ark-serialize.workspace = true diff --git a/curves/src/pasta/curves/pallas.rs b/curves/src/pasta/curves/pallas.rs index 790251b55d..39813e13bb 100644 --- a/curves/src/pasta/curves/pallas.rs +++ b/curves/src/pasta/curves/pallas.rs 
@@ -1,70 +1,75 @@ use crate::pasta::*; use ark_ec::{ - models::short_weierstrass_jacobian::{GroupAffine, GroupProjective}, - ModelParameters, SWModelParameters, + models::short_weierstrass::{Affine, Projective, SWCurveConfig}, + CurveConfig, }; -use ark_ff::{field_new, Zero}; +use ark_ff::{MontFp, Zero}; + +/// G_GENERATOR_X = +/// 1 +pub const G_GENERATOR_X: Fp = MontFp!("1"); + +/// G_GENERATOR_Y = +/// 12418654782883325593414442427049395787963493412651469444558597405572177144507 +pub const G_GENERATOR_Y: Fp = + MontFp!("12418654782883325593414442427049395787963493412651469444558597405572177144507"); #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct PallasParameters; -impl ModelParameters for PallasParameters { +impl CurveConfig for PallasParameters { type BaseField = Fp; - type ScalarField = Fq; -} -pub type Pallas = GroupAffine; -pub type ProjectivePallas = GroupProjective; - -impl SWModelParameters for PallasParameters { - /// COEFF_A = 0 - const COEFF_A: Fp = field_new!(Fp, "0"); - - /// COEFF_B = 5 - const COEFF_B: Fp = field_new!(Fp, "5"); + type ScalarField = Fq; /// COFACTOR = 1 const COFACTOR: &'static [u64] = &[0x1]; /// COFACTOR_INV = 1 - const COFACTOR_INV: Fq = field_new!(Fq, "1"); + const COFACTOR_INV: Fq = MontFp!("1"); +} + +pub type Pallas = Affine; + +pub type ProjectivePallas = Projective; + +impl SWCurveConfig for PallasParameters { + const COEFF_A: Self::BaseField = MontFp!("0"); - /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - (G_GENERATOR_X, G_GENERATOR_Y); + const COEFF_B: Self::BaseField = MontFp!("5"); + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); +} + +impl PallasParameters { #[inline(always)] - fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { - Self::BaseField::zero() + pub fn mul_by_a( + _: &::BaseField, + ) -> ::BaseField { + ::BaseField::zero() + } } -/// G_GENERATOR_X = -/// 1 -pub const
G_GENERATOR_X: Fp = field_new!(Fp, "1"); - -/// G1_GENERATOR_Y = -/// 12418654782883325593414442427049395787963493412651469444558597405572177144507 -pub const G_GENERATOR_Y: Fp = field_new!( - Fp, - "12418654782883325593414442427049395787963493412651469444558597405572177144507" -); - /// legacy curve, a copy of the normal curve to support legacy sponge params #[derive(Copy, Clone, Default, PartialEq, Eq)] pub struct LegacyPallasParameters; -impl ModelParameters for LegacyPallasParameters { - type BaseField = ::BaseField; - type ScalarField = ::ScalarField; +impl CurveConfig for LegacyPallasParameters { + type BaseField = ::BaseField; + + type ScalarField = ::ScalarField; + + const COFACTOR: &'static [u64] = ::COFACTOR; + + const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; } -impl SWModelParameters for LegacyPallasParameters { - const COEFF_A: Self::BaseField = ::COEFF_A; - const COEFF_B: Self::BaseField = ::COEFF_B; - const COFACTOR: &'static [u64] = ::COFACTOR; - const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - ::AFFINE_GENERATOR_COEFFS; + +impl SWCurveConfig for LegacyPallasParameters { + const COEFF_A: Self::BaseField = ::COEFF_A; + + const COEFF_B: Self::BaseField = ::COEFF_B; + + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); } -pub type LegacyPallas = GroupAffine; +pub type LegacyPallas = Affine; diff --git a/curves/src/pasta/curves/tests.rs b/curves/src/pasta/curves/tests.rs new file mode 100644 index 0000000000..9f9d3cc002 --- /dev/null +++ b/curves/src/pasta/curves/tests.rs @@ -0,0 +1,6 @@ +use crate::pasta::ProjectivePallas; +use crate::pasta::ProjectiveVesta; +use ark_algebra_test_templates::*; + +test_group!(g1; ProjectivePallas; sw); +test_group!(g2; ProjectiveVesta; sw); diff --git a/curves/src/pasta/curves/vesta.rs b/curves/src/pasta/curves/vesta.rs index 2a8b5002e5..7a587e9f1d 100644 --- a/curves/src/pasta/curves/vesta.rs +++ 
b/curves/src/pasta/curves/vesta.rs @@ -1,70 +1,71 @@ use crate::pasta::*; use ark_ec::{ - models::short_weierstrass_jacobian::{GroupAffine, GroupProjective}, - ModelParameters, SWModelParameters, + models::short_weierstrass::{Affine, Projective, SWCurveConfig}, + CurveConfig, }; -use ark_ff::{field_new, Zero}; +use ark_ff::{MontFp, Zero}; + +/// G_GENERATOR_X = +/// 1 +pub const G_GENERATOR_X: Fq = MontFp!("1"); + +/// G_GENERATOR_Y = +/// 11426906929455361843568202299992114520848200991084027513389447476559454104162 +pub const G_GENERATOR_Y: Fq = + MontFp!("11426906929455361843568202299992114520848200991084027513389447476559454104162"); #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct VestaParameters; -impl ModelParameters for VestaParameters { +impl CurveConfig for VestaParameters { type BaseField = Fq; type ScalarField = Fp; + + /// COFACTOR = 1 + const COFACTOR: &'static [u64] = &[0x1]; + + /// COFACTOR_INV = 1 + const COFACTOR_INV: Fp = MontFp!("1"); } -pub type Vesta = GroupAffine; -pub type ProjectiveVesta = GroupProjective; +pub type Vesta = Affine; +pub type ProjectiveVesta = Projective; -impl SWModelParameters for VestaParameters { +impl SWCurveConfig for VestaParameters { /// COEFF_A = 0 - const COEFF_A: Fq = field_new!(Fq, "0"); + const COEFF_A: Fq = MontFp!("0"); /// COEFF_B = 5 - const COEFF_B: Fq = field_new!(Fq, "5"); - - /// COFACTOR = 1 - const COFACTOR: &'static [u64] = &[0x1]; - - /// COFACTOR_INV = 1 - const COFACTOR_INV: Fp = field_new!(Fp, "1"); + const COEFF_B: Fq = MontFp!("5"); /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - (G_GENERATOR_X, G_GENERATOR_Y); + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); +} +impl VestaParameters { #[inline(always)] - fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { - Self::BaseField::zero() + pub fn mul_by_a( + _: &::BaseField, + ) -> ::BaseField { + ::BaseField::zero() + } }
-/// G_GENERATOR_X = -/// 1 -pub const G_GENERATOR_X: Fq = field_new!(Fq, "1"); - -/// G1_GENERATOR_Y = -/// 11426906929455361843568202299992114520848200991084027513389447476559454104162 -pub const G_GENERATOR_Y: Fq = field_new!( - Fq, - "11426906929455361843568202299992114520848200991084027513389447476559454104162" -); - /// legacy curve, a copy of the normal curve to support legacy sponge params #[derive(Copy, Clone, Default, PartialEq, Eq)] pub struct LegacyVestaParameters; -impl ModelParameters for LegacyVestaParameters { - type BaseField = ::BaseField; - type ScalarField = ::ScalarField; +impl CurveConfig for LegacyVestaParameters { + type BaseField = ::BaseField; + type ScalarField = ::ScalarField; + const COFACTOR: &'static [u64] = ::COFACTOR; + const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; } -impl SWModelParameters for LegacyVestaParameters { - const COEFF_A: Self::BaseField = ::COEFF_A; - const COEFF_B: Self::BaseField = ::COEFF_B; - const COFACTOR: &'static [u64] = ::COFACTOR; - const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - ::AFFINE_GENERATOR_COEFFS; + +impl SWCurveConfig for LegacyVestaParameters { + const COEFF_A: Self::BaseField = ::COEFF_A; + const COEFF_B: Self::BaseField = ::COEFF_B; + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); } -pub type LegacyVesta = GroupAffine; +pub type LegacyVesta = Affine; diff --git a/curves/src/pasta/fields/fft.rs b/curves/src/pasta/fields/fft.rs new file mode 100644 index 0000000000..023615fb0a --- /dev/null +++ b/curves/src/pasta/fields/fft.rs @@ -0,0 +1,69 @@ +use ark_ff::biginteger::BigInteger; + +/// A trait that defines parameters for a field that can be used for FFTs. +pub trait FftParameters: 'static + Send + Sync + Sized { + type BigInt: BigInteger; + + /// Let `N` be the size of the multiplicative group defined by the field. + /// Then `TWO_ADICITY` is the two-adicity of `N`, i.e. 
the integer `s` + /// such that `N = 2^s * t` for some odd integer `t`. + const TWO_ADICITY: u32; + + /// 2^s root of unity computed by GENERATOR^t + const TWO_ADIC_ROOT_OF_UNITY: Self::BigInt; + + /// An integer `b` such that there exists a multiplicative subgroup + /// of size `b^k` for some integer `k`. + const SMALL_SUBGROUP_BASE: Option = None; + + /// The integer `k` such that there exists a multiplicative subgroup + /// of size `Self::SMALL_SUBGROUP_BASE^k`. + const SMALL_SUBGROUP_BASE_ADICITY: Option = None; + + /// GENERATOR^((MODULUS-1) / (2^s * + /// SMALL_SUBGROUP_BASE^SMALL_SUBGROUP_BASE_ADICITY)) Used for mixed-radix FFT. + const LARGE_SUBGROUP_ROOT_OF_UNITY: Option = None; +} + +/// A trait that defines parameters for a prime field. +pub trait FpParameters: FftParameters { + /// The modulus of the field. + const MODULUS: Self::BigInt; + + /// The number of bits needed to represent the `Self::MODULUS`. + const MODULUS_BITS: u32; + + /// The number of bits that must be shaved from the beginning of + /// the representation when randomly sampling. + const REPR_SHAVE_BITS: u32; + + /// Let `M` be the power of 2^64 nearest to `Self::MODULUS_BITS`. Then + /// `R = M % Self::MODULUS`. + const R: Self::BigInt; + + /// R2 = R^2 % Self::MODULUS + const R2: Self::BigInt; + + /// INV = -MODULUS^{-1} mod 2^64 + const INV: u64; + + /// A multiplicative generator of the field. + /// `Self::GENERATOR` is an element having multiplicative order + /// `Self::MODULUS - 1`. + const GENERATOR: Self::BigInt; + + /// The number of bits that can be reliably stored. + /// (Should equal `SELF::MODULUS_BITS - 1`) + const CAPACITY: u32; + + /// t for 2^s * t = MODULUS - 1, and t coprime to 2. 
+ const T: Self::BigInt; + + /// (t - 1) / 2 + const T_MINUS_ONE_DIV_TWO: Self::BigInt; + + /// (Self::MODULUS - 1) / 2 + const MODULUS_MINUS_ONE_DIV_TWO: Self::BigInt; +} + +pub trait Fp256Parameters {} diff --git a/curves/src/pasta/fields/fp.rs b/curves/src/pasta/fields/fp.rs index 8560087ade..5672d1abf8 100644 --- a/curves/src/pasta/fields/fp.rs +++ b/curves/src/pasta/fields/fp.rs @@ -1,6 +1,15 @@ -use ark_ff::{biginteger::BigInteger256 as BigInteger, FftParameters, Fp256, Fp256Parameters}; - -pub type Fp = Fp256; +use super::fft::{FftParameters, Fp256Parameters}; +use ark_ff::{ + biginteger::BigInteger256 as BigInteger, + fields::{MontBackend, MontConfig}, + Fp256, +}; + +#[derive(MontConfig)] +#[modulus = "28948022309329048855892746252171976963363056481941560715954676764349967630337"] +#[generator = "5"] +pub struct FqConfig; +pub type Fp = Fp256>; pub struct FpParameters; @@ -12,35 +21,35 @@ impl FftParameters for FpParameters { const TWO_ADICITY: u32 = 32; #[rustfmt::skip] - const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger::new([ 0xa28db849bad6dbf0, 0x9083cd03d3b539df, 0xfba6b9ca9dc8448e, 0x3ec928747b89c6da ]); } -impl ark_ff::FpParameters for FpParameters { +impl super::fft::FpParameters for FpParameters { // 28948022309329048855892746252171976963363056481941560715954676764349967630337 - const MODULUS: BigInteger = BigInteger([ + const MODULUS: BigInteger = BigInteger::new([ 0x992d30ed00000001, 0x224698fc094cf91b, 0x0, 0x4000000000000000, ]); - const R: BigInteger = BigInteger([ + const R: BigInteger = BigInteger::new([ 0x34786d38fffffffd, 0x992c350be41914ad, 0xffffffffffffffff, 0x3fffffffffffffff, ]); - const R2: BigInteger = BigInteger([ + const R2: BigInteger = BigInteger::new([ 0x8c78ecb30000000f, 0xd7d30dbd8b0de0e7, 0x7797a99bc3c95d18, 0x96d41af7b9cb714, ]); - const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger::new([ 
0xcc96987680000000, 0x11234c7e04a67c8d, 0x0, @@ -48,13 +57,13 @@ impl ark_ff::FpParameters for FpParameters { ]); // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T - const T: BigInteger = BigInteger([0x94cf91b992d30ed, 0x224698fc, 0x0, 0x40000000]); + const T: BigInteger = BigInteger::new([0x94cf91b992d30ed, 0x224698fc, 0x0, 0x40000000]); const T_MINUS_ONE_DIV_TWO: BigInteger = - BigInteger([0x4a67c8dcc969876, 0x11234c7e, 0x0, 0x20000000]); + BigInteger::new([0x4a67c8dcc969876, 0x11234c7e, 0x0, 0x20000000]); // GENERATOR = 5 - const GENERATOR: BigInteger = BigInteger([ + const GENERATOR: BigInteger = BigInteger::new([ 0xa1a55e68ffffffed, 0x74c2a54b4f4982f3, 0xfffffffffffffffd, diff --git a/curves/src/pasta/fields/fq.rs b/curves/src/pasta/fields/fq.rs index 59a0ced05b..b623705750 100644 --- a/curves/src/pasta/fields/fq.rs +++ b/curves/src/pasta/fields/fq.rs @@ -1,46 +1,53 @@ -use ark_ff::{ - biginteger::BigInteger256 as BigInteger, FftParameters, Fp256, Fp256Parameters, FpParameters, -}; +use super::fft::{FftParameters, Fp256Parameters, FpParameters}; +use ark_ff::{biginteger::BigInteger256 as BigInteger, Fp256}; pub struct FqParameters; -pub type Fq = Fp256; +use ark_ff::fields::{MontBackend, MontConfig}; + +#[derive(MontConfig)] +#[modulus = "28948022309329048855892746252171976963363056481941647379679742748393362948097"] +#[generator = "5"] +pub struct FrConfig; +pub type Fq = Fp256>; impl Fp256Parameters for FqParameters {} + impl FftParameters for FqParameters { type BigInt = BigInteger; const TWO_ADICITY: u32 = 32; #[rustfmt::skip] - const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger::new([ 0x218077428c9942de, 0xcc49578921b60494, 0xac2e5d27b2efbee2, 0xb79fa897f2db056 ]); } + impl FpParameters for FqParameters { // 28948022309329048855892746252171976963363056481941647379679742748393362948097 - const MODULUS: BigInteger = BigInteger([ + const MODULUS: BigInteger = BigInteger::new([ 
0x8c46eb2100000001, 0x224698fc0994a8dd, 0x0, 0x4000000000000000, ]); - const R: BigInteger = BigInteger([ + const R: BigInteger = BigInteger::new([ 0x5b2b3e9cfffffffd, 0x992c350be3420567, 0xffffffffffffffff, 0x3fffffffffffffff, ]); - const R2: BigInteger = BigInteger([ + const R2: BigInteger = BigInteger::new([ 0xfc9678ff0000000f, 0x67bb433d891a16e3, 0x7fae231004ccf590, 0x96d41af7ccfdaa9, ]); - const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger::new([ 0xc623759080000000, 0x11234c7e04ca546e, 0x0, @@ -49,13 +56,13 @@ impl FpParameters for FqParameters { // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T - const T: BigInteger = BigInteger([0x994a8dd8c46eb21, 0x224698fc, 0x0, 0x40000000]); + const T: BigInteger = BigInteger::new([0x994a8dd8c46eb21, 0x224698fc, 0x0, 0x40000000]); const T_MINUS_ONE_DIV_TWO: BigInteger = - BigInteger([0x4ca546ec6237590, 0x11234c7e, 0x0, 0x20000000]); + BigInteger::new([0x4ca546ec6237590, 0x11234c7e, 0x0, 0x20000000]); // GENERATOR = 5 - const GENERATOR: BigInteger = BigInteger([ + const GENERATOR: BigInteger = BigInteger::new([ 0x96bc8c8cffffffed, 0x74c2a54b49f7778e, 0xfffffffffffffffd, diff --git a/curves/src/pasta/fields/mod.rs b/curves/src/pasta/fields/mod.rs index 5679125e89..84be91fdba 100644 --- a/curves/src/pasta/fields/mod.rs +++ b/curves/src/pasta/fields/mod.rs @@ -1,5 +1,45 @@ +use ark_ff::Field; pub mod fp; pub use self::fp::*; pub mod fq; pub use self::fq::*; + +pub mod fft; + +#[derive(Debug, PartialEq)] +pub enum LegendreSymbol { + Zero = 0, + QuadraticResidue = 1, + QuadraticNonResidue = -1, +} + +impl LegendreSymbol { + pub fn is_zero(&self) -> bool { + *self == LegendreSymbol::Zero + } + + pub fn is_qnr(&self) -> bool { + *self == LegendreSymbol::QuadraticNonResidue + } + + pub fn is_qr(&self) -> bool { + *self == LegendreSymbol::QuadraticResidue + } +} + +/// The interface for a field that supports an efficient square-root operation. 
+pub trait SquareRootField: Field { + /// Returns a `LegendreSymbol`, which indicates whether this field element is + /// 1 : a quadratic residue + /// 0 : equal to 0 + /// -1 : a quadratic non-residue + fn legendre(&self) -> LegendreSymbol; + + /// Returns the square root of self, if it exists. + #[must_use] + fn sqrt(&self) -> Option; + + /// Sets `self` to be the square root of `self`, if it exists. + fn sqrt_in_place(&mut self) -> Option<&mut Self>; +} diff --git a/curves/tests/pasta_curves.rs b/curves/tests/pasta_curves.rs index 29d58c9b2f..8cf829fa74 100644 --- a/curves/tests/pasta_curves.rs +++ b/curves/tests/pasta_curves.rs @@ -1,33 +1,11 @@ use std::str::FromStr; -use ark_algebra_test_templates::{curves::*, groups::*}; -use ark_ec::AffineCurve; -use ark_std::test_rng; -use mina_curves::pasta::{curves::pallas, Fp, Pallas}; +use ark_algebra_test_templates::*; +use mina_curves::pasta::{Fp, Pallas, ProjectivePallas, ProjectiveVesta}; use num_bigint::BigUint; -use rand::Rng; -#[test] -fn test_pallas_projective_curve() { - curve_tests::(); - - sw_tests::(); -} - -#[test] -fn test_pallas_projective_group() { - let mut rng = test_rng(); - let a: pallas::ProjectivePallas = rng.gen(); - let b: pallas::ProjectivePallas = rng.gen(); - group_test(a, b); -} - -#[test] -fn test_pallas_generator() { - let generator = pallas::Pallas::prime_subgroup_generator(); - assert!(generator.is_on_curve()); - assert!(generator.is_in_correct_subgroup_assuming_on_curve()); -} +test_group!(g1; ProjectivePallas; sw); +test_group!(g2; ProjectiveVesta; sw); #[test] fn test_regression_vesta_biguint_into_returns_canonical_representation() { @@ -40,7 +18,7 @@ fn test_regression_vesta_biguint_into_returns_canonical_representation() { "12418654782883325593414442427049395787963493412651469444558597405572177144507", ) .unwrap(); - let p1 = Pallas::new(p_x, p_y, false); + let p1 = Pallas::new_unchecked(p_x, p_y); let p_x_biguint: BigUint = p1.x.into(); let p_y_biguint: BigUint = p1.y.into(); @@ 
-64,7 +42,7 @@ fn test_regression_vesta_addition_affine() { "12418654782883325593414442427049395787963493412651469444558597405572177144507", ) .unwrap(); - let p1 = Pallas::new(p1_x, p1_y, false); + let p1 = Pallas::new_unchecked(p1_x, p1_y); let p2_x = Fp::from_str( "20444556541222657078399132219657928148671392403212669005631716460534733845831", @@ -74,12 +52,12 @@ fn test_regression_vesta_addition_affine() { "12418654782883325593414442427049395787963493412651469444558597405572177144507", ) .unwrap(); - let p2 = Pallas::new(p2_x, p2_y, false); + let p2 = Pallas::new_unchecked(p2_x, p2_y); // The type annotation ensures we have a point with affine coordinates, // relying on implicit conversion if the addition outputs a point in a // different coordinates set. - let p3: Pallas = p1 + p2; + let p3: Pallas = (p1 + p2).into(); let expected_p3_x = BigUint::from_str( "8503465768106391777493614032514048814691664078728891710322960303815233784505", diff --git a/curves/tests/pasta_fields.rs b/curves/tests/pasta_fields.rs index f21288f45b..85a1f0aa6e 100644 --- a/curves/tests/pasta_fields.rs +++ b/curves/tests/pasta_fields.rs @@ -1,24 +1,5 @@ -use ark_algebra_test_templates::fields::{field_test, primefield_test, sqrt_field_test}; -use ark_std::test_rng; -use mina_curves::pasta::{Fp, Fq}; -use rand::Rng; +use ark_algebra_test_templates::*; +use mina_curves::pasta::fields::{Fp as Fr, Fq}; -#[test] -fn test_fp() { - let mut rng = test_rng(); - let a: Fp = rng.gen(); - let b: Fp = rng.gen(); - field_test(a, b); - sqrt_field_test(a); - primefield_test::(); -} - -#[test] -fn test_fq() { - let mut rng = test_rng(); - let a: Fq = rng.gen(); - let b: Fq = rng.gen(); - field_test(a, b); - sqrt_field_test(a); - primefield_test::(); -} +test_field!(fq; Fq; mont_prime_field); +test_field!(fr; Fr; mont_prime_field); diff --git a/groupmap/src/lib.rs b/groupmap/src/lib.rs index cc310d9ab8..a35140f423 100644 --- a/groupmap/src/lib.rs +++ b/groupmap/src/lib.rs @@ -19,8 +19,8 @@ //! 
WB19: Riad S. Wahby and Dan Boneh, Fast and simple constant-time hashing to the BLS12-381 elliptic curve. //! -use ark_ec::models::SWModelParameters; -use ark_ff::{Field, One, SquareRootField, Zero}; +use ark_ec::short_weierstrass::SWCurveConfig; +use ark_ff::{Field, One, Zero}; pub trait GroupMap { fn setup() -> Self; @@ -29,7 +29,7 @@ pub trait GroupMap { } #[derive(Clone, Copy)] -pub struct BWParameters { +pub struct BWParameters { u: G::BaseField, fu: G::BaseField, sqrt_neg_three_u_squared_minus_u_over_2: G::BaseField, @@ -38,12 +38,13 @@ pub struct BWParameters { } /// returns the right-hand side of the Short Weierstrass curve equation for a given x -fn curve_eqn(x: G::BaseField) -> G::BaseField { +fn curve_eqn(x: G::BaseField) -> G::BaseField { let mut res = x; res *= &x; // x^2 res += &G::COEFF_A; // x^2 + A res *= &x; // x^3 + A x res += &G::COEFF_B; // x^3 + A x + B + res } @@ -61,7 +62,7 @@ fn find_first Option>(start: K, f: F) -> A { } /// ? -fn potential_xs_helper( +fn potential_xs_helper( params: &BWParameters, t2: G::BaseField, alpha: G::BaseField, @@ -89,10 +90,7 @@ fn potential_xs_helper( } /// ? -fn potential_xs( - params: &BWParameters, - t: G::BaseField, -) -> [G::BaseField; 3] { +fn potential_xs(params: &BWParameters, t: G::BaseField) -> [G::BaseField; 3] { let t2 = t.square(); let mut alpha_inv = t2; alpha_inv += ¶ms.fu; @@ -108,12 +106,12 @@ fn potential_xs( /// returns the y-coordinate if x is a valid point on the curve, otherwise None /// TODO: what about sign? 
-pub fn get_y(x: G::BaseField) -> Option { +pub fn get_y(x: G::BaseField) -> Option { let fx = curve_eqn::(x); fx.sqrt() } -fn get_xy( +fn get_xy( params: &BWParameters, t: G::BaseField, ) -> (G::BaseField, G::BaseField) { @@ -126,7 +124,7 @@ fn get_xy( panic!("get_xy") } -impl GroupMap for BWParameters { +impl GroupMap for BWParameters { fn setup() -> Self { assert!(G::COEFF_A.is_zero()); diff --git a/groupmap/tests/groupmap.rs b/groupmap/tests/groupmap.rs index 0044616783..17d05dfc16 100644 --- a/groupmap/tests/groupmap.rs +++ b/groupmap/tests/groupmap.rs @@ -8,7 +8,7 @@ fn test_group_map_on_curve() { let params = BWParameters::::setup(); let t: Fq = rand::random(); let (x, y) = BWParameters::::to_group(¶ms, t); - let g = Vesta::new(x, y, false); + let g = Vesta::new(x, y); assert!(g.is_on_curve()); } @@ -27,7 +27,7 @@ fn test_batch_group_map_on_curve() { let ts: Vec = (0..1000).map(|_| rand::random()).collect(); for xs in BWParameters::::batch_to_group_x(¶ms, ts).iter() { let (x, y) = first_xy(xs); - let g = Vesta::new(x, y, false); + let g = Vesta::new(x, y); assert!(g.is_on_curve()); } } diff --git a/hasher/src/roinput.rs b/hasher/src/roinput.rs index 1db4ee3193..e20b4bbba7 100644 --- a/hasher/src/roinput.rs +++ b/hasher/src/roinput.rs @@ -91,7 +91,7 @@ impl ROInput { pub fn append_scalar(mut self, s: Fq) -> Self { // mina scalars are 255 bytes let bytes = s.to_bytes(); - let bits = &bytes.as_bits::()[..Fq::size_in_bits()]; + let bits = &bytes.as_bits::()[..Fq::MODULUS_BIT_SIZE as usize]; self.bits.extend(bits); self } @@ -121,7 +121,9 @@ impl ROInput { /// Serialize random oracle input to bytes pub fn to_bytes(&self) -> Vec { let mut bits: BitVec = self.fields.iter().fold(BitVec::new(), |mut acc, fe| { - acc.extend_from_bitslice(&fe.to_bytes().as_bits::()[..Fp::size_in_bits()]); + acc.extend_from_bitslice( + &fe.to_bytes().as_bits::()[..Fp::MODULUS_BIT_SIZE as usize], + ); acc }); @@ -137,7 +139,7 @@ impl ROInput { let bits_as_fields = self.bits - 
.chunks(Fp::size_in_bits() - 1) + .chunks(Fp::MODULUS_BIT_SIZE as usize - 1) .fold(vec![], |mut acc, chunk| { // Workaround: chunk.clone() does not appear to respect // the chunk's boundaries when it's not byte-aligned. @@ -157,7 +159,7 @@ impl ROInput { bv.clone_from_bitslice(chunk); // extend to the size of a field; - bv.resize(Fp::size_in_bits(), false); + bv.resize(Fp::MODULUS_BIT_SIZE as usize, false); acc.push( Fp::from_bytes(&bv.into_vec()) diff --git a/kimchi/Cargo.toml b/kimchi/Cargo.toml index e9a8ce037c..9ea24410cb 100644 --- a/kimchi/Cargo.toml +++ b/kimchi/Cargo.toml @@ -38,6 +38,7 @@ hex.workspace = true strum.workspace = true strum_macros.workspace = true + snarky-deriver.workspace = true turshi.workspace = true diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 0234650ace..fc24fb7c39 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -18,7 +18,7 @@ use crate::{ error::{DomainCreationError, SetupError}, prover_index::ProverIndex, }; -use ark_ff::{PrimeField, SquareRootField, Zero}; +use ark_ff::{PrimeField, Zero}; use ark_poly::{ univariate::DensePolynomial as DP, EvaluationDomain, Evaluations as E, Radix2EvaluationDomain as D, @@ -277,11 +277,8 @@ impl ConstraintSystem { } } -impl< - F: PrimeField + SquareRootField, - G: KimchiCurve, - OpeningProof: OpenProof, - > ProverIndex +impl, OpeningProof: OpenProof> + ProverIndex { /// This function verifies the consistency of the wire /// assignments (witness) against the constraints @@ -335,7 +332,7 @@ impl< } } -impl ConstraintSystem { +impl ConstraintSystem { /// evaluate witness polynomials over domains pub fn evaluate(&self, w: &[DP; COLUMNS], z: &DP) -> WitnessOverDomains { // compute shifted witness polynomials @@ -663,7 +660,7 @@ impl FeatureFlags { } } -impl Builder { +impl Builder { /// Set up the number of public inputs. /// If not invoked, it equals `0` by default. 
pub fn public(mut self, public: usize) -> Self { @@ -917,7 +914,7 @@ pub mod tests { use mina_curves::pasta::{Fp, Fq}; use o1_utils::FieldHelpers; - impl ConstraintSystem { + impl ConstraintSystem { pub fn for_testing(gates: Vec>) -> Self { let public = 0; // not sure if theres a smarter way instead of the double unwrap, but should be fine in the test diff --git a/kimchi/src/circuits/domain_constant_evaluation.rs b/kimchi/src/circuits/domain_constant_evaluation.rs index 7de9268ae2..0c83ff4dcd 100644 --- a/kimchi/src/circuits/domain_constant_evaluation.rs +++ b/kimchi/src/circuits/domain_constant_evaluation.rs @@ -3,8 +3,8 @@ use crate::circuits::domains::EvaluationDomains; use ark_ff::FftField; use ark_poly::{ - univariate::DensePolynomial as DP, EvaluationDomain, Evaluations as E, - Radix2EvaluationDomain as D, UVPolynomial, + univariate::DensePolynomial as DP, DenseUVPolynomial, EvaluationDomain, Evaluations as E, + Radix2EvaluationDomain as D, }; use serde::{Deserialize, Serialize}; use serde_with::serde_as; diff --git a/kimchi/src/circuits/expr.rs b/kimchi/src/circuits/expr.rs index 61486d6458..ea8b6caf97 100644 --- a/kimchi/src/circuits/expr.rs +++ b/kimchi/src/circuits/expr.rs @@ -2836,7 +2836,7 @@ impl FormattedOutput for ConstantTerm { match self { EndoCoefficient => "endo_coefficient".to_string(), Mds { row, col } => format!("mds({row}, {col})"), - Literal(x) => format!("field(\"0x{}\")", x.into_repr()), + Literal(x) => format!("field(\"0x{}\")", x.into_bigint()), } } @@ -2845,7 +2845,7 @@ impl FormattedOutput for ConstantTerm { match self { EndoCoefficient => "endo\\_coefficient".to_string(), Mds { row, col } => format!("mds({row}, {col})"), - Literal(x) => format!("\\mathbb{{F}}({})", x.into_repr().into()), + Literal(x) => format!("\\mathbb{{F}}({})", x.into_bigint().into()), } } diff --git a/kimchi/src/circuits/gate.rs b/kimchi/src/circuits/gate.rs index af55bf759c..5432c7670e 100644 --- a/kimchi/src/circuits/gate.rs +++ b/kimchi/src/circuits/gate.rs @@ 
-13,13 +13,11 @@ use crate::{ curve::KimchiCurve, prover_index::ProverIndex, }; -use ark_ff::{bytes::ToBytes, PrimeField, SquareRootField}; -use num_traits::cast::ToPrimitive; +use ark_ff::PrimeField; use o1_utils::hasher::CryptoDigest; use poly_commitment::OpenProof; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use std::io::{Result as IoResult, Write}; use thiserror::Error; use super::{argument::ArgumentWitness, expr}; @@ -163,24 +161,7 @@ where } } -impl ToBytes for CircuitGate { - #[inline] - fn write(&self, mut w: W) -> IoResult<()> { - let typ: u8 = ToPrimitive::to_u8(&self.typ).unwrap(); - typ.write(&mut w)?; - for i in 0..COLUMNS { - self.wires[i].write(&mut w)?; - } - - (self.coeffs.len() as u8).write(&mut w)?; - for x in &self.coeffs { - x.write(&mut w)?; - } - Ok(()) - } -} - -impl CircuitGate { +impl CircuitGate { /// this function creates "empty" circuit gate pub fn zero(wires: GateWires) -> Self { CircuitGate::new(GateType::Zero, wires, vec![]) diff --git a/kimchi/src/circuits/lookup/index.rs b/kimchi/src/circuits/lookup/index.rs index 1c7ffdb05a..7b8daaefbe 100644 --- a/kimchi/src/circuits/lookup/index.rs +++ b/kimchi/src/circuits/lookup/index.rs @@ -8,7 +8,7 @@ use crate::circuits::{ tables::LookupTable, }, }; -use ark_ff::{FftField, PrimeField, SquareRootField}; +use ark_ff::{FftField, PrimeField}; use ark_poly::{ univariate::DensePolynomial as DP, EvaluationDomain, Evaluations as E, Radix2EvaluationDomain as D, @@ -194,7 +194,7 @@ pub struct LookupConstraintSystem { pub configuration: LookupConfiguration, } -impl LookupConstraintSystem { +impl LookupConstraintSystem { /// Create the `LookupConstraintSystem`. 
/// /// # Errors diff --git a/kimchi/src/circuits/polynomials/and.rs b/kimchi/src/circuits/polynomials/and.rs index e49da51a3d..8debb0992d 100644 --- a/kimchi/src/circuits/polynomials/and.rs +++ b/kimchi/src/circuits/polynomials/and.rs @@ -15,7 +15,7 @@ use crate::circuits::{ polynomial::COLUMNS, wires::Wire, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers, Two}; @@ -58,7 +58,7 @@ use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers, Tw //~ * the `xor` in `a x b = xor` is connected to the `xor` in `2 \cdot and = sum - xor` //~ * the `sum` in `a + b = sum` is connected to the `sum` in `2 \cdot and = sum - xor` -impl CircuitGate { +impl CircuitGate { /// Extends an AND gadget for `bytes` length. /// The full operation being performed is the following: /// `a AND b = 1/2 * (a + b - (a XOR b))` diff --git a/kimchi/src/circuits/polynomials/endomul_scalar.rs b/kimchi/src/circuits/polynomials/endomul_scalar.rs index 00965da093..3cc8bdce96 100644 --- a/kimchi/src/circuits/polynomials/endomul_scalar.rs +++ b/kimchi/src/circuits/polynomials/endomul_scalar.rs @@ -230,7 +230,7 @@ pub fn gen_witness( let bits_per_row = 2 * crumbs_per_row; assert_eq!(num_bits % bits_per_row, 0); - let bits_lsb: Vec<_> = BitIteratorLE::new(scalar.into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(scalar.into_bigint()) .take(num_bits) .collect(); let bits_msb: Vec<_> = bits_lsb.iter().rev().collect(); @@ -341,7 +341,7 @@ mod tests { let f1 = c_func; let f2 = |x: F| -> F { - let bits_le = x.into_repr().to_bits_le(); + let bits_le = x.into_bigint().to_bits_le(); let b0 = bits_le[0]; let b1 = bits_le[1]; @@ -373,7 +373,7 @@ mod tests { let f1 = d_func; let f2 = |x: F| -> F { - let bits_le = x.into_repr().to_bits_le(); + let bits_le = x.into_bigint().to_bits_le(); let b0 = bits_le[0]; let b1 = bits_le[1]; diff --git 
a/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs b/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs index 8918d3e483..4b6c847b83 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs @@ -1,6 +1,6 @@ //! This module obtains the gates of a foreign field addition circuit. -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use crate::circuits::{ @@ -11,7 +11,7 @@ use crate::circuits::{ use super::witness::FFOps; -impl CircuitGate { +impl CircuitGate { /// Create foreign field addition gate chain without range checks (needs to wire the range check for result bound manually) /// - Inputs /// - starting row diff --git a/kimchi/src/circuits/polynomials/foreign_field_common.rs b/kimchi/src/circuits/polynomials/foreign_field_common.rs index 5a2fd7eb42..0d0cfb103f 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_common.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_common.rs @@ -253,13 +253,13 @@ fn biguint_to_limbs(x: &BigUint, limb_bits: usize) -> Vec { mod tests { use super::*; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::One; use mina_curves::pasta::Pallas as CurvePoint; use num_bigint::RandBigInt; /// Base field element type - pub type BaseField = ::BaseField; + pub type BaseField = ::BaseField; fn secp256k1_modulus() -> BigUint { BigUint::from_bytes_be(&secp256k1::constants::FIELD_SIZE) diff --git a/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs b/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs index ec85d60845..872fb3fe1b 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs @@ -1,6 +1,6 @@ //! This module obtains the gates of a foreign field addition circuit. 
-use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::foreign_field::ForeignFieldHelpers; @@ -28,7 +28,7 @@ use super::circuitgates::ForeignFieldMul; /// Number of gates in this gadget pub const GATE_COUNT: usize = 1; -impl CircuitGate { +impl CircuitGate { /// Create foreign field multiplication gate /// Inputs the starting row /// Outputs tuple (next_row, circuit_gates) where diff --git a/kimchi/src/circuits/polynomials/keccak/gadget.rs b/kimchi/src/circuits/polynomials/keccak/gadget.rs index b3c7b95b8a..f204771fe9 100644 --- a/kimchi/src/circuits/polynomials/keccak/gadget.rs +++ b/kimchi/src/circuits/polynomials/keccak/gadget.rs @@ -3,7 +3,7 @@ use crate::circuits::{ gate::{CircuitGate, GateType}, wires::Wire, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use super::{ constants::{RATE_IN_BYTES, ROUNDS}, @@ -12,7 +12,7 @@ use super::{ const SPONGE_COEFFS: usize = 336; -impl CircuitGate { +impl CircuitGate { /// Extends a Keccak circuit to hash one message /// Note: /// Requires at least one more row after the Keccak gadget so that diff --git a/kimchi/src/circuits/polynomials/permutation.rs b/kimchi/src/circuits/polynomials/permutation.rs index 4d337f07a0..2268f333c0 100644 --- a/kimchi/src/circuits/polynomials/permutation.rs +++ b/kimchi/src/circuits/polynomials/permutation.rs @@ -49,10 +49,10 @@ use crate::{ proof::{PointEvaluations, ProofEvaluations}, prover_index::ProverIndex, }; -use ark_ff::{FftField, PrimeField, SquareRootField, Zero}; +use ark_ff::{FftField, PrimeField, Zero}; use ark_poly::{ univariate::{DenseOrSparsePolynomial, DensePolynomial}, - EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, UVPolynomial, + DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, }; use blake2::{Blake2b512, Digest}; use o1_utils::{ExtendedDensePolynomial, ExtendedEvaluations}; @@ -134,7 +134,7 @@ pub struct Shifts { impl Shifts 
where - F: FftField + SquareRootField, + F: FftField, { /// Generates the shifts for a given domain pub fn new(domain: &D) -> Self { diff --git a/kimchi/src/circuits/polynomials/poseidon.rs b/kimchi/src/circuits/polynomials/poseidon.rs index 8399205a44..4eff85c3b8 100644 --- a/kimchi/src/circuits/polynomials/poseidon.rs +++ b/kimchi/src/circuits/polynomials/poseidon.rs @@ -35,7 +35,7 @@ use crate::{ }, curve::KimchiCurve, }; -use ark_ff::{Field, PrimeField, SquareRootField}; +use ark_ff::{Field, PrimeField}; use mina_poseidon::{ constants::{PlonkSpongeConstantsKimchi, SpongeConstants}, poseidon::{sbox, ArithmeticSponge, ArithmeticSpongeParams, Sponge}, @@ -77,7 +77,7 @@ pub const fn round_to_cols(i: usize) -> Range { start..(start + SPONGE_WIDTH) } -impl CircuitGate { +impl CircuitGate { pub fn create_poseidon( wires: GateWires, // Coefficients are passed in in the logical order diff --git a/kimchi/src/circuits/polynomials/range_check/gadget.rs b/kimchi/src/circuits/polynomials/range_check/gadget.rs index fe90cf478a..309f4de50e 100644 --- a/kimchi/src/circuits/polynomials/range_check/gadget.rs +++ b/kimchi/src/circuits/polynomials/range_check/gadget.rs @@ -1,6 +1,6 @@ //! Range check gate -use ark_ff::{FftField, PrimeField, SquareRootField}; +use ark_ff::{FftField, PrimeField}; use crate::{ alphas::Alphas, @@ -21,7 +21,7 @@ use super::circuitgates::{RangeCheck0, RangeCheck1}; pub const GATE_COUNT: usize = 2; -impl CircuitGate { +impl CircuitGate { /// Create range check gate for constraining three 88-bit values. 
/// Inputs the starting row /// Outputs tuple (`next_row`, `circuit_gates`) where diff --git a/kimchi/src/circuits/polynomials/rot.rs b/kimchi/src/circuits/polynomials/rot.rs index 9d92038e52..4e8f1bfbae 100644 --- a/kimchi/src/circuits/polynomials/rot.rs +++ b/kimchi/src/circuits/polynomials/rot.rs @@ -19,7 +19,7 @@ use crate::{ }, variable_map, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use std::{array, marker::PhantomData}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -28,7 +28,7 @@ pub enum RotMode { Right, } -impl CircuitGate { +impl CircuitGate { /// Creates a Rot64 gadget to rotate a word /// It will need: /// - 1 Generic gate to constrain to zero the top 2 limbs of the shifted and excess witness of the rotation diff --git a/kimchi/src/circuits/polynomials/turshi.rs b/kimchi/src/circuits/polynomials/turshi.rs index 6f08407b81..75ba40c7b3 100644 --- a/kimchi/src/circuits/polynomials/turshi.rs +++ b/kimchi/src/circuits/polynomials/turshi.rs @@ -91,7 +91,7 @@ use crate::{ curve::KimchiCurve, proof::ProofEvaluations, }; -use ark_ff::{FftField, Field, PrimeField, SquareRootField}; +use ark_ff::{FftField, Field, PrimeField}; use log::error; use rand::{prelude::StdRng, SeedableRng}; use std::{array, marker::PhantomData}; @@ -105,7 +105,7 @@ pub const CIRCUIT_GATE_COUNT: usize = 4; // GATE-RELATED -impl CircuitGate { +impl CircuitGate { /// This function creates a `CairoClaim` gate pub fn create_cairo_claim(wires: GateWires) -> Self { CircuitGate::new(GateType::CairoClaim, wires, vec![]) diff --git a/kimchi/src/circuits/polynomials/xor.rs b/kimchi/src/circuits/polynomials/xor.rs index d7a87313c6..c24f2aa965 100644 --- a/kimchi/src/circuits/polynomials/xor.rs +++ b/kimchi/src/circuits/polynomials/xor.rs @@ -16,14 +16,14 @@ use crate::{ }, variable_map, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers}; use 
std::{array, marker::PhantomData}; use super::generic::GenericGateSpec; -impl CircuitGate { +impl CircuitGate { /// Extends a XOR gadget for `bits` length to a circuit /// Includes: /// - num_xors Xor16 gates diff --git a/kimchi/src/circuits/wires.rs b/kimchi/src/circuits/wires.rs index 52ed4c56bf..31c8761911 100644 --- a/kimchi/src/circuits/wires.rs +++ b/kimchi/src/circuits/wires.rs @@ -1,11 +1,7 @@ //! This module implements Plonk circuit gate wires primitive. -use ark_ff::bytes::{FromBytes, ToBytes}; use serde::{Deserialize, Serialize}; -use std::{ - array, - io::{Read, Result as IoResult, Write}, -}; +use std::array; /// Number of registers pub const COLUMNS: usize = 15; @@ -67,24 +63,6 @@ impl Wirable for GateWires { } } -impl ToBytes for Wire { - #[inline] - fn write(&self, mut w: W) -> IoResult<()> { - (self.row as u32).write(&mut w)?; - (self.col as u32).write(&mut w)?; - Ok(()) - } -} - -impl FromBytes for Wire { - #[inline] - fn read(mut r: R) -> IoResult { - let row = u32::read(&mut r)? as usize; - let col = u32::read(&mut r)? as usize; - Ok(Wire { row, col }) - } -} - #[cfg(feature = "ocaml_types")] pub mod caml { use super::*; diff --git a/kimchi/src/circuits/witness/mod.rs b/kimchi/src/circuits/witness/mod.rs index 830e2af5e7..53f278045c 100644 --- a/kimchi/src/circuits/witness/mod.rs +++ b/kimchi/src/circuits/witness/mod.rs @@ -93,10 +93,10 @@ mod tests { use super::*; use crate::circuits::polynomial::COLUMNS; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; use mina_curves::pasta::Pallas; - type PallasField = ::BaseField; + type PallasField = ::BaseField; #[test] fn zero_layout() { diff --git a/kimchi/src/curve.rs b/kimchi/src/curve.rs index 57790b10f7..5fa3d9d3e7 100644 --- a/kimchi/src/curve.rs +++ b/kimchi/src/curve.rs @@ -1,7 +1,7 @@ //! This module contains a useful trait for recursion: [KimchiCurve], //! which defines how a pair of curves interact. 
-use ark_ec::{short_weierstrass_jacobian::GroupAffine, AffineCurve, ModelParameters}; +use ark_ec::{short_weierstrass::Affine, AffineRepr, CurveConfig}; use mina_curves::pasta::curves::{ pallas::{LegacyPallasParameters, PallasParameters}, vesta::{LegacyVestaParameters, VestaParameters}, @@ -33,32 +33,32 @@ pub trait KimchiCurve: CommitmentCurve + EndoCurve { /// Accessor for the other curve's prime subgroup generator, as coordinates // TODO: This leaked from snarky.rs. Stop the bleed. - fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField); + fn other_curve_generator() -> (Self::ScalarField, Self::ScalarField); } fn vesta_endos() -> &'static ( - ::BaseField, - ::ScalarField, + ::BaseField, + ::ScalarField, ) { static VESTA_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); &VESTA_ENDOS } fn pallas_endos() -> &'static ( - ::BaseField, - ::ScalarField, + ::BaseField, + ::ScalarField, ) { static PALLAS_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); &PALLAS_ENDOS } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "vesta"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -77,14 +77,14 @@ impl KimchiCurve for GroupAffine { &pallas_endos().0 } - fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + fn other_curve_generator() -> (Self::ScalarField, Self::ScalarField) { + Affine::::generator() .to_coordinates() .unwrap() } } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "pallas"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -103,8 +103,8 @@ impl KimchiCurve for GroupAffine { &vesta_endos().0 } - fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - 
GroupAffine::::prime_subgroup_generator() + fn other_curve_generator() -> (Self::ScalarField, Self::ScalarField) { + Affine::::generator() .to_coordinates() .unwrap() } @@ -114,7 +114,7 @@ impl KimchiCurve for GroupAffine { // Legacy curves // -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "legacy_vesta"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -133,14 +133,14 @@ impl KimchiCurve for GroupAffine { &pallas_endos().0 } - fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + fn other_curve_generator() -> (Self::ScalarField, Self::ScalarField) { + Affine::::generator() .to_coordinates() .unwrap() } } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "legacy_pallas"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -159,8 +159,8 @@ impl KimchiCurve for GroupAffine { &vesta_endos().0 } - fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + fn other_curve_generator() -> (Self::ScalarField, Self::ScalarField) { + Affine::::generator() .to_coordinates() .unwrap() } @@ -170,7 +170,7 @@ impl KimchiCurve for GroupAffine { use mina_poseidon::dummy_values::kimchi_dummy; #[cfg(feature = "bn254")] -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "bn254"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -197,7 +197,7 @@ impl KimchiCurve for GroupAffine { &ENDO } - fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + fn other_curve_generator() -> (Self::ScalarField, Self::ScalarField) { // TODO: Dummy value, this is definitely not right (44u64.into(), 88u64.into()) } diff --git a/kimchi/src/linearization.rs b/kimchi/src/linearization.rs index 4e0fa3c0f5..b3f0352499 100644 --- a/kimchi/src/linearization.rs +++ 
b/kimchi/src/linearization.rs @@ -33,14 +33,14 @@ use crate::circuits::{ gate::GateType, wires::COLUMNS, }; -use ark_ff::{FftField, PrimeField, SquareRootField, Zero}; +use ark_ff::{FftField, PrimeField, Zero}; /// Get the expresion of constraints. /// /// # Panics /// /// Will panic if `generic_gate` is not associate with `alpha^0`. -pub fn constraints_expr( +pub fn constraints_expr( feature_flags: Option<&FeatureFlags>, generic: bool, ) -> ( @@ -242,7 +242,7 @@ pub fn constraints_expr( /// Adds the polynomials that are evaluated as part of the proof /// for the linearization to work. -pub fn linearization_columns( +pub fn linearization_columns( feature_flags: Option<&FeatureFlags>, ) -> std::collections::HashSet { let mut h = std::collections::HashSet::new(); @@ -345,7 +345,7 @@ pub fn linearization_columns( /// /// Will panic if the `linearization` process fails. #[allow(clippy::type_complexity)] -pub fn expr_linearization( +pub fn expr_linearization( feature_flags: Option<&FeatureFlags>, generic: bool, ) -> ( diff --git a/kimchi/src/precomputed_srs.rs b/kimchi/src/precomputed_srs.rs index a58ebd15e9..f29a2a8745 100644 --- a/kimchi/src/precomputed_srs.rs +++ b/kimchi/src/precomputed_srs.rs @@ -118,7 +118,7 @@ where mod tests { use super::*; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::PrimeField; use ark_serialize::Write; use hex; @@ -127,8 +127,8 @@ mod tests { use crate::circuits::domains::EvaluationDomains; - fn test_regression_serialization_srs_with_generators(exp_output: String) { - let h = G::prime_subgroup_generator(); + fn test_regression_serialization_srs_with_generators(exp_output: String) { + let h = G::generator(); let g = vec![h]; let lagrange_bases = HashMap::new(); let srs = SRS:: { diff --git a/kimchi/src/proof.rs b/kimchi/src/proof.rs index 7cec0521a6..a29fd12918 100644 --- a/kimchi/src/proof.rs +++ b/kimchi/src/proof.rs @@ -6,7 +6,7 @@ use crate::circuits::{ lookup::lookups::LookupPattern, wires::{COLUMNS, PERMUTS}, }; -use 
ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{FftField, One, Zero}; use ark_poly::univariate::DensePolynomial; use o1_utils::ExtendedDensePolynomial; @@ -108,7 +108,7 @@ pub struct ProofEvaluations { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct LookupCommitments { +pub struct LookupCommitments { /// Commitments to the sorted lookup table polynomial (may have chunks) pub sorted: Vec>, /// Commitment to the lookup aggregation polynomial @@ -121,7 +121,7 @@ pub struct LookupCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverCommitments { +pub struct ProverCommitments { /// The commitments to the witness (execution trace) pub w_comm: [PolyComm; COLUMNS], /// The commitment to the permutation polynomial @@ -136,7 +136,7 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof pub commitments: ProverCommitments, @@ -164,7 +164,7 @@ pub struct ProverProof { #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] pub struct RecursionChallenge where - G: AffineCurve, + G: AffineRepr, { /// Vector of scalar field elements #[serde_as(as = "Vec")] @@ -345,7 +345,7 @@ impl ProofEvaluations { } } -impl RecursionChallenge { +impl RecursionChallenge { pub fn new(chals: Vec, comm: PolyComm) -> RecursionChallenge { RecursionChallenge { chals, comm } } @@ -505,7 +505,7 @@ pub mod caml { impl From> for CamlRecursionChallenge where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: From, { @@ -519,7 +519,7 @@ pub mod caml { impl From> for 
RecursionChallenge where - G: AffineCurve + From, + G: AffineRepr + From, G::ScalarField: From, { fn from(caml_ch: CamlRecursionChallenge) -> RecursionChallenge { diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index d63f3b2261..6d17f286c7 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -36,8 +36,8 @@ use crate::{ }; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ - univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, - Radix2EvaluationDomain as D, UVPolynomial, + univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, + Radix2EvaluationDomain as D, }; use itertools::Itertools; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; @@ -1504,7 +1504,7 @@ internal_tracing::decl_traces!(internal_traces; pub mod caml { use super::*; use crate::proof::caml::{CamlProofEvaluations, CamlRecursionChallenge}; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use poly_commitment::{ commitment::caml::{CamlOpeningProof, CamlPolyComm}, evaluation_proof::OpeningProof, @@ -1592,7 +1592,7 @@ pub mod caml { impl From> for CamlLookupCommitments where - G: AffineCurve, + G: AffineRepr, CamlPolyComm: From>, { fn from( @@ -1612,7 +1612,7 @@ pub mod caml { impl From> for LookupCommitments where - G: AffineCurve, + G: AffineRepr, PolyComm: From>, { fn from( @@ -1636,7 +1636,7 @@ pub mod caml { impl From> for CamlProverCommitments where - G: AffineCurve, + G: AffineRepr, CamlPolyComm: From>, { fn from(prover_comm: ProverCommitments) -> Self { @@ -1669,7 +1669,7 @@ pub mod caml { impl From> for ProverCommitments where - G: AffineCurve, + G: AffineRepr, PolyComm: From>, { fn from(caml_prover_comm: CamlProverCommitments) -> ProverCommitments { @@ -1722,7 +1722,7 @@ pub mod caml { impl From<(ProverProof>, Vec)> for CamlProofWithPublic where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: From, { @@ -1746,7 +1746,7 @@ pub mod caml { for (ProverProof>, Vec) where CamlF: 
Clone, - G: AffineCurve + From, + G: AffineRepr + From, G::ScalarField: From, { fn from( diff --git a/kimchi/src/prover_index.rs b/kimchi/src/prover_index.rs index 11e23ac0e1..6ff5ca2bf5 100644 --- a/kimchi/src/prover_index.rs +++ b/kimchi/src/prover_index.rs @@ -142,7 +142,7 @@ pub mod testing { }, precomputed_srs, }; - use ark_ff::{PrimeField, SquareRootField}; + use ark_ff::PrimeField; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; use poly_commitment::{evaluation_proof::OpeningProof, srs::SRS, OpenProof}; @@ -163,7 +163,7 @@ pub mod testing { ) -> ProverIndex where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { // not sure if theres a smarter way instead of the double unwrap, but should be fine in the test let cs = ConstraintSystem::::create(gates) @@ -200,7 +200,7 @@ pub mod testing { ) -> ProverIndex> where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { new_index_for_test_with_lookups_and_custom_srs( gates, @@ -232,7 +232,7 @@ pub mod testing { ) -> ProverIndex> where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { new_index_for_test_with_lookups::(gates, public, 0, vec![], None, false, None) } diff --git a/kimchi/src/snarky/api.rs b/kimchi/src/snarky/api.rs index 5d52c3e567..4632eceb28 100644 --- a/kimchi/src/snarky/api.rs +++ b/kimchi/src/snarky/api.rs @@ -16,7 +16,7 @@ use crate::{ verifier_index::VerifierIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::PrimeField; use log::debug; use poly_commitment::{commitment::CommitmentCurve, OpenProof, SRS}; @@ -31,8 +31,8 @@ pub struct Witness(pub [Vec; COLUMNS]); // aliases // -type ScalarField = ::ScalarField; -type BaseField = ::BaseField; +type ScalarField = ::ScalarField; +type BaseField = ::BaseField; /// A prover index. 
pub struct ProverIndexWrapper @@ -71,7 +71,7 @@ where debug: bool, ) -> SnarkyResult<(Proof, Box>)> where - ::BaseField: PrimeField, + ::BaseField: PrimeField, EFqSponge: Clone + FqSponge, Circuit::Curve, ScalarField>, EFrSponge: FrSponge>, @@ -174,7 +174,7 @@ where public_input: >>::OutOfCircuit, public_output: >>::OutOfCircuit, ) where - ::BaseField: PrimeField, + ::BaseField: PrimeField, EFqSponge: Clone + FqSponge, Circuit::Curve, ScalarField>, EFrSponge: FrSponge>, @@ -300,7 +300,7 @@ pub trait SnarkyCircuit: Sized { self, ) -> SnarkyResult<(ProverIndexWrapper, VerifierIndexWrapper)> where - ::BaseField: PrimeField, + ::BaseField: PrimeField, { let compiled_circuit = compile(self)?; diff --git a/kimchi/src/snarky/constants.rs b/kimchi/src/snarky/constants.rs index c0489de8d9..646a81ee10 100644 --- a/kimchi/src/snarky/constants.rs +++ b/kimchi/src/snarky/constants.rs @@ -18,7 +18,7 @@ where pub fn new>() -> Self { let poseidon = Curve::sponge_params().clone(); let endo_q = Curve::other_curve_endo(); - let base = Curve::other_curve_prime_subgroup_generator(); + let base = Curve::other_curve_generator(); Self { poseidon, diff --git a/kimchi/src/snarky/folding.rs b/kimchi/src/snarky/folding.rs index 42797c2d3b..21d86f7cc1 100644 --- a/kimchi/src/snarky/folding.rs +++ b/kimchi/src/snarky/folding.rs @@ -5,7 +5,7 @@ use crate::{ loc, snarky::{api::SnarkyCircuit, cvar::FieldVar, runner::RunState}, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{BigInteger, One, PrimeField}; use mina_curves::pasta::Fp; use poly_commitment::{evaluation_proof::OpeningProof, OpenProof}; @@ -68,7 +68,7 @@ fn apply( todo!() } -type F = ::ScalarField; +type F = ::ScalarField; fn challenge_linear_combination( _full: FullChallenge>, @@ -107,11 +107,11 @@ fn trim( ) -> SnarkyResult> { let (high, low): (FieldVar, FieldVar) = sys.compute(loc!(), |wit| { let val = wit.read_var(v); - let mut high = val.into_repr(); + let mut high = val.into_bigint(); high.divn(CHALLENGE_BITS as 
u32); - let mut low = val.into_repr(); - low.sub_noborrow(&high); - (F::from_repr(high).unwrap(), F::from_repr(low).unwrap()) + let mut low = val.into_bigint(); + low.sub_with_borrow(&high); + (F::from_bigint(high).unwrap(), F::from_bigint(low).unwrap()) })?; let composition = high.mul(base, None, loc!(), sys)? + &low; // TODO: constraint low to 127 bits diff --git a/kimchi/src/snarky/range_checks.rs b/kimchi/src/snarky/range_checks.rs index e21d012194..208ce7f052 100644 --- a/kimchi/src/snarky/range_checks.rs +++ b/kimchi/src/snarky/range_checks.rs @@ -34,7 +34,7 @@ struct RangeCheckLimbs1 { impl RangeCheckLimbs1 { ///extracts the limbs needed for range check from the bits of f fn parse(f: F) -> Self { - let mut bits = f.into_repr().to_bits_le().into_iter(); + let mut bits = f.into_bigint().to_bits_le().into_iter(); let crumbs = parse_limbs::(bits.by_ref()); let limbs = parse_limbs::(bits); Self { crumbs, limbs } @@ -58,7 +58,7 @@ struct RangeCheckLimbs2 { impl RangeCheckLimbs2 { ///extracts the limbs needed for range check from the bits of f fn parse(f: F) -> Self { - let mut bits = f.into_repr().to_bits_le().into_iter(); + let mut bits = f.into_bigint().to_bits_le().into_iter(); let crumbs_low = parse_limbs::(bits.by_ref()); let limbs = parse_limbs::(bits.by_ref()); let crumbs_high = parse_limbs::(bits); diff --git a/kimchi/src/tests/and.rs b/kimchi/src/tests/and.rs index 9743231d5c..ea69836618 100644 --- a/kimchi/src/tests/and.rs +++ b/kimchi/src/tests/and.rs @@ -10,7 +10,7 @@ use crate::{ plonk_sponge::FrSponge, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{One, PrimeField, Zero}; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; use mina_poseidon::{ @@ -23,8 +23,8 @@ use o1_utils::{BitwiseOps, FieldHelpers, RandomField}; use super::framework::TestFramework; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = 
PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/ec.rs b/kimchi/src/tests/ec.rs index e62def2888..6da2ab488e 100644 --- a/kimchi/src/tests/ec.rs +++ b/kimchi/src/tests/ec.rs @@ -2,14 +2,14 @@ use crate::circuits::{ gate::{CircuitGate, GateType}, wires::*, }; -use ark_ec::{AffineCurve, ProjectiveCurve}; -use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; +use ark_ec::{AffineRepr, CurveGroup}; +use ark_ff::{Field, One, UniformRand, Zero}; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; -use std::array; +use std::{array, ops::Mul}; use super::framework::TestFramework; @@ -39,36 +39,34 @@ fn ec_test() { let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![]); - let ps = { - let p = Other::prime_subgroup_generator() - .into_projective() - .mul(::ScalarField::rand(&mut rng).into_repr()) - .into_affine(); + let ps: Vec = { + let p = Other::generator() + .into_group() + .mul(::ScalarField::rand(&mut rng)); let mut res = vec![]; let mut acc = p; for _ in 0..num_additions { res.push(acc); - acc = acc + p; + acc += p; } - res + ::Group::normalize_batch(&res) }; - let qs = { - let q = Other::prime_subgroup_generator() - .into_projective() - .mul(::ScalarField::rand(&mut rng).into_repr()) - .into_affine(); + let qs: Vec = { + let q = Other::generator() + .into_group() + .mul(::ScalarField::rand(&mut rng)); let mut res = vec![]; let mut acc = q; for _ in 0..num_additions { res.push(acc); - acc = acc + q; + acc += q; } - res + ::Group::normalize_batch(&res) }; for &p in ps.iter().take(num_doubles) { - let p2: Other = p + p; + let p2: Other = (p + p).into(); let (x1, y1) = (p.x, p.y); let x1_squared = x1.square(); // 2 * s * y1 = 3 * x1^2 @@ -96,11 +94,12 @@ fn ec_test() { let p = ps[i]; let q = qs[i]; - let pq: Other = p + q; + let pq: Other = (p 
+ q).into(); let (x1, y1) = (p.x, p.y); let (x2, y2) = (q.x, q.y); // (x2 - x1) * s = y2 - y1 let s = (y2 - y1) / (x2 - x1); + witness[0].push(x1); witness[1].push(y1); witness[2].push(x2); @@ -122,11 +121,12 @@ fn ec_test() { for &p in ps.iter().take(num_infs) { let q: Other = -p; - let p2: Other = p + p; + let p2: Other = (p + p).into(); let (x1, y1) = (p.x, p.y); let x1_squared = x1.square(); // 2 * s * y1 = -3 * x1^2 let s = (x1_squared.double() + x1_squared) / y1.double(); + witness[0].push(p.x); witness[1].push(p.y); witness[2].push(q.x); diff --git a/kimchi/src/tests/endomul.rs b/kimchi/src/tests/endomul.rs index 69c4d72cbe..3f0a02362c 100644 --- a/kimchi/src/tests/endomul.rs +++ b/kimchi/src/tests/endomul.rs @@ -6,7 +6,7 @@ use crate::{ }, tests::framework::TestFramework, }; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, BitIteratorLE, Field, One, PrimeField, UniformRand, Zero}; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; use mina_poseidon::{ @@ -15,7 +15,7 @@ use mina_poseidon::{ }; use poly_commitment::srs::endos; use rand::{rngs::StdRng, SeedableRng}; -use std::array; +use std::{array, ops::Mul}; type SpongeParams = PlonkSpongeConstantsKimchi; type BaseSponge = DefaultFqSponge; @@ -58,23 +58,23 @@ fn endomul_test() { // let start = Instant::now(); for i in 0..num_scalars { - let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_bigint()) .take(num_bits) .collect(); - let x = ::ScalarField::from_repr( + let x = ::ScalarField::from_bigint( ::BigInt::from_bits_le(&bits_lsb[..]), ) .unwrap(); let x_scalar = ScalarChallenge(x).to_field(&endo_r); - let base = Other::prime_subgroup_generator(); - // let g = Other::prime_subgroup_generator().into_projective(); + let base = Other::generator(); + // let g = Other::generator().into_group(); let acc0 = { - let t = Other::new(endo_q * base.x, base.y, 
false); + let t = Other::new_unchecked(endo_q * base.x, base.y); // Ensuring we use affine coordinates - let p: Other = t + base; - let acc: Other = p + p; + let p = t + base; + let acc: Other = (p + p).into(); (acc.x, acc.y) }; @@ -90,27 +90,24 @@ fn endomul_test() { ); let expected = { - let t = Other::prime_subgroup_generator(); - let mut acc = Other::new(acc0.0, acc0.1, false); + let t = Other::generator(); + let mut acc = Other::new_unchecked(acc0.0, acc0.1).into_group(); for i in (0..(num_bits / 2)).rev() { let b2i = F::from(bits_lsb[2 * i] as u64); let b2i1 = F::from(bits_lsb[2 * i + 1] as u64); let xq = (F::one() + ((endo_q - F::one()) * b2i1)) * t.x; let yq = (b2i.double() - F::one()) * t.y; - acc = acc + (acc + Other::new(xq, yq, false)); + acc = acc + (acc + Other::new_unchecked(xq, yq)); } - acc + acc.into_affine() }; assert_eq!( expected, - Other::prime_subgroup_generator() - .into_projective() - .mul(x_scalar.into_repr()) - .into_affine() + Other::generator().into_group().mul(x_scalar).into_affine() ); assert_eq!((expected.x, expected.y), res.acc); - assert_eq!(x.into_repr(), res.n.into_repr()); + assert_eq!(x.into_bigint(), res.n.into_bigint()); } TestFramework::::default() diff --git a/kimchi/src/tests/endomul_scalar.rs b/kimchi/src/tests/endomul_scalar.rs index f39c0bf236..886a7daefa 100644 --- a/kimchi/src/tests/endomul_scalar.rs +++ b/kimchi/src/tests/endomul_scalar.rs @@ -52,10 +52,10 @@ fn endomul_scalar_test() { //let start = Instant::now(); for _ in 0..num_scalars { let x = { - let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_bigint()) .take(num_bits) .collect(); - F::from_repr(::BigInt::from_bits_le(&bits_lsb[..])).unwrap() + F::from_bigint(::BigInt::from_bits_le(&bits_lsb[..])).unwrap() }; assert_eq!( diff --git a/kimchi/src/tests/foreign_field_add.rs b/kimchi/src/tests/foreign_field_add.rs index 124185b2ca..cf10bae7bf 100644 --- 
a/kimchi/src/tests/foreign_field_add.rs +++ b/kimchi/src/tests/foreign_field_add.rs @@ -17,8 +17,8 @@ use crate::{ curve::KimchiCurve, prover_index::ProverIndex, }; -use ark_ec::AffineCurve; -use ark_ff::{One, PrimeField, SquareRootField, Zero}; +use ark_ec::AffineRepr; +use ark_ff::{One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; use mina_poseidon::{ @@ -35,8 +35,8 @@ use poly_commitment::{ use rand::{rngs::StdRng, Rng}; use std::{array, sync::Arc}; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; @@ -146,7 +146,7 @@ static NULL_CARRY_BOTH: &[u8] = &[ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0xD2, ]; -impl CircuitGate { +impl CircuitGate { /// Check if a given circuit gate is a given foreign field operation pub fn check_ffadd_sign(&self, sign: FFOps) -> Result<(), String> { if self.typ != GateType::ForeignFieldAdd { @@ -175,7 +175,7 @@ impl CircuitGate { // Outputs tuple (next_row, circuit_gates) where // next_row - next row after this gate // circuit_gates - vector of circuit gates comprising this gate -fn short_circuit( +fn short_circuit( opcodes: &[FFOps], foreign_field_modulus: &BigUint, ) -> (usize, Vec>) { @@ -208,7 +208,7 @@ fn short_circuit( // Outputs tuple (next_row, circuit_gates) where // next_row - next row after this gate // circuit_gates - vector of circuit gates comprising this gate -fn full_circuit( +fn full_circuit( opcodes: &[FFOps], foreign_field_modulus: &BigUint, ) -> (usize, Vec>) { diff --git a/kimchi/src/tests/foreign_field_mul.rs b/kimchi/src/tests/foreign_field_mul.rs index 73ba8ed5c8..ac4f386f6d 100644 --- a/kimchi/src/tests/foreign_field_mul.rs +++ b/kimchi/src/tests/foreign_field_mul.rs @@ -14,7 +14,7 @@ use crate::{ plonk_sponge::FrSponge, tests::framework::TestFramework, }; -use ark_ec::AffineCurve; +use 
ark_ec::AffineRepr; use ark_ff::{Field, PrimeField, Zero}; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; use num_bigint::BigUint; @@ -28,8 +28,8 @@ use mina_poseidon::{ }; use num_bigint::RandBigInt; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; diff --git a/kimchi/src/tests/framework.rs b/kimchi/src/tests/framework.rs index b0b79dbdbd..2e8aad8567 100644 --- a/kimchi/src/tests/framework.rs +++ b/kimchi/src/tests/framework.rs @@ -123,6 +123,7 @@ where /// creates the indexes #[must_use] + #[allow(dead_code)] pub(crate) fn setup_with_custom_srs, usize) -> OpeningProof::SRS>( mut self, get_srs: F, diff --git a/kimchi/src/tests/generic.rs b/kimchi/src/tests/generic.rs index c606259f62..0fd2c4d2ad 100644 --- a/kimchi/src/tests/generic.rs +++ b/kimchi/src/tests/generic.rs @@ -94,7 +94,7 @@ fn test_generic_gate_pub_empty() { fn test_generic_gate_kzg() { type Fp = ark_bn254::Fr; type SpongeParams = PlonkSpongeConstantsKimchi; - type BaseSponge = DefaultFqSponge; + type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; use ark_ff::UniformRand; @@ -112,7 +112,7 @@ fn test_generic_gate_kzg() { // create and verify proof based on the witness >, + poly_commitment::kzg::KZGProof>, > as Default>::default() .gates(gates) .witness(witness) diff --git a/kimchi/src/tests/not.rs b/kimchi/src/tests/not.rs index 22020944ab..2f7e127b44 100644 --- a/kimchi/src/tests/not.rs +++ b/kimchi/src/tests/not.rs @@ -14,7 +14,7 @@ use crate::{ }; use super::framework::TestFramework; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; use mina_poseidon::{ @@ -25,8 +25,8 @@ use num_bigint::BigUint; use o1_utils::{BigUintHelpers, BitwiseOps, FieldHelpers, RandomField}; use 
poly_commitment::evaluation_proof::OpeningProof; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/range_check.rs b/kimchi/src/tests/range_check.rs index c26bcbd7d0..3e50a655fc 100644 --- a/kimchi/src/tests/range_check.rs +++ b/kimchi/src/tests/range_check.rs @@ -17,7 +17,7 @@ use crate::{ prover_index::testing::new_index_for_test_with_lookups, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; @@ -43,7 +43,7 @@ use super::framework::TestFramework; type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; -type PallasField = ::BaseField; +type PallasField = ::BaseField; fn create_test_prover_index( public_size: usize, diff --git a/kimchi/src/tests/recursion.rs b/kimchi/src/tests/recursion.rs index da2fba8a25..686aba3aed 100644 --- a/kimchi/src/tests/recursion.rs +++ b/kimchi/src/tests/recursion.rs @@ -7,7 +7,7 @@ use crate::{ proof::RecursionChallenge, }; use ark_ff::{UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, diff --git a/kimchi/src/tests/rot.rs b/kimchi/src/tests/rot.rs index 54104bfc1b..20db94ae25 100644 --- a/kimchi/src/tests/rot.rs +++ b/kimchi/src/tests/rot.rs @@ -17,7 +17,7 @@ use crate::{ plonk_sponge::FrSponge, prover_index::ProverIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; @@ -33,7 +33,7 @@ use 
poly_commitment::{ }; use rand::Rng; -type PallasField = ::BaseField; +type PallasField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/serde.rs b/kimchi/src/tests/serde.rs index 0c15da848d..d9d278d6f4 100644 --- a/kimchi/src/tests/serde.rs +++ b/kimchi/src/tests/serde.rs @@ -9,7 +9,7 @@ use crate::{ verifier::verify, verifier_index::VerifierIndex, }; -use ark_ec::short_weierstrass_jacobian::GroupAffine; +use ark_ec::short_weierstrass::Affine; use ark_ff::Zero; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; @@ -72,11 +72,11 @@ mod tests { .unwrap(); // deserialize the verifier index - let mut verifier_index_deserialize: VerifierIndex, _> = + let mut verifier_index_deserialize: VerifierIndex, _> = serde_json::from_str(&verifier_index_serialize).unwrap(); // add srs with lagrange bases - let mut srs = SRS::>::create(verifier_index.max_poly_size); + let mut srs = SRS::>::create(verifier_index.max_poly_size); srs.add_lagrange_basis(verifier_index.domain); verifier_index_deserialize.powers_of_alpha = index.powers_of_alpha; verifier_index_deserialize.linearization = index.linearization; diff --git a/kimchi/src/tests/varbasemul.rs b/kimchi/src/tests/varbasemul.rs index f3b91ec643..02ba79d583 100644 --- a/kimchi/src/tests/varbasemul.rs +++ b/kimchi/src/tests/varbasemul.rs @@ -6,7 +6,7 @@ use crate::{ }, tests::framework::TestFramework, }; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, BitIteratorLE, Field, One, PrimeField, UniformRand, Zero}; use colored::Colorize; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; @@ -15,7 +15,7 @@ use mina_poseidon::{ sponge::{DefaultFqSponge, DefaultFrSponge}, }; use rand::{rngs::StdRng, SeedableRng}; -use std::{array, time::Instant}; +use std::{array, ops::Mul, time::Instant}; type SpongeParams 
= PlonkSpongeConstantsKimchi; type BaseSponge = DefaultFqSponge; @@ -23,7 +23,7 @@ type ScalarSponge = DefaultFrSponge; #[test] fn varbase_mul_test() { - let num_bits = F::size_in_bits(); + let num_bits = F::MODULUS_BIT_SIZE as usize; let chunks = num_bits / 5; let num_scalars = 10; @@ -55,14 +55,14 @@ fn varbase_mul_test() { let start = Instant::now(); for i in 0..num_scalars { let x = F::rand(rng); - let bits_lsb: Vec<_> = BitIteratorLE::new(x.into_repr()).take(num_bits).collect(); - let x_ = ::ScalarField::from_repr( + let bits_lsb: Vec<_> = BitIteratorLE::new(x.into_bigint()).take(num_bits).collect(); + let x_ = ::ScalarField::from_bigint( ::BigInt::from_bits_le(&bits_lsb[..]), ) .unwrap(); - let base = Other::prime_subgroup_generator(); - let g = Other::prime_subgroup_generator().into_projective(); + let base = Other::generator(); + let g = Other::generator().into_group(); let acc = (g + g).into_affine(); let acc = (acc.x, acc.y); @@ -76,12 +76,12 @@ fn varbase_mul_test() { acc, ); - let shift = ::ScalarField::from(2).pow([(bits_msb.len()) as u64]); + let shift = ::ScalarField::from(2).pow([(bits_msb.len()) as u64]); let expected = g - .mul((::ScalarField::one() + shift + x_.double()).into_repr()) + .mul(&(::ScalarField::one() + shift + x_.double())) .into_affine(); - assert_eq!(x_.into_repr(), res.n.into_repr()); + assert_eq!(x_.into_bigint(), res.n.into_bigint()); assert_eq!((expected.x, expected.y), res.acc); } println!( diff --git a/kimchi/src/tests/xor.rs b/kimchi/src/tests/xor.rs index 0a03bbf988..940e2413a1 100644 --- a/kimchi/src/tests/xor.rs +++ b/kimchi/src/tests/xor.rs @@ -11,7 +11,7 @@ use crate::{ curve::KimchiCurve, prover_index::ProverIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; @@ -28,7 +28,7 @@ use poly_commitment::{ use super::framework::TestFramework; -type PallasField = ::BaseField; +type 
PallasField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/verifier.rs b/kimchi/src/verifier.rs index 697514ab88..09a975f145 100644 --- a/kimchi/src/verifier.rs +++ b/kimchi/src/verifier.rs @@ -19,7 +19,7 @@ use crate::{ proof::{PointEvaluations, ProofEvaluations, ProverProof, RecursionChallenge}, verifier_index::VerifierIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use ark_poly::{univariate::DensePolynomial, EvaluationDomain, Polynomial}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; @@ -762,7 +762,7 @@ where fn to_batch<'a, G, EFqSponge, EFrSponge, OpeningProof: OpenProof>( verifier_index: &VerifierIndex, proof: &'a ProverProof, - public_input: &'a [::ScalarField], + public_input: &'a [::ScalarField], ) -> Result> where G: KimchiCurve, diff --git a/poly-commitment/src/chunked.rs b/poly-commitment/src/chunked.rs index cc70c5bb75..0538285979 100644 --- a/poly-commitment/src/chunked.rs +++ b/poly-commitment/src/chunked.rs @@ -1,5 +1,6 @@ -use ark_ec::ProjectiveCurve; +use ark_ec::CurveGroup; use ark_ff::{Field, Zero}; +use std::ops::AddAssign; use crate::{commitment::CommitmentCurve, PolyComm}; @@ -10,13 +11,13 @@ where /// Multiplies each commitment chunk of f with powers of zeta^n // TODO(mimoo): better name for this function pub fn chunk_commitment(&self, zeta_n: C::ScalarField) -> Self { - let mut res = C::Projective::zero(); + let mut res = C::Group::zero(); // use Horner's to compute chunk[0] + z^n chunk[1] + z^2n chunk[2] + ... 
// as ( chunk[-1] * z^n + chunk[-2] ) * z^n + chunk[-3] // (https://en.wikipedia.org/wiki/Horner%27s_method) for chunk in self.elems.iter().rev() { res *= zeta_n; - res.add_assign_mixed(chunk); + res.add_assign(chunk); } PolyComm { diff --git a/poly-commitment/src/combine.rs b/poly-commitment/src/combine.rs index 52f7e19f95..521e4739dc 100644 --- a/poly-commitment/src/combine.rs +++ b/poly-commitment/src/combine.rs @@ -16,15 +16,16 @@ //! such a scratch array within each algorithm. use ark_ec::{ - models::short_weierstrass_jacobian::GroupAffine as SWJAffine, AffineCurve, ProjectiveCurve, - SWModelParameters, + models::short_weierstrass::Affine as SWJAffine, short_weierstrass::SWCurveConfig, AffineRepr, + CurveGroup, Group, }; use ark_ff::{BitIteratorBE, Field, One, PrimeField, Zero}; use itertools::Itertools; use mina_poseidon::sponge::ScalarChallenge; use rayon::prelude::*; +use std::ops::AddAssign; -fn add_pairs_in_place(pairs: &mut Vec>) { +fn add_pairs_in_place(pairs: &mut Vec>) { let len = if pairs.len() % 2 == 0 { pairs.len() } else { @@ -86,7 +87,7 @@ fn add_pairs_in_place(pairs: &mut Vec>) { /// assuming that for each `i`, `v0[i].x != v1[i].x` so we can use the ordinary /// addition formula and don't have to handle the edge cases of doubling and /// hitting the point at infinity. -fn batch_add_assign_no_branch( +fn batch_add_assign_no_branch( denominators: &mut [P::BaseField], v0: &mut [SWJAffine

], v1: &[SWJAffine

], @@ -117,7 +118,7 @@ fn batch_add_assign_no_branch( } /// Given arrays of curve points `v0` and `v1` do `v0[i] += v1[i]` for each i. -pub fn batch_add_assign( +pub fn batch_add_assign( denominators: &mut [P::BaseField], v0: &mut [SWJAffine

], v1: &[SWJAffine

], @@ -168,7 +169,7 @@ pub fn batch_add_assign( }); } -fn affine_window_combine_base( +fn affine_window_combine_base( g1: &[SWJAffine

], g2: &[SWJAffine

], x1: P::ScalarField, @@ -190,8 +191,8 @@ fn affine_window_combine_base( }; assert!(g1g2.len() == g1.len()); - let windows1 = BitIteratorBE::new(x1.into_repr()).tuples(); - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows1 = BitIteratorBE::new(x1.into_bigint()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); let mut points = vec![SWJAffine::

::zero(); g1.len()]; @@ -275,11 +276,11 @@ fn affine_window_combine_base( points } -fn batch_endo_in_place(endo_coeff: P::BaseField, ps: &mut [SWJAffine

]) { +fn batch_endo_in_place(endo_coeff: P::BaseField, ps: &mut [SWJAffine

]) { ps.par_iter_mut().for_each(|p| p.x *= endo_coeff); } -fn batch_negate_in_place(ps: &mut [SWJAffine

]) { +fn batch_negate_in_place(ps: &mut [SWJAffine

]) { ps.par_iter_mut().for_each(|p| { p.y = -p.y; }); @@ -287,7 +288,7 @@ fn batch_negate_in_place(ps: &mut [SWJAffine

]) { /// Uses a batch version of Algorithm 1 of https://eprint.iacr.org/2019/1021.pdf (on page 19) to /// compute `g1 + g2.scale(chal.to_field(endo_coeff))` -fn affine_window_combine_one_endo_base( +fn affine_window_combine_one_endo_base( endo_coeff: P::BaseField, g1: &[SWJAffine

], g2: &[SWJAffine

], @@ -304,7 +305,7 @@ fn affine_window_combine_one_endo_base( (limbs_lsb[limb as usize] >> j) & 1 } - let rep = chal.0.into_repr(); + let rep = chal.0.into_bigint(); let r = rep.as_ref(); let mut denominators = vec![P::BaseField::zero(); g1.len()]; @@ -340,7 +341,7 @@ fn affine_window_combine_one_endo_base( } /// Double an array of curve points in-place. -fn batch_double_in_place( +fn batch_double_in_place( denominators: &mut Vec, points: &mut [SWJAffine

], ) { @@ -366,12 +367,12 @@ fn batch_double_in_place( }); } -fn affine_window_combine_one_base( +fn affine_window_combine_one_base( g1: &[SWJAffine

], g2: &[SWJAffine

], x2: P::ScalarField, ) -> Vec> { - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); let mut points = vec![SWJAffine::

::zero(); g1.len()]; @@ -412,7 +413,7 @@ fn affine_window_combine_one_base( points } -pub fn affine_window_combine( +pub fn affine_window_combine( g1: &[SWJAffine

], g2: &[SWJAffine

], x1: P::ScalarField, @@ -431,7 +432,7 @@ pub fn affine_window_combine( /// `g1[i] + g2[i].scale(chal.to_field(endo_coeff))` /// /// Internally, it uses the curve endomorphism to speed up this operation. -pub fn affine_window_combine_one_endo( +pub fn affine_window_combine_one_endo( endo_coeff: P::BaseField, g1: &[SWJAffine

], g2: &[SWJAffine

], @@ -445,7 +446,7 @@ pub fn affine_window_combine_one_endo( .collect(); v.concat() } -pub fn affine_window_combine_one( +pub fn affine_window_combine_one( g1: &[SWJAffine

], g2: &[SWJAffine

], x2: P::ScalarField, @@ -459,24 +460,23 @@ pub fn affine_window_combine_one( v.concat() } -pub fn window_combine( +pub fn window_combine( g_lo: &[G], g_hi: &[G], x_lo: G::ScalarField, x_hi: G::ScalarField, ) -> Vec { - let mut g_proj: Vec = { + let mut g_proj: Vec = { let pairs: Vec<_> = g_lo.iter().zip(g_hi).collect(); pairs .into_par_iter() .map(|(lo, hi)| window_shamir::(x_lo, *lo, x_hi, *hi)) .collect() }; - G::Projective::batch_normalization(g_proj.as_mut_slice()); - g_proj.par_iter().map(|g| g.into_affine()).collect() + G::Group::normalize_batch(g_proj.as_mut_slice()) } -pub fn affine_shamir_window_table( +pub fn affine_shamir_window_table( denominators: &mut [P::BaseField], g1: &[SWJAffine

], g2: &[SWJAffine

], @@ -555,7 +555,7 @@ pub fn affine_shamir_window_table( res } -pub fn affine_shamir_window_table_one( +pub fn affine_shamir_window_table_one( denominators: &mut [P::BaseField], g1: &[SWJAffine

], ) -> [Vec>; 3] { @@ -585,118 +585,113 @@ pub fn affine_shamir_window_table_one( res } -fn window_shamir( - x1: G::ScalarField, - g1: G, - x2: G::ScalarField, - g2: G, -) -> G::Projective { +fn window_shamir(x1: G::ScalarField, g1: G, x2: G::ScalarField, g2: G) -> G::Group { let [_g00_00, g01_00, g10_00, g11_00, g00_01, g01_01, g10_01, g11_01, g00_10, g01_10, g10_10, g11_10, g00_11, g01_11, g10_11, g11_11] = shamir_window_table(g1, g2); - let windows1 = BitIteratorBE::new(x1.into_repr()).tuples(); - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows1 = BitIteratorBE::new(x1.into_bigint()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); - let mut res = G::Projective::zero(); + let mut res = G::Group::zero(); for ((hi_1, lo_1), (hi_2, lo_2)) in windows1.zip(windows2) { res.double_in_place(); res.double_in_place(); match ((hi_1, lo_1), (hi_2, lo_2)) { ((false, false), (false, false)) => (), - ((false, true), (false, false)) => res.add_assign_mixed(&g01_00), - ((true, false), (false, false)) => res.add_assign_mixed(&g10_00), - ((true, true), (false, false)) => res.add_assign_mixed(&g11_00), - - ((false, false), (false, true)) => res.add_assign_mixed(&g00_01), - ((false, true), (false, true)) => res.add_assign_mixed(&g01_01), - ((true, false), (false, true)) => res.add_assign_mixed(&g10_01), - ((true, true), (false, true)) => res.add_assign_mixed(&g11_01), - - ((false, false), (true, false)) => res.add_assign_mixed(&g00_10), - ((false, true), (true, false)) => res.add_assign_mixed(&g01_10), - ((true, false), (true, false)) => res.add_assign_mixed(&g10_10), - ((true, true), (true, false)) => res.add_assign_mixed(&g11_10), - - ((false, false), (true, true)) => res.add_assign_mixed(&g00_11), - ((false, true), (true, true)) => res.add_assign_mixed(&g01_11), - ((true, false), (true, true)) => res.add_assign_mixed(&g10_11), - ((true, true), (true, true)) => res.add_assign_mixed(&g11_11), + ((false, true), (false, false)) => 
res.add_assign(&g01_00), + ((true, false), (false, false)) => res.add_assign(&g10_00), + ((true, true), (false, false)) => res.add_assign(&g11_00), + + ((false, false), (false, true)) => res.add_assign(&g00_01), + ((false, true), (false, true)) => res.add_assign(&g01_01), + ((true, false), (false, true)) => res.add_assign(&g10_01), + ((true, true), (false, true)) => res.add_assign(&g11_01), + + ((false, false), (true, false)) => res.add_assign(&g00_10), + ((false, true), (true, false)) => res.add_assign(&g01_10), + ((true, false), (true, false)) => res.add_assign(&g10_10), + ((true, true), (true, false)) => res.add_assign(&g11_10), + + ((false, false), (true, true)) => res.add_assign(&g00_11), + ((false, true), (true, true)) => res.add_assign(&g01_11), + ((true, false), (true, true)) => res.add_assign(&g10_11), + ((true, true), (true, true)) => res.add_assign(&g11_11), } } res } -pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { - let g00_00 = G::prime_subgroup_generator().into_projective(); - let g01_00 = g1.into_projective(); +pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { + let g00_00 = G::generator().into_group(); + let g01_00 = g1.into_group(); let g10_00 = { let mut g = g01_00; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_00 = { let mut g = g10_00; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; - let g00_01 = g2.into_projective(); + let g00_01 = g2.into_group(); let g01_01 = { let mut g = g00_01; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_01 = { let mut g = g01_01; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_01 = { let mut g = g10_01; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g00_10 = { let mut g = g00_01; - g.add_assign_mixed(&g2); + g.add_assign(&g2); g }; let g01_10 = { let mut g = g00_10; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_10 = { let mut g = g01_10; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_10 = { let mut g = g10_10; - 
g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g00_11 = { let mut g = g00_10; - g.add_assign_mixed(&g2); + g.add_assign(&g2); g }; let g01_11 = { let mut g = g00_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_11 = { let mut g = g01_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_11 = { let mut g = g10_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; @@ -704,8 +699,7 @@ pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { g00_00, g01_00, g10_00, g11_00, g00_01, g01_01, g10_01, g11_01, g00_10, g01_10, g10_10, g11_10, g00_11, g01_11, g10_11, g11_11, ]; - G::Projective::batch_normalization(v.as_mut_slice()); - let v: Vec<_> = v.iter().map(|x| x.into_affine()).collect(); + let v: Vec<_> = G::Group::normalize_batch(v.as_mut_slice()); [ v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7], v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15], diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index e4428cf23e..41c12c96dd 100644 --- a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -12,17 +12,15 @@ use crate::{ SRS as SRSTrait, }; use ark_ec::{ - models::short_weierstrass_jacobian::GroupAffine as SWJAffine, msm::VariableBaseMSM, - AffineCurve, ProjectiveCurve, SWModelParameters, -}; -use ark_ff::{ - BigInteger, Field, FpParameters, One, PrimeField, SquareRootField, UniformRand, Zero, + models::short_weierstrass::Affine as SWJAffine, short_weierstrass::SWCurveConfig, AffineRepr, + CurveGroup, VariableBaseMSM, }; +use ark_ff::{BigInteger, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, }; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use core::ops::{Add, Sub}; +use core::ops::{Add, AddAssign, Sub}; use groupmap::{BWParameters, GroupMap}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; use o1_utils::{math, ExtendedDensePolynomial as _}; @@ 
-194,16 +192,16 @@ impl PolyComm { /// ``` /// /// in the other case. -pub fn shift_scalar(x: G::ScalarField) -> G::ScalarField +pub fn shift_scalar(x: G::ScalarField) -> G::ScalarField where G::BaseField: PrimeField, { - let n1 = ::Params::MODULUS; + let n1 = ::MODULUS; let n2 = ::BigInt::from_bits_le( - &::Params::MODULUS.to_bits_le()[..], + &::MODULUS.to_bits_le()[..], ); let two: G::ScalarField = (2u64).into(); - let two_pow = two.pow([::Params::MODULUS_BITS as u64]); + let two_pow = two.pow([::MODULUS_BIT_SIZE as u64]); if n1 < n2 { (x - (two_pow + G::ScalarField::one())) / two } else { @@ -211,7 +209,7 @@ where } } -impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { +impl<'a, 'b, C: AffineRepr> Add<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn add(self, other: &'a PolyComm) -> PolyComm { @@ -220,7 +218,7 @@ impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.elems[i] + other.elems[i] + (self.elems[i] + other.elems[i]).into_affine() } else if i < n1 { self.elems[i] } else { @@ -232,7 +230,7 @@ impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { } } -impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { +impl<'a, 'b, C: AffineRepr + Sub> Sub<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn sub(self, other: &'a PolyComm) -> PolyComm { @@ -241,7 +239,7 @@ impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.elems[i] + (-other.elems[i]) + (self.elems[i] - other.elems[i]).into_affine() } else if i < n1 { self.elems[i] } else { @@ -253,7 +251,7 @@ impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { } } -impl PolyComm { +impl PolyComm { pub fn scale(&self, c: C::ScalarField) -> PolyComm { PolyComm { elems: self.elems.iter().map(|g| 
g.mul(c).into_affine()).collect(), @@ -273,7 +271,7 @@ impl PolyComm { return Self::new(vec![C::zero()]); } - let all_scalars: Vec<_> = elm.iter().map(|s| s.into_repr()).collect(); + let all_scalars: Vec<_> = elm.iter().map(|s| s.into_bigint()).collect(); let elems_size = Iterator::max(com.iter().map(|c| c.elems.len())).unwrap(); let mut elems = Vec::with_capacity(elems_size); @@ -286,10 +284,9 @@ impl PolyComm { .filter_map(|(com, scalar)| com.elems.get(chunk).map(|c| (c, scalar))) .unzip(); - let chunk_msm = VariableBaseMSM::multi_scalar_mul::(&points, &scalars); + let chunk_msm = C::Group::msm_bigint(&points, &scalars); elems.push(chunk_msm.into_affine()); } - Self::new(elems) } } @@ -343,41 +340,31 @@ pub fn pows(d: usize, x: F) -> Vec { res } -pub fn squeeze_prechallenge>( +pub fn squeeze_prechallenge>( sponge: &mut EFqSponge, ) -> ScalarChallenge { ScalarChallenge(sponge.challenge()) } -pub fn squeeze_challenge< - Fq: Field, - G, - Fr: PrimeField + SquareRootField, - EFqSponge: FqSponge, ->( +pub fn squeeze_challenge>( endo_r: &Fr, sponge: &mut EFqSponge, ) -> Fr { squeeze_prechallenge(sponge).to_field(endo_r) } -pub fn absorb_commitment< - Fq: Field, - G: Clone, - Fr: PrimeField + SquareRootField, - EFqSponge: FqSponge, ->( +pub fn absorb_commitment>( sponge: &mut EFqSponge, commitment: &PolyComm, ) { sponge.absorb_g(&commitment.elems); } -/// A useful trait extending AffineCurve for commitments. -/// Unfortunately, we can't specify that `AffineCurve`, +/// A useful trait extending AffineRepr for commitments. +/// Unfortunately, we can't specify that `AffineRepr`, /// so usage of this traits must manually bind `G::BaseField: PrimeField`. 
-pub trait CommitmentCurve: AffineCurve { - type Params: SWModelParameters; +pub trait CommitmentCurve: AffineRepr + Sub { + type Params: SWCurveConfig; type Map: GroupMap; fn to_coordinates(&self) -> Option<(Self::BaseField, Self::BaseField)>; @@ -385,7 +372,7 @@ pub trait CommitmentCurve: AffineCurve { } /// A trait extending CommitmentCurve for endomorphisms. -/// Unfortunately, we can't specify that `AffineCurve`, +/// Unfortunately, we can't specify that `AffineRepr`, /// so usage of this traits must manually bind `G::BaseField: PrimeField`. pub trait EndoCurve: CommitmentCurve { /// Combine where x1 = one @@ -414,7 +401,7 @@ pub trait EndoCurve: CommitmentCurve { } } -impl CommitmentCurve for SWJAffine

{ +impl CommitmentCurve for SWJAffine

{ type Params = P; type Map = BWParameters

; @@ -427,14 +414,11 @@ impl CommitmentCurve for SWJAffine

{ } fn of_coordinates(x: P::BaseField, y: P::BaseField) -> SWJAffine

{ - SWJAffine::

::new(x, y, false) + SWJAffine::

::new_unchecked(x, y) } } -impl EndoCurve for SWJAffine

-where - P::BaseField: PrimeField, -{ +impl EndoCurve for SWJAffine

{ fn combine_one(g1: &[Self], g2: &[Self], x2: Self::ScalarField) -> Vec { crate::combine::affine_window_combine_one(g1, g2, x2) } @@ -459,7 +443,7 @@ where } } -pub fn to_group(m: &G::Map, t: ::BaseField) -> G { +pub fn to_group(m: &G::Map, t: ::BaseField) -> G { let (x, y) = m.to_group(t); G::of_coordinates(x, y) } @@ -524,7 +508,7 @@ pub fn combined_inner_product( /// Contains the evaluation of a polynomial commitment at a set of points. pub struct Evaluation where - G: AffineCurve, + G: AffineRepr, { /// The commitment of the polynomial being evaluated. /// Note that PolyComm contains a vector of commitments, which handles the @@ -544,7 +528,7 @@ where // TODO: I think we should really change this name to something more correct pub struct BatchEvaluationProof<'a, G, EFqSponge, OpeningProof> where - G: AffineCurve, + G: AffineRepr, EFqSponge: FqSponge, { /// The sponge used to generate/absorb the challenges. @@ -704,7 +688,7 @@ where .ok_or_else(|| CommitmentError::BlindersDontMatch(blinders.len(), com.len()))? 
.map(|(g, b)| { let mut g_masked = self.h.mul(b); - g_masked.add_assign_mixed(&g); + g_masked.add_assign(&g); g_masked.into_affine() }); Ok(BlindedCommitment { @@ -725,7 +709,7 @@ where ) -> PolyComm { let is_zero = plnm.is_zero(); - let coeffs: Vec<_> = plnm.iter().map(|c| c.into_repr()).collect(); + let coeffs: Vec<_> = plnm.iter().map(|c| c.into_bigint()).collect(); // chunk while commiting let mut elems = vec![]; @@ -733,7 +717,7 @@ where elems.push(G::zero()); } else { coeffs.chunks(self.g.len()).for_each(|coeffs_chunk| { - let chunk = VariableBaseMSM::multi_scalar_mul(&self.g, coeffs_chunk); + let chunk = G::Group::msm_bigint(&self.g, coeffs_chunk); elems.push(chunk.into_affine()); }); } @@ -984,8 +968,8 @@ impl SRS { } // verify the equation - let scalars: Vec<_> = scalars.iter().map(|x| x.into_repr()).collect(); - VariableBaseMSM::multi_scalar_mul(&points, &scalars) == G::Projective::zero() + let scalars: Vec<_> = scalars.iter().map(|x| x.into_bigint()).collect(); + G::Group::msm_bigint(&points, &scalars) == G::Group::zero() } } @@ -1013,12 +997,12 @@ pub mod caml { impl From> for CamlPolyComm where - G: AffineCurve, + G: AffineRepr, CamlG: From, { fn from(polycomm: PolyComm) -> Self { Self { - unshifted: polycomm.elems.into_iter().map(Into::into).collect(), + unshifted: polycomm.elems.into_iter().map(CamlG::from).collect(), shifted: None, } } @@ -1026,12 +1010,12 @@ pub mod caml { impl<'a, G, CamlG> From<&'a PolyComm> for CamlPolyComm where - G: AffineCurve, + G: AffineRepr, CamlG: From + From<&'a G>, { fn from(polycomm: &'a PolyComm) -> Self { Self { - unshifted: polycomm.elems.iter().map(Into::into).collect(), + unshifted: polycomm.elems.iter().map(Into::::into).collect(), shifted: None, } } @@ -1039,7 +1023,7 @@ pub mod caml { impl From> for PolyComm where - G: AffineCurve + From, + G: AffineRepr + From, { fn from(camlpolycomm: CamlPolyComm) -> PolyComm { assert!( @@ -1047,14 +1031,18 @@ pub mod caml { "mina#14628: Shifted commitments are deprecated and 
must not be used" ); PolyComm { - elems: camlpolycomm.unshifted.into_iter().map(Into::into).collect(), + elems: camlpolycomm + .unshifted + .into_iter() + .map(Into::::into) + .collect(), } } } impl<'a, G, CamlG> From<&'a CamlPolyComm> for PolyComm where - G: AffineCurve + From<&'a CamlG> + From, + G: AffineRepr + From<&'a CamlG> + From, { fn from(camlpolycomm: &'a CamlPolyComm) -> PolyComm { assert!( @@ -1082,7 +1070,7 @@ pub mod caml { impl From> for CamlOpeningProof where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: From, { @@ -1091,19 +1079,19 @@ pub mod caml { lr: opening_proof .lr .into_iter() - .map(|(g1, g2)| (g1.into(), g2.into())) + .map(|(g1, g2)| (CamlG::from(g1), CamlG::from(g2))) .collect(), - delta: opening_proof.delta.into(), + delta: CamlG::from(opening_proof.delta), z1: opening_proof.z1.into(), z2: opening_proof.z2.into(), - sg: opening_proof.sg.into(), + sg: CamlG::from(opening_proof.sg), } } } impl From> for OpeningProof where - G: AffineCurve, + G: AffineRepr, CamlG: Into, CamlF: Into, { diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 82aced647b..05b97fa2cc 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -8,9 +8,9 @@ use crate::{ srs::{endos, SRS}, PolynomialsToCombine, SRS as _, }; -use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, EvaluationDomain, Evaluations, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Evaluations}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; use o1_utils::{math, ExtendedDensePolynomial}; use rand_core::{CryptoRng, RngCore}; @@ -247,22 +247,22 @@ impl SRS { let rand_r = ::rand(rng); // Pedersen commitment to a_lo,rand_l, - let l = VariableBaseMSM::multi_scalar_mul( 
+ let l = G::Group::msm_bigint( &[g_lo, &[self.h, u]].concat(), &[a_hi, &[rand_l, inner_prod(a_hi, b_lo)]] .concat() .iter() - .map(|x| x.into_repr()) + .map(|x| x.into_bigint()) .collect::>(), ) .into_affine(); - let r = VariableBaseMSM::multi_scalar_mul( + let r = G::Group::msm_bigint( &[g_hi, &[self.h, u]].concat(), &[a_lo, &[rand_r, inner_prod(a_lo, b_hi)]] .concat() .iter() - .map(|x| x.into_repr()) + .map(|x| x.into_bigint()) .collect::>(), ) .into_affine(); @@ -334,9 +334,8 @@ impl SRS { let r_delta = ::rand(rng); // delta = (g0 + u*b0)*d + h*r_delta - let delta = ((g0.into_projective() + (u.mul(b0))).into_affine().mul(d) - + self.h.mul(r_delta)) - .into_affine(); + let delta = ((g0.into_group() + (u.mul(b0))).into_affine().mul(d) + self.h.mul(r_delta)) + .into_affine(); sponge.absorb_g(&[delta]); let c = ScalarChallenge(sponge.challenge()).to_field(&endo_r); @@ -396,7 +395,7 @@ impl SRS { #[serde_as] #[derive(Clone, Debug, Serialize, Deserialize, Default)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct OpeningProof { +pub struct OpeningProof { /// Vector of rounds of L & R commitments #[serde_as(as = "Vec<(o1_utils::serialization::SerdeAs, o1_utils::serialization::SerdeAs)>")] pub lr: Vec<(G, G)>, @@ -411,26 +410,24 @@ pub struct OpeningProof { pub sg: G, } -impl< - BaseField: PrimeField, - G: AffineCurve + CommitmentCurve + EndoCurve, - > crate::OpenProof for OpeningProof +impl + CommitmentCurve + EndoCurve> + crate::OpenProof for OpeningProof { type SRS = SRS; - fn open::ScalarField>>( + fn open::ScalarField>>( srs: &Self::SRS, group_map: &::Map, plnms: PolynomialsToCombine, - elm: &[::ScalarField], // vector of evaluation points - polyscale: ::ScalarField, // scaling factor for polynoms - evalscale: ::ScalarField, // scaling factor for evaluation point powers - sponge: EFqSponge, // sponge + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for 
polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers + sponge: EFqSponge, // sponge rng: &mut RNG, ) -> Self where EFqSponge: - Clone + FqSponge<::BaseField, G, ::ScalarField>, + Clone + FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng, { srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng) @@ -443,7 +440,7 @@ impl< rng: &mut RNG, ) -> bool where - EFqSponge: FqSponge, + EFqSponge: FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng, { srs.verify(group_map, batch, rng) @@ -456,7 +453,7 @@ pub struct Challenges { pub chal_inv: Vec, } -impl OpeningProof { +impl OpeningProof { /// Computes a log-sized vector of scalar challenges for /// recombining elements inside the IPA. pub fn prechallenges>( diff --git a/poly-commitment/src/kzg.rs b/poly-commitment/src/kzg.rs index d21573fcab..7fa822726d 100644 --- a/poly-commitment/src/kzg.rs +++ b/poly-commitment/src/kzg.rs @@ -6,7 +6,7 @@ //! The protocol requires a structured reference string (SRS) that contains //! powers of a generator of a group, and a pairing friendly curve. //! -//! The pairing friendly curve requirement is hidden in the PairingEngine trait +//! The pairing friendly curve requirement is hidden in the Pairing trait //! parameter. 
use crate::{ @@ -14,40 +14,41 @@ use crate::{ PolynomialsToCombine, SRS as SRSTrait, }; -use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine}; +use ark_ec::{pairing::Pairing, AffineRepr, VariableBaseMSM}; use ark_ff::{One, PrimeField, Zero}; use ark_poly::{ univariate::{DenseOrSparsePolynomial, DensePolynomial}, - EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, UVPolynomial, + DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, }; use mina_poseidon::FqSponge; use rand_core::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; +use std::ops::Neg; #[serde_as] #[derive(Debug, Serialize, Deserialize)] #[serde( bound = "Pair::G1Affine: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize" )] -pub struct KZGProof { +pub struct KZGProof { #[serde_as(as = "o1_utils::serialization::SerdeAs")] pub quotient: Pair::G1Affine, #[serde_as(as = "o1_utils::serialization::SerdeAs")] /// A blinding factor used to hide the polynomial, if necessary - pub blinding: ::ScalarField, + pub blinding: ::ScalarField, } -impl Default for KZGProof { +impl Default for KZGProof { fn default() -> Self { Self { - quotient: Pair::G1Affine::prime_subgroup_generator(), - blinding: ::ScalarField::zero(), + quotient: Pair::G1Affine::generator(), + blinding: ::ScalarField::zero(), } } } -impl Clone for KZGProof { +impl Clone for KZGProof { fn clone(&self) -> Self { Self { quotient: self.quotient, @@ -62,7 +63,7 @@ impl Clone for KZGProof { /// /// The SRS is formed using what we call a "trusted setup". For now, the setup /// is created using the method `create_trusted_setup`. -pub struct PairingSRS { +pub struct PairingSRS { /// The full SRS is the one used by the prover. 
Can be seen as the "proving /// key"/"secret key" pub full_srs: SRS, @@ -71,7 +72,7 @@ pub struct PairingSRS { pub verifier_srs: SRS, } -impl Default for PairingSRS { +impl Default for PairingSRS { fn default() -> Self { Self { full_srs: SRS::default(), @@ -80,7 +81,7 @@ impl Default for PairingSRS { } } -impl Clone for PairingSRS { +impl Clone for PairingSRS { fn clone(&self) -> Self { Self { full_srs: self.full_srs.clone(), @@ -93,7 +94,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > PairingSRS { /// Create a new SRS for the KZG protocol. @@ -115,7 +116,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > crate::OpenProof for KZGProof { type SRS = PairingSRS; @@ -132,14 +133,14 @@ impl< srs: &Self::SRS, _group_map: &::Map, plnms: PolynomialsToCombine, - elm: &[::ScalarField], - polyscale: ::ScalarField, - _evalscale: ::ScalarField, + elm: &[::ScalarField], + polyscale: ::ScalarField, + _evalscale: ::ScalarField, _sponge: EFqSponge, _rng: &mut RNG, ) -> Self where - EFqSponge: Clone + FqSponge<::BaseField, G, F>, + EFqSponge: Clone + FqSponge<::BaseField, G, F>, RNG: RngCore + CryptoRng, { KZGProof::create(srs, plnms, elm, polyscale).unwrap() @@ -177,7 +178,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > SRSTrait for PairingSRS { fn max_poly_size(&self) -> usize { @@ -320,7 +321,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > KZGProof { /// Create a KZG proof. 
@@ -372,7 +373,7 @@ impl< polyscale: F, // scaling factor for polynoms elm: &[F], // vector of evaluation points ) -> bool { - let poly_commitment = { + let poly_commitment: G::Group = { let mut scalars: Vec = Vec::new(); let mut points = Vec::new(); combine_commitments( @@ -382,9 +383,9 @@ impl< polyscale, F::one(), /* TODO: This is inefficient */ ); - let scalars: Vec<_> = scalars.iter().map(|x| x.into_repr()).collect(); + let scalars: Vec<_> = scalars.iter().map(|x| x.into_bigint()).collect(); - VariableBaseMSM::multi_scalar_mul(&points, &scalars) + G::Group::msm_bigint(&points, &scalars) }; // IMPROVEME: we could have a single flat array for all evaluations, see @@ -401,25 +402,35 @@ impl< .full_srs .commit_non_hiding(&eval_polynomial(elm, &evals), 1) .elems[0] - .into_projective(); + .into_group(); let numerator_commitment = { poly_commitment - eval_commitment - blinding_commitment }; // We compute the result of the multiplication of two miller loop, // to apply only one final exponentation - let to_loop = [ - ( - ark_ec::prepare_g1::(numerator_commitment), - ark_ec::prepare_g2::(Pair::G2Affine::prime_subgroup_generator()), - ), - ( - // Note that we do a neagtion here, to put everything on the same side - (self.quotient).neg().into(), - ark_ec::prepare_g2::(divisor_commitment), - ), + let to_loop_left = [ + ark_ec::pairing::prepare_g1::(numerator_commitment), + // Note that we do a neagtion here, to put everything on the same side + ark_ec::pairing::prepare_g1::(self.quotient.into_group().neg()), + ]; + let to_loop_right = [ + ark_ec::pairing::prepare_g2::(Pair::G2Affine::generator()), + ark_ec::pairing::prepare_g2::(divisor_commitment), ]; // the result here is numerator_commitment * 1 - quotient * divisor_commitment // Note that the unwrap cannot fail as the output of a miller loop is non zero - let res = Pair::final_exponentiation(&(Pair::miller_loop(&to_loop))).unwrap(); - - res == Pair::Fqk::one() + let res = 
Pair::final_exponentiation(Pair::multi_miller_loop(to_loop_left, to_loop_right)) + .unwrap(); + + res.0 == Pair::TargetField::one() + + // @VOLHOVM remove this if not necessary + // let numerator_commitment_proj: ::Group = + // { poly_commitment - eval_commitment - blinding_commitment }; + // let numerator_commitment_affine: Pair::G1Affine = From::from(numerator_commitment_proj); + // + // let numerator = Pair::pairing(numerator_commitment_affine, Pair::G2Affine::generator()); + // let scaled_quotient = Pair::pairing(self.quotient, divisor_commitment); + // numerator == scaled_quotient + // } + //} } } diff --git a/poly-commitment/src/lib.rs b/poly-commitment/src/lib.rs index 9ca1165d3a..915861dca8 100644 --- a/poly-commitment/src/lib.rs +++ b/poly-commitment/src/lib.rs @@ -13,7 +13,7 @@ use crate::{ error::CommitmentError, evaluation_proof::DensePolynomialOrEvaluations, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::UniformRand; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, @@ -164,19 +164,19 @@ pub trait OpenProof: Sized + Clone { /// - `sponge`: Sponge used to coin and absorb values /// - `rng`: The RNG to use to generate random elements in the open #[allow(clippy::too_many_arguments)] - fn open::ScalarField>>( + fn open::ScalarField>>( srs: &Self::SRS, group_map: &::Map, plnms: PolynomialsToCombine, - elm: &[::ScalarField], - polyscale: ::ScalarField, - evalscale: ::ScalarField, + elm: &[::ScalarField], + polyscale: ::ScalarField, + evalscale: ::ScalarField, sponge: EFqSponge, // sponge rng: &mut RNG, ) -> Self where EFqSponge: - Clone + FqSponge<::BaseField, G, ::ScalarField>, + Clone + FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng; fn verify( diff --git a/poly-commitment/src/srs.rs b/poly-commitment/src/srs.rs index 93827f90a5..6b1388094d 100644 --- a/poly-commitment/src/srs.rs +++ b/poly-commitment/src/srs.rs @@ -2,7 +2,7 @@ //! String (SRS). 
use crate::{commitment::CommitmentCurve, PolyComm}; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -49,10 +49,10 @@ where let endo_q: G::BaseField = mina_poseidon::sponge::endo_coefficient(); let endo_r = { let potential_endo_r: G::ScalarField = mina_poseidon::sponge::endo_coefficient(); - let t = G::prime_subgroup_generator(); + let t = G::generator(); let (x, y) = t.to_coordinates().unwrap(); let phi_t = G::of_coordinates(x * endo_q, y); - if t.mul(potential_endo_r) == phi_t.into_projective() { + if t.mul(potential_endo_r) == phi_t.into_group() { potential_endo_r } else { potential_endo_r * potential_endo_r @@ -82,12 +82,13 @@ where let n = <::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits); - let t = <::BasePrimeField as PrimeField>::from_repr(n) + let t = <::BasePrimeField as PrimeField>::from_bigint(n) .expect("packing code has a bug"); base_fields.push(t) } let t = G::BaseField::from_base_prime_field_elems(&base_fields).unwrap(); + let (x, y) = map.to_group(t); G::of_coordinates(x, y).mul_by_cofactor() } @@ -190,24 +191,22 @@ impl SRS { // For each chunk for i in 0..num_elems { // Initialize the vector with zero curve points - let mut lg: Vec<::Projective> = - vec![::Projective::zero(); n]; + let mut lg: Vec<::Group> = vec![::Group::zero(); n]; // Overwrite the terms corresponding to that chunk with the SRS curve points let start_offset = i * srs_size; let num_terms = min((i + 1) * srs_size, n) - start_offset; for j in 0..num_terms { - lg[start_offset + j] = self.g[j].into_projective() + lg[start_offset + j] = self.g[j].into_group() } // Apply the IFFT domain.ifft_in_place(&mut lg); - ::Projective::batch_normalization(lg.as_mut_slice()); // Append the 'partial Langrange polynomials' to the vector of elems chunks - elems.push(lg) + 
elems.push(::Group::normalize_batch(lg.as_mut_slice())); } let chunked_commitments: Vec<_> = (0..n) .map(|i| PolyComm { - elems: elems.iter().map(|v| v[i].into_affine()).collect(), + elems: elems.iter().map(|v| v[i]).collect(), }) .collect(); self.lagrange_bases.insert(n, chunked_commitments); @@ -226,7 +225,7 @@ impl SRS { let mut x_pow = G::ScalarField::one(); let g: Vec<_> = (0..depth) .map(|_| { - let res = G::prime_subgroup_generator().mul(x_pow); + let res = G::generator().mul(x_pow); x_pow *= x; res.into_affine() }) diff --git a/poly-commitment/tests/batch_15_wires.rs b/poly-commitment/tests/batch_15_wires.rs index 6313f6fb43..262f8c1f6b 100644 --- a/poly-commitment/tests/batch_15_wires.rs +++ b/poly-commitment/tests/batch_15_wires.rs @@ -2,7 +2,7 @@ //! verification of a batch of batched opening proofs of polynomial commitments use ark_ff::{UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Radix2EvaluationDomain}; use colored::Colorize; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; diff --git a/poly-commitment/tests/commitment.rs b/poly-commitment/tests/commitment.rs index 247d6da6a8..b6ee5841b6 100644 --- a/poly-commitment/tests/commitment.rs +++ b/poly-commitment/tests/commitment.rs @@ -1,5 +1,5 @@ use ark_ff::{UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Radix2EvaluationDomain}; use colored::Colorize; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; diff --git a/poly-commitment/tests/ipa_commitment.rs b/poly-commitment/tests/ipa_commitment.rs index 7077c937bb..2140613590 100644 --- a/poly-commitment/tests/ipa_commitment.rs +++ b/poly-commitment/tests/ipa_commitment.rs @@ -1,7 +1,8 @@ +use ark_ec::AffineRepr; use ark_ff::{One, UniformRand, Zero}; use 
ark_poly::{ - univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, - Radix2EvaluationDomain as D, Radix2EvaluationDomain, UVPolynomial, + univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, + Radix2EvaluationDomain as D, Radix2EvaluationDomain, }; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta as VestaG}; diff --git a/poly-commitment/tests/kzg.rs b/poly-commitment/tests/kzg.rs index 2baeb175f7..8f08f051f5 100644 --- a/poly-commitment/tests/kzg.rs +++ b/poly-commitment/tests/kzg.rs @@ -1,9 +1,9 @@ -use ark_bn254::{Fr as ScalarField, G1Affine as G1, G2Affine as G2, Parameters}; -use ark_ec::bn::Bn; -use ark_ff::{UniformRand, Zero}; +use ark_bn254::{Config, Fr as ScalarField, G1Affine as G1, G2Affine as G2}; +use ark_ec::{bn::Bn, AffineRepr}; +use ark_ff::UniformRand; use ark_poly::{ - univariate::DensePolynomial, EvaluationDomain, Polynomial, Radix2EvaluationDomain as D, - UVPolynomial, + univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Polynomial, + Radix2EvaluationDomain as D, }; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use poly_commitment::{ @@ -74,7 +74,7 @@ fn test_kzg_proof() { let polyscale = ScalarField::rand(&mut rng); - let kzg_proof = KZGProof::>::create( + let kzg_proof = KZGProof::>::create( &srs, polynomials_and_blinders.as_slice(), &evaluation_points, @@ -89,8 +89,8 @@ fn test_kzg_proof() { /// Our points in G2 are not actually in the correct subgroup and serialize well. 
#[test] fn check_srs_g2_valid_and_serializes() { - type BN254 = ark_ec::bn::Bn; - type BN254G2BaseField = ::Fqe; + type BN254 = Bn; + type BN254G2BaseField = ::BaseField; type Fp = ark_bn254::Fr; let mut rng = o1_utils::tests::make_test_rng(None); @@ -112,9 +112,10 @@ fn check_srs_g2_valid_and_serializes() { // Check it serializes well let actual_y: BN254G2BaseField = actual.y; - let res = actual_y.serialize(vec.as_mut_slice()); + let res = actual_y.serialize_compressed(vec.as_mut_slice()); assert!(res.is_ok()); - let expected: BN254G2BaseField = CanonicalDeserialize::deserialize(vec.as_slice()).unwrap(); + let expected: BN254G2BaseField = + CanonicalDeserialize::deserialize_compressed(vec.as_slice()).unwrap(); assert!(expected == actual_y, "serialization failed"); } } diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml index cbd94e3977..1b591f8f14 100644 --- a/poseidon/Cargo.toml +++ b/poseidon/Cargo.toml @@ -16,6 +16,7 @@ path = "src/lib.rs" ark-ff.workspace = true ark-ec.workspace = true ark-poly.workspace = true +ark-serialize.workspace = true rand.workspace = true rayon.workspace = true serde.workspace = true @@ -30,10 +31,9 @@ ocaml = { workspace = true, optional = true } ocaml-gen = { workspace = true, optional = true } [dev-dependencies] -criterion = "0.3" +criterion.workspace = true serde_json.workspace = true hex.workspace = true -ark-serialize.workspace = true [features] default = [] diff --git a/poseidon/export_test_vectors/Cargo.toml b/poseidon/export_test_vectors/Cargo.toml index 6af585470b..80baaa21f8 100644 --- a/poseidon/export_test_vectors/Cargo.toml +++ b/poseidon/export_test_vectors/Cargo.toml @@ -10,11 +10,11 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } num-bigint = { version = "0.4.0" } serde_json = { version = "1.0" } hex = { version = "0.4" } -ark-serialize = { version = "0.3.0" } +ark-serialize 
= { version = "0.4.2" } rand = "0.8.0" serde = { version = "1.0", features = ["derive"] } serde_with = "1.10.0" diff --git a/poseidon/export_test_vectors/src/vectors.rs b/poseidon/export_test_vectors/src/vectors.rs index 7fc8826cfc..b8feddeeb8 100644 --- a/poseidon/export_test_vectors/src/vectors.rs +++ b/poseidon/export_test_vectors/src/vectors.rs @@ -1,5 +1,5 @@ use super::{Mode, ParamType}; -use ark_ff::{fields::PrimeField as _, UniformRand as _}; +use ark_ff::UniformRand as _; use ark_serialize::CanonicalSerialize as _; use mina_curves::pasta::Fp; use mina_poseidon::{ @@ -78,9 +78,10 @@ pub fn generate(mode: Mode, param_type: ParamType) -> TestVectors { .into_iter() .map(|elem| { let mut input_bytes = vec![]; - elem.into_repr() - .serialize(&mut input_bytes) + elem.0 + .serialize_uncompressed(&mut input_bytes) .expect("canonical serialiation should work"); + match mode { Mode::Hex => hex::encode(&input_bytes), Mode::B10 => BigUint::from_bytes_le(&input_bytes).to_string(), @@ -89,8 +90,8 @@ pub fn generate(mode: Mode, param_type: ParamType) -> TestVectors { .collect(); let mut output_bytes = vec![]; output - .into_repr() - .serialize(&mut output_bytes) + .0 + .serialize_uncompressed(&mut output_bytes) .expect("canonical serialization should work"); // add vector diff --git a/poseidon/src/poseidon.rs b/poseidon/src/poseidon.rs index d1075b512e..056a964c3e 100644 --- a/poseidon/src/poseidon.rs +++ b/poseidon/src/poseidon.rs @@ -5,6 +5,7 @@ use crate::{ permutation::{full_round, poseidon_block_cipher}, }; use ark_ff::Field; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -47,7 +48,7 @@ pub enum SpongeState { #[serde_as] #[derive(Clone, Serialize, Deserialize, Default, Debug)] -pub struct ArithmeticSpongeParams { +pub struct ArithmeticSpongeParams { #[serde_as(as = "Vec>")] pub round_constants: Vec>, #[serde_as(as = "Vec>")] diff --git a/poseidon/src/sponge.rs b/poseidon/src/sponge.rs 
index ea743633d6..be9ba0a319 100644 --- a/poseidon/src/sponge.rs +++ b/poseidon/src/sponge.rs @@ -2,8 +2,8 @@ use crate::{ constants::SpongeConstants, poseidon::{ArithmeticSponge, ArithmeticSpongeParams, Sponge}, }; -use ark_ec::{short_weierstrass_jacobian::GroupAffine, SWModelParameters}; -use ark_ff::{BigInteger, Field, FpParameters, One, PrimeField, Zero}; +use ark_ec::models::short_weierstrass::{Affine, SWCurveConfig}; +use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; /// Abstracts a sponge operating on a base field `Fq` of the curve /// `G`. The parameter `Fr` is modelling the scalar field of the @@ -51,9 +51,7 @@ pub struct ScalarChallenge(pub F); pub fn endo_coefficient() -> F { let p_minus_1_over_3 = (F::zero() - F::one()) / F::from(3u64); - let t = F::multiplicative_generator(); - - t.pow(p_minus_1_over_3.into_repr().as_ref()) + F::GENERATOR.pow(p_minus_1_over_3.into_bigint().as_ref()) } fn get_bit(limbs_lsb: &[u64], i: u64) -> u64 { @@ -64,7 +62,7 @@ fn get_bit(limbs_lsb: &[u64], i: u64) -> u64 { impl ScalarChallenge { pub fn to_field_with_length(&self, length_in_bits: usize, endo_coeff: &F) -> F { - let rep = self.0.into_repr(); + let rep = self.0.into_bigint(); let r = rep.as_ref(); let mut a: F = 2_u64.into(); @@ -97,7 +95,7 @@ impl ScalarChallenge { } #[derive(Clone)] -pub struct DefaultFqSponge { +pub struct DefaultFqSponge { pub sponge: ArithmeticSponge, pub last_squeezed: Vec, } @@ -108,10 +106,10 @@ pub struct DefaultFrSponge { } fn pack(limbs_lsb: &[u64]) -> B { - let mut res: B = 0.into(); + let mut res: B = 0u64.into(); for &x in limbs_lsb.iter().rev() { res.muln(64); - res.add_nocarry(&x.into()); + res.add_with_carry(&x.into()); } res } @@ -122,10 +120,9 @@ impl DefaultFrSponge { let last_squeezed = self.last_squeezed.clone(); let (limbs, remaining) = last_squeezed.split_at(num_limbs); self.last_squeezed = remaining.to_vec(); - Fr::from_repr(pack::(limbs)) - .expect("internal representation was not a valid field element") + 
Fr::from(pack::(limbs)) } else { - let x = self.sponge.squeeze().into_repr(); + let x = self.sponge.squeeze().into_bigint(); self.last_squeezed .extend(&x.as_ref()[0..HIGH_ENTROPY_LIMBS]); self.squeeze(num_limbs) @@ -133,7 +130,7 @@ impl DefaultFrSponge { } } -impl DefaultFqSponge +impl DefaultFqSponge where P::BaseField: PrimeField, ::BigInt: Into<::BigInt>, @@ -145,7 +142,7 @@ where self.last_squeezed = remaining.to_vec(); limbs.to_vec() } else { - let x = self.sponge.squeeze().into_repr(); + let x = self.sponge.squeeze().into_bigint(); self.last_squeezed .extend(&x.as_ref()[0..HIGH_ENTROPY_LIMBS]); self.squeeze_limbs(num_limbs) @@ -158,13 +155,13 @@ where } pub fn squeeze(&mut self, num_limbs: usize) -> P::ScalarField { - P::ScalarField::from_repr(pack(&self.squeeze_limbs(num_limbs))) + P::ScalarField::from_bigint(pack(&self.squeeze_limbs(num_limbs))) .expect("internal representation was not a valid field element") } } -impl - FqSponge, P::ScalarField> for DefaultFqSponge +impl FqSponge, P::ScalarField> + for DefaultFqSponge where P::BaseField: PrimeField, ::BigInt: Into<::BigInt>, @@ -177,7 +174,7 @@ where } } - fn absorb_g(&mut self, g: &[GroupAffine

]) { + fn absorb_g(&mut self, g: &[Affine

]) { self.last_squeezed = vec![]; for g in g.iter() { if g.infinity { @@ -204,13 +201,13 @@ where self.last_squeezed = vec![]; x.iter().for_each(|x| { - let bits = x.into_repr().to_bits_le(); + let bits = x.into_bigint().to_bits_le(); // absorb - if ::Params::MODULUS - < ::Params::MODULUS.into() + if ::MODULUS + < ::MODULUS.into() { - let fe = P::BaseField::from_repr( + let fe = P::BaseField::from_bigint( ::BigInt::from_bits_le(&bits), ) .expect("padding code has a bug"); @@ -222,7 +219,7 @@ where P::BaseField::zero() }; - let high_bits = P::BaseField::from_repr( + let high_bits = P::BaseField::from_bigint( ::BigInt::from_bits_le(&bits[1..bits.len()]), ) .expect("padding code has a bug"); @@ -234,14 +231,14 @@ where } fn digest(mut self) -> P::ScalarField { - let x: ::BigInt = self.squeeze_field().into_repr(); + let x: ::BigInt = self.squeeze_field().into_bigint(); // Returns zero for values that are too large. // This means that there is a bias for the value zero (in one of the curve). // An attacker could try to target that seed, in order to predict the challenges u and v produced by the Fr-Sponge. // This would allow the attacker to mess with the result of the aggregated evaluation proof. // Previously the attacker's odds were 1/q, now it's (q-p)/q. // Since log2(q-p) ~ 86 and log2(q) ~ 254 the odds of a successful attack are negligible. 
- P::ScalarField::from_repr(x.into()).unwrap_or_else(P::ScalarField::zero) + P::ScalarField::from_bigint(x.into()).unwrap_or_else(P::ScalarField::zero) } fn digest_fq(mut self) -> P::BaseField { diff --git a/signer/src/lib.rs b/signer/src/lib.rs index 6f64a44930..ff90997c6d 100644 --- a/signer/src/lib.rs +++ b/signer/src/lib.rs @@ -15,16 +15,16 @@ pub use schnorr::Schnorr; pub use seckey::SecKey; pub use signature::Signature; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; /// Affine curve point type pub use mina_curves::pasta::Pallas as CurvePoint; /// Base field element type -pub type BaseField = ::BaseField; +pub type BaseField = ::BaseField; /// Scalar field element type -pub type ScalarField = ::ScalarField; +pub type ScalarField = ::ScalarField; /// Mina network (or blockchain) identifier #[derive(Debug, Clone)] diff --git a/signer/src/pubkey.rs b/signer/src/pubkey.rs index cf8ba1278a..a7eaddba11 100644 --- a/signer/src/pubkey.rs +++ b/signer/src/pubkey.rs @@ -2,12 +2,12 @@ //! //! 
Definition of public key structure and helpers -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{short_weierstrass::Affine, AffineRepr, CurveGroup}; use ark_ff::{BigInteger, PrimeField, Zero}; use bs58; use core::fmt; use sha2::{Digest, Sha256}; -use std::ops::Neg; +use std::ops::{Mul, Neg}; use thiserror::Error; use crate::{BaseField, CurvePoint, ScalarField, SecKey}; @@ -86,12 +86,17 @@ impl PubKey { .map_err(|_| PubKeyError::XCoordinateBytes)?; let y = BaseField::from_bytes(&bytes[BaseField::size_in_bytes()..]) .map_err(|_| PubKeyError::YCoordinateBytes)?; - let pt = CurvePoint::get_point_from_x(x, y.0.is_odd()).ok_or(PubKeyError::XCoordinate)?; + let pt = CurvePoint::get_point_from_x_unchecked(x, y.0.is_odd()) + .ok_or(PubKeyError::XCoordinate)?; if pt.y != y { return Err(PubKeyError::NonCurvePoint); } - let public = CurvePoint::new(x, y, pt.infinity); + let public = Affine { + x, + y, + infinity: pt.infinity, + }; if !public.is_on_curve() { return Err(PubKeyError::NonCurvePoint); } @@ -115,7 +120,7 @@ impl PubKey { if secret_key.clone().into_scalar() == ScalarField::zero() { return Err(PubKeyError::SecKey); } - let pt = CurvePoint::prime_subgroup_generator() + let pt = CurvePoint::generator() .mul(secret_key.into_scalar()) .into_affine(); if !pt.is_on_curve() { @@ -158,9 +163,10 @@ impl PubKey { } let x = BaseField::from_bytes(x_bytes).map_err(|_| PubKeyError::XCoordinateBytes)?; - let mut pt = CurvePoint::get_point_from_x(x, y_parity).ok_or(PubKeyError::XCoordinate)?; + let mut pt = + CurvePoint::get_point_from_x_unchecked(x, y_parity).ok_or(PubKeyError::XCoordinate)?; - if pt.y.into_repr().is_even() == y_parity { + if pt.y.into_bigint().is_even() == y_parity { pt.y = pt.y.neg(); } @@ -187,14 +193,14 @@ impl PubKey { let point = self.0; CompressedPubKey { x: point.x, - is_odd: point.y.into_repr().is_odd(), + is_odd: point.y.into_bigint().is_odd(), } } /// Serialize public key into corresponding Mina address pub fn into_address(&self) -> String { let 
point = self.point(); - into_address(&point.x, point.y.into_repr().is_odd()) + into_address(&point.x, point.y.into_bigint().is_odd()) } /// Deserialize public key into bytes @@ -271,7 +277,8 @@ impl CompressedPubKey { } else { return Err(PubKeyError::YCoordinateParity); }; - let public = CurvePoint::get_point_from_x(x, is_odd).ok_or(PubKeyError::XCoordinate)?; + let public = + CurvePoint::get_point_from_x_unchecked(x, is_odd).ok_or(PubKeyError::XCoordinate)?; if !public.is_on_curve() { return Err(PubKeyError::NonCurvePoint); } @@ -294,7 +301,7 @@ impl CompressedPubKey { pub fn from_secret_key(sec_key: SecKey) -> Self { // We do not need to check point is on the curve, since it's derived directly from the generator point let public = PubKey::from_point_unsafe( - CurvePoint::prime_subgroup_generator() + CurvePoint::generator() .mul(sec_key.into_scalar()) .into_affine(), ); diff --git a/signer/src/schnorr.rs b/signer/src/schnorr.rs index 6fb1cff55c..ed780121ac 100644 --- a/signer/src/schnorr.rs +++ b/signer/src/schnorr.rs @@ -5,8 +5,8 @@ //! 
Details: use ark_ec::{ - AffineCurve, // for prime_subgroup_generator() - ProjectiveCurve, // for into_affine() + AffineRepr, // for generator() + CurveGroup, }; use ark_ff::{ BigInteger, // for is_even() @@ -19,7 +19,7 @@ use blake2::{ Blake2bVar, }; use mina_hasher::{self, DomainParameter, Hasher, ROInput}; -use std::ops::Neg; +use std::ops::{Add, Neg}; use crate::{BaseField, CurvePoint, Hashable, Keypair, PubKey, ScalarField, Signature, Signer}; @@ -58,8 +58,10 @@ impl Hashable for Message { impl Signer for Schnorr { fn sign(&mut self, kp: &Keypair, input: &H) -> Signature { let k: ScalarField = self.derive_nonce(kp, input); - let r: CurvePoint = CurvePoint::prime_subgroup_generator().mul(k).into_affine(); - let k: ScalarField = if r.y.into_repr().is_even() { k } else { -k }; + let r: CurvePoint = CurvePoint::generator() + .mul_bigint(k.into_bigint()) + .into_affine(); + let k: ScalarField = if r.y.into_bigint().is_even() { k } else { -k }; let e: ScalarField = self.message_hash(&kp.public, r.x, input); let s: ScalarField = k + e * kp.secret.scalar(); @@ -70,17 +72,19 @@ impl Signer for Schnorr { fn verify(&mut self, sig: &Signature, public: &PubKey, input: &H) -> bool { let ev: ScalarField = self.message_hash(public, sig.rx, input); - let sv: CurvePoint = CurvePoint::prime_subgroup_generator() - .mul(sig.s) + let sv = CurvePoint::generator() + .mul_bigint(sig.s.into_bigint()) .into_affine(); // Perform addition and infinity check in projective coordinates for performance - let rv = public.point().mul(ev).neg().add_mixed(&sv); + let rv = public.point().mul_bigint(ev.into_bigint()).neg().add(sv); + if rv.is_zero() { return false; } + let rv = rv.into_affine(); - rv.y.into_repr().is_even() && rv.x == sig.rx + rv.y.into_bigint().is_even() && rv.x == sig.rx } } @@ -147,7 +151,6 @@ impl Schnorr { // Squeeze and convert from base field element to scalar field element // Since the difference in modulus between the two fields is < 2^125, w.h.p., a // random value from 
one field will fit in the other field. - ScalarField::from_repr(self.hasher.hash(&schnorr_input).into_repr()) - .expect("failed to create scalar") + ScalarField::from(self.hasher.hash(&schnorr_input).into_bigint()) } } diff --git a/turshi/tests/helper.rs b/turshi/tests/helper.rs index fe308f15bc..6192673c3e 100644 --- a/turshi/tests/helper.rs +++ b/turshi/tests/helper.rs @@ -1,10 +1,10 @@ -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use mina_curves::pasta::Pallas as CurvePoint; use o1_utils::FieldHelpers; use turshi::helper::CairoFieldHelpers; /// Base field element type -pub type BaseField = ::BaseField; +pub type BaseField = ::BaseField; #[test] fn test_field_to_bits() { diff --git a/utils/Cargo.toml b/utils/Cargo.toml index ccab6c03f3..a9dfe25fd4 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -10,7 +10,6 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ec.workspace = true ark-ff.workspace = true ark-poly.workspace = true ark-serialize.workspace = true @@ -31,4 +30,4 @@ rand_core.workspace = true ark-ec.workspace = true mina-curves.workspace = true num-bigint.workspace = true -secp256k1.workspace = true \ No newline at end of file +secp256k1.workspace = true diff --git a/utils/src/array.rs b/utils/src/array.rs index a744d4f536..d8beeae43f 100644 --- a/utils/src/array.rs +++ b/utils/src/array.rs @@ -114,11 +114,11 @@ macro_rules! box_array2 { mod tests { use super::*; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::{UniformRand, Zero}; use mina_curves::pasta::Pallas as CurvePoint; - pub type BaseField = ::BaseField; + pub type BaseField = ::BaseField; #[test] /// Tests whether initialising different arrays creates a stack diff --git a/utils/src/dense_polynomial.rs b/utils/src/dense_polynomial.rs index 698661c4b4..7f489d037a 100644 --- a/utils/src/dense_polynomial.rs +++ b/utils/src/dense_polynomial.rs @@ -1,7 +1,7 @@ //! This adds a few utility functions for the [DensePolynomial] arkworks type. 
use ark_ff::Field; -use ark_poly::{univariate::DensePolynomial, Polynomial, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use rayon::prelude::*; use crate::chunked_polynomial::ChunkedPolynomial; @@ -32,7 +32,7 @@ impl ExtendedDensePolynomial for DensePolynomial { result .coeffs .par_iter_mut() - .for_each(|coeff| *coeff *= &elm); + .for_each(|coeff: &mut F| *coeff *= &elm); result } diff --git a/utils/src/field_helpers.rs b/utils/src/field_helpers.rs index 8bd99b4ef2..ad45980ac9 100644 --- a/utils/src/field_helpers.rs +++ b/utils/src/field_helpers.rs @@ -1,6 +1,6 @@ //! Useful helper methods to extend [ark_ff::Field]. -use ark_ff::{BigInteger, Field, FpParameters, PrimeField}; +use ark_ff::{BigInteger, Field, PrimeField}; use num_bigint::{BigInt, BigUint, RandBigInt, ToBigInt}; use rand::rngs::StdRng; use std::ops::Neg; @@ -122,7 +122,7 @@ pub trait FieldHelpers { where F: PrimeField, { - F::size_in_bits() / 8 + (F::size_in_bits() % 8 != 0) as usize + (F::MODULUS_BIT_SIZE / 8) as usize + (F::MODULUS_BIT_SIZE % 8 != 0) as usize } /// Get the modulus as `BigUint` @@ -130,18 +130,19 @@ pub trait FieldHelpers { where F: PrimeField, { - BigUint::from_bytes_le(&F::Params::MODULUS.to_bytes_le()) + BigUint::from_bytes_le(&F::MODULUS.to_bytes_le()) } } impl FieldHelpers for F { fn from_bytes(bytes: &[u8]) -> Result { - F::deserialize(&mut &*bytes).map_err(|_| FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &*bytes).map_err(|_| FieldHelpersError::DeserializeBytes) } fn from_hex(hex: &str) -> Result { let bytes: Vec = hex::decode(hex).map_err(|_| FieldHelpersError::DecodeHex)?; - F::deserialize(&mut &bytes[..]).map_err(|_| FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &bytes[..]) + .map_err(|_| FieldHelpersError::DeserializeBytes) } /// Creates a field element from bits (little endian) @@ -154,12 +155,13 @@ impl FieldHelpers for F { bytes }); - F::deserialize(&mut 
&bytes[..]).map_err(|_| FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &bytes[..]) + .map_err(|_| FieldHelpersError::DeserializeBytes) } fn to_bytes(&self) -> Vec { let mut bytes: Vec = vec![]; - self.serialize(&mut bytes) + self.serialize_uncompressed(&mut bytes) .expect("Failed to serialize field"); bytes diff --git a/utils/src/serialization.rs b/utils/src/serialization.rs index 0b1620dbdf..a69c0dabe7 100644 --- a/utils/src/serialization.rs +++ b/utils/src/serialization.rs @@ -23,7 +23,7 @@ pub mod ser { S: serde::Serializer, { let mut bytes = vec![]; - val.serialize(&mut bytes) + val.serialize_compressed(&mut bytes) .map_err(serde::ser::Error::custom)?; Bytes::serialize_as(&bytes, serializer) @@ -37,7 +37,7 @@ pub mod ser { D: serde::Deserializer<'de>, { let bytes: Vec = Bytes::deserialize_as(deserializer)?; - T::deserialize(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_compressed(&mut &bytes[..]).map_err(serde::de::Error::custom) } } @@ -60,7 +60,7 @@ where S: serde::Serializer, { let mut bytes = vec![]; - val.serialize(&mut bytes) + val.serialize_compressed(&mut bytes) .map_err(serde::ser::Error::custom)?; if serializer.is_human_readable() { @@ -84,7 +84,7 @@ where } else { Bytes::deserialize_as(deserializer)? }; - T::deserialize(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_compressed(&mut &bytes[..]).map_err(serde::de::Error::custom) } } @@ -100,7 +100,8 @@ where S: serde::Serializer, { let mut bytes = vec![]; - val.serialize_unchecked(&mut bytes) + // Serialization is still as usual, there's no serialize_compressed_unchecked method. + val.serialize_compressed(&mut bytes) .map_err(serde::ser::Error::custom)?; if serializer.is_human_readable() { @@ -124,6 +125,6 @@ where } else { Bytes::deserialize_as(deserializer)? 
}; - T::deserialize_unchecked(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_compressed_unchecked(&mut &bytes[..]).map_err(serde::de::Error::custom) } } diff --git a/utils/tests/chunked_polynomials.rs b/utils/tests/chunked_polynomials.rs index 44777abb84..082776a241 100644 --- a/utils/tests/chunked_polynomials.rs +++ b/utils/tests/chunked_polynomials.rs @@ -1,5 +1,5 @@ use ark_ff::{Field, One}; -use ark_poly::{univariate::DensePolynomial, Polynomial, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use mina_curves::pasta::Fp; use o1_utils::ExtendedDensePolynomial; diff --git a/utils/tests/dense_polynomial.rs b/utils/tests/dense_polynomial.rs index cf4f9c08ab..8e67a31656 100644 --- a/utils/tests/dense_polynomial.rs +++ b/utils/tests/dense_polynomial.rs @@ -1,5 +1,5 @@ use ark_ff::One; -use ark_poly::{univariate::DensePolynomial, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use mina_curves::pasta::Fp; use o1_utils::ExtendedDensePolynomial; diff --git a/utils/tests/field_helpers.rs b/utils/tests/field_helpers.rs index 4f87ae6734..b242ded0cb 100644 --- a/utils/tests/field_helpers.rs +++ b/utils/tests/field_helpers.rs @@ -1,4 +1,4 @@ -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{BigInteger, One, PrimeField}; use mina_curves::pasta::Pallas as CurvePoint; use num_bigint::BigUint; @@ -8,7 +8,7 @@ use o1_utils::{ }; /// Base field element type -pub type BaseField = ::BaseField; +pub type BaseField = ::BaseField; #[test] fn field_hex() { @@ -95,7 +95,10 @@ fn field_bits() { .is_ok()); assert_eq!( - BaseField::from_bits(&vec![true; BaseField::size_in_bits()]), + BaseField::from_bits(&vec![ + true; + ::MODULUS_BIT_SIZE as usize + ]), Err(FieldHelpersError::DeserializeBytes) ); @@ -125,7 +128,7 @@ fn field_big() { let field_zero = BaseField::from(0u32); assert_eq!( - BigUint::from_bytes_be(&field_zero.into_repr().to_bytes_be()), + 
BigUint::from_bytes_be(&field_zero.0.to_bytes_be()), BigUint::from_bytes_be(&be_zero_32bytes) ); diff --git a/utils/tests/foreign_field.rs b/utils/tests/foreign_field.rs index 4c89b60670..5c19f11654 100644 --- a/utils/tests/foreign_field.rs +++ b/utils/tests/foreign_field.rs @@ -1,11 +1,11 @@ -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::One; use mina_curves::pasta::Pallas as CurvePoint; use num_bigint::BigUint; use o1_utils::{field_helpers::FieldHelpers, ForeignElement}; /// Base field element type -pub type BaseField = ::BaseField; +pub type BaseField = ::BaseField; fn secp256k1_modulus() -> BigUint { BigUint::from_bytes_be(&secp256k1::constants::FIELD_SIZE) From 4bb1e61a7d02cdebedfd9965cc8a3d9285d6b0f1 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Fri, 20 Sep 2024 19:44:35 +0100 Subject: [PATCH 2/8] Update to arkworks 0.4.2: arrabiata,folding,ivc,msm,o1vm --- arrabiata/src/prover.rs | 6 +- arrabiata/src/witness.rs | 18 +- arrabiata/tests/witness.rs | 28 +- folding/src/checker.rs | 40 +- folding/src/columns.rs | 4 +- folding/src/expressions.rs | 18 +- folding/src/lib.rs | 8 +- folding/tests/test_decomposable_folding.rs | 8 +- .../test_folding_with_quadriticization.rs | 8 +- folding/tests/test_vanilla_folding.rs | 6 +- ivc/src/expr_eval.rs | 4 +- ivc/src/plonkish_lang.rs | 3 +- ivc/src/poseidon_55_0_7_3_2/interpreter.rs | 4 +- ivc/src/poseidon_55_0_7_3_7/interpreter.rs | 4 +- ivc/src/poseidon_8_56_5_3_2/interpreter.rs | 4 +- ivc/tests/simple.rs | 3 +- msm/src/fec/interpreter.rs | 6 +- msm/src/fec/lookups.rs | 4 +- msm/src/fec/mod.rs | 17 +- msm/src/ffa/interpreter.rs | 4 +- msm/src/lib.rs | 8 +- msm/src/precomputed_srs.rs | 8 +- msm/src/serialization/interpreter.rs | 15 +- msm/src/serialization/lookups.rs | 4 +- msm/src/test/generic.rs | 2 +- msm/src/test/test_circuit/interpreter.rs | 6 +- o1vm/src/legacy/folding.rs | 14 +- o1vm/src/legacy/tests.rs | 356 +++++++++--------- o1vm/src/lib.rs | 4 +- 29 files changed, 318 insertions(+), 296 
deletions(-) diff --git a/arrabiata/src/prover.rs b/arrabiata/src/prover.rs index 9838a060f0..7e9ea7b36b 100644 --- a/arrabiata/src/prover.rs +++ b/arrabiata/src/prover.rs @@ -1,7 +1,7 @@ //! A prover for the folding/accumulation scheme use crate::proof::Proof; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::PrimeField; use crate::witness::Env; @@ -12,8 +12,8 @@ use crate::witness::Env; pub fn prove< Fp: PrimeField, Fq: PrimeField, - E1: AffineCurve, - E2: AffineCurve, + E1: AffineRepr, + E2: AffineRepr, >( _env: &Env, ) -> Result { diff --git a/arrabiata/src/witness.rs b/arrabiata/src/witness.rs index ee147f8a9b..903009d758 100644 --- a/arrabiata/src/witness.rs +++ b/arrabiata/src/witness.rs @@ -1,4 +1,4 @@ -use ark_ec::{AffineCurve, SWModelParameters}; +use ark_ec::{models::short_weierstrass::SWCurveConfig, AffineRepr}; use ark_ff::PrimeField; use ark_poly::Evaluations; use kimchi::circuits::{domains::EvaluationDomains, gate::CurrOrNext}; @@ -32,8 +32,8 @@ pub const IVC_STARTING_INSTRUCTION: Instruction = Instruction::Poseidon(0); pub struct Env< Fp: PrimeField, Fq: PrimeField, - E1: AffineCurve, - E2: AffineCurve, + E1: AffineRepr, + E2: AffineRepr, > { // ---------------- // Setup related (domains + SRS) @@ -189,8 +189,8 @@ impl< E2: CommitmentCurve, > InterpreterEnv for Env where - ::BaseField: PrimeField, - ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::BaseField: PrimeField, { type Position = (Column, CurrOrNext); @@ -815,8 +815,8 @@ impl< sponge_e2: [BigInt; 3], ) -> Self { { - assert!(Fp::size_in_bits() <= MAXIMUM_FIELD_SIZE_IN_BITS.try_into().unwrap(), "The size of the field Fp is too large, it should be less than {MAXIMUM_FIELD_SIZE_IN_BITS}"); - assert!(Fq::size_in_bits() <= MAXIMUM_FIELD_SIZE_IN_BITS.try_into().unwrap(), "The size of the field Fq is too large, it should be less than {MAXIMUM_FIELD_SIZE_IN_BITS}"); + assert!(Fp::MODULUS_BIT_SIZE <= MAXIMUM_FIELD_SIZE_IN_BITS.try_into().unwrap(), "The size of the field Fp is too 
large, it should be less than {MAXIMUM_FIELD_SIZE_IN_BITS}"); + assert!(Fq::MODULUS_BIT_SIZE <= MAXIMUM_FIELD_SIZE_IN_BITS.try_into().unwrap(), "The size of the field Fq is too large, it should be less than {MAXIMUM_FIELD_SIZE_IN_BITS}"); let modulus_fp = Fp::modulus_biguint(); assert!( (modulus_fp - BigUint::from(1_u64)).gcd(&BigUint::from(POSEIDON_ALPHA)) @@ -871,10 +871,10 @@ impl< // Default set to the blinders. Using double to make the EC scaling happy. let previous_commitments_e1: Vec> = (0..NUMBER_OF_COLUMNS) - .map(|_| PolyComm::new(vec![srs_e1.h + srs_e1.h])) + .map(|_| PolyComm::new(vec![(srs_e1.h + srs_e1.h).into()])) .collect(); let previous_commitments_e2: Vec> = (0..NUMBER_OF_COLUMNS) - .map(|_| PolyComm::new(vec![srs_e2.h + srs_e2.h])) + .map(|_| PolyComm::new(vec![(srs_e2.h + srs_e2.h).into()])) .collect(); // FIXME: zero will not work. let ivc_accumulator_e1: Vec> = (0..NUMBER_OF_COLUMNS) diff --git a/arrabiata/tests/witness.rs b/arrabiata/tests/witness.rs index d08fe714eb..f2f34fce10 100644 --- a/arrabiata/tests/witness.rs +++ b/arrabiata/tests/witness.rs @@ -1,4 +1,7 @@ -use ark_ec::{short_weierstrass_jacobian::GroupAffine, ProjectiveCurve, SWModelParameters}; +use ark_ec::{ + models::short_weierstrass::{Affine, SWCurveConfig}, + AffineRepr, Group, +}; use ark_ff::{PrimeField, UniformRand}; use arrabiata::{ interpreter::{self, Instruction, InterpreterEnv}, @@ -29,20 +32,18 @@ impl SpongeConstants for PlonkSpongeConstants { const PERM_INITIAL_ARK: bool = false; } -fn helper_generate_random_elliptic_curve_point( - rng: &mut RNG, -) -> GroupAffine

+fn helper_generate_random_elliptic_curve_point(rng: &mut RNG) -> Affine

where P::BaseField: PrimeField, RNG: RngCore + CryptoRng, { let p1_x = P::BaseField::rand(rng); - let mut p1: Option> = GroupAffine::

::get_point_from_x(p1_x, false); + let mut p1: Option> = Affine::

::get_point_from_x_unchecked(p1_x, false); while p1.is_none() { let p1_x = P::BaseField::rand(rng); - p1 = GroupAffine::

::get_point_from_x(p1_x, false); + p1 = Affine::

::get_point_from_x_unchecked(p1_x, false); } - let p1: GroupAffine

= p1.unwrap().scale_by_cofactor().into(); + let p1: Affine

= p1.unwrap().mul_by_cofactor_to_group().into(); p1 } @@ -179,7 +180,7 @@ fn test_unit_witness_elliptic_curve_addition() { assert_eq!(env.current_iteration, 0); let (exp_x3, exp_y3) = { let res: Pallas = - env.ivc_accumulator_e2[0].elems[0] + env.previous_commitments_e2[0].elems[0]; + (env.ivc_accumulator_e2[0].elems[0] + env.previous_commitments_e2[0].elems[0]).into(); let (x3, y3) = res.to_coordinates().unwrap(); ( x3.to_biguint().to_bigint().unwrap(), @@ -198,7 +199,7 @@ fn test_unit_witness_elliptic_curve_addition() { assert_eq!(env.current_iteration, 1); let (exp_x3, exp_y3) = { let res: Vesta = - env.ivc_accumulator_e1[0].elems[0] + env.previous_commitments_e1[0].elems[0]; + (env.ivc_accumulator_e1[0].elems[0] + env.previous_commitments_e1[0].elems[0]).into(); let (x3, y3) = res.to_coordinates().unwrap(); ( x3.to_biguint().to_bigint().unwrap(), @@ -217,7 +218,7 @@ fn test_unit_witness_elliptic_curve_addition() { assert_eq!(env.current_iteration, 2); let (exp_x3, exp_y3) = { let res: Pallas = - env.ivc_accumulator_e2[0].elems[0] + env.previous_commitments_e2[0].elems[0]; + (env.ivc_accumulator_e2[0].elems[0] + env.previous_commitments_e2[0].elems[0]).into(); let (x3, y3) = res.to_coordinates().unwrap(); ( x3.to_biguint().to_bigint().unwrap(), @@ -254,7 +255,7 @@ fn test_witness_double_elliptic_curve_point() { let p1_y = env.write_column(pos_y, p1.y.to_biguint().into()); let (res_x, res_y) = env.double_ec_point(pos_x, pos_y, p1_x, p1_y); - let exp_res: Pallas = p1 + p1; + let exp_res: Pallas = (p1 + p1).into(); let exp_x: BigInt = exp_res.x.to_biguint().into(); let exp_y: BigInt = exp_res.y.to_biguint().into(); @@ -291,8 +292,9 @@ where let res_y: BigInt = env.state[1].clone(); let p1_proj: ProjectivePallas = p1.into(); - let p1_r: Pallas = p1_proj.mul(r.clone().to_u64_digits().1).into(); - let exp_res: Pallas = p1_r + env.srs_e2.h; + // @volhovm TODO check if mul_bigint is what was intended + let p1_r: Pallas = 
p1_proj.mul_bigint(r.clone().to_u64_digits().1).into(); + let exp_res: Pallas = (p1_r + env.srs_e2.h).into(); let exp_x: BigInt = exp_res.x.to_biguint().into(); let exp_y: BigInt = exp_res.y.to_biguint().into(); diff --git a/folding/src/checker.rs b/folding/src/checker.rs index 374c395bd0..fdbb24ebb4 100644 --- a/folding/src/checker.rs +++ b/folding/src/checker.rs @@ -6,7 +6,7 @@ use crate::{ instance_witness::Instance, ExpExtension, FoldingConfig, Radix2EvaluationDomain, RelaxedInstance, RelaxedWitness, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, Zero}; use ark_poly::Evaluations; use kimchi::circuits::{expr::Variable, gate::CurrOrNext}; @@ -85,8 +85,8 @@ pub trait Provide { fn resolve( &self, inner: FoldingCompatibleExprInner, - domain: Radix2EvaluationDomain<::ScalarField>, - ) -> Vec<::ScalarField>; + domain: Radix2EvaluationDomain<::ScalarField>, + ) -> Vec<::ScalarField>; } impl Provide for Provider @@ -94,24 +94,24 @@ where C::Witness: Index< C::Column, Output = Evaluations< - ::ScalarField, - Radix2EvaluationDomain<::ScalarField>, + ::ScalarField, + Radix2EvaluationDomain<::ScalarField>, >, >, C::Witness: Index< C::Selector, Output = Evaluations< - ::ScalarField, - Radix2EvaluationDomain<::ScalarField>, + ::ScalarField, + Radix2EvaluationDomain<::ScalarField>, >, >, - C::Instance: Index::ScalarField>, + C::Instance: Index::ScalarField>, { fn resolve( &self, inner: FoldingCompatibleExprInner, - domain: Radix2EvaluationDomain<::ScalarField>, - ) -> Vec<::ScalarField> { + domain: Radix2EvaluationDomain<::ScalarField>, + ) -> Vec<::ScalarField> { let domain_size = domain.size as usize; match inner { FoldingCompatibleExprInner::Constant(c) => { @@ -145,24 +145,24 @@ where C::Witness: Index< C::Column, Output = Evaluations< - ::ScalarField, - Radix2EvaluationDomain<::ScalarField>, + ::ScalarField, + Radix2EvaluationDomain<::ScalarField>, >, >, C::Witness: Index< C::Selector, Output = Evaluations< - ::ScalarField, - 
Radix2EvaluationDomain<::ScalarField>, + ::ScalarField, + Radix2EvaluationDomain<::ScalarField>, >, >, - C::Instance: Index::ScalarField>, + C::Instance: Index::ScalarField>, { fn resolve( &self, inner: FoldingCompatibleExprInner, - domain: Radix2EvaluationDomain<::ScalarField>, - ) -> Vec<::ScalarField> { + domain: Radix2EvaluationDomain<::ScalarField>, + ) -> Vec<::ScalarField> { match inner { FoldingCompatibleExprInner::Extensions(ext) => match ext { ExpExtension::U => { @@ -204,8 +204,8 @@ pub trait Checker: Provide { fn check_rec( &self, exp: FoldingCompatibleExpr, - domain: Radix2EvaluationDomain<::ScalarField>, - ) -> Vec<::ScalarField> { + domain: Radix2EvaluationDomain<::ScalarField>, + ) -> Vec<::ScalarField> { let e2 = exp.clone(); let res = match exp { FoldingCompatibleExpr::Atom(inner) => self.resolve(inner, domain), @@ -249,7 +249,7 @@ pub trait Checker: Provide { fn check( &self, exp: &FoldingCompatibleExpr, - domain: Radix2EvaluationDomain<::ScalarField>, + domain: Radix2EvaluationDomain<::ScalarField>, ) { let res = self.check_rec(exp.clone(), domain); for (i, row) in res.iter().enumerate() { diff --git a/folding/src/columns.rs b/folding/src/columns.rs index 43e3a21a48..fd08561785 100644 --- a/folding/src/columns.rs +++ b/folding/src/columns.rs @@ -3,7 +3,7 @@ //! scheme as they describe the basic expressiveness of the system. use crate::FoldingConfig; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use derivative::Derivative; use kimchi::circuits::expr::Variable; @@ -28,7 +28,7 @@ pub enum ExtendedFoldingColumn { /// The error term introduced in the "relaxed" instance. Error, /// A constant value in our expression - Constant(::ScalarField), + Constant(::ScalarField), /// A challenge used by the PIOP or the folding scheme. 
Challenge(C::Challenge), /// A list of randomizer to combine expressions diff --git a/folding/src/expressions.rs b/folding/src/expressions.rs index 7b61e28e31..da7b3a7626 100644 --- a/folding/src/expressions.rs +++ b/folding/src/expressions.rs @@ -276,7 +276,7 @@ use crate::{ quadraticization::{quadraticize, ExtendedWitnessGenerator, Quadraticized}, FoldingConfig, ScalarField, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::One; use derivative::Derivative; use itertools::Itertools; @@ -368,7 +368,7 @@ pub enum ExpExtension { Debug(bound = "C: FoldingConfig") )] pub enum FoldingCompatibleExprInner { - Constant(::ScalarField), + Constant(::ScalarField), Challenge(C::Challenge), Cell(Variable), /// extra nodes created by folding, should not be passed to folding @@ -750,7 +750,7 @@ impl FoldingExp { Mul(e1, e2) } // TODO: Replace with `Pow` - FoldingExp::Pow(_, 0) => Atom(Constant(::ScalarField::one())), + FoldingExp::Pow(_, 0) => Atom(Constant(::ScalarField::one())), FoldingExp::Pow(e, 1) => e.into_compatible(), FoldingExp::Pow(e, i) => { let e = e.into_compatible(); @@ -932,7 +932,7 @@ pub fn extract_terms(exp: FoldingExp) -> Box Box::new( [Term { exp: FoldingExp::Atom(ExtendedFoldingColumn::Constant( - ::ScalarField::one(), + ::ScalarField::one(), )), sign: Sign::Pos, }] @@ -1006,7 +1006,7 @@ pub fn folding_expression( impl From> for FoldingCompatibleExprInner where - Config::Curve: AffineCurve, + Config::Curve: AffineRepr, Config::Challenge: From, { fn from(expr: ConstantExprInner) -> Self { @@ -1028,7 +1028,7 @@ impl> From, Col>> for FoldingCompatibleExprInner where - Config::Curve: AffineCurve, + Config::Curve: AffineRepr, Config::Challenge: From, { // TODO: check if this needs some special treatment for Extensions @@ -1050,7 +1050,7 @@ impl> From, Col>>> for FoldingCompatibleExpr where - Config::Curve: AffineCurve, + Config::Curve: AffineRepr, Config::Challenge: From, { fn from(expr: Operations, Col>>) -> Self { @@ -1076,7 +1076,7 @@ where 
impl> From>> for FoldingCompatibleExpr where - Config::Curve: AffineCurve, + Config::Curve: AffineRepr, Config::Challenge: From, { fn from(expr: Operations>) -> Self { @@ -1102,7 +1102,7 @@ impl> From>, Col>>> for FoldingCompatibleExpr where - Config::Curve: AffineCurve, + Config::Curve: AffineRepr, Config::Challenge: From, { fn from( diff --git a/folding/src/lib.rs b/folding/src/lib.rs index d1a591137d..2b8e5a0a9d 100644 --- a/folding/src/lib.rs +++ b/folding/src/lib.rs @@ -17,7 +17,7 @@ //! [expressions]. // TODO: the documentation above might need more descriptions. -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; use ark_poly::{EvaluationDomain, Evaluations, Radix2EvaluationDomain}; use error_term::{compute_error, ExtendedEnv}; @@ -59,8 +59,8 @@ pub mod checker; // complexity for clippy. // Should be moved into FoldingConfig, but associated type defaults are unstable // at the moment. -type ScalarField = <::Curve as AffineCurve>::ScalarField; -type BaseField = <::Curve as AffineCurve>::BaseField; +type ScalarField = <::Curve as AffineRepr>::ScalarField; +type BaseField = <::Curve as AffineRepr>::BaseField; // 'static seems to be used for expressions. Can we get rid of it? 
pub trait FoldingConfig: Debug + 'static { @@ -91,7 +91,7 @@ pub trait FoldingConfig: Debug + 'static { type Structure: Clone; type Env: FoldingEnv< - ::ScalarField, + ::ScalarField, Self::Instance, Self::Witness, Self::Column, diff --git a/folding/tests/test_decomposable_folding.rs b/folding/tests/test_decomposable_folding.rs index dbe8ea7c2c..0574d556d2 100644 --- a/folding/tests/test_decomposable_folding.rs +++ b/folding/tests/test_decomposable_folding.rs @@ -1,5 +1,5 @@ -use ark_ec::{AffineCurve, ProjectiveCurve}; -use ark_ff::{One, UniformRand, Zero}; +use ark_ec::AffineRepr; +use ark_ff::{One, UniformRand}; use ark_poly::{Evaluations, Radix2EvaluationDomain}; use folding::{ checker::{Checker, ExtendedProvider}, @@ -25,7 +25,7 @@ use std::println as debug; type Fp = ark_bn254::Fr; type Curve = ark_bn254::G1Affine; type SpongeParams = PlonkSpongeConstantsKimchi; -type BaseSponge = DefaultFqSponge; +type BaseSponge = DefaultFqSponge; // the type representing our columns, in this case we have 3 witness columns #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] @@ -69,7 +69,7 @@ impl Foldable for TestInstance { fn combine(a: Self, b: Self, challenge: Fp) -> Self { TestInstance { commitments: std::array::from_fn(|i| { - a.commitments[i] + b.commitments[i].mul(challenge).into_affine() + (a.commitments[i] + b.commitments[i] * challenge).into() }), challenges: std::array::from_fn(|i| a.challenges[i] + challenge * b.challenges[i]), alphas: Alphas::combine(a.alphas, b.alphas, challenge), diff --git a/folding/tests/test_folding_with_quadriticization.rs b/folding/tests/test_folding_with_quadriticization.rs index 97340d7c2e..46aaf9b18e 100644 --- a/folding/tests/test_folding_with_quadriticization.rs +++ b/folding/tests/test_folding_with_quadriticization.rs @@ -1,7 +1,7 @@ // this example is a copy of the decomposable folding one, but with a degree 3 gate // that triggers quadriticization -use ark_ec::{AffineCurve, ProjectiveCurve}; -use ark_ff::{One, UniformRand, Zero}; 
+use ark_ec::AffineRepr; +use ark_ff::{One, UniformRand}; use ark_poly::{Evaluations, Radix2EvaluationDomain}; use folding::{ checker::{Checker, ExtendedProvider}, @@ -25,7 +25,7 @@ use std::println as debug; type Fp = ark_bn254::Fr; type Curve = ark_bn254::G1Affine; type SpongeParams = PlonkSpongeConstantsKimchi; -type BaseSponge = DefaultFqSponge; +type BaseSponge = DefaultFqSponge; // the type representing our columns, in this case we have 3 witness columns #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] @@ -69,7 +69,7 @@ impl Foldable for TestInstance { fn combine(a: Self, b: Self, challenge: Fp) -> Self { TestInstance { commitments: std::array::from_fn(|i| { - a.commitments[i] + b.commitments[i].mul(challenge).into_affine() + (a.commitments[i] + b.commitments[i] * challenge).into() }), challenges: std::array::from_fn(|i| a.challenges[i] + challenge * b.challenges[i]), alphas: Alphas::combine(a.alphas, b.alphas, challenge), diff --git a/folding/tests/test_vanilla_folding.rs b/folding/tests/test_vanilla_folding.rs index 87e0a8c6f0..09b4baae1b 100644 --- a/folding/tests/test_vanilla_folding.rs +++ b/folding/tests/test_vanilla_folding.rs @@ -6,7 +6,7 @@ /// ```text /// cargo nextest run test_folding_instance --release --all-features /// ``` -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::AffineRepr; use ark_ff::{One, UniformRand, Zero}; use ark_poly::{EvaluationDomain, Evaluations, Radix2EvaluationDomain}; use checker::{ExtendedProvider, Provider}; @@ -30,7 +30,7 @@ use std::println as debug; type Fp = ark_bn254::Fr; type Curve = ark_bn254::G1Affine; type SpongeParams = PlonkSpongeConstantsKimchi; -type BaseSponge = DefaultFqSponge; +type BaseSponge = DefaultFqSponge; /// The instance is the commitments to the polynomials and the challenges /// There are 3 commitments and challanges because there are 3 columns, A, B and @@ -47,7 +47,7 @@ impl Foldable for TestInstance { fn combine(a: Self, b: Self, challenge: Fp) -> Self { TestInstance { commitments: 
std::array::from_fn(|i| { - a.commitments[i] + b.commitments[i].mul(challenge).into_affine() + (a.commitments[i] + b.commitments[i] * challenge).into() }), challenges: std::array::from_fn(|i| a.challenges[i] + challenge * b.challenges[i]), alphas: Alphas::combine(a.alphas, b.alphas, challenge), diff --git a/ivc/src/expr_eval.rs b/ivc/src/expr_eval.rs index 3e3e9763aa..7fffa1af36 100644 --- a/ivc/src/expr_eval.rs +++ b/ivc/src/expr_eval.rs @@ -1,7 +1,7 @@ use std::ops::Index; use crate::plonkish_lang::{CombinableEvals, PlonkishChallenge, PlonkishWitnessGeneric}; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::Field; use ark_poly::{Evaluations, Radix2EvaluationDomain as R2D}; use folding::{ @@ -54,7 +54,7 @@ pub type SimpleEvalEnv = Generic Curve, N_COL, N_FSEL, - Evaluations<::ScalarField, R2D<::ScalarField>>, + Evaluations<::ScalarField, R2D<::ScalarField>>, >; impl< diff --git a/ivc/src/plonkish_lang.rs b/ivc/src/plonkish_lang.rs index fd38e55410..2a8797f237 100644 --- a/ivc/src/plonkish_lang.rs +++ b/ivc/src/plonkish_lang.rs @@ -1,7 +1,6 @@ /// Provides definition of plonkish language related instance, /// witness, and tools to work with them. The IVC is specialized for /// exactly the plonkish language. 
-use ark_ec::ProjectiveCurve; use ark_ff::{FftField, Field, One}; use ark_poly::{Evaluations, Radix2EvaluationDomain as R2D}; use folding::{instance_witness::Foldable, Alphas, Instance, Witness}; @@ -122,7 +121,7 @@ impl Self { Self { commitments: std::array::from_fn(|i| { - a.commitments[i] + b.commitments[i].mul(challenge).into_affine() + (a.commitments[i] + b.commitments[i].mul(challenge)).into() }), challenges: std::array::from_fn(|i| a.challenges[i] + challenge * b.challenges[i]), alphas: Alphas::combine(a.alphas, b.alphas, challenge), diff --git a/ivc/src/poseidon_55_0_7_3_2/interpreter.rs b/ivc/src/poseidon_55_0_7_3_2/interpreter.rs index c3228f5b02..029e2744b3 100644 --- a/ivc/src/poseidon_55_0_7_3_2/interpreter.rs +++ b/ivc/src/poseidon_55_0_7_3_2/interpreter.rs @@ -12,7 +12,7 @@ //! `poseidon/src/pasta/params.sage` use crate::poseidon_55_0_7_3_2::columns::PoseidonColumn; -use ark_ff::{FpParameters, PrimeField}; +use ark_ff::PrimeField; use kimchi_msm::circuit_design::{ColAccessCap, ColWriteCap, HybridCopyCap}; use num_bigint::BigUint; use num_integer::Integer; @@ -77,7 +77,7 @@ where // Checking that p - 1 is coprime with 7 as it has to be the case for the sbox { let one = BigUint::from(1u64); - let p: BigUint = TryFrom::try_from(::Params::MODULUS).unwrap(); + let p: BigUint = TryFrom::try_from(::MODULUS).unwrap(); let p_minus_one = p - one.clone(); let seven = BigUint::from(7u64); assert_eq!(p_minus_one.gcd(&seven), one); diff --git a/ivc/src/poseidon_55_0_7_3_7/interpreter.rs b/ivc/src/poseidon_55_0_7_3_7/interpreter.rs index a9f2dde2a2..706f36eb4a 100644 --- a/ivc/src/poseidon_55_0_7_3_7/interpreter.rs +++ b/ivc/src/poseidon_55_0_7_3_7/interpreter.rs @@ -12,7 +12,7 @@ //! 
`poseidon/src/pasta/params.sage` use crate::poseidon_55_0_7_3_7::columns::PoseidonColumn; -use ark_ff::{FpParameters, PrimeField}; +use ark_ff::PrimeField; use kimchi_msm::circuit_design::{ColAccessCap, ColWriteCap, HybridCopyCap}; use num_bigint::BigUint; use num_integer::Integer; @@ -77,7 +77,7 @@ where // Checking that p - 1 is coprime with 7 as it has to be the case for the sbox { let one = BigUint::from(1u64); - let p: BigUint = TryFrom::try_from(::Params::MODULUS).unwrap(); + let p: BigUint = TryFrom::try_from(::MODULUS).unwrap(); let p_minus_one = p - one.clone(); let seven = BigUint::from(7u64); assert_eq!(p_minus_one.gcd(&seven), one); diff --git a/ivc/src/poseidon_8_56_5_3_2/interpreter.rs b/ivc/src/poseidon_8_56_5_3_2/interpreter.rs index b73317c617..f70ac17c0c 100644 --- a/ivc/src/poseidon_8_56_5_3_2/interpreter.rs +++ b/ivc/src/poseidon_8_56_5_3_2/interpreter.rs @@ -12,7 +12,7 @@ //! `poseidon/src/pasta/params.sage` use crate::poseidon_8_56_5_3_2::columns::PoseidonColumn; -use ark_ff::{FpParameters, PrimeField}; +use ark_ff::PrimeField; use kimchi_msm::circuit_design::{ColAccessCap, ColWriteCap, HybridCopyCap}; use num_bigint::BigUint; use num_integer::Integer; @@ -96,7 +96,7 @@ where // Checking that p - 1 is coprime with 5 as it has to be the case for the sbox { let one = BigUint::from(1u64); - let p: BigUint = TryFrom::try_from(::Params::MODULUS).unwrap(); + let p: BigUint = TryFrom::try_from(::MODULUS).unwrap(); let p_minus_one = p - one.clone(); let five = BigUint::from(5u64); assert_eq!(p_minus_one.gcd(&five), one); diff --git a/ivc/tests/simple.rs b/ivc/tests/simple.rs index d0f40ca515..0788536858 100644 --- a/ivc/tests/simple.rs +++ b/ivc/tests/simple.rs @@ -2,6 +2,7 @@ //! to fold a simple addition circuit. The addition circuit consists of a single //! constraint of degree 1 over 3 columns (A + B - C = 0). 
+use ark_ec::AffineRepr; use ark_ff::{One, PrimeField, UniformRand, Zero}; use ark_poly::{Evaluations, Radix2EvaluationDomain as R2D}; use folding::{ @@ -54,7 +55,7 @@ pub type Fq = ark_bn254::Fq; /// The curve we commit into pub type Curve = BN254G1Affine; -pub type BaseSponge = DefaultFqSponge; +pub type BaseSponge = DefaultFqSponge; pub type ScalarSponge = DefaultFrSponge; pub type SpongeParams = PlonkSpongeConstantsKimchi; diff --git a/msm/src/fec/interpreter.rs b/msm/src/fec/interpreter.rs index ac6bd3ca26..ce3f78b39c 100644 --- a/msm/src/fec/interpreter.rs +++ b/msm/src/fec/interpreter.rs @@ -13,7 +13,7 @@ use crate::{ N_LIMBS_LARGE, N_LIMBS_SMALL, }, }; -use ark_ff::{FpParameters, PrimeField, Zero}; +use ark_ff::{PrimeField, Zero}; use core::marker::PhantomData; use num_bigint::{BigInt, BigUint, ToBigInt}; use num_integer::Integer; @@ -432,11 +432,11 @@ pub fn ec_add_circuit< let large_limb_size: F = From::from(1u128 << LIMB_BITSIZE_LARGE); // Foreign field modulus - let f_bui: BigUint = TryFrom::try_from(Ff::Params::MODULUS).unwrap(); + let f_bui: BigUint = TryFrom::try_from(Ff::MODULUS).unwrap(); let f_bi: BigInt = f_bui.to_bigint().unwrap(); // Native field modulus (prime) - let n_bui: BigUint = TryFrom::try_from(F::Params::MODULUS).unwrap(); + let n_bui: BigUint = TryFrom::try_from(F::MODULUS).unwrap(); let n_bi: BigInt = n_bui.to_bigint().unwrap(); let n_half_bi = &n_bi / &two_bi; diff --git a/msm/src/fec/lookups.rs b/msm/src/fec/lookups.rs index 5150178c5e..2ca9d7dc66 100644 --- a/msm/src/fec/lookups.rs +++ b/msm/src/fec/lookups.rs @@ -1,5 +1,5 @@ use crate::{logup::LookupTableID, Logup, LIMB_BITSIZE, N_LIMBS}; -use ark_ff::{FpParameters, PrimeField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::FieldHelpers; use std::marker::PhantomData; @@ -154,7 +154,7 @@ impl LookupTable { value < F::from(1u64 << 9) || value >= F::zero() - F::from(1u64 << 9) } Self::RangeCheckFfHighest(_) => { - let f_bui: BigUint = 
TryFrom::try_from(Ff::Params::MODULUS).unwrap(); + let f_bui: BigUint = TryFrom::try_from(Ff::MODULUS).unwrap(); let top_modulus_f: F = F::from_biguint(&(f_bui >> ((N_LIMBS - 1) * LIMB_BITSIZE))).unwrap(); value < top_modulus_f diff --git a/msm/src/fec/mod.rs b/msm/src/fec/mod.rs index 1b21b55623..6d12154865 100644 --- a/msm/src/fec/mod.rs +++ b/msm/src/fec/mod.rs @@ -16,10 +16,13 @@ mod tests { logup::LookupTableID, Ff1, Fp, }; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::UniformRand; use rand::{CryptoRng, RngCore}; - use std::collections::{BTreeMap, HashMap}; + use std::{ + collections::{BTreeMap, HashMap}, + ops::Mul, + }; type FECWitnessBuilderEnv = WitnessBuilderEnv< Fp, @@ -35,14 +38,17 @@ mod tests { rng: &mut RNG, domain_size: usize, ) -> FECWitnessBuilderEnv { - use mina_curves::pasta::{Fp, Pallas}; + // NOTE: this uses Pallas-in-Pallas emulation. + use mina_curves::pasta::Pallas; + type Fp = ::ScalarField; let mut witness_env = WitnessBuilderEnv::create(); // To support less rows than domain_size we need to have selectors. 
//let row_num = rng.gen_range(0..domain_size); - let gen = Pallas::prime_subgroup_generator(); + let gen = Pallas::generator(); + let kp: Fp = UniformRand::rand(rng); let p: Pallas = gen.mul(kp).into(); let px: Ff1 = p.x; @@ -57,8 +63,7 @@ mod tests { let (rx, ry) = ec_add_circuit(&mut witness_env, px, py, qx, qy); - let r: Pallas = - ark_ec::models::short_weierstrass_jacobian::GroupAffine::new(rx, ry, false); + let r: Pallas = ark_ec::models::short_weierstrass::Affine::new_unchecked(rx, ry); assert!( r == p + q, diff --git a/msm/src/ffa/interpreter.rs b/msm/src/ffa/interpreter.rs index cc7036bfff..f7da6355c2 100644 --- a/msm/src/ffa/interpreter.rs +++ b/msm/src/ffa/interpreter.rs @@ -4,7 +4,7 @@ use crate::{ serialization::interpreter::{limb_decompose_biguint, limb_decompose_ff}, LIMB_BITSIZE, N_LIMBS, }; -use ark_ff::{FpParameters, PrimeField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use num_integer::Integer; use o1_utils::field_helpers::FieldHelpers; @@ -75,7 +75,7 @@ pub fn ff_addition_circuit< a: Ff, b: Ff, ) { - let f_bigint: BigUint = TryFrom::try_from(Ff::Params::MODULUS).unwrap(); + let f_bigint: BigUint = TryFrom::try_from(Ff::MODULUS).unwrap(); let a_limbs: [F; N_LIMBS] = limb_decompose_ff::(&a); let b_limbs: [F; N_LIMBS] = limb_decompose_ff::(&b); diff --git a/msm/src/lib.rs b/msm/src/lib.rs index 48ec472122..c87295ad1a 100644 --- a/msm/src/lib.rs +++ b/msm/src/lib.rs @@ -44,9 +44,9 @@ pub const LIMB_BITSIZE: usize = 15; /// [`Ff1`] or [`Ff2`]). pub const N_LIMBS: usize = 17; -pub type BN254 = ark_ec::bn::Bn; -pub type BN254G1Affine = ::G1Affine; -pub type BN254G2Affine = ::G2Affine; +pub type BN254 = ark_ec::bn::Bn; +pub type BN254G1Affine = ::G1Affine; +pub type BN254G2Affine = ::G2Affine; /// The native field we are working with. 
pub type Fp = ark_bn254::Fr; @@ -56,6 +56,6 @@ pub type Ff1 = mina_curves::pasta::Fp; pub type Ff2 = mina_curves::pasta::Fq; pub type SpongeParams = PlonkSpongeConstantsKimchi; -pub type BaseSponge = DefaultFqSponge; +pub type BaseSponge = DefaultFqSponge; pub type ScalarSponge = DefaultFrSponge; pub type OpeningProof = KZGProof; diff --git a/msm/src/precomputed_srs.rs b/msm/src/precomputed_srs.rs index ea95cf6eeb..fabceca785 100644 --- a/msm/src/precomputed_srs.rs +++ b/msm/src/precomputed_srs.rs @@ -1,7 +1,7 @@ //! Clone of kimchi/precomputed_srs.rs but for MSM project with BN254 use crate::{Fp, BN254, DOMAIN_SIZE}; -use ark_ec::PairingEngine; +use ark_ec::pairing::Pairing; use ark_ff::UniformRand; use ark_serialize::Write; use kimchi::{circuits::domains::EvaluationDomains, precomputed_srs::TestSRS}; @@ -12,12 +12,12 @@ use std::{fs::File, io::BufReader, path::PathBuf}; /// A clone of the `PairingSRS` that is serialized in a test-optimised way. #[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] -pub struct TestPairingSRS { +pub struct TestPairingSRS { pub full_srs: TestSRS, pub verifier_srs: TestSRS, } -impl From> for TestPairingSRS { +impl From> for TestPairingSRS { fn from(value: PairingSRS) -> Self { TestPairingSRS { full_srs: From::from(value.full_srs), @@ -26,7 +26,7 @@ impl From> for TestPairingSRS { } } -impl From> for PairingSRS { +impl From> for PairingSRS { fn from(value: TestPairingSRS) -> Self { PairingSRS { full_srs: From::from(value.full_srs), diff --git a/msm/src/serialization/interpreter.rs b/msm/src/serialization/interpreter.rs index 518ce37e95..57c07efebf 100644 --- a/msm/src/serialization/interpreter.rs +++ b/msm/src/serialization/interpreter.rs @@ -1,4 +1,4 @@ -use ark_ff::{FpParameters, PrimeField, Zero}; +use ark_ff::{PrimeField, Zero}; use num_bigint::{BigInt, BigUint, ToBigInt}; use num_integer::Integer; use num_traits::{sign::Signed, Euclid}; @@ -101,7 +101,7 @@ pub const N_LIMBS_LARGE: usize = 4; /// Returns the 
highest limb of the foreign field modulus. Is used by the lookups. pub fn ff_modulus_highest_limb() -> BigUint { - let f_bui: BigUint = TryFrom::try_from(::Params::MODULUS).unwrap(); + let f_bui: BigUint = TryFrom::try_from(::MODULUS).unwrap(); f_bui >> ((N_LIMBS - 1) * LIMB_BITSIZE) } @@ -547,11 +547,11 @@ pub fn multiplication_circuit< let large_limb_size: F = From::from(1u128 << LIMB_BITSIZE_LARGE); // Foreign field modulus - let f_bui: BigUint = TryFrom::try_from(Ff::Params::MODULUS).unwrap(); + let f_bui: BigUint = TryFrom::try_from(Ff::MODULUS).unwrap(); let f_bi: BigInt = f_bui.to_bigint().unwrap(); // Native field modulus (prime) - let n_bui: BigUint = TryFrom::try_from(F::Params::MODULUS).unwrap(); + let n_bui: BigUint = TryFrom::try_from(F::MODULUS).unwrap(); let n_bi: BigInt = n_bui.to_bigint().unwrap(); let n_half_bi = &n_bi / &two_bi; @@ -802,10 +802,9 @@ mod tests { lookups::LookupTable, N_INTERMEDIATE_LIMBS, }, - Ff1, LIMB_BITSIZE, N_LIMBS, + Ff1, Fp, LIMB_BITSIZE, N_LIMBS, }; - use ark_ff::{BigInteger, FpParameters as _, One, PrimeField, UniformRand, Zero}; - use mina_curves::pasta::Fp; + use ark_ff::{BigInteger, One, PrimeField, UniformRand, Zero}; use num_bigint::BigUint; use o1_utils::{tests::make_test_rng, FieldHelpers}; use rand::{CryptoRng, Rng, RngCore}; @@ -957,7 +956,7 @@ mod tests { #[test] fn test_decomposition_order_minus_one() { - let x = BigUint::from_bytes_be(&::Params::MODULUS.to_bytes_be()) + let x = BigUint::from_bytes_be(&::MODULUS.to_bytes_be()) - BigUint::from_str("1").unwrap(); test_decomposition_generic(Fp::from(x)); diff --git a/msm/src/serialization/lookups.rs b/msm/src/serialization/lookups.rs index 00066eb196..7dc2592f22 100644 --- a/msm/src/serialization/lookups.rs +++ b/msm/src/serialization/lookups.rs @@ -1,5 +1,5 @@ use crate::{logup::LookupTableID, Logup, LIMB_BITSIZE, N_LIMBS}; -use ark_ff::{FpParameters, PrimeField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::FieldHelpers; use 
std::marker::PhantomData; @@ -199,7 +199,7 @@ impl LookupTable { Some(value < F::from(1u64 << 9) || value >= F::zero() - F::from(1u64 << 9)) } Self::RangeCheckFfHighest(_) => { - let f_bui: BigUint = TryFrom::try_from(Ff::Params::MODULUS).unwrap(); + let f_bui: BigUint = TryFrom::try_from(Ff::MODULUS).unwrap(); let top_modulus_f: F = F::from_biguint(&(f_bui >> ((N_LIMBS - 1) * LIMB_BITSIZE))).unwrap(); Some(value < top_modulus_f) diff --git a/msm/src/test/generic.rs b/msm/src/test/generic.rs index c1951b4331..ee2d13ba0c 100644 --- a/msm/src/test/generic.rs +++ b/msm/src/test/generic.rs @@ -3,7 +3,7 @@ use crate::{ expr::E, logup::LookupTableID, lookups::LookupTableIDs, proof::ProofInputs, prover::prove, verifier::verify, witness::Witness, BaseSponge, Fp, OpeningProof, ScalarSponge, BN254, }; -use ark_ff::Zero; +use ark_ec::AffineRepr; use kimchi::circuits::domains::EvaluationDomains; use poly_commitment::kzg::PairingSRS; use rand::{CryptoRng, RngCore}; diff --git a/msm/src/test/test_circuit/interpreter.rs b/msm/src/test/test_circuit/interpreter.rs index 105906806c..afb72942fd 100644 --- a/msm/src/test/test_circuit/interpreter.rs +++ b/msm/src/test/test_circuit/interpreter.rs @@ -7,7 +7,7 @@ use crate::{ }, LIMB_BITSIZE, N_LIMBS, }; -use ark_ff::{Field, PrimeField, SquareRootField, Zero}; +use ark_ff::{Field, PrimeField, Zero}; fn fill_limbs_a_b< F: PrimeField, @@ -253,7 +253,7 @@ pub fn test_fixed_sel_degree_7_with_constants< // NB: Assumes non-standard selectors /// Circuit generator function for 3 * A_0^7 + B_0 * FIXED_SEL_3. 
pub fn test_fixed_sel_degree_7_mul_witness< - F: SquareRootField + PrimeField, + F: PrimeField, Env: ColAccessCap + ColWriteCap + DirectWitnessCap, >( env: &mut Env, @@ -328,7 +328,7 @@ pub fn constrain_lookups< } pub fn lookups_circuit< - F: SquareRootField + PrimeField, + F: PrimeField, Env: ColAccessCap + ColWriteCap + DirectWitnessCap diff --git a/o1vm/src/legacy/folding.rs b/o1vm/src/legacy/folding.rs index 153495840e..02ae226b70 100644 --- a/o1vm/src/legacy/folding.rs +++ b/o1vm/src/legacy/folding.rs @@ -1,4 +1,4 @@ -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::AffineRepr; use ark_ff::FftField; use ark_poly::{Evaluations, Radix2EvaluationDomain}; use folding::{ @@ -15,8 +15,8 @@ use strum_macros::{EnumCount as EnumCountMacro, EnumIter}; // complexity for clippy. // Should be moved into FoldingConfig, but associated type defaults are unstable // at the moment. -pub(crate) type ScalarField = <::Curve as AffineCurve>::ScalarField; -pub(crate) type BaseField = <::Curve as AffineCurve>::BaseField; +pub(crate) type ScalarField = <::Curve as AffineRepr>::ScalarField; +pub(crate) type BaseField = <::Curve as AffineRepr>::BaseField; // Does not contain alpha because this one should be provided by folding itself #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, EnumIter, EnumCountMacro)] @@ -49,19 +49,19 @@ pub struct FoldingInstance { /// - β as the evaluation point for the logup argument /// - j: the joint combiner for vector lookups /// - γ (set to 0 for now) - pub challenges: [::ScalarField; Challenge::COUNT], + pub challenges: [::ScalarField; Challenge::COUNT], /// Reuses the Alphas defined in the example of folding - pub alphas: Alphas<::ScalarField>, + pub alphas: Alphas<::ScalarField>, /// Blinder used in the polynomial commitment scheme - pub blinder: ::ScalarField, + pub blinder: ::ScalarField, } impl Foldable for FoldingInstance { fn combine(a: Self, b: Self, challenge: G::ScalarField) -> Self { FoldingInstance { commitments: array::from_fn(|i| { 
- a.commitments[i] + b.commitments[i].mul(challenge).into_affine() + (a.commitments[i] + b.commitments[i].mul(challenge)).into() }), challenges: array::from_fn(|i| a.challenges[i] + challenge * b.challenges[i]), alphas: Alphas::combine(a.alphas, b.alphas, challenge), diff --git a/o1vm/src/legacy/tests.rs b/o1vm/src/legacy/tests.rs index d0dfc5b3db..de831c39f3 100644 --- a/o1vm/src/legacy/tests.rs +++ b/o1vm/src/legacy/tests.rs @@ -1,5 +1,4 @@ use crate::{ - legacy::folding::{Challenge, FoldingEnvironment, FoldingInstance, FoldingWitness}, interpreters::mips::{ column::N_MIPS_REL_COLS, constraints::Env as CEnv, @@ -7,8 +6,12 @@ use crate::{ witness::SCRATCH_SIZE, ITypeInstruction, }, - legacy::trace::Trace, - legacy::folding::DecomposedMIPSTrace, + legacy::{ + folding::{ + Challenge, DecomposedMIPSTrace, FoldingEnvironment, FoldingInstance, FoldingWitness, + }, + trace::Trace, + }, BaseSponge, Curve, }; @@ -145,33 +148,33 @@ pub mod mips { RType(rtype) => match rtype { JumpRegister | SyscallExitGroup | Sync => assert_num_constraints(&instr, 1), ShiftLeftLogical - | ShiftRightLogical - | ShiftRightArithmetic - | ShiftLeftLogicalVariable - | ShiftRightLogicalVariable - | ShiftRightArithmeticVariable - | JumpAndLinkRegister - | SyscallReadHint - | MoveFromHi - | MoveFromLo - | MoveToLo - | MoveToHi - | Add - | AddUnsigned - | Sub - | SubUnsigned - | And - | Or - | Xor - | Nor - | SetLessThan - | SetLessThanUnsigned - | MultiplyToRegister - | CountLeadingOnes - | CountLeadingZeros => assert_num_constraints(&instr, 4), + | ShiftRightLogical + | ShiftRightArithmetic + | ShiftLeftLogicalVariable + | ShiftRightLogicalVariable + | ShiftRightArithmeticVariable + | JumpAndLinkRegister + | SyscallReadHint + | MoveFromHi + | MoveFromLo + | MoveToLo + | MoveToHi + | Add + | AddUnsigned + | Sub + | SubUnsigned + | And + | Or + | Xor + | Nor + | SetLessThan + | SetLessThanUnsigned + | MultiplyToRegister + | CountLeadingOnes + | CountLeadingZeros => assert_num_constraints(&instr, 
4), MoveZero | MoveNonZero => assert_num_constraints(&instr, 6), SyscallReadOther | SyscallWriteHint | SyscallWriteOther | Multiply - | MultiplyUnsigned | Div | DivUnsigned => assert_num_constraints(&instr, 7), + | MultiplyUnsigned | Div | DivUnsigned => assert_num_constraints(&instr, 7), SyscallOther => assert_num_constraints(&instr, 11), SyscallMmap => assert_num_constraints(&instr, 12), SyscallFcntl | SyscallReadPreimage => assert_num_constraints(&instr, 23), @@ -183,24 +186,23 @@ pub mod mips { JumpAndLink => assert_num_constraints(&instr, 4), }, IType(itype) => match itype { - BranchLeqZero | BranchGtZero | BranchLtZero | BranchGeqZero | Store8 | Store16 => { - assert_num_constraints(&instr, 1) - } + BranchLeqZero | BranchGtZero | BranchLtZero | BranchGeqZero | Store8 + | Store16 => assert_num_constraints(&instr, 1), BranchEq | BranchNeq | Store32 => assert_num_constraints(&instr, 3), AddImmediate - | AddImmediateUnsigned - | SetLessThanImmediate - | SetLessThanImmediateUnsigned - | AndImmediate - | OrImmediate - | XorImmediate - | LoadUpperImmediate - | Load8 - | Load16 - | Load32 - | Load8Unsigned - | Load16Unsigned - | Store32Conditional => assert_num_constraints(&instr, 4), + | AddImmediateUnsigned + | SetLessThanImmediate + | SetLessThanImmediateUnsigned + | AndImmediate + | OrImmediate + | XorImmediate + | LoadUpperImmediate + | Load8 + | Load16 + | Load32 + | Load8Unsigned + | Load16Unsigned + | Store32Conditional => assert_num_constraints(&instr, 4), LoadWordLeft | LoadWordRight | StoreWordLeft | StoreWordRight => { assert_num_constraints(&instr, 13) } @@ -394,7 +396,8 @@ pub mod keccak { // Add the columns of the selectors to the circuit trace.set_selector_column::(Sponge(Absorb(First)), domain_size); - trace.set_selector_column::(Sponge(Absorb(Middle)), domain_size); + trace + .set_selector_column::(Sponge(Absorb(Middle)), domain_size); trace.set_selector_column::(Sponge(Absorb(Last)), domain_size); trace } @@ -416,10 +419,11 @@ pub mod keccak { let 
mut keccak_env = KeccakEnv::::new(0, &preimage); // Keep track of the constraints and lookups of the sub-circuits - let mut keccak_circuit = >>::new( - domain_size, - &mut keccak_env, - ); + let mut keccak_circuit = + >>::new( + domain_size, + &mut keccak_env, + ); while keccak_env.step.is_some() { let step = keccak_env.selector(); @@ -435,12 +439,12 @@ pub mod keccak { for step in Steps::iter().flat_map(|x| x.into_iter()) { if keccak_circuit.in_circuit(step) { test_completeness_generic_no_lookups::< - N_ZKVM_KECCAK_COLS, + N_ZKVM_KECCAK_COLS, N_ZKVM_KECCAK_REL_COLS, N_ZKVM_KECCAK_SEL_COLS, 0, _, - >( + >( keccak_circuit[step].constraints.clone(), Box::new([]), keccak_circuit[step].witness.clone(), @@ -475,6 +479,8 @@ pub mod keccak { // (Step, Left, Right) type KeccakFoldingPair = (Steps, KeccakFoldingSide, KeccakFoldingSide); + type KeccakDefaultFqSponge = DefaultFqSponge; + #[test] fn heavy_test_keccak_folding() { use crate::{keccak::folding::KeccakConfig, trace::Foldable, Curve}; @@ -503,10 +509,10 @@ pub mod keccak { // Store all constraints indexed by Step let constraints = >::folding_constraints(&trace); + >>::folding_constraints(&trace); // DEFINITIONS OF FUNCTIONS FOR TESTING PURPOSES @@ -521,127 +527,130 @@ pub mod keccak { let check_folding = |left: &KeccakFoldingSide, - right: &KeccakFoldingSide, - constraints: &[FoldingCompatibleExpr], - fq_sponge: &mut DefaultFqSponge| { - // Create the folding scheme ignoring selectors - let (scheme, final_constraint) = - FoldingScheme::::new(constraints.to_vec(), &srs, domain, &()); - - // Fold both sides and check the constraints ignoring the selector columns - let fout = - scheme.fold_instance_witness_pair(left.clone(), right.clone(), fq_sponge); - - // We should always have 0 as the degree of the constraints, - // without selectors, they are never higher than 2 in Keccak. 
- assert_eq!(scheme.get_number_of_additional_columns(), 0); - - let checker = ExtendedProvider::new(fout.folded_instance, fout.folded_witness); - checker.check(&final_constraint, domain); - }; + right: &KeccakFoldingSide, + constraints: &[FoldingCompatibleExpr], + fq_sponge: &mut KeccakDefaultFqSponge| { + // Create the folding scheme ignoring selectors + let (scheme, final_constraint) = + FoldingScheme::::new(constraints.to_vec(), &srs, domain, &()); + + // Fold both sides and check the constraints ignoring the selector columns + let fout = + scheme.fold_instance_witness_pair(left.clone(), right.clone(), fq_sponge); + + // We should always have 0 as the degree of the constraints, + // without selectors, they are never higher than 2 in Keccak. + assert_eq!(scheme.get_number_of_additional_columns(), 0); + + let checker = ExtendedProvider::new(fout.folded_instance, fout.folded_witness); + checker.check(&final_constraint, domain); + }; let check_decomposable_folding_pair = |step: Option, - left: &KeccakFoldingSide, - right: &KeccakFoldingSide, - scheme: &DecomposableFoldingScheme, - final_constraint: &FoldingCompatibleExpr, - quadri_cols: Option, - fq_sponge: &mut DefaultFqSponge| { - let fout = - scheme.fold_instance_witness_pair(left.clone(), right.clone(), step, fq_sponge); - - let extra_cols = scheme.get_number_of_additional_columns(); - if let Some(quadri_cols) = quadri_cols { - assert!(extra_cols == quadri_cols); - } - - // Check the constraints on the folded circuit applying selectors - let checker = ExtendedProvider::::new( - fout.folded_instance, - fout.folded_witness, - ); - checker.check(final_constraint, domain); - }; - - let check_decomposable_folding = - |pair: &KeccakFoldingPair, - constraints: BTreeMap>>, - common_constraints: Vec>, - quadri_cols: Option, - fq_sponge: &mut DefaultFqSponge| { - let (step, left, right) = pair; - let (dec_scheme, dec_final_constraint) = - DecomposableFoldingScheme::::new( - constraints, - common_constraints, - &srs, - 
domain, - &(), - ); - // Subcase A: Check the folded circuit of decomposable folding ignoring selectors (None) - check_decomposable_folding_pair( - None, - left, - right, - &dec_scheme, - &dec_final_constraint, - quadri_cols, - fq_sponge, - ); - // Subcase B: Check the folded circuit of decomposable folding applying selectors (Some) - check_decomposable_folding_pair( - Some(*step), - left, - right, - &dec_scheme, - &dec_final_constraint, - quadri_cols, - fq_sponge, - ); - }; - - let check_decomposable_folding_mix = - |steps: (Steps, Steps), - fq_sponge: &mut DefaultFqSponge| { - let (dec_scheme, dec_final_constraint) = - DecomposableFoldingScheme::::new( - constraints.clone(), - vec![], - &srs, - domain, - &(), - ); - let left = { - let fout = dec_scheme.fold_instance_witness_pair( - keccak_trace[0].to_folding_pair(steps.0, fq_sponge, domain, &srs), - keccak_trace[1].to_folding_pair(steps.0, fq_sponge, domain, &srs), - Some(steps.0), + left: &KeccakFoldingSide, + right: &KeccakFoldingSide, + scheme: &DecomposableFoldingScheme, + final_constraint: &FoldingCompatibleExpr, + quadri_cols: Option, + fq_sponge: &mut KeccakDefaultFqSponge| { + let fout = scheme.fold_instance_witness_pair( + left.clone(), + right.clone(), + step, fq_sponge, ); + + let extra_cols = scheme.get_number_of_additional_columns(); + if let Some(quadri_cols) = quadri_cols { + assert!(extra_cols == quadri_cols); + } + + // Check the constraints on the folded circuit applying selectors let checker = ExtendedProvider::::new( fout.folded_instance, fout.folded_witness, ); - (checker.instance, checker.witness) + checker.check(final_constraint, domain); }; - let right = { - let fout = dec_scheme.fold_instance_witness_pair( - keccak_trace[0].to_folding_pair(steps.1, fq_sponge, domain, &srs), - keccak_trace[1].to_folding_pair(steps.1, fq_sponge, domain, &srs), - Some(steps.1), + + let check_decomposable_folding = + |pair: &KeccakFoldingPair, + constraints: BTreeMap>>, + common_constraints: Vec>, + 
quadri_cols: Option, + fq_sponge: &mut KeccakDefaultFqSponge| { + let (step, left, right) = pair; + let (dec_scheme, dec_final_constraint) = + DecomposableFoldingScheme::::new( + constraints, + common_constraints, + &srs, + domain, + &(), + ); + // Subcase A: Check the folded circuit of decomposable folding ignoring selectors (None) + check_decomposable_folding_pair( + None, + left, + right, + &dec_scheme, + &dec_final_constraint, + quadri_cols, fq_sponge, ); - let checker = ExtendedProvider::::new( - fout.folded_instance, - fout.folded_witness, + // Subcase B: Check the folded circuit of decomposable folding applying selectors (Some) + check_decomposable_folding_pair( + Some(*step), + left, + right, + &dec_scheme, + &dec_final_constraint, + quadri_cols, + fq_sponge, ); - (checker.instance, checker.witness) }; - let fout = dec_scheme.fold_instance_witness_pair(left, right, None, fq_sponge); - let checker = ExtendedProvider::new(fout.folded_instance, fout.folded_witness); - checker.check(&dec_final_constraint, domain); - }; + + let check_decomposable_folding_mix = + |steps: (Steps, Steps), fq_sponge: &mut KeccakDefaultFqSponge| { + let (dec_scheme, dec_final_constraint) = + DecomposableFoldingScheme::::new( + constraints.clone(), + vec![], + &srs, + domain, + &(), + ); + let left = { + let fout = dec_scheme.fold_instance_witness_pair( + keccak_trace[0].to_folding_pair(steps.0, fq_sponge, domain, &srs), + keccak_trace[1].to_folding_pair(steps.0, fq_sponge, domain, &srs), + Some(steps.0), + fq_sponge, + ); + let checker = ExtendedProvider::::new( + fout.folded_instance, + fout.folded_witness, + ); + (checker.instance, checker.witness) + }; + let right = { + let fout = dec_scheme.fold_instance_witness_pair( + keccak_trace[0].to_folding_pair(steps.1, fq_sponge, domain, &srs), + keccak_trace[1].to_folding_pair(steps.1, fq_sponge, domain, &srs), + Some(steps.1), + fq_sponge, + ); + let checker = ExtendedProvider::::new( + fout.folded_instance, + fout.folded_witness, + ); 
+ (checker.instance, checker.witness) + }; + let fout = dec_scheme.fold_instance_witness_pair(left, right, None, fq_sponge); + let checker = ExtendedProvider::new(fout.folded_instance, fout.folded_witness); + checker.check(&dec_final_constraint, domain); + }; // HERE STARTS THE TESTING @@ -654,15 +663,16 @@ pub mod keccak { assert_eq!(constraints[&Round(0)].len(), 389); // Total number of Keccak constraints of degree higher than 2 (should be 0) - let total_deg_higher_2 = Steps::iter() - .flat_map(|x| x.into_iter()) - .fold(0, |acc, step| { - acc + trace[step] - .constraints - .iter() - .filter(|c| c.degree(1, 0) > 2) - .count() - }); + let total_deg_higher_2 = + Steps::iter() + .flat_map(|x| x.into_iter()) + .fold(0, |acc, step| { + acc + trace[step] + .constraints + .iter() + .filter(|c| c.degree(1, 0) > 2) + .count() + }); assert_eq!(total_deg_higher_2, 0); // Check folding constraints of individual steps ignoring selectors @@ -685,7 +695,13 @@ pub mod keccak { // CASE 2: Check that `DecomposableFoldingScheme` works when passing the dummy zero constraint // to each step, and an empty list of common constraints. 
let pair = (step, left, right); - check_decomposable_folding(&pair, dummy_constraints(), vec![], Some(0), &mut fq_sponge); + check_decomposable_folding( + &pair, + dummy_constraints(), + vec![], + Some(0), + &mut fq_sponge, + ); // CASE 3: Using a separate `DecomposableFoldingScheme` for each step, check each step // constraints using a dummy BTreeMap of `vec[0]` per-step constraints and diff --git a/o1vm/src/lib.rs b/o1vm/src/lib.rs index 3da2a4ff0d..86a250ba4f 100644 --- a/o1vm/src/lib.rs +++ b/o1vm/src/lib.rs @@ -52,7 +52,7 @@ pub(crate) type E = Expr, Column>; pub type Fp = ark_bn254::Fr; /// Elliptic curve group of BN254 pub type Curve = ark_bn254::G1Affine; -pub type BaseSponge = DefaultFqSponge; +pub type BaseSponge = DefaultFqSponge; pub type ScalarSponge = DefaultFrSponge; pub type SpongeParams = PlonkSpongeConstantsKimchi; -pub type OpeningProof = KZGProof>; +pub type OpeningProof = KZGProof>; From e0d92f468ff4c485c76c0dd31203bddf0d6a18ae Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Fri, 20 Sep 2024 22:23:08 +0100 Subject: [PATCH 3/8] Fix serialization? (what? why?) --- utils/src/serialization.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/utils/src/serialization.rs b/utils/src/serialization.rs index a69c0dabe7..05c25e94b9 100644 --- a/utils/src/serialization.rs +++ b/utils/src/serialization.rs @@ -88,7 +88,7 @@ where } } -/// Same as `SerdeAs` but using unchecked (de)serialization. +/// Same as `SerdeAs` but using unchecked and uncompressed (de)serialization. pub struct SerdeAsUnchecked; impl serde_with::SerializeAs for SerdeAsUnchecked @@ -100,8 +100,7 @@ where S: serde::Serializer, { let mut bytes = vec![]; - // Serialization is still as usual, there's no serialize_compressed_unchecked method. - val.serialize_compressed(&mut bytes) + val.serialize_uncompressed(&mut bytes) .map_err(serde::ser::Error::custom)?; if serializer.is_human_readable() { @@ -125,6 +124,6 @@ where } else { Bytes::deserialize_as(deserializer)? 
}; - T::deserialize_compressed_unchecked(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_uncompressed_unchecked(&mut &bytes[..]).map_err(serde::de::Error::custom) } } From 2aabe695a8ca9890633bf47356b320cd771d8bab Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Mon, 23 Sep 2024 21:53:17 +0100 Subject: [PATCH 4/8] Clarify situation with Fp/Fq/Ff1 in msm fec/serialization --- msm/src/fec/mod.rs | 8 ++++---- msm/src/serialization/interpreter.rs | 5 ++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/msm/src/fec/mod.rs b/msm/src/fec/mod.rs index 6d12154865..929b0d01e7 100644 --- a/msm/src/fec/mod.rs +++ b/msm/src/fec/mod.rs @@ -38,9 +38,9 @@ mod tests { rng: &mut RNG, domain_size: usize, ) -> FECWitnessBuilderEnv { - // NOTE: this uses Pallas-in-Pallas emulation. use mina_curves::pasta::Pallas; - type Fp = ::ScalarField; + // Fq = Ff2 + type Fq = ::ScalarField; let mut witness_env = WitnessBuilderEnv::create(); @@ -49,13 +49,13 @@ mod tests { let gen = Pallas::generator(); - let kp: Fp = UniformRand::rand(rng); + let kp: Fq = UniformRand::rand(rng); let p: Pallas = gen.mul(kp).into(); let px: Ff1 = p.x; let py: Ff1 = p.y; for row_i in 0..domain_size { - let kq: Fp = UniformRand::rand(rng); + let kq: Fq = UniformRand::rand(rng); let q: Pallas = gen.mul(kq).into(); let qx: Ff1 = q.x; diff --git a/msm/src/serialization/interpreter.rs b/msm/src/serialization/interpreter.rs index 57c07efebf..d4353a1863 100644 --- a/msm/src/serialization/interpreter.rs +++ b/msm/src/serialization/interpreter.rs @@ -802,7 +802,7 @@ mod tests { lookups::LookupTable, N_INTERMEDIATE_LIMBS, }, - Ff1, Fp, LIMB_BITSIZE, N_LIMBS, + Ff1, LIMB_BITSIZE, N_LIMBS, }; use ark_ff::{BigInteger, One, PrimeField, UniformRand, Zero}; use num_bigint::BigUint; @@ -810,6 +810,9 @@ mod tests { use rand::{CryptoRng, Rng, RngCore}; use std::str::FromStr; + // In this test module we assume native = foreign = scalar field of Vesta. 
+ type Fp = Ff1; + type SerializationWitnessBuilderEnv = WitnessBuilderEnv< Fp, SerializationColumn, From ac5e9a9b252988d27904f6187cec38bf85776ecb Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Mon, 23 Sep 2024 22:34:45 +0100 Subject: [PATCH 5/8] Remove irrelevant old comment --- poly-commitment/src/kzg.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/poly-commitment/src/kzg.rs b/poly-commitment/src/kzg.rs index 7fa822726d..fc27d59c18 100644 --- a/poly-commitment/src/kzg.rs +++ b/poly-commitment/src/kzg.rs @@ -421,16 +421,5 @@ impl< .unwrap(); res.0 == Pair::TargetField::one() - - // @VOLHOVM remove this if not necessary - // let numerator_commitment_proj: ::Group = - // { poly_commitment - eval_commitment - blinding_commitment }; - // let numerator_commitment_affine: Pair::G1Affine = From::from(numerator_commitment_proj); - // - // let numerator = Pair::pairing(numerator_commitment_affine, Pair::G2Affine::generator()); - // let scaled_quotient = Pair::pairing(self.quotient, divisor_commitment); - // numerator == scaled_quotient - // } - //} } } From 3c6e2eb1c327ef112489202f7f3390290904e225 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Mon, 23 Sep 2024 21:57:09 +0100 Subject: [PATCH 6/8] KZG: use multi_pairing() and is_zero() --- poly-commitment/src/kzg.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/poly-commitment/src/kzg.rs b/poly-commitment/src/kzg.rs index fc27d59c18..ca15c528af 100644 --- a/poly-commitment/src/kzg.rs +++ b/poly-commitment/src/kzg.rs @@ -15,7 +15,7 @@ use crate::{ }; use ark_ec::{pairing::Pairing, AffineRepr, VariableBaseMSM}; -use ark_ff::{One, PrimeField, Zero}; +use ark_ff::{PrimeField, Zero}; use ark_poly::{ univariate::{DenseOrSparsePolynomial, DensePolynomial}, DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, @@ -417,9 +417,8 @@ impl< ]; // the result here is numerator_commitment * 1 - quotient * divisor_commitment // Note that the unwrap 
cannot fail as the output of a miller loop is non zero - let res = Pair::final_exponentiation(Pair::multi_miller_loop(to_loop_left, to_loop_right)) - .unwrap(); + let res = Pair::multi_pairing(to_loop_left, to_loop_right); - res.0 == Pair::TargetField::one() + res.is_zero() } } From b05067ca9e6874b02c86c189f8f925f23b37f20b Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Mon, 23 Sep 2024 22:39:07 +0100 Subject: [PATCH 7/8] Switch to workspace deps for circuit-construction and export_test_vectors --- Cargo.lock | 85 ++++--------------------- circuit-construction/Cargo.toml | 52 +++++++-------- poseidon/export_test_vectors/Cargo.toml | 21 +++--- 3 files changed, 50 insertions(+), 108 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 89bb61363e..86e8dc0650 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -829,38 +829,14 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - [[package]] name = "darling" version = "0.20.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5d6b04b3fd0ba9926f945895de7d806260a2d7431ba82e7edaecb043c4c6b8" dependencies = [ - "darling_core 0.20.5", - "darling_macro 0.20.5", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", + "darling_core", + "darling_macro", ] [[package]] @@ -877,24 +853,13 @@ dependencies = [ "syn 2.0.48", ] -[[package]] -name = "darling_macro" -version = 
"0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.20.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d1545d67a2149e1d93b7e5c7752dce5a7426eb5d1357ddcfd89336b94444f77" dependencies = [ - "darling_core 0.20.5", + "darling_core", "quote", "syn 2.0.48", ] @@ -1007,7 +972,7 @@ dependencies = [ "rand", "serde", "serde_json", - "serde_with 1.14.0", + "serde_with", ] [[package]] @@ -1080,7 +1045,7 @@ dependencies = [ "rmp-serde", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", "strum", "strum_macros", "thiserror", @@ -1465,7 +1430,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", "snarky-deriver", "strum", "strum_macros", @@ -1501,7 +1466,7 @@ dependencies = [ "rmp-serde", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", "strum", "strum_macros", "thiserror", @@ -1520,7 +1485,7 @@ dependencies = [ "poly-commitment", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", "tinytemplate", ] @@ -1706,7 +1671,7 @@ dependencies = [ "rayon", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", ] [[package]] @@ -1912,7 +1877,7 @@ dependencies = [ "rayon", "secp256k1", "serde", - "serde_with 3.6.0", + "serde_with", "sha2", "thiserror", ] @@ -1949,7 +1914,7 @@ dependencies = [ "rmp-serde", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", "sha3", "stacker", "strum", @@ -2228,7 +2193,7 @@ dependencies = [ "rayon", "rmp-serde", "serde", - "serde_with 3.6.0", + "serde_with", "thiserror", ] @@ -2600,16 +2565,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" 
-dependencies = [ - "serde", - "serde_with_macros 1.5.2", -] - [[package]] name = "serde_with" version = "3.6.0" @@ -2623,29 +2578,17 @@ dependencies = [ "indexmap 2.2.2", "serde", "serde_json", - "serde_with_macros 3.6.0", + "serde_with_macros", "time", ] -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling 0.13.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "serde_with_macros" version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "568577ff0ef47b879f736cd66740e022f3672788cdf002a05a4e609ea5a6fb15" dependencies = [ - "darling 0.20.5", + "darling", "proc-macro2", "quote", "syn 2.0.48", diff --git a/circuit-construction/Cargo.toml b/circuit-construction/Cargo.toml index 3e60cb706c..fdd047984b 100644 --- a/circuit-construction/Cargo.toml +++ b/circuit-construction/Cargo.toml @@ -14,34 +14,34 @@ path = "src/lib.rs" bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options) [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -ark-poly = { version = "0.4.2", features = [ "parallel" ] } -ark-serialize = "0.4.2" -blake2 = "0.10.0" -num-derive = "0.3" -num-traits = "0.2" -itertools = "0.10.3" -rand = "0.8.0" -rand_core = "0.6.3" -rayon = "1.5.0" -rmp-serde = "1.0.0" -serde = "1.0.130" -serde_with = "1.10.0" -thiserror = "1.0.30" +ark-ff.workspace = true +ark-ec.workspace = true +ark-poly.workspace = true +ark-serialize.workspace = true +blake2.workspace = true +num-derive.workspace = true +num-traits.workspace = true +itertools.workspace = true +rand.workspace = true +rand_core.workspace = true +rayon.workspace = true +rmp-serde.workspace 
= true +serde.workspace = true +serde_with.workspace = true +thiserror.workspace = true -poly-commitment = { path = "../poly-commitment", version = "0.1.0" } -groupmap = { path = "../groupmap", version = "0.1.0" } -mina-curves = { path = "../curves", version = "0.1.0" } -o1-utils = { path = "../utils", version = "0.1.0" } -mina-poseidon = { path = "../poseidon", version = "0.1.0" } -kimchi = { path = "../kimchi", version = "0.1.0" } +poly-commitment.workspace = true +groupmap.workspace = true +mina-curves.workspace = true +o1-utils.workspace = true +mina-poseidon.workspace = true +kimchi.workspace = true [dev-dependencies] -proptest = "1.0.0" -proptest-derive = "0.3.0" -colored = "2.0.0" +proptest.workspace = true +proptest-derive.workspace = true +colored.workspace = true # benchmarks -criterion = "0.3" -iai = "0.1" +criterion.workspace = true +iai.workspace = true diff --git a/poseidon/export_test_vectors/Cargo.toml b/poseidon/export_test_vectors/Cargo.toml index 80baaa21f8..e00ad601ee 100644 --- a/poseidon/export_test_vectors/Cargo.toml +++ b/poseidon/export_test_vectors/Cargo.toml @@ -10,15 +10,14 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -num-bigint = { version = "0.4.0" } -serde_json = { version = "1.0" } -hex = { version = "0.4" } -ark-serialize = { version = "0.4.2" } -rand = "0.8.0" -serde = { version = "1.0", features = ["derive"] } -serde_with = "1.10.0" - -mina-curves = { path = "../../curves", version = "0.1.0" } -mina-poseidon = { path = "../../poseidon", version = "0.1.0" } +ark-ff.workspace = true +num-bigint.workspace = true +serde_json.workspace = true +hex.workspace = true +ark-serialize.workspace = true +rand.workspace = true +serde.workspace = true +serde_with.workspace = true +mina-curves.workspace = true +mina-poseidon.workspace = true From 3ce142b76b74c72d273dc4377995220bd3ef8dd8 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Mon, 23 Sep 2024 22:20:46 
+0100 Subject: [PATCH 8/8] Poseidon/Test Vectors: Fix wrong serialization & add regression test --- poseidon/export_test_vectors/src/vectors.rs | 119 +++++++++++++++++++- utils/tests/field_helpers.rs | 2 +- 2 files changed, 117 insertions(+), 4 deletions(-) diff --git a/poseidon/export_test_vectors/src/vectors.rs b/poseidon/export_test_vectors/src/vectors.rs index b8feddeeb8..ee047890cf 100644 --- a/poseidon/export_test_vectors/src/vectors.rs +++ b/poseidon/export_test_vectors/src/vectors.rs @@ -1,5 +1,5 @@ use super::{Mode, ParamType}; -use ark_ff::UniformRand as _; +use ark_ff::{PrimeField, UniformRand as _}; use ark_serialize::CanonicalSerialize as _; use mina_curves::pasta::Fp; use mina_poseidon::{ @@ -78,7 +78,7 @@ pub fn generate(mode: Mode, param_type: ParamType) -> TestVectors { .into_iter() .map(|elem| { let mut input_bytes = vec![]; - elem.0 + elem.into_bigint() .serialize_uncompressed(&mut input_bytes) .expect("canonical serialiation should work"); @@ -90,7 +90,7 @@ pub fn generate(mode: Mode, param_type: ParamType) -> TestVectors { .collect(); let mut output_bytes = vec![]; output - .0 + .into_bigint() .serialize_uncompressed(&mut output_bytes) .expect("canonical serialization should work"); @@ -112,3 +112,116 @@ pub fn generate(mode: Mode, param_type: ParamType) -> TestVectors { TestVectors { name, test_vectors } } + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn poseidon_test_vectors_regression() { + use mina_poseidon::pasta; + let mut rng = &mut rand::rngs::StdRng::from_seed([0u8; 32]); + + // Values are generated w.r.t. 
the following commit: + // 1494cf973d40fb276465929eb7db1952c5de7bdc + // (that still uses arkworks 0.3.0) + + let expected_output_bytes_legacy = [ + [ + 27, 50, 81, 182, 145, 45, 130, 237, 199, 139, 187, 10, 92, 136, 240, 198, 253, 225, + 120, 27, 195, 230, 84, 18, 63, 166, 134, 42, 76, 99, 230, 23, + ], + [ + 233, 146, 98, 4, 142, 113, 119, 69, 253, 205, 96, 42, 59, 82, 126, 158, 124, 46, + 91, 165, 137, 65, 88, 8, 78, 47, 46, 44, 177, 66, 100, 61, + ], + [ + 31, 143, 157, 47, 185, 84, 125, 2, 84, 161, 192, 39, 31, 244, 0, 66, 165, 153, 39, + 232, 47, 208, 151, 215, 250, 114, 63, 133, 81, 232, 194, 58, + ], + [ + 153, 120, 16, 250, 143, 51, 135, 158, 104, 156, 128, 128, 33, 215, 241, 207, 48, + 47, 48, 240, 7, 87, 84, 228, 61, 194, 247, 93, 118, 187, 57, 32, + ], + [ + 249, 48, 174, 91, 239, 32, 152, 227, 183, 25, 73, 233, 135, 140, 175, 86, 89, 137, + 127, 59, 158, 177, 113, 31, 41, 106, 153, 207, 183, 64, 236, 63, + ], + [ + 70, 27, 110, 192, 143, 211, 169, 195, 112, 51, 239, 212, 9, 207, 84, 132, 147, 176, + 3, 178, 245, 0, 219, 132, 93, 93, 31, 210, 255, 206, 27, 2, + ], + ]; + + let expected_output_bytes_kimchi = [ + [ + 168, 235, 158, 224, 243, 0, 70, 48, 138, 187, 250, 93, 32, 175, 115, 200, 27, 189, + 171, 194, 91, 69, 151, 133, 2, 77, 4, 82, 40, 190, 173, 47, + ], + [ + 194, 127, 92, 204, 27, 156, 169, 110, 191, 207, 34, 111, 254, 28, 202, 241, 89, + 145, 245, 226, 223, 247, 32, 48, 223, 109, 141, 29, 230, 181, 28, 13, + ], + [ + 238, 26, 57, 207, 87, 2, 255, 206, 108, 78, 212, 92, 105, 193, 255, 227, 103, 185, + 123, 134, 79, 154, 104, 138, 78, 128, 170, 185, 149, 74, 14, 10, + ], + [ + 252, 66, 64, 58, 146, 197, 79, 63, 196, 10, 116, 66, 72, 177, 170, 234, 252, 154, + 82, 137, 234, 3, 117, 226, 73, 211, 32, 4, 150, 196, 133, 33, + ], + [ + 42, 33, 199, 187, 104, 139, 231, 56, 52, 166, 8, 70, 141, 53, 158, 96, 175, 246, + 75, 186, 160, 9, 17, 203, 83, 113, 240, 208, 235, 33, 111, 41, + ], + [ + 133, 233, 196, 82, 62, 17, 13, 12, 173, 230, 192, 216, 56, 
126, 197, 152, 164, 155, + 205, 238, 73, 116, 220, 196, 21, 134, 120, 39, 171, 177, 119, 25, + ], + ]; + + let expected_output_0_hex_legacy = + "1b3251b6912d82edc78bbb0a5c88f0c6fde1781bc3e654123fa6862a4c63e617"; + let expected_output_0_hex_kimchi = + "a8eb9ee0f30046308abbfa5d20af73c81bbdabc25b459785024d045228bead2f"; + + for param_type in [ParamType::Legacy, ParamType::Kimchi] { + let expected_output_bytes = match param_type { + ParamType::Legacy => &expected_output_bytes_legacy, + ParamType::Kimchi => &expected_output_bytes_kimchi, + }; + + for length in 0..6 { + // generate input & hash + let input = rand_fields(&mut rng, length); + let output = match param_type { + ParamType::Legacy => poseidon::( + &input, + pasta::fp_legacy::static_params(), + ), + ParamType::Kimchi => poseidon::( + &input, + pasta::fp_kimchi::static_params(), + ), + }; + + let mut output_bytes = vec![]; + output + .into_bigint() + .serialize_uncompressed(&mut output_bytes) + .expect("canonical serialization should work"); + + assert!(output_bytes == expected_output_bytes[length as usize]); + } + + let expected_output_0_hex = match param_type { + ParamType::Legacy => expected_output_0_hex_legacy, + ParamType::Kimchi => expected_output_0_hex_kimchi, + }; + + let test_vectors_hex = generate(Mode::Hex, param_type); + assert!(test_vectors_hex.test_vectors[0].output == expected_output_0_hex); + } + } +} diff --git a/utils/tests/field_helpers.rs b/utils/tests/field_helpers.rs index b242ded0cb..7e6b4fd163 100644 --- a/utils/tests/field_helpers.rs +++ b/utils/tests/field_helpers.rs @@ -128,7 +128,7 @@ fn field_big() { let field_zero = BaseField::from(0u32); assert_eq!( - BigUint::from_bytes_be(&field_zero.0.to_bytes_be()), + BigUint::from_bytes_be(&field_zero.into_bigint().to_bytes_be()), BigUint::from_bytes_be(&be_zero_32bytes) );