From df08874fce805f0f1e8488108e5006fc08fbb6ee Mon Sep 17 00:00:00 2001
From: ledwards2225 <98505400+ledwards2225@users.noreply.github.com>
Date: Tue, 23 Jul 2024 08:55:22 -0700
Subject: [PATCH] feat: simple sparse commitment (#7488)

Add a method for committing to sparse polynomials more efficiently. For polynomials with very few non-zero coefficients, it is more efficient to first reduce the set of {scalar, point} pairs defining the MSM to only those for which the scalar is nonzero, and then perform the MSM on the reduced input. This is implemented in a new method `commit_sparse` in the CommitmentKey.

This PR also introduces (1) a more comprehensive set of pippenger benchmarks, (2) tracking of commit times in the ClientIvc benchmark scripts, and (3) a test suite for the `commit_sparse` functionality.

Here are some benchmarks for committing to polynomials of varying sizes, each with only 5 random nonzero entries. The first uses `commit()` and the second uses the new `commit_sparse()` method:

```
---------------------------------------------------------------------------------------------------
Benchmark                                            Time             CPU   Iterations
---------------------------------------------------------------------------------------------------
bench_commit_sparse_random/14                     5.79 ms        0.371 ms         1000
bench_commit_sparse_random/15                     10.6 ms        0.508 ms         1000
bench_commit_sparse_random/16                     12.1 ms         1.86 ms          253
bench_commit_sparse_random/17                     14.0 ms         1.71 ms          412
bench_commit_sparse_random/18                     73.1 ms         2.75 ms          100
bench_commit_sparse_random_preprocessed/14       0.132 ms        0.076 ms         9216
bench_commit_sparse_random_preprocessed/15       0.134 ms        0.077 ms         9114
bench_commit_sparse_random_preprocessed/16       0.138 ms        0.079 ms         8856
bench_commit_sparse_random_preprocessed/17       0.150 ms        0.082 ms         8579
bench_commit_sparse_random_preprocessed/18       0.177 ms        0.108 ms         6396
```
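To make the preprocessing idea concrete, here is a minimal sketch (illustrative only: `Fr`, `G1`, and the `msm` callable are generic placeholders, not the actual CommitmentKey/pippenger interfaces, which appear in the diff below):

```
#include <cstddef>
#include <vector>

// Sketch of the sparse-commit idea: drop the {scalar, point} pairs whose scalar is zero,
// then hand the reduced input to whatever MSM routine is available.
// Fr, G1 and msm are stand-ins; the real logic lives in CommitmentKey::commit_sparse.
template <typename Fr, typename G1, typename Msm>
G1 sparse_commit_sketch(const std::vector<Fr>& coefficients, const std::vector<G1>& srs_points, Msm&& msm)
{
    std::vector<Fr> scalars;
    std::vector<G1> points;
    scalars.reserve(coefficients.size());
    points.reserve(coefficients.size());
    for (size_t i = 0; i < coefficients.size(); ++i) {
        if (coefficients[i] != Fr(0)) { // placeholder zero check; keep only the nonzero terms
            scalars.push_back(coefficients[i]);
            points.push_back(srs_points[i]);
        }
    }
    return msm(scalars, points); // MSM over a much smaller input when the polynomial is sparse
}
```

The real `commit_sparse` additionally multithreads the filtering pass and carries along the precomputed endomorphism points from the SRS point table; see `commitment_key.hpp` in the diff.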
Below are the relevant highlights from ClientIvc bench (master vs branch). Note the reductions in commitment time from databus (bus columns plus counts/tags), ecc_op_wires, and databus_inverses. Also, note that the time spent on these commitments between the unstructured and structured cases is now almost identical, suggesting that the required zero checking is essentially free.

Master - Unstructured:
```
ClientIVCBench/Full/6            14277 ms    10173 ms
commit(t)                         3613       25.60%
COMMIT::wires(t)                   850       35.80%
COMMIT::z_perm(t)                  449       18.92%
COMMIT::databus(t)                 262       11.04%
COMMIT::ecc_op_wires(t)            278       11.71%
COMMIT::lookup_inverses(t)         189        7.98%
COMMIT::databus_inverses(t)        250       10.54%
COMMIT::lookup_counts_tags(t)       95        4.01%
```
Branch - Unstructured:
```
ClientIVCBench/Full/6            13530 ms    10167 ms
commit(t)                         2833       21.20%
COMMIT::wires(t)                   849       51.63%
COMMIT::z_perm(t)                  447       27.15%
COMMIT::databus(t)                  10        0.63%
COMMIT::ecc_op_wires(t)             49        2.97%
COMMIT::lookup_inverses(t)         190       11.56%
COMMIT::databus_inverses(t)          4        0.24%
COMMIT::lookup_counts_tags(t)       96        5.82%
```
Master - Structured:
```
ClientIVCBench/FullStructured/6  20935 ms    14669 ms
commit(t)                         7091       34.14%
COMMIT::wires(t)                  1471       25.74%
COMMIT::z_perm(t)                 1028       17.98%
COMMIT::databus(t)                 509        8.90%
COMMIT::ecc_op_wires(t)            929       16.26%
COMMIT::lookup_inverses(t)         356        6.23%
COMMIT::databus_inverses(t)       1243       21.75%
COMMIT::lookup_counts_tags(t)      179        3.13%
```
Branch - Structured:
```
ClientIVCBench/FullStructured/6  18316 ms    13782 ms
commit(t)                         4398       24.23%
COMMIT::wires(t)                  1468       47.53%
COMMIT::z_perm(t)                 1020       33.04%
COMMIT::databus(t)                  13        0.44%
COMMIT::ecc_op_wires(t)             52        1.68%
COMMIT::lookup_inverses(t)         350       11.32%
COMMIT::databus_inverses(t)          5        0.17%
COMMIT::lookup_counts_tags(t)      180        5.82%
```
---
 .../cpp/scripts/analyze_client_ivc_bench.py   |  67 ++++++---
 .../commitment_schemes/commit.bench.cpp       | 133 +++++++++++++++++-
 .../commitment_schemes/commitment_key.hpp     |  68 +++++++++
 .../sparse_commitment.test.cpp                |  69 +++++++++
 .../barretenberg/ultra_honk/oink_prover.cpp   |  48 +++++--
 5 files changed, 344 insertions(+), 41 deletions(-)
 create mode 100644 barretenberg/cpp/src/barretenberg/commitment_schemes/sparse_commitment.test.cpp

diff --git a/barretenberg/cpp/scripts/analyze_client_ivc_bench.py b/barretenberg/cpp/scripts/analyze_client_ivc_bench.py
index 95dcbde7619..94c66435f46 100644
--- a/barretenberg/cpp/scripts/analyze_client_ivc_bench.py
+++ b/barretenberg/cpp/scripts/analyze_client_ivc_bench.py
@@ -71,10 +71,35 @@
     print(f"{key:<{max_label_length}}{time_ms:>8.0f} {time_ms/total_time_ms:>8.2%}")
 
-# Relations breakdown
-# Note: The timings here are off likely because the tracking is occuring in a hot loop but
-# they should be meaningful relative to one another
-print('\nRelation contributions (times to be interpreted relatively):')
+# Extract a set of components from the benchmark data and display timings and relative percentages
+def print_contributions(prefix, ivc_bench_json, bench_name, components):
+
+    # Read JSON file and extract benchmark
+    try:
+        with open(prefix / ivc_bench_json, "r") as read_file:
+            read_result = json.load(read_file)
+            bench = next((_bench for _bench in read_result["benchmarks"] if _bench["name"] == bench_name), None)
+            if not bench:
+                raise ValueError(f"Benchmark '{bench_name}' not found in the JSON file.")
+    except FileNotFoundError:
+        print(f"File not found: {prefix / ivc_bench_json}")
+        return
+
+    # Filter and sum up kept times
+    bench_components = {key: bench[key] for key in components if key in bench}
+    sum_of_kept_times_ms = sum(float(time) for time in bench_components.values()) / 1e6
+    print(f"Total time accounted for (ms): {sum_of_kept_times_ms:>8.0f}")
+
+    # Print results
+    max_label_length = max(len(label) for label in components)
+    column_headers = {"operation": "operation", "ms": "ms", "%": "% sum"}
+    print(f"{column_headers['operation']:<{max_label_length}}{column_headers['ms']:>8} {column_headers['%']:>8}")
+
+    for key in components:
+        time_ms = bench_components.get(key, 0) / 1e6
+        percentage = time_ms / sum_of_kept_times_ms if sum_of_kept_times_ms > 0 else 0
+        print(f"{key:<{max_label_length}}{time_ms:>8.0f} {percentage:>8.2%}")
+
 relations = [
     "Arithmetic::accumulate(t)",
     "Permutation::accumulate(t)",
@@ -87,23 +112,19 @@
     "PoseidonExt::accumulate(t)",
     "PoseidonInt::accumulate(t)",
 ]
 
-with open(PREFIX/IVC_BENCH_JSON, "r") as read_file:
-    read_result = json.load(read_file)
-    for _bench in read_result["benchmarks"]:
-        if _bench["name"] == BENCHMARK:
-            bench = _bench
-bench_components = dict(filter(lambda x: x[0] in relations, bench.items()))
-# For each kept time, get the proportion over all kept times.
-sum_of_kept_times_ms = sum(float(time)
-                           for _, time in bench_components.items())/1e6
-max_label_length = max(len(label) for label in relations)
-column = {"function": "function", "ms": "ms", "%": "% sum"}
-print(
-    f"{column['function']:<{max_label_length}}{column['ms']:>8} {column['%']:>8}")
-for key in relations:
-    if key not in bench:
-        time_ms = 0
-    else:
-        time_ms = bench[key]/1e6
-    print(f"{key:<{max_label_length}}{time_ms:>8.0f} {time_ms/sum_of_kept_times_ms:>8.2%}")
\ No newline at end of file
+print('\nRelation contributions (times to be interpreted relatively):')
+print_contributions(PREFIX, IVC_BENCH_JSON, BENCHMARK, relations)
+
+commitments = [
+    "COMMIT::wires(t)",
+    "COMMIT::z_perm(t)",
+    "COMMIT::databus(t)",
+    "COMMIT::ecc_op_wires(t)",
+    "COMMIT::lookup_inverses(t)",
+    "COMMIT::databus_inverses(t)",
+    "COMMIT::lookup_counts_tags(t)",
+]
+
+print('\nCommitment contributions:')
+print_contributions(PREFIX, IVC_BENCH_JSON, BENCHMARK, commitments)
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/commit.bench.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/commit.bench.cpp
index c37d893e125..88a27cd1b6c 100644
--- a/barretenberg/cpp/src/barretenberg/commitment_schemes/commit.bench.cpp
+++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/commit.bench.cpp
@@ -7,25 +7,146 @@ namespace bb {
 
 template <typename Curve> std::shared_ptr<CommitmentKey<Curve>> create_commitment_key(const size_t num_points)
 {
+    bb::srs::init_crs_factory("../srs_db/ignition");
     std::string srs_path;
     return std::make_shared<CommitmentKey<Curve>>(num_points);
 }
 
-constexpr size_t MAX_LOG_NUM_POINTS = 24;
-constexpr size_t MAX_NUM_POINTS = 1 << MAX_LOG_NUM_POINTS;
+// Generate a polynomial with a specified number of nonzero random coefficients
+template <typename FF> Polynomial<FF> sparse_random_poly(const size_t size, const size_t num_nonzero)
+{
+    auto& engine = numeric::get_debug_randomness();
+    auto polynomial = Polynomial<FF>(size);
+
+    for (size_t i = 0; i < num_nonzero; i++) {
+        size_t idx = engine.get_random_uint32() % size;
+        polynomial[idx] = FF::random_element();
+    }
 
-auto key = create_commitment_key<curve::BN254>(MAX_NUM_POINTS);
+    return polynomial;
+}
+
+constexpr size_t MIN_LOG_NUM_POINTS = 16;
+constexpr size_t MAX_LOG_NUM_POINTS = 20;
+constexpr size_t MAX_NUM_POINTS = 1 << MAX_LOG_NUM_POINTS;
+constexpr size_t SPARSE_NUM_NONZERO = 100;
 
-template <typename Curve> void bench_commit(::benchmark::State& state)
+// Commit to a zero polynomial
+template <typename Curve> void bench_commit_zero(::benchmark::State& state)
 {
+    auto key = create_commitment_key<Curve>(MAX_NUM_POINTS);
+
     const size_t num_points = 1 << state.range(0);
     const auto polynomial = Polynomial<typename Curve::ScalarField>(num_points);
     for (auto _ : state) {
-        benchmark::DoNotOptimize(key->commit(polynomial));
+        key->commit(polynomial);
+    }
+}
+
+// Commit to a polynomial with sparse nonzero entries equal to 1
+template <typename Curve> void bench_commit_sparse(::benchmark::State& state)
+{
+    using Fr = typename Curve::ScalarField;
+    auto key = create_commitment_key<Curve>(MAX_NUM_POINTS);
+
+    const size_t num_points = 1 << state.range(0);
+    const size_t num_nonzero = SPARSE_NUM_NONZERO;
+
+    auto polynomial = Polynomial<Fr>(num_points);
+    for (size_t i = 0; i < num_nonzero; i++) {
+        polynomial[i] = 1;
+    }
+
+    for (auto _ : state) {
+        key->commit(polynomial);
+    }
+}
+
+// Commit to a polynomial with sparse nonzero entries equal to 1 using the commit_sparse method to preprocess the input
+template <typename Curve> void bench_commit_sparse_preprocessed(::benchmark::State& state)
+{
+    using Fr = typename Curve::ScalarField;
+    auto key = create_commitment_key<Curve>(MAX_NUM_POINTS);
+
+    const size_t num_points = 1 << state.range(0);
+    const size_t num_nonzero = SPARSE_NUM_NONZERO;
+
+    auto polynomial = Polynomial<Fr>(num_points);
+    for (size_t i = 0; i < num_nonzero; i++) {
+        polynomial[i] = 1;
+    }
+
+    for (auto _ : state) {
+        key->commit_sparse(polynomial);
+    }
+}
+
+// Commit to a polynomial with sparse random nonzero entries
+template <typename Curve> void bench_commit_sparse_random(::benchmark::State& state)
+{
+    using Fr = typename Curve::ScalarField;
+    auto key = create_commitment_key<Curve>(MAX_NUM_POINTS);
+
+    const size_t num_points = 1 << state.range(0);
+    const size_t num_nonzero = SPARSE_NUM_NONZERO;
+
+    auto polynomial = sparse_random_poly<Fr>(num_points, num_nonzero);
+
+    for (auto _ : state) {
+        key->commit(polynomial);
+    }
+}
+
+// Commit to a polynomial with sparse random nonzero entries using the commit_sparse method to preprocess the input
+template <typename Curve> void bench_commit_sparse_random_preprocessed(::benchmark::State& state)
+{
+    using Fr = typename Curve::ScalarField;
+    auto key = create_commitment_key<Curve>(MAX_NUM_POINTS);
+
+    const size_t num_points = 1 << state.range(0);
+    const size_t num_nonzero = SPARSE_NUM_NONZERO;
+
+    auto polynomial = sparse_random_poly<Fr>(num_points, num_nonzero);
+
+    for (auto _ : state) {
+        key->commit_sparse(polynomial);
+    }
+}
+
+// Commit to a polynomial with dense random nonzero entries
+template <typename Curve> void bench_commit_random(::benchmark::State& state)
+{
+    using Fr = typename Curve::ScalarField;
+    auto key = create_commitment_key<Curve>(MAX_NUM_POINTS);
+
+    const size_t num_points = 1 << state.range(0);
+    auto polynomial = Polynomial<Fr>(num_points);
+    for (auto& coeff : polynomial) {
+        coeff = Fr::random_element();
+    }
+    for (auto _ : state) {
+        key->commit(polynomial);
     }
 }
 
-BENCHMARK(bench_commit<curve::BN254>)->DenseRange(10, MAX_LOG_NUM_POINTS)->Unit(benchmark::kMillisecond);
+BENCHMARK(bench_commit_zero<curve::BN254>)
+    ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
+    ->Unit(benchmark::kMillisecond);
+BENCHMARK(bench_commit_sparse<curve::BN254>)
+    ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
+    ->Unit(benchmark::kMillisecond);
+BENCHMARK(bench_commit_sparse_preprocessed<curve::BN254>)
+    ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
+    ->Unit(benchmark::kMillisecond);
+BENCHMARK(bench_commit_sparse_random<curve::BN254>)
+    ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
+    ->Unit(benchmark::kMillisecond);
+BENCHMARK(bench_commit_sparse_random_preprocessed<curve::BN254>)
+    ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
+    ->Unit(benchmark::kMillisecond);
+BENCHMARK(bench_commit_random<curve::BN254>)
+    ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
+    ->Unit(benchmark::kMillisecond);
 
 } // namespace bb
diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp
index 61a3be1d93f..c6b23b071e7 100644
--- a/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp
+++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp
@@ -34,6 +34,7 @@ template <class Curve> class CommitmentKey {
 
     using Fr = typename Curve::ScalarField;
     using Commitment = typename Curve::AffineElement;
+    using G1 = typename Curve::AffineElement;
 
   public:
     scalar_multiplication::pippenger_runtime_state<Curve> pippenger_runtime_state;
@@ -81,6 +82,73 @@ template <class Curve> class CommitmentKey {
         return scalar_multiplication::pippenger_unsafe<Curve>(
             const_cast<Fr*>(polynomial.data()), srs->get_monomial_points(), degree, pippenger_runtime_state);
     };
+
+    /**
+     * @brief Efficiently commit to a sparse polynomial
+     * @details Iterate through the {point, scalar} pairs that define the inputs to the commitment MSM, maintain (copy)
+     * only those for which the scalar is nonzero, then perform the MSM on the reduced inputs.
+     * @warning Method makes a copy of all {point, scalar} pairs that comprise the reduced input. Will not be efficient
+     * in terms of memory or computation for polynomials beyond a certain sparseness threshold.
+     *
+     * @param polynomial
+     * @return Commitment
+     */
+    Commitment commit_sparse(std::span<const Fr> polynomial)
+    {
+        BB_OP_COUNT_TIME();
+        const size_t degree = polynomial.size();
+        ASSERT(degree <= srs->get_monomial_size());
+
+        // Extract the precomputed point table (contains raw SRS points at even indices and the corresponding
+        // endomorphism point (\beta*x, -y) at odd indices).
+        G1* point_table = srs->get_monomial_points();
+
+        // Define structures needed to multithread the extraction of non-zero inputs
+        const size_t num_threads = degree >= get_num_cpus_pow2() ? get_num_cpus_pow2() : 1;
+        const size_t block_size = degree / num_threads;
+        std::vector<std::vector<Fr>> thread_scalars(num_threads);
+        std::vector<std::vector<G1>> thread_points(num_threads);
+
+        // Loop over all polynomial coefficients and keep {point, scalar} pairs for which scalar != 0
+        parallel_for(num_threads, [&](size_t thread_idx) {
+            const size_t start = thread_idx * block_size;
+            const size_t end = (thread_idx + 1) * block_size;
+
+            for (size_t idx = start; idx < end; ++idx) {
+
+                const Fr& scalar = polynomial[idx];
+
+                if (!scalar.is_zero()) {
+                    thread_scalars[thread_idx].emplace_back(scalar);
+                    // Save both the raw srs point and the precomputed endomorphism point from the point table
+                    const G1& point = point_table[idx * 2];
+                    const G1& endo_point = point_table[idx * 2 + 1];
+                    thread_points[thread_idx].emplace_back(point);
+                    thread_points[thread_idx].emplace_back(endo_point);
+                }
+            }
+        });
+
+        // Compute total number of non-trivial input pairs
+        size_t num_nonzero_scalars = 0;
+        for (auto& scalars : thread_scalars) {
+            num_nonzero_scalars += scalars.size();
+        }
+
+        // Reconstruct the full input to the pippenger from the individual threads
+        std::vector<Fr> scalars;
+        std::vector<G1> points;
+        scalars.reserve(num_nonzero_scalars);
+        points.reserve(num_nonzero_scalars);
+        for (size_t idx = 0; idx < num_threads; ++idx) {
+            scalars.insert(scalars.end(), thread_scalars[idx].begin(), thread_scalars[idx].end());
+            points.insert(points.end(), thread_points[idx].begin(), thread_points[idx].end());
+        }
+
+        // Call the version of pippenger which assumes all points are distinct
+        return scalar_multiplication::pippenger_unsafe<Curve>(
+            scalars.data(), points.data(), scalars.size(), pippenger_runtime_state);
+    }
 };
 
 } // namespace bb
diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/sparse_commitment.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/sparse_commitment.test.cpp
new file mode 100644
index 00000000000..f555d437d08
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/sparse_commitment.test.cpp
@@ -0,0 +1,69 @@
+#include "barretenberg/commitment_schemes/commitment_key.hpp"
+#include "barretenberg/polynomials/polynomial.hpp"
+#include "barretenberg/srs/factories/file_crs_factory.hpp"
+
+#include <gtest/gtest.h>
+
+namespace bb {
+
+template <typename Curve> class CommitmentKeyTest : public ::testing::Test {
+    using CK = CommitmentKey<Curve>;
+
+    using Fr = typename Curve::ScalarField;
+    using Commitment = typename Curve::AffineElement;
+    using Polynomial = bb::Polynomial<Fr>;
+
+  public:
+    template <class CK> inline std::shared_ptr<CK> create_commitment_key(size_t num_points);
+};
+
+template <>
+template <>
+std::shared_ptr<CommitmentKey<curve::BN254>> CommitmentKeyTest<curve::BN254>::create_commitment_key<
+    CommitmentKey<curve::BN254>>(const size_t num_points)
+{
+    srs::init_crs_factory("../srs_db/ignition");
+    return std::make_shared<CommitmentKey<curve::BN254>>(num_points);
+}
+
+template <>
+template <>
+std::shared_ptr<CommitmentKey<curve::Grumpkin>> CommitmentKeyTest<curve::Grumpkin>::create_commitment_key<
+    CommitmentKey<curve::Grumpkin>>(const size_t num_points)
+{
+    srs::init_grumpkin_crs_factory("../srs_db/grumpkin");
+    return std::make_shared<CommitmentKey<curve::Grumpkin>>(num_points);
+}
+
+using Curves = ::testing::Types<curve::BN254, curve::Grumpkin>;
+
+TYPED_TEST_SUITE(CommitmentKeyTest, Curves);
+
+// Check that commit and commit_sparse return the same result for a random sparse polynomial
+TYPED_TEST(CommitmentKeyTest, CommitSparse)
+{
+    using Curve = TypeParam;
+    using CK = CommitmentKey<Curve>;
+    using G1 = Curve::AffineElement;
+    using Fr = Curve::ScalarField;
+    using Polynomial = bb::Polynomial<Fr>;
+
+    const size_t num_points = 1 << 12; // large enough to ensure normal pippenger logic is used
+    const size_t num_nonzero = 7;
+
+    // Construct a sparse random polynomial
+    Polynomial poly{ num_points };
+    for (size_t i = 0; i < num_nonzero; ++i) {
+        size_t idx = (i + 1) * (i + 1) % num_points;
+        poly[idx] = Fr::random_element();
+    }
+
+    // Commit to the polynomial using both the conventional commit method and the sparse commitment method
+    auto key = TestFixture::template create_commitment_key<CK>(num_points);
+    G1 commit_result = key->commit(poly);
+    G1 sparse_commit_result = key->commit_sparse(poly);
+
+    EXPECT_EQ(sparse_commit_result, commit_result);
+}
+
+} // namespace bb
diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp
index bf1e6f04c38..01b88f5de32 100644
--- a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp
+++ b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp
@@ -1,4 +1,5 @@
 #include "barretenberg/ultra_honk/oink_prover.hpp"
+#include "barretenberg/plonk_honk_shared/instance_inspector.hpp"
 #include "barretenberg/relations/logderiv_lookup_relation.hpp"
 
 namespace bb {
@@ -66,9 +67,12 @@ template <IsUltraFlavor Flavor> void OinkProver<Flavor>::execute_wire_commitment
 {
     // Commit to the first three wire polynomials of the instance
     // We only commit to the fourth wire polynomial after adding memory recordss
-    witness_commitments.w_l = commitment_key->commit(proving_key.polynomials.w_l);
-    witness_commitments.w_r = commitment_key->commit(proving_key.polynomials.w_r);
-    witness_commitments.w_o = commitment_key->commit(proving_key.polynomials.w_o);
+    {
+        BB_OP_COUNT_TIME_NAME("COMMIT::wires");
+        witness_commitments.w_l = commitment_key->commit(proving_key.polynomials.w_l);
+        witness_commitments.w_r = commitment_key->commit(proving_key.polynomials.w_r);
+        witness_commitments.w_o = commitment_key->commit(proving_key.polynomials.w_o);
+    }
 
     auto wire_comms = witness_commitments.get_wires();
     auto wire_labels = commitment_labels.get_wires();
@@ -82,7 +86,10 @@ template <IsUltraFlavor Flavor> void OinkProver<Flavor>::execute_wire_commitment
     for (auto [commitment, polynomial, label] : zip_view(witness_commitments.get_ecc_op_wires(),
                                                          proving_key.polynomials.get_ecc_op_wires(),
                                                          commitment_labels.get_ecc_op_wires())) {
-        commitment = commitment_key->commit(polynomial);
+        {
+            BB_OP_COUNT_TIME_NAME("COMMIT::ecc_op_wires");
+            commitment = commitment_key->commit_sparse(polynomial);
+        }
         transcript->send_to_verifier(domain_separator + label, commitment);
     }
 
@@ -90,7 +97,10 @@ template <IsUltraFlavor Flavor> void OinkProver<Flavor>::execute_wire_commitment
     for (auto [commitment, polynomial, label] : zip_view(witness_commitments.get_databus_entities(),
                                                          proving_key.polynomials.get_databus_entities(),
                                                          commitment_labels.get_databus_entities())) {
-        commitment = commitment_key->commit(polynomial);
+        {
+            BB_OP_COUNT_TIME_NAME("COMMIT::databus");
+            commitment = commitment_key->commit_sparse(polynomial);
+        }
         transcript->send_to_verifier(domain_separator + label, commitment);
     }
 }
@@ -113,9 +123,15 @@ template <IsUltraFlavor Flavor> void OinkProver<Flavor>::execute_sorted_list_acc
         relation_parameters.eta, relation_parameters.eta_two, relation_parameters.eta_three);
 
     // Commit to lookup argument polynomials and the finalized (i.e. with memory records) fourth wire polynomial
-    witness_commitments.lookup_read_counts = commitment_key->commit(proving_key.polynomials.lookup_read_counts);
-    witness_commitments.lookup_read_tags = commitment_key->commit(proving_key.polynomials.lookup_read_tags);
-    witness_commitments.w_4 = commitment_key->commit(proving_key.polynomials.w_4);
+    {
+        BB_OP_COUNT_TIME_NAME("COMMIT::lookup_counts_tags");
+        witness_commitments.lookup_read_counts = commitment_key->commit(proving_key.polynomials.lookup_read_counts);
+        witness_commitments.lookup_read_tags = commitment_key->commit(proving_key.polynomials.lookup_read_tags);
+    }
+    {
+        BB_OP_COUNT_TIME_NAME("COMMIT::wires");
+        witness_commitments.w_4 = commitment_key->commit(proving_key.polynomials.w_4);
+    }
 
     transcript->send_to_verifier(domain_separator + commitment_labels.lookup_read_counts,
                                  witness_commitments.lookup_read_counts);
@@ -137,7 +153,10 @@ template <IsUltraFlavor Flavor> void OinkProver<Flavor>::execute_log_derivative_
     // Compute the inverses used in log-derivative lookup relations
     proving_key.compute_logderivative_inverses(relation_parameters);
 
-    witness_commitments.lookup_inverses = commitment_key->commit(proving_key.polynomials.lookup_inverses);
+    {
+        BB_OP_COUNT_TIME_NAME("COMMIT::lookup_inverses");
+        witness_commitments.lookup_inverses = commitment_key->commit(proving_key.polynomials.lookup_inverses);
+    }
 
     transcript->send_to_verifier(domain_separator + commitment_labels.lookup_inverses,
                                  witness_commitments.lookup_inverses);
@@ -146,7 +165,10 @@ template <IsUltraFlavor Flavor> void OinkProver<Flavor>::execute_log_derivative_
     for (auto [commitment, polynomial, label] : zip_view(witness_commitments.get_databus_inverses(),
                                                          proving_key.polynomials.get_databus_inverses(),
                                                          commitment_labels.get_databus_inverses())) {
-        commitment = commitment_key->commit(polynomial);
+        {
+            BB_OP_COUNT_TIME_NAME("COMMIT::databus_inverses");
+            commitment = commitment_key->commit_sparse(polynomial);
+        }
         transcript->send_to_verifier(domain_separator + label, commitment);
     }
 }
@@ -160,8 +182,10 @@ template <IsUltraFlavor Flavor> void OinkProver<Flavor>::execute_grand_product_c
 {
     proving_key.compute_grand_product_polynomials(relation_parameters);
 
-    witness_commitments.z_perm = commitment_key->commit(proving_key.polynomials.z_perm);
-
+    {
+        BB_OP_COUNT_TIME_NAME("COMMIT::z_perm");
+        witness_commitments.z_perm = commitment_key->commit(proving_key.polynomials.z_perm);
+    }
     transcript->send_to_verifier(domain_separator + commitment_labels.z_perm, witness_commitments.z_perm);
 }