diff --git a/.devcontainer/dev/devcontainer.json b/.devcontainer/dev/devcontainer.json index 50354b2b570..792ffdbc010 100644 --- a/.devcontainer/dev/devcontainer.json +++ b/.devcontainer/dev/devcontainer.json @@ -1,17 +1,22 @@ { - "image": "node:lts-bookworm-slim", - "features": { - "ghcr.io/devcontainers/features/docker-in-docker:2": {} + "name": "Development", + "build": { + "dockerfile": "../../build-images/Dockerfile", + "context": "../../build-images", + "target": "devbox" }, - "postCreateCommand": "curl -s install.aztec.network | VERSION=master NON_INTERACTIVE=1 BIN_PATH=/usr/local/bin bash -s", - "customizations": { - "vscode": { - "settings": {}, - "extensions": [ - "noir-lang.vscode-noir" - ] - } - }, - "workspaceMount": "source=${localWorkspaceFolder},target=/root/workspace,type=bind", - "workspaceFolder": "/root/workspace" + "containerUser": "aztec-dev", + // ubuntu:noble is currently not supported. + // Can possibly workaround cherry-picking from here: + // https://github.com/devcontainers/features/blob/main/src/docker-in-docker/install.sh + // + // "image": "aztecprotocol/codespace", + // "features": { + // "docker-in-docker": { + // "version": "latest", + // "moby": true, + // "dockerDashComposeVersion": "v1" + // } + // }, + "mounts": ["source=devbox-home,target=/home/aztec-dev,type=volume"] } diff --git a/.github/workflows/ci-arm.yml b/.github/workflows/ci-arm.yml index aa1d3f26ea7..bcc20694ede 100644 --- a/.github/workflows/ci-arm.yml +++ b/.github/workflows/ci-arm.yml @@ -32,7 +32,10 @@ jobs: needs: setup runs-on: master-arm steps: - - {uses: actions/checkout@v4, with: { ref: "${{ github.event.pull_request.head.sha }}"}} + - { + uses: actions/checkout@v4, + with: { ref: "${{ github.event.pull_request.head.sha }}" }, + } - uses: ./.github/ci-setup-action with: dockerhub_password: "${{ secrets.DOCKERHUB_PASSWORD }}" @@ -48,7 +51,10 @@ jobs: needs: build runs-on: master-arm steps: - - {uses: actions/checkout@v4, with: { ref: "${{ github.event.pull_request.head.sha }}"}} + - { + uses: actions/checkout@v4, + with: { ref: "${{ github.event.pull_request.head.sha }}" }, + } - uses: ./.github/ci-setup-action with: dockerhub_password: "${{ secrets.DOCKERHUB_PASSWORD }}" @@ -56,7 +62,7 @@ jobs: - name: Test working-directory: ./yarn-project/end-to-end/ timeout-minutes: 15 - run: earthly -P --no-output +uniswap-trade-on-l1-from-l2 --e2e_mode=cache + run: earthly -P --no-output +uniswap-trade-on-l1-from-l2 notify: needs: [e2e] diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 42c9e4fe619..d9f0f8c5699 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -76,7 +76,7 @@ jobs: - name: Test working-directory: ./yarn-project/end-to-end/ timeout-minutes: 25 - run: earthly-ci -P --secret AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} --secret AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} --no-output +${{ matrix.test }} --e2e_mode=cache + run: earthly-ci -P --secret AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} --secret AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} --no-output +${{ matrix.test }} bench-summary: needs: e2e diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 988ff79b475..a872182ad77 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -37,7 +37,7 @@ // Displays code coverage report information within vscode "ryanluker.vscode-coverage-gutters", // Spell checking - "streetsidesoftware.code-spell-checker", + "streetsidesoftware.code-spell-checker" // End C++/Circuits 
extensions /////////////////////////////////////// ], @@ -58,7 +58,7 @@ // Most features are disabled in `settings.json` // which confict with `clangd` // Since we ignore GDB, we no longer need this extension - "ms-vscode.cpptools", + "ms-vscode.cpptools" // End C++/Circuits unwanted extensions /////////////////////////////////////// ] diff --git a/.vscode/settings.json b/.vscode/settings.json index ea41bbe2bc2..48a4cde9de8 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -136,7 +136,7 @@ // Clangd. Note that this setting may be overridden by user settings // to the default value "clangd". // - "clangd.path": "clangd-15", + "clangd.path": "clangd-16", // // C/C++ (should be disabled) // @@ -165,6 +165,5 @@ "**/target/**": true, "**/l1-contracts/lib/**": true, "**/barretenberg/cpp/build*/**": true - }, - "cmake.sourceDirectory": "/mnt/user-data/adam/aztec-packages/barretenberg/cpp" + } } diff --git a/Earthfile b/Earthfile index 096ac3bac0d..5a42a251dfd 100644 --- a/Earthfile +++ b/Earthfile @@ -1,5 +1,5 @@ VERSION 0.8 -FROM ubuntu:lunar +FROM ubuntu:noble build-ci: BUILD ./avm-transpiler/+build @@ -16,15 +16,12 @@ build-ci: BUILD ./yarn-project/+end-to-end BUILD ./yarn-project/+aztec -build-ci-small: - BUILD ./yarn-project/end-to-end/+e2e-escrow-contract - build: # yarn-project has the entry point to Aztec BUILD ./yarn-project/+build test-end-to-end: - BUILD ./yarn-project/end-to-end/+test-all + BUILD ./yarn-project/end-to-end+e2e-tests bench: RUN echo hi diff --git a/avm-transpiler/Earthfile b/avm-transpiler/Earthfile index b2c77f9b3f9..4c3102dec8c 100644 --- a/avm-transpiler/Earthfile +++ b/avm-transpiler/Earthfile @@ -1,18 +1,17 @@ VERSION 0.8 IMPORT ../noir AS noir -# we rely on noir source, which this image has -FROM noir+nargo -# move noir contents to /usr/src/noir -RUN mv /usr/src /noir && mkdir /usr/src && mv /noir /usr/src -# work in avm-transpiler -WORKDIR /usr/src/avm-transpiler +source: + # we rely on noir source, which this image has + FROM noir+nargo -RUN apt-get update && apt-get install -y git + # move noir contents to /usr/src/noir + RUN mv /usr/src /noir && mkdir /usr/src && mv /noir /usr/src + # work in avm-transpiler + WORKDIR /usr/src/avm-transpiler -COPY --dir scripts src Cargo.lock Cargo.toml rust-toolchain.toml . + COPY --dir scripts src Cargo.lock Cargo.toml rust-toolchain.toml . -source: # for debugging rebuilds RUN echo CONTENT HASH $(find . -type f -exec sha256sum {} ';' | sort | sha256sum | awk '{print $1}') | tee .content-hash @@ -21,7 +20,7 @@ build: RUN ./scripts/bootstrap_native.sh SAVE ARTIFACT target/release/avm-transpiler avm-transpiler -run: +run: #TODO needed? 
FROM ubuntu:focal COPY +build/avm-transpiler /usr/src/avm-transpiler diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json index 24538c08c0b..a42cf23517c 100644 --- a/barretenberg/cpp/CMakePresets.json +++ b/barretenberg/cpp/CMakePresets.json @@ -256,7 +256,7 @@ "generator": "Ninja", "toolchainFile": "cmake/toolchains/wasm32-wasi.cmake", "environment": { - "WASI_SDK_PREFIX": "${sourceDir}/src/wasi-sdk", + "WASI_SDK_PREFIX": "/opt/wasi-sdk", "CC": "$env{WASI_SDK_PREFIX}/bin/clang", "CXX": "$env{WASI_SDK_PREFIX}/bin/clang++", "AR": "$env{WASI_SDK_PREFIX}/bin/llvm-ar", diff --git a/barretenberg/cpp/Earthfile b/barretenberg/cpp/Earthfile index 346d1be3d07..e4057ec965c 100644 --- a/barretenberg/cpp/Earthfile +++ b/barretenberg/cpp/Earthfile @@ -1,80 +1,13 @@ VERSION 0.8 -build-base: - ARG TARGETARCH - FROM --platform=linux/$TARGETARCH ubuntu:lunar - RUN apt-get update && apt-get install -y \ - build-essential \ - curl \ - git \ - cmake \ - lsb-release \ - wget \ - software-properties-common \ - gnupg \ - ninja-build \ - npm \ - libssl-dev \ - jq \ - bash \ - libstdc++6 \ - clang-format \ - clang-16 - - IF [ $TARGETARCH = arm64 ] - # publish arm after, assumes x86 already exists, becomes multiplatform image - SAVE IMAGE --push aztecprotocol/bb-ubuntu-lunar - FROM --platform=linux/amd64 aztecprotocol/bb-ubuntu-lunar:x86-latest - SAVE IMAGE --push aztecprotocol/bb-ubuntu-lunar - ELSE - SAVE IMAGE --push aztecprotocol/bb-ubuntu-lunar:x86-latest - END - -build-wasi-sdk-image: - WORKDIR / - RUN git clone --recursive https://github.com/WebAssembly/wasi-sdk.git - WORKDIR /wasi-sdk - RUN git checkout 9389ea5eeec98afc61039683ae92c6147fee9c54 - ENV NINJA_FLAGS=-v - ENV MAKEFLAGS="-j$(nproc)" - RUN make build/llvm.BUILT - RUN make build/wasi-libc.BUILT - RUN make build/compiler-rt.BUILT - RUN make build/libcxx.BUILT - RUN make build/config.BUILT - SAVE ARTIFACT build/install/opt/wasi-sdk - -build-wasi-sdk: - ARG TARGETARCH - # Wrapper just exists share files. 
- FROM scratch - WORKDIR /usr/src - COPY +get-wasi-sdk-image/wasi-sdk wasi-sdk - SAVE ARTIFACT wasi-sdk - SAVE IMAGE --push aztecprotocol/cache:wasi-sdk-threads-v21.0-$TARGETARCH - -get-wasi-sdk-threads: - ARG TARGETARCH - # If this is failing, we need to run earthly --push +build-wasi-sdk - FROM aztecprotocol/cache:wasi-sdk-threads-v21.0-$TARGETARCH - SAVE ARTIFACT wasi-sdk - -get-wasi-sdk: - # NOTE: currently only works with x86 - # TODO Align with above - FROM +source - COPY ./scripts/install-wasi-sdk.sh ./scripts/ - RUN ./scripts/install-wasi-sdk.sh - # TODO better output name to mirror wasi-sdk - SAVE ARTIFACT src/wasi-sdk-20.0 wasi-sdk - wasmtime: - FROM aztecprotocol/bb-ubuntu-lunar + FROM ubuntu:noble + RUN apt update && apt install -y curl xz-utils RUN curl https://wasmtime.dev/install.sh -sSf | bash SAVE ARTIFACT /root/.wasmtime/bin/wasmtime source: - FROM aztecprotocol/bb-ubuntu-lunar + FROM ../../build-images+build WORKDIR /usr/src/barretenberg # cpp source COPY --dir src/barretenberg src/CMakeLists.txt src @@ -106,19 +39,17 @@ preset-wasm: FROM +preset-wasm-threads SAVE ARTIFACT build/bin ELSE - COPY +get-wasi-sdk/wasi-sdk src/wasi-sdk RUN cmake --preset wasm -Bbuild && cmake --build build --target barretenberg.wasm - RUN src/wasi-sdk/bin/llvm-strip ./build/bin/barretenberg.wasm + RUN /opt/wasi-sdk/bin/llvm-strip ./build/bin/barretenberg.wasm SAVE ARTIFACT build/bin SAVE IMAGE --cache-hint END preset-wasm-threads: FROM +source - COPY +get-wasi-sdk-threads/wasi-sdk src/wasi-sdk RUN cmake --preset wasm-threads -Bbuild && cmake --build build --target barretenberg.wasm # TODO(https://github.com/AztecProtocol/barretenberg/issues/941) We currently do not strip barretenberg threaded wasm, for stack traces. - # RUN src/wasi-sdk/bin/llvm-strip ./build/bin/barretenberg.wasm + # RUN /opt/wasi-sdk/bin/llvm-strip ./build/bin/barretenberg.wasm SAVE ARTIFACT build/bin preset-gcc: @@ -189,7 +120,7 @@ bench-binaries: # Runs on the bench image, sent from the builder runner bench-ultra-honk: BUILD +wasmtime # prefetch - FROM +source + FROM ubuntu:noble COPY --dir +bench-binaries/* . # install SRS needed for proving COPY --dir ./srs_db/+build/. srs_db @@ -201,7 +132,7 @@ bench-ultra-honk: bench-client-ivc: BUILD +wasmtime # prefetch - FROM +source + FROM ubuntu:noble COPY --dir +bench-binaries/* . # install SRS needed for proving COPY --dir ./srs_db/+build/. srs_db diff --git a/barretenberg/cpp/bootstrap.sh b/barretenberg/cpp/bootstrap.sh index 2eaafa29571..a323eb601cc 100755 --- a/barretenberg/cpp/bootstrap.sh +++ b/barretenberg/cpp/bootstrap.sh @@ -31,9 +31,6 @@ fi # Download ignition transcripts. (cd ./srs_db && ./download_ignition.sh 0) -# Install wasi-sdk. -./scripts/install-wasi-sdk.sh - # Attempt to just pull artefacts from CI and exit on success. [ -n "${USE_CACHE:-}" ] && ./bootstrap_cache.sh && exit @@ -82,7 +79,7 @@ AVAILABLE_MEMORY=0 case "$(uname)" in Linux*) # Check available memory on Linux - AVAILABLE_MEMORY=$(awk '/MemFree/ { printf $2 }' /proc/meminfo) + AVAILABLE_MEMORY=$(awk '/MemTotal/ { printf $2 }' /proc/meminfo) ;; *) echo "Parallel builds not supported on this operating system" @@ -90,11 +87,11 @@ case "$(uname)" in esac # This value may be too low. # If builds fail with an amount of free memory greater than this value then it should be increased. 
-MIN_PARALLEL_BUILD_MEMORY=32000000 +MIN_PARALLEL_BUILD_MEMORY=32854492 if [[ AVAILABLE_MEMORY -lt MIN_PARALLEL_BUILD_MEMORY ]]; then echo "System does not have enough memory for parallel builds, falling back to sequential" - build_native + build_native build_wasm build_wasm_threads else diff --git a/barretenberg/cpp/cmake/threading.cmake b/barretenberg/cpp/cmake/threading.cmake index ff60f240a16..fffffb6f83c 100644 --- a/barretenberg/cpp/cmake/threading.cmake +++ b/barretenberg/cpp/cmake/threading.cmake @@ -4,7 +4,7 @@ if(MULTITHREADING) add_link_options(-pthread) if(WASM) add_compile_options(--target=wasm32-wasi-threads) - add_link_options(--target=wasm32-wasi-threads) + add_link_options(--target=wasm32-wasi-threads -Wl,--shared-memory) endif() #add_compile_options(-fsanitize=thread) #add_link_options(-fsanitize=thread) diff --git a/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang b/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang index 568f0fcd9e4..012cae9403b 100644 --- a/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang +++ b/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang @@ -20,7 +20,7 @@ RUN wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh && ./llvm.sh 16 WORKDIR /usr/src/barretenberg/cpp COPY . . -# Build bb binary and targets needed for benchmarking. +# Build bb binary and targets needed for benchmarking. # Everything else is built as part linux-clang-assert. # Benchmark targets want to run without asserts, so get built alongside bb. RUN cmake --preset clang16 diff --git a/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang-assert b/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang-assert index bef8e342756..3cff8c5023c 100644 --- a/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang-assert +++ b/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang-assert @@ -14,10 +14,9 @@ RUN apt update && apt install -y \ libssl-dev \ jq \ bash \ - libstdc++6 \ - clang-format + libstdc++6 -RUN wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh && ./llvm.sh 16 +RUN wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh && ./llvm.sh 16 && apt install -y clang-format-16 WORKDIR /usr/src/barretenberg/cpp COPY . . diff --git a/barretenberg/cpp/format.sh b/barretenberg/cpp/format.sh index ae314e96a6f..4b1b9e7cbd7 100755 --- a/barretenberg/cpp/format.sh +++ b/barretenberg/cpp/format.sh @@ -4,22 +4,22 @@ set -e if [ "$1" == "staged" ]; then echo Formatting barretenberg staged files... 
for FILE in $(git diff-index --diff-filter=d --relative --cached --name-only HEAD | grep -e '\.\(cpp\|hpp\|tcc\)$'); do - clang-format -i $FILE + clang-format-16 -i $FILE sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak git add $FILE done elif [ "$1" == "check" ]; then for FILE in $(find ./src -iname *.hpp -o -iname *.cpp -o -iname *.tcc | grep -v src/msgpack-c); do - clang-format --dry-run --Werror $FILE + clang-format-16 --dry-run --Werror $FILE done elif [ -n "$1" ]; then for FILE in $(git diff-index --relative --name-only $1 | grep -e '\.\(cpp\|hpp\|tcc\)$'); do - clang-format -i $FILE + clang-format-16 -i $FILE sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak done else for FILE in $(find ./src -iname *.hpp -o -iname *.cpp -o -iname *.tcc | grep -v src/msgpack-c); do - clang-format -i $FILE + clang-format-16 -i $FILE sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak done fi diff --git a/barretenberg/cpp/scripts/install-wasi-sdk.sh b/barretenberg/cpp/scripts/install-wasi-sdk.sh index 22224212170..650ab723d22 100755 --- a/barretenberg/cpp/scripts/install-wasi-sdk.sh +++ b/barretenberg/cpp/scripts/install-wasi-sdk.sh @@ -35,4 +35,8 @@ else curl -s -L https://wasi-sdk.s3.eu-west-2.amazonaws.com/yamt-wasi-sysroot-20.0.threads.tgz | tar zxf - fi # TODO(https://github.com/AztecProtocol/barretenberg/issues/906): in the future this should use earthly and a 'SAVE ARTIFACT wasi-sdk AS LOCAL wasi-sdk' -mv wasi-sdk-20.0+threads wasi-sdk +if [ "$(id -u)" -eq 0 ]; then + mv wasi-sdk-20.0+threads /opt/wasi-sdk +else + sudo mv wasi-sdk-20.0+threads /opt/wasi-sdk +fi diff --git a/barretenberg/cpp/scripts/strip-wasm.sh b/barretenberg/cpp/scripts/strip-wasm.sh index 18e3cf78d02..9d0c4c36cff 100755 --- a/barretenberg/cpp/scripts/strip-wasm.sh +++ b/barretenberg/cpp/scripts/strip-wasm.sh @@ -1,4 +1,4 @@ #!/bin/sh -./src/wasi-sdk-20.0/bin/llvm-strip ./build-wasm/bin/barretenberg.wasm +/opt/wasi-sdk/bin/llvm-strip ./build-wasm/bin/barretenberg.wasm # TODO(https://github.com/AztecProtocol/barretenberg/issues/941) We currently do not strip barretenberg threaded wasm, for stack traces. -# ./src/wasi-sdk-20.0/bin/llvm-strip ./build-wasm-threads/bin/barretenberg.wasm +# /opt/wasi-sdk/bin/llvm-strip ./build-wasm-threads/bin/barretenberg.wasm diff --git a/barretenberg/cpp/src/CMakeLists.txt b/barretenberg/cpp/src/CMakeLists.txt index 57f24f01fdf..7f7b588c58d 100644 --- a/barretenberg/cpp/src/CMakeLists.txt +++ b/barretenberg/cpp/src/CMakeLists.txt @@ -23,8 +23,7 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") endif() if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 18) # We target clang18 and need this, eventually warning should be fixed or this will be unconditional. 
- add_compile_options(-Wno-vla-cxx-extension -pthread) - add_link_options(-Wl,--shared-memory) + add_compile_options(-Wno-vla-cxx-extension) endif() endif() diff --git a/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp b/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp index aaa2e71f676..51e9f202463 100644 --- a/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp +++ b/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp @@ -90,10 +90,10 @@ class FastRandom { */ template concept SimpleRng = requires(T a) { - { - a.next() - } -> std::convertible_to; - }; + { + a.next() + } -> std::convertible_to; +}; /** * @brief Concept for forcing ArgumentSizes to be size_t * @@ -101,27 +101,27 @@ concept SimpleRng = requires(T a) { */ template concept InstructionArgumentSizes = requires { - { - std::make_tuple(T::CONSTANT, - T::WITNESS, - T::CONSTANT_WITNESS, - T::ADD, - T::SUBTRACT, - T::MULTIPLY, - T::DIVIDE, - T::ADD_TWO, - T::MADD, - T::MULT_MADD, - T::MSUB_DIV, - T::SQR, - T::SQR_ADD, - T::SUBTRACT_WITH_CONSTRAINT, - T::DIVIDE_WITH_CONSTRAINTS, - T::SLICE, - T::ASSERT_ZERO, - T::ASSERT_NOT_ZERO) - } -> std::same_as>; - }; + { + std::make_tuple(T::CONSTANT, + T::WITNESS, + T::CONSTANT_WITNESS, + T::ADD, + T::SUBTRACT, + T::MULTIPLY, + T::DIVIDE, + T::ADD_TWO, + T::MADD, + T::MULT_MADD, + T::MSUB_DIV, + T::SQR, + T::SQR_ADD, + T::SUBTRACT_WITH_CONSTRAINT, + T::DIVIDE_WITH_CONSTRAINTS, + T::SLICE, + T::ASSERT_ZERO, + T::ASSERT_NOT_ZERO) + } -> std::same_as>; +}; /** * @brief Concept for Havoc Configurations @@ -129,13 +129,12 @@ concept InstructionArgumentSizes = requires { * @tparam T */ template -concept HavocConfigConstraint = - requires { - { - std::make_tuple(T::GEN_MUTATION_COUNT_LOG, T::GEN_STRUCTURAL_MUTATION_PROBABILITY) - } -> std::same_as>; - T::GEN_MUTATION_COUNT_LOG <= 7; - }; +concept HavocConfigConstraint = requires { + { + std::make_tuple(T::GEN_MUTATION_COUNT_LOG, T::GEN_STRUCTURAL_MUTATION_PROBABILITY) + } -> std::same_as>; + T::GEN_MUTATION_COUNT_LOG <= 7; +}; /** * @brief Concept specifying the class used by the fuzzer * @@ -143,12 +142,12 @@ concept HavocConfigConstraint = */ template concept ArithmeticFuzzHelperConstraint = requires { - typename T::ArgSizes; - typename T::Instruction; - typename T::ExecutionState; - typename T::ExecutionHandler; - InstructionArgumentSizes; - }; + typename T::ArgSizes; + typename T::Instruction; + typename T::ExecutionState; + typename T::ExecutionHandler; + InstructionArgumentSizes; +}; /** * @brief Fuzzer uses only composers with check_circuit function @@ -157,10 +156,10 @@ concept ArithmeticFuzzHelperConstraint = requires { */ template concept CheckableComposer = requires(T a) { - { - CircuitChecker::check(a) - } -> std::same_as; - }; + { + CircuitChecker::check(a) + } -> std::same_as; +}; /** * @brief The fuzzer can use a postprocessing function that is specific to the type being fuzzed @@ -171,10 +170,10 @@ concept CheckableComposer = requires(T a) { */ template concept PostProcessingEnabled = requires(Composer composer, Context context) { - { - T::postProcess(&composer, context) - } -> std::same_as; - }; + { + T::postProcess(&composer, context) + } -> std::same_as; +}; /** * @brief This concept is used when we want to limit the number of executions of certain instructions (for example, @@ -184,9 +183,9 @@ concept PostProcessingEnabled = requires(Composer composer, Context context) { */ template concept InstructionWeightsEnabled = requires { - typename T::InstructionWeights; - T::InstructionWeights::_LIMIT; - }; + typename 
T::InstructionWeights; + T::InstructionWeights::_LIMIT; +}; /** * @brief Mutate the value of a field element diff --git a/barretenberg/cpp/src/barretenberg/common/moody/blockingconcurrentqueue.h b/barretenberg/cpp/src/barretenberg/common/moody/blockingconcurrentqueue.h deleted file mode 100644 index 60d3c5ce725..00000000000 --- a/barretenberg/cpp/src/barretenberg/common/moody/blockingconcurrentqueue.h +++ /dev/null @@ -1,561 +0,0 @@ -// Provides an efficient blocking version of moodycamel::ConcurrentQueue. -// ©2015-2020 Cameron Desrochers. Distributed under the terms of the simplified -// BSD license, available at the top of concurrentqueue.h. -// Also dual-licensed under the Boost Software License (see LICENSE.md) -// Uses Jeff Preshing's semaphore implementation (under the terms of its -// separate zlib license, see lightweightsemaphore.h). - -#pragma once - -#include "concurrentqueue.h" -#include "lightweightsemaphore.h" - -#include -#include -#include -#include -#include - -namespace moodycamel { -// This is a blocking version of the queue. It has an almost identical interface to -// the normal non-blocking version, with the addition of various wait_dequeue() methods -// and the removal of producer-specific dequeue methods. -template class BlockingConcurrentQueue { - private: - typedef ::moodycamel::ConcurrentQueue ConcurrentQueue; - typedef ::moodycamel::LightweightSemaphore LightweightSemaphore; - - public: - typedef typename ConcurrentQueue::producer_token_t producer_token_t; - typedef typename ConcurrentQueue::consumer_token_t consumer_token_t; - - typedef typename ConcurrentQueue::index_t index_t; - typedef typename ConcurrentQueue::size_t size_t; - typedef typename std::make_signed::type ssize_t; - - static const size_t BLOCK_SIZE = ConcurrentQueue::BLOCK_SIZE; - static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = - ConcurrentQueue::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD; - static const size_t EXPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::EXPLICIT_INITIAL_INDEX_SIZE; - static const size_t IMPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::IMPLICIT_INITIAL_INDEX_SIZE; - static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = ConcurrentQueue::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; - static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = - ConcurrentQueue::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE; - static const size_t MAX_SUBQUEUE_SIZE = ConcurrentQueue::MAX_SUBQUEUE_SIZE; - - public: - // Creates a queue with at least `capacity` element slots; note that the - // actual number of elements that can be inserted without additional memory - // allocation depends on the number of producers and the block size (e.g. if - // the block size is equal to `capacity`, only a single block will be allocated - // up-front, which means only a single producer will be able to enqueue elements - // without an extra allocation -- blocks aren't shared between producers). - // This method is not thread safe -- it is up to the user to ensure that the - // queue is fully constructed before it starts being used by other threads (this - // includes making the memory effects of construction visible, possibly with a - // memory barrier). 
- explicit BlockingConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE) - : inner(capacity) - , sema(create(0, (int)Traits::MAX_SEMA_SPINS), - &BlockingConcurrentQueue::template destroy) - { - assert(reinterpret_cast((BlockingConcurrentQueue*)1) == - &((BlockingConcurrentQueue*)1)->inner && - "BlockingConcurrentQueue must have ConcurrentQueue as its first member"); - if (!sema) { - MOODYCAMEL_THROW(std::bad_alloc()); - } - } - - BlockingConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers) - : inner(minCapacity, maxExplicitProducers, maxImplicitProducers) - , sema(create(0, (int)Traits::MAX_SEMA_SPINS), - &BlockingConcurrentQueue::template destroy) - { - assert(reinterpret_cast((BlockingConcurrentQueue*)1) == - &((BlockingConcurrentQueue*)1)->inner && - "BlockingConcurrentQueue must have ConcurrentQueue as its first member"); - if (!sema) { - MOODYCAMEL_THROW(std::bad_alloc()); - } - } - - // Disable copying and copy assignment - BlockingConcurrentQueue(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; - BlockingConcurrentQueue& operator=(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; - - // Moving is supported, but note that it is *not* a thread-safe operation. - // Nobody can use the queue while it's being moved, and the memory effects - // of that move must be propagated to other threads before they can use it. - // Note: When a queue is moved, its tokens are still valid but can only be - // used with the destination queue (i.e. semantically they are moved along - // with the queue itself). - BlockingConcurrentQueue(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT : inner(std::move(other.inner)), - sema(std::move(other.sema)) - {} - - inline BlockingConcurrentQueue& operator=(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT - { - return swap_internal(other); - } - - // Swaps this queue's state with the other's. Not thread-safe. - // Swapping two queues does not invalidate their tokens, however - // the tokens that were created for one queue must be used with - // only the swapped queue (i.e. the tokens are tied to the - // queue's movable state, not the object itself). - inline void swap(BlockingConcurrentQueue& other) MOODYCAMEL_NOEXCEPT { swap_internal(other); } - - private: - BlockingConcurrentQueue& swap_internal(BlockingConcurrentQueue& other) - { - if (this == &other) { - return *this; - } - - inner.swap(other.inner); - sema.swap(other.sema); - return *this; - } - - public: - // Enqueues a single item (by copying it). - // Allocates memory if required. Only fails if memory allocation fails (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, - // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(T const& item) - { - if ((details::likely)(inner.enqueue(item))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by moving it, if possible). - // Allocates memory if required. Only fails if memory allocation fails (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, - // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(T&& item) - { - if ((details::likely)(inner.enqueue(std::move(item)))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by copying it) using an explicit producer token. - // Allocates memory if required. 
Only fails if memory allocation fails (or - // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(producer_token_t const& token, T const& item) - { - if ((details::likely)(inner.enqueue(token, item))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by moving it, if possible) using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails (or - // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(producer_token_t const& token, T&& item) - { - if ((details::likely)(inner.enqueue(token, std::move(item)))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues several items. - // Allocates memory if required. Only fails if memory allocation fails (or - // implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - // is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Note: Use std::make_move_iterator if the elements should be moved instead of copied. - // Thread-safe. - template inline bool enqueue_bulk(It itemFirst, size_t count) - { - if ((details::likely)(inner.enqueue_bulk(std::forward(itemFirst), count))) { - sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); - return true; - } - return false; - } - - // Enqueues several items using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails - // (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template inline bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - if ((details::likely)(inner.enqueue_bulk(token, std::forward(itemFirst), count))) { - sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); - return true; - } - return false; - } - - // Enqueues a single item (by copying it). - // Does not allocate memory. Fails if not enough room to enqueue (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - // is 0). - // Thread-safe. - inline bool try_enqueue(T const& item) - { - if (inner.try_enqueue(item)) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by moving it, if possible). - // Does not allocate memory (except for one-time implicit producer). - // Fails if not enough room to enqueue (or implicit production is - // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). - // Thread-safe. - inline bool try_enqueue(T&& item) - { - if (inner.try_enqueue(std::move(item))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by copying it) using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Thread-safe. - inline bool try_enqueue(producer_token_t const& token, T const& item) - { - if (inner.try_enqueue(token, item)) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by moving it, if possible) using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Thread-safe. - inline bool try_enqueue(producer_token_t const& token, T&& item) - { - if (inner.try_enqueue(token, std::move(item))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues several items. 
- // Does not allocate memory (except for one-time implicit producer). - // Fails if not enough room to enqueue (or implicit production is - // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template inline bool try_enqueue_bulk(It itemFirst, size_t count) - { - if (inner.try_enqueue_bulk(std::forward(itemFirst), count)) { - sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); - return true; - } - return false; - } - - // Enqueues several items using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template inline bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - if (inner.try_enqueue_bulk(token, std::forward(itemFirst), count)) { - sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); - return true; - } - return false; - } - - // Attempts to dequeue from the queue. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template inline bool try_dequeue(U& item) - { - if (sema->tryWait()) { - while (!inner.try_dequeue(item)) { - continue; - } - return true; - } - return false; - } - - // Attempts to dequeue from the queue using an explicit consumer token. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template inline bool try_dequeue(consumer_token_t& token, U& item) - { - if (sema->tryWait()) { - while (!inner.try_dequeue(token, item)) { - continue; - } - return true; - } - return false; - } - - // Attempts to dequeue several elements from the queue. - // Returns the number of items actually dequeued. - // Returns 0 if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template inline size_t try_dequeue_bulk(It itemFirst, size_t max) - { - size_t count = 0; - max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max); - while (count != max) { - count += inner.template try_dequeue_bulk(itemFirst, max - count); - } - return count; - } - - // Attempts to dequeue several elements from the queue using an explicit consumer token. - // Returns the number of items actually dequeued. - // Returns 0 if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template inline size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) - { - size_t count = 0; - max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max); - while (count != max) { - count += inner.template try_dequeue_bulk(token, itemFirst, max - count); - } - return count; - } - - // Blocks the current thread until there's something to dequeue, then - // dequeues it. - // Never allocates. Thread-safe. 
- template inline void wait_dequeue(U& item) - { - while (!sema->wait()) { - continue; - } - while (!inner.try_dequeue(item)) { - continue; - } - } - - // Blocks the current thread until either there's something to dequeue - // or the timeout (specified in microseconds) expires. Returns false - // without setting `item` if the timeout expires, otherwise assigns - // to `item` and returns true. - // Using a negative timeout indicates an indefinite timeout, - // and is thus functionally equivalent to calling wait_dequeue. - // Never allocates. Thread-safe. - template inline bool wait_dequeue_timed(U& item, std::int64_t timeout_usecs) - { - if (!sema->wait(timeout_usecs)) { - return false; - } - while (!inner.try_dequeue(item)) { - continue; - } - return true; - } - - // Blocks the current thread until either there's something to dequeue - // or the timeout expires. Returns false without setting `item` if the - // timeout expires, otherwise assigns to `item` and returns true. - // Never allocates. Thread-safe. - template - inline bool wait_dequeue_timed(U& item, std::chrono::duration const& timeout) - { - return wait_dequeue_timed(item, std::chrono::duration_cast(timeout).count()); - } - - // Blocks the current thread until there's something to dequeue, then - // dequeues it using an explicit consumer token. - // Never allocates. Thread-safe. - template inline void wait_dequeue(consumer_token_t& token, U& item) - { - while (!sema->wait()) { - continue; - } - while (!inner.try_dequeue(token, item)) { - continue; - } - } - - // Blocks the current thread until either there's something to dequeue - // or the timeout (specified in microseconds) expires. Returns false - // without setting `item` if the timeout expires, otherwise assigns - // to `item` and returns true. - // Using a negative timeout indicates an indefinite timeout, - // and is thus functionally equivalent to calling wait_dequeue. - // Never allocates. Thread-safe. - template inline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::int64_t timeout_usecs) - { - if (!sema->wait(timeout_usecs)) { - return false; - } - while (!inner.try_dequeue(token, item)) { - continue; - } - return true; - } - - // Blocks the current thread until either there's something to dequeue - // or the timeout expires. Returns false without setting `item` if the - // timeout expires, otherwise assigns to `item` and returns true. - // Never allocates. Thread-safe. - template - inline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::chrono::duration const& timeout) - { - return wait_dequeue_timed(token, item, std::chrono::duration_cast(timeout).count()); - } - - // Attempts to dequeue several elements from the queue. - // Returns the number of items actually dequeued, which will - // always be at least one (this method blocks until the queue - // is non-empty) and at most max. - // Never allocates. Thread-safe. - template inline size_t wait_dequeue_bulk(It itemFirst, size_t max) - { - size_t count = 0; - max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max); - while (count != max) { - count += inner.template try_dequeue_bulk(itemFirst, max - count); - } - return count; - } - - // Attempts to dequeue several elements from the queue. - // Returns the number of items actually dequeued, which can - // be 0 if the timeout expires while waiting for elements, - // and at most max. - // Using a negative timeout indicates an indefinite timeout, - // and is thus functionally equivalent to calling wait_dequeue_bulk. 
- // Never allocates. Thread-safe. - template inline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::int64_t timeout_usecs) - { - size_t count = 0; - max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs); - while (count != max) { - count += inner.template try_dequeue_bulk(itemFirst, max - count); - } - return count; - } - - // Attempts to dequeue several elements from the queue. - // Returns the number of items actually dequeued, which can - // be 0 if the timeout expires while waiting for elements, - // and at most max. - // Never allocates. Thread-safe. - template - inline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::chrono::duration const& timeout) - { - return wait_dequeue_bulk_timed( - itemFirst, max, std::chrono::duration_cast(timeout).count()); - } - - // Attempts to dequeue several elements from the queue using an explicit consumer token. - // Returns the number of items actually dequeued, which will - // always be at least one (this method blocks until the queue - // is non-empty) and at most max. - // Never allocates. Thread-safe. - template inline size_t wait_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) - { - size_t count = 0; - max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max); - while (count != max) { - count += inner.template try_dequeue_bulk(token, itemFirst, max - count); - } - return count; - } - - // Attempts to dequeue several elements from the queue using an explicit consumer token. - // Returns the number of items actually dequeued, which can - // be 0 if the timeout expires while waiting for elements, - // and at most max. - // Using a negative timeout indicates an indefinite timeout, - // and is thus functionally equivalent to calling wait_dequeue_bulk. - // Never allocates. Thread-safe. - template - inline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::int64_t timeout_usecs) - { - size_t count = 0; - max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs); - while (count != max) { - count += inner.template try_dequeue_bulk(token, itemFirst, max - count); - } - return count; - } - - // Attempts to dequeue several elements from the queue using an explicit consumer token. - // Returns the number of items actually dequeued, which can - // be 0 if the timeout expires while waiting for elements, - // and at most max. - // Never allocates. Thread-safe. - template - inline size_t wait_dequeue_bulk_timed(consumer_token_t& token, - It itemFirst, - size_t max, - std::chrono::duration const& timeout) - { - return wait_dequeue_bulk_timed( - token, itemFirst, max, std::chrono::duration_cast(timeout).count()); - } - - // Returns an estimate of the total number of elements currently in the queue. This - // estimate is only accurate if the queue has completely stabilized before it is called - // (i.e. all enqueue and dequeue operations have completed and their memory effects are - // visible on the calling thread, and no further operations start while this method is - // being called). - // Thread-safe. - inline size_t size_approx() const { return (size_t)sema->availableApprox(); } - - // Returns true if the underlying atomic variables used by - // the queue are lock-free (they should be on most platforms). - // Thread-safe. 
- static constexpr bool is_lock_free() { return ConcurrentQueue::is_lock_free(); } - - private: - template static inline U* create(A1&& a1, A2&& a2) - { - void* p = (Traits::malloc)(sizeof(U)); - return p != nullptr ? new (p) U(std::forward(a1), std::forward(a2)) : nullptr; - } - - template static inline void destroy(U* p) - { - if (p != nullptr) { - p->~U(); - } - (Traits::free)(p); - } - - private: - ConcurrentQueue inner; - std::unique_ptr sema; -}; - -template -inline void swap(BlockingConcurrentQueue& a, BlockingConcurrentQueue& b) MOODYCAMEL_NOEXCEPT -{ - a.swap(b); -} - -} // end namespace moodycamel \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/common/moody/concurrentqueue.h b/barretenberg/cpp/src/barretenberg/common/moody/concurrentqueue.h deleted file mode 100644 index fc23142efcf..00000000000 --- a/barretenberg/cpp/src/barretenberg/common/moody/concurrentqueue.h +++ /dev/null @@ -1,3988 +0,0 @@ -// Provides a C++11 implementation of a multi-producer, multi-consumer lock-free queue. -// An overview, including benchmark results, is provided here: -// http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++ -// The full design is also described in excruciating detail at: -// http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue - -// Simplified BSD license: -// Copyright (c) 2013-2020, Cameron Desrochers. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// - Redistributions of source code must retain the above copyright notice, this list of -// conditions and the following disclaimer. -// - Redistributions in binary form must reproduce the above copyright notice, this list of -// conditions and the following disclaimer in the documentation and/or other materials -// provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, -// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// Also dual-licensed under the Boost Software License (see LICENSE.md) - -#pragma once - -#if defined(__GNUC__) && !defined(__INTEL_COMPILER) -// Disable -Wconversion warnings (spuriously triggered when Traits::size_t and -// Traits::index_t are set to < 32 bits, causing integer promotion, causing warnings -// upon assigning any computed values) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" - -#ifdef MCDBGQ_USE_RELACY -#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" -#endif -#endif - -#if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17) -// VS2019 with /W4 warns about constant conditional expressions but unless /std=c++17 or higher -// does not support `if constexpr`, so we have no choice but to simply disable the warning -#pragma warning(push) -#pragma warning(disable : 4127) // conditional expression is constant -#endif - -#if defined(__APPLE__) -#include "TargetConditionals.h" -#endif - -#ifdef MCDBGQ_USE_RELACY -#include "relacy/relacy_std.hpp" -#include "relacy_shims.h" -// We only use malloc/free anyway, and the delete macro messes up `= delete` method declarations. -// We'll override the default trait malloc ourselves without a macro. -#undef new -#undef delete -#undef malloc -#undef free -#else -#include // Requires C++11. Sorry VS2010. -#include -#endif -#include // for max_align_t -#include -#include -#include -#include -#include -#include -#include // for CHAR_BIT -#include -#include // partly for __WINPTHREADS_VERSION if on MinGW-w64 w/ POSIX threading -#include // used for thread exit synchronization - -// Platform-specific definitions of a numeric thread ID type and an invalid value -namespace moodycamel { -namespace details { -template struct thread_id_converter { - typedef thread_id_t thread_id_numeric_size_t; - typedef thread_id_t thread_id_hash_t; - static thread_id_hash_t prehash(thread_id_t const& x) { return x; } -}; -} // namespace details -} // namespace moodycamel -#if defined(MCDBGQ_USE_RELACY) -namespace moodycamel { -namespace details { -typedef std::uint32_t thread_id_t; -static const thread_id_t invalid_thread_id = 0xFFFFFFFFU; -static const thread_id_t invalid_thread_id2 = 0xFFFFFFFEU; -static inline thread_id_t thread_id() -{ - return rl::thread_index(); -} -} // namespace details -} // namespace moodycamel -#elif defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__) -// No sense pulling in windows.h in a header, we'll manually declare the function -// we use and rely on backwards-compatibility for this not to break -extern "C" __declspec(dllimport) unsigned long __stdcall GetCurrentThreadId(void); -namespace moodycamel { -namespace details { -static_assert(sizeof(unsigned long) == sizeof(std::uint32_t), - "Expected size of unsigned long to be 32 bits on Windows"); -typedef std::uint32_t thread_id_t; -static const thread_id_t invalid_thread_id = 0; // See http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx -static const thread_id_t invalid_thread_id2 = - 0xFFFFFFFFU; // Not technically guaranteed to be invalid, but is never used in practice. Note that all Win32 thread - // IDs are presently multiples of 4. 
-static inline thread_id_t thread_id() -{ - return static_cast(::GetCurrentThreadId()); -} -} // namespace details -} // namespace moodycamel -#elif defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || (defined(__APPLE__) && TARGET_OS_IPHONE) || \ - defined(MOODYCAMEL_NO_THREAD_LOCAL) -namespace moodycamel { -namespace details { -static_assert(sizeof(std::thread::id) == 4 || sizeof(std::thread::id) == 8, - "std::thread::id is expected to be either 4 or 8 bytes"); - -typedef std::thread::id thread_id_t; -static const thread_id_t invalid_thread_id; // Default ctor creates invalid ID - -// Note we don't define a invalid_thread_id2 since std::thread::id doesn't have one; it's -// only used if MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is defined anyway, which it won't -// be. -static inline thread_id_t thread_id() -{ - return std::this_thread::get_id(); -} - -template struct thread_id_size {}; -template <> struct thread_id_size<4> { - typedef std::uint32_t numeric_t; -}; -template <> struct thread_id_size<8> { - typedef std::uint64_t numeric_t; -}; - -template <> struct thread_id_converter { - typedef thread_id_size::numeric_t thread_id_numeric_size_t; -#ifndef __APPLE__ - typedef std::size_t thread_id_hash_t; -#else - typedef thread_id_numeric_size_t thread_id_hash_t; -#endif - - static thread_id_hash_t prehash(thread_id_t const& x) - { -#ifndef __APPLE__ - return std::hash()(x); -#else - return *reinterpret_cast(&x); -#endif - } -}; -} -} -#else -// Use a nice trick from this answer: http://stackoverflow.com/a/8438730/21475 -// In order to get a numeric thread ID in a platform-independent way, we use a thread-local -// static variable's address as a thread identifier :-) -#if defined(__GNUC__) || defined(__INTEL_COMPILER) -#define MOODYCAMEL_THREADLOCAL __thread -#elif defined(_MSC_VER) -#define MOODYCAMEL_THREADLOCAL __declspec(thread) -#else -// Assume C++11 compliant compiler -#define MOODYCAMEL_THREADLOCAL thread_local -#endif -namespace moodycamel { -namespace details { -typedef std::uintptr_t thread_id_t; -static const thread_id_t invalid_thread_id = 0; // Address can't be nullptr -static const thread_id_t invalid_thread_id2 = - 1; // Member accesses off a null pointer are also generally invalid. Plus it's not aligned. -inline thread_id_t thread_id() -{ - static MOODYCAMEL_THREADLOCAL int x; - return reinterpret_cast(&x); -} -} -} -#endif - -// Constexpr if -#ifndef MOODYCAMEL_CONSTEXPR_IF -#if (defined(_MSC_VER) && defined(_HAS_CXX17) && _HAS_CXX17) || __cplusplus > 201402L -#define MOODYCAMEL_CONSTEXPR_IF if constexpr -#define MOODYCAMEL_MAYBE_UNUSED [[maybe_unused]] -#else -#define MOODYCAMEL_CONSTEXPR_IF if -#define MOODYCAMEL_MAYBE_UNUSED -#endif -#endif - -// Exceptions -#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED -#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || \ - (!defined(_MSC_VER) && !defined(__GNUC__)) -#define MOODYCAMEL_EXCEPTIONS_ENABLED -#endif -#endif -#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED -#define MOODYCAMEL_TRY try -#define MOODYCAMEL_CATCH(...) catch (__VA_ARGS__) -#define MOODYCAMEL_RETHROW throw -#define MOODYCAMEL_THROW(expr) throw(expr) -#else -#define MOODYCAMEL_TRY MOODYCAMEL_CONSTEXPR_IF(true) -#define MOODYCAMEL_CATCH(...) 
else MOODYCAMEL_CONSTEXPR_IF(false) -#define MOODYCAMEL_RETHROW -#define MOODYCAMEL_THROW(expr) -#endif - -#ifndef MOODYCAMEL_NOEXCEPT -#if !defined(MOODYCAMEL_EXCEPTIONS_ENABLED) -#define MOODYCAMEL_NOEXCEPT -#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) true -#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) true -#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1800 -// VS2012's std::is_nothrow_[move_]constructible is broken and returns true when it shouldn't :-( -// We have to assume *all* non-trivial constructors may throw on VS2012! -#define MOODYCAMEL_NOEXCEPT _NOEXCEPT -#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) \ - (std::is_rvalue_reference::value && std::is_move_constructible::value \ - ? std::is_trivially_move_constructible::value \ - : std::is_trivially_copy_constructible::value) -#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) \ - ((std::is_rvalue_reference::value && std::is_move_assignable::value \ - ? std::is_trivially_move_assignable::value || std::is_nothrow_move_assignable::value \ - : std::is_trivially_copy_assignable::value || std::is_nothrow_copy_assignable::value) && \ - MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr)) -#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1900 -#define MOODYCAMEL_NOEXCEPT _NOEXCEPT -#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) \ - (std::is_rvalue_reference::value && std::is_move_constructible::value \ - ? std::is_trivially_move_constructible::value || std::is_nothrow_move_constructible::value \ - : std::is_trivially_copy_constructible::value || std::is_nothrow_copy_constructible::value) -#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) \ - ((std::is_rvalue_reference::value && std::is_move_assignable::value \ - ? std::is_trivially_move_assignable::value || std::is_nothrow_move_assignable::value \ - : std::is_trivially_copy_assignable::value || std::is_nothrow_copy_assignable::value) && \ - MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr)) -#else -#define MOODYCAMEL_NOEXCEPT noexcept -#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) noexcept(expr) -#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) noexcept(expr) -#endif -#endif - -#ifndef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED -#ifdef MCDBGQ_USE_RELACY -#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED -#else -// VS2013 doesn't support `thread_local`, and MinGW-w64 w/ POSIX threading has a crippling bug: -// http://sourceforge.net/p/mingw-w64/bugs/445 g++ <=4.7 doesn't support thread_local either. Finally, iOS/ARM doesn't -// have support for it either, and g++/ARM allows it to compile but it's unconfirmed to actually work -#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \ - (!defined(__MINGW32__) && !defined(__MINGW64__) || !defined(__WINPTHREADS_VERSION)) && \ - (!defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) && \ - (!defined(__APPLE__) || !TARGET_OS_IPHONE) && !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) -// Assume `thread_local` is fully supported in all other C++11 compilers/platforms -#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED // tentatively enabled for now; years ago several users report having - // problems with it on -#endif -#endif -#endif - -// VS2012 doesn't support deleted functions. -// In this case, we declare the function normally but don't define it. A link error will be generated if the function is -// called. 
-#ifndef MOODYCAMEL_DELETE_FUNCTION -#if defined(_MSC_VER) && _MSC_VER < 1800 -#define MOODYCAMEL_DELETE_FUNCTION -#else -#define MOODYCAMEL_DELETE_FUNCTION = delete -#endif -#endif - -namespace moodycamel { -namespace details { -#ifndef MOODYCAMEL_ALIGNAS -// VS2013 doesn't support alignas or alignof, and align() requires a constant literal -#if defined(_MSC_VER) && _MSC_VER <= 1800 -#define MOODYCAMEL_ALIGNAS(alignment) __declspec(align(alignment)) -#define MOODYCAMEL_ALIGNOF(obj) __alignof(obj) -#define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) typename details::Vs2013Aligned::value, T>::type -template struct Vs2013Aligned {}; // default, unsupported alignment -template struct Vs2013Aligned<1, T> { - typedef __declspec(align(1)) T type; -}; -template struct Vs2013Aligned<2, T> { - typedef __declspec(align(2)) T type; -}; -template struct Vs2013Aligned<4, T> { - typedef __declspec(align(4)) T type; -}; -template struct Vs2013Aligned<8, T> { - typedef __declspec(align(8)) T type; -}; -template struct Vs2013Aligned<16, T> { - typedef __declspec(align(16)) T type; -}; -template struct Vs2013Aligned<32, T> { - typedef __declspec(align(32)) T type; -}; -template struct Vs2013Aligned<64, T> { - typedef __declspec(align(64)) T type; -}; -template struct Vs2013Aligned<128, T> { - typedef __declspec(align(128)) T type; -}; -template struct Vs2013Aligned<256, T> { - typedef __declspec(align(256)) T type; -}; -#else -template struct identity { - typedef T type; -}; -#define MOODYCAMEL_ALIGNAS(alignment) alignas(alignment) -#define MOODYCAMEL_ALIGNOF(obj) alignof(obj) -#define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) alignas(alignof(obj)) typename details::identity::type -#endif -#endif -} // namespace details -} // namespace moodycamel - -// TSAN can false report races in lock-free code. To enable TSAN to be used from projects that use this one, -// we can apply per-function compile-time suppression. -// See https://clang.llvm.org/docs/ThreadSanitizer.html#has-feature-thread-sanitizer -#define MOODYCAMEL_NO_TSAN -#if defined(__has_feature) -#if __has_feature(thread_sanitizer) -#undef MOODYCAMEL_NO_TSAN -#define MOODYCAMEL_NO_TSAN __attribute__((no_sanitize("thread"))) -#endif // TSAN -#endif // TSAN - -// Compiler-specific likely/unlikely hints -namespace moodycamel { -namespace details { -#if defined(__GNUC__) -static inline bool(likely)(bool x) -{ - return __builtin_expect((x), true); -} -static inline bool(unlikely)(bool x) -{ - return __builtin_expect((x), false); -} -#else -static inline bool(likely)(bool x) -{ - return x; -} -static inline bool(unlikely)(bool x) -{ - return x; -} -#endif -} // namespace details -} // namespace moodycamel - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG -#include "internal/concurrentqueue_internal_debug.h" -#endif - -namespace moodycamel { -namespace details { -template struct const_numeric_max { - static_assert(std::is_integral::value, "const_numeric_max can only be used with integers"); - static const T value = std::numeric_limits::is_signed - ? (static_cast(1) << (sizeof(T) * CHAR_BIT - 1)) - static_cast(1) - : static_cast(-1); -}; - -#if defined(__GLIBCXX__) -typedef ::max_align_t std_max_align_t; // libstdc++ forgot to add it to std:: for a while -#else -typedef std::max_align_t std_max_align_t; // Others (e.g. MSVC) insist it can *only* be accessed via std:: -#endif - -// Some platforms have incorrectly set max_align_t to a type with <8 bytes alignment even while supporting -// 8-byte aligned scalar values (*cough* 32-bit iOS). Work around this with our own union. 
-typedef union {
-    std_max_align_t x;
-    long long y;
-    void* z;
-} max_align_t;
-} // namespace details
-
-// Default traits for the ConcurrentQueue. To change some of the
-// traits without re-implementing all of them, inherit from this
-// struct and shadow the declarations you wish to be different;
-// since the traits are used as a template type parameter, the
-// shadowed declarations will be used where defined, and the defaults
-// otherwise.
-struct ConcurrentQueueDefaultTraits {
-    // General-purpose size type. std::size_t is strongly recommended.
-    typedef std::size_t size_t;
-
-    // The type used for the enqueue and dequeue indices. Must be at least as
-    // large as size_t. Should be significantly larger than the number of elements
-    // you expect to hold at once, especially if you have a high turnover rate;
-    // for example, on 32-bit x86, if you expect to have over a hundred million
-    // elements or pump several million elements through your queue in a very
-    // short space of time, using a 32-bit type *may* trigger a race condition.
-    // A 64-bit int type is recommended in that case, and in practice will
-    // prevent a race condition no matter the usage of the queue. Note that
-    // whether the queue is lock-free with a 64-int type depends on the whether
-    // std::atomic<std::uint64_t> is lock-free, which is platform-specific.
-    typedef std::size_t index_t;
-
-    // Internally, all elements are enqueued and dequeued from multi-element
-    // blocks; this is the smallest controllable unit. If you expect few elements
-    // but many producers, a smaller block size should be favoured. For few producers
-    // and/or many elements, a larger block size is preferred. A sane default
-    // is provided. Must be a power of 2.
-    static const size_t BLOCK_SIZE = 32;
-
-    // For explicit producers (i.e. when using a producer token), the block is
-    // checked for being empty by iterating through a list of flags, one per element.
-    // For large block sizes, this is too inefficient, and switching to an atomic
-    // counter-based approach is faster. The switch is made for block sizes strictly
-    // larger than this threshold.
-    static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = 32;
-
-    // How many full blocks can be expected for a single explicit producer? This should
-    // reflect that number's maximum for optimal performance. Must be a power of 2.
-    static const size_t EXPLICIT_INITIAL_INDEX_SIZE = 32;
-
-    // How many full blocks can be expected for a single implicit producer? This should
-    // reflect that number's maximum for optimal performance. Must be a power of 2.
-    static const size_t IMPLICIT_INITIAL_INDEX_SIZE = 32;
-
-    // The initial size of the hash table mapping thread IDs to implicit producers.
-    // Note that the hash is resized every time it becomes half full.
-    // Must be a power of two, and either 0 or at least 1. If 0, implicit production
-    // (using the enqueue methods without an explicit producer token) is disabled.
-    static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 32;
-
-    // Controls the number of items that an explicit consumer (i.e. one with a token)
-    // must consume before it causes all consumers to rotate and move on to the next
-    // internal queue.
-    static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 256;
-
-    // The maximum number of elements (inclusive) that can be enqueued to a sub-queue.
-    // Enqueue operations that would cause this limit to be surpassed will fail. Note
-    // that this limit is enforced at the block level (for performance reasons), i.e.
-    // it's rounded up to the nearest block size.
-    static const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max<size_t>::value;
-
-    // The number of times to spin before sleeping when waiting on a semaphore.
-    // Recommended values are on the order of 1000-10000 unless the number of
-    // consumer threads exceeds the number of idle cores (in which case try 0-100).
-    // Only affects instances of the BlockingConcurrentQueue.
-    static const int MAX_SEMA_SPINS = 10000;
-
-    // Whether to recycle dynamically-allocated blocks into an internal free list or
-    // not. If false, only pre-allocated blocks (controlled by the constructor
-    // arguments) will be recycled, and all others will be `free`d back to the heap.
-    // Note that blocks consumed by explicit producers are only freed on destruction
-    // of the queue (not following destruction of the token) regardless of this trait.
-    static const bool RECYCLE_ALLOCATED_BLOCKS = false;
-
-#ifndef MCDBGQ_USE_RELACY
-    // Memory allocation can be customized if needed.
-    // malloc should return nullptr on failure, and handle alignment like std::malloc.
-#if defined(malloc) || defined(free)
-    // Gah, this is 2015, stop defining macros that break standard code already!
-    // Work around malloc/free being special macros:
-    static inline void* WORKAROUND_malloc(size_t size) { return malloc(size); }
-    static inline void WORKAROUND_free(void* ptr) { return free(ptr); }
-    static inline void*(malloc)(size_t size) { return WORKAROUND_malloc(size); }
-    static inline void(free)(void* ptr) { return WORKAROUND_free(ptr); }
-#else
-    static inline void* malloc(size_t size) { return std::malloc(size); }
-    static inline void free(void* ptr) { return std::free(ptr); }
-#endif
-#else
-    // Debug versions when running under the Relacy race detector (ignore
-    // these in user code)
-    static inline void* malloc(size_t size) { return rl::rl_malloc(size, $); }
-    static inline void free(void* ptr) { return rl::rl_free(ptr, $); }
-#endif
-};
-
-// When producing or consuming many elements, the most efficient way is to:
-// 1) Use one of the bulk-operation methods of the queue with a token
-// 2) Failing that, use the bulk-operation methods without a token
-// 3) Failing that, create a token and use that with the single-item methods
-// 4) Failing that, use the single-parameter methods of the queue
-// Having said that, don't create tokens willy-nilly -- ideally there should be
-// a maximum of one token per thread (of each kind).
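For context when reviewing this hunk: the priority list above maps onto API declared later in this header (the token-taking `enqueue_bulk`/`try_dequeue_bulk` overloads, then the token-less ones), and the traits comment above it describes overriding defaults by inheritance. The sketch below is purely illustrative and not part of the diff; the include path and the trait value are assumptions.

```cpp
#include "concurrentqueue.h" // assumed path to this vendored header
#include <cstddef>
#include <cstdio>
#include <thread>

// Shadow only the traits you want to change, as the comment on
// ConcurrentQueueDefaultTraits describes (256 here is an arbitrary power of 2).
struct LargeBlockTraits : public moodycamel::ConcurrentQueueDefaultTraits {
    static const size_t BLOCK_SIZE = 256;
};

int main()
{
    moodycamel::ConcurrentQueue<int, LargeBlockTraits> q;

    // Option 1 above: bulk operations with one token per thread.
    std::thread producer([&] {
        moodycamel::ProducerToken ptok(q);
        int items[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        q.enqueue_bulk(ptok, items, 8);
    });
    std::thread consumer([&] {
        moodycamel::ConsumerToken ctok(q);
        int out[8];
        std::size_t got = 0;
        while (got < 8) {
            got += q.try_dequeue_bulk(ctok, out + got, 8 - got);
        }
        std::printf("dequeued %zu items\n", got);
    });
    producer.join();
    consumer.join();

    // Options 2-4: token-less calls still work; they route through the
    // implicit-producer machinery configured by the traits.
    q.enqueue(42);
    int x;
    return q.try_dequeue(x) ? 0 : 1;
}
```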
-struct ProducerToken;
-struct ConsumerToken;
-
-template <typename T, typename Traits> class ConcurrentQueue;
-template <typename T, typename Traits> class BlockingConcurrentQueue;
-class ConcurrentQueueTests;
-
-namespace details {
-struct ConcurrentQueueProducerTypelessBase {
-    ConcurrentQueueProducerTypelessBase* next;
-    std::atomic<bool> inactive;
-    ProducerToken* token;
-
-    ConcurrentQueueProducerTypelessBase()
-        : next(nullptr)
-        , inactive(false)
-        , token(nullptr)
-    {}
-};
-
-template <bool use32> struct _hash_32_or_64 {
-    static inline std::uint32_t hash(std::uint32_t h)
-    {
-        // MurmurHash3 finalizer -- see https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
-        // Since the thread ID is already unique, all we really want to do is propagate that
-        // uniqueness evenly across all the bits, so that we can use a subset of the bits while
-        // reducing collisions significantly
-        h ^= h >> 16;
-        h *= 0x85ebca6b;
-        h ^= h >> 13;
-        h *= 0xc2b2ae35;
-        return h ^ (h >> 16);
-    }
-};
-template <> struct _hash_32_or_64<1> {
-    static inline std::uint64_t hash(std::uint64_t h)
-    {
-        h ^= h >> 33;
-        h *= 0xff51afd7ed558ccd;
-        h ^= h >> 33;
-        h *= 0xc4ceb9fe1a85ec53;
-        return h ^ (h >> 33);
-    }
-};
-template <std::size_t size> struct hash_32_or_64 : public _hash_32_or_64<(size > 4)> {};
-
-static inline size_t hash_thread_id(thread_id_t id)
-{
-    static_assert(sizeof(thread_id_t) <= 8, "Expected a platform where thread IDs are at most 64-bit values");
-    return static_cast<size_t>(hash_32_or_64<sizeof(thread_id_converter<thread_id_t>::thread_id_hash_t)>::hash(
-        thread_id_converter<thread_id_t>::prehash(id)));
-}
-
-template <typename T> static inline bool circular_less_than(T a, T b)
-{
-    static_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed,
-                  "circular_less_than is intended to be used only with unsigned integer types");
-    return static_cast<T>(a - b) > static_cast<T>(static_cast<T>(1) << (static_cast<T>(sizeof(T) * CHAR_BIT - 1)));
-    // Note: extra parens around rhs of operator<< is MSVC bug:
-    // https://developercommunity2.visualstudio.com/t/C4554-triggers-when-both-lhs-and-rhs-is/10034931
-    // silencing the bug requires #pragma warning(disable: 4554) around the calling code and has no effect when
-    // done here.
-} - -template static inline char* align_for(char* ptr) -{ - const std::size_t alignment = std::alignment_of::value; - return ptr + (alignment - (reinterpret_cast(ptr) % alignment)) % alignment; -} - -template static inline T ceil_to_pow_2(T x) -{ - static_assert(std::is_integral::value && !std::numeric_limits::is_signed, - "ceil_to_pow_2 is intended to be used only with unsigned integer types"); - - // Adapted from http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 - --x; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - for (std::size_t i = 1; i < sizeof(T); i <<= 1) { - x |= x >> (i << 3); - } - ++x; - return x; -} - -template static inline void swap_relaxed(std::atomic& left, std::atomic& right) -{ - T temp = std::move(left.load(std::memory_order_relaxed)); - left.store(std::move(right.load(std::memory_order_relaxed)), std::memory_order_relaxed); - right.store(std::move(temp), std::memory_order_relaxed); -} - -template static inline T const& nomove(T const& x) -{ - return x; -} - -template struct nomove_if { - template static inline T const& eval(T const& x) { return x; } -}; - -template <> struct nomove_if { - template static inline auto eval(U&& x) -> decltype(std::forward(x)) { return std::forward(x); } -}; - -template static inline auto deref_noexcept(It& it) MOODYCAMEL_NOEXCEPT->decltype(*it) -{ - return *it; -} - -#if defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) -template struct is_trivially_destructible : std::is_trivially_destructible {}; -#else -template struct is_trivially_destructible : std::has_trivial_destructor {}; -#endif - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED -#ifdef MCDBGQ_USE_RELACY -typedef RelacyThreadExitListener ThreadExitListener; -typedef RelacyThreadExitNotifier ThreadExitNotifier; -#else -class ThreadExitNotifier; - -struct ThreadExitListener { - typedef void (*callback_t)(void*); - callback_t callback; - void* userData; - - ThreadExitListener* next; // reserved for use by the ThreadExitNotifier - ThreadExitNotifier* chain; // reserved for use by the ThreadExitNotifier -}; - -class ThreadExitNotifier { - public: - static void subscribe(ThreadExitListener* listener) - { - auto& tlsInst = instance(); - std::lock_guard guard(mutex()); - listener->next = tlsInst.tail; - listener->chain = &tlsInst; - tlsInst.tail = listener; - } - - static void unsubscribe(ThreadExitListener* listener) - { - std::lock_guard guard(mutex()); - if (!listener->chain) { - return; // race with ~ThreadExitNotifier - } - auto& tlsInst = *listener->chain; - listener->chain = nullptr; - ThreadExitListener** prev = &tlsInst.tail; - for (auto ptr = tlsInst.tail; ptr != nullptr; ptr = ptr->next) { - if (ptr == listener) { - *prev = ptr->next; - break; - } - prev = &ptr->next; - } - } - - private: - ThreadExitNotifier() - : tail(nullptr) - {} - ThreadExitNotifier(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION; - ThreadExitNotifier& operator=(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION; - - ~ThreadExitNotifier() - { - // This thread is about to exit, let everyone know! - assert(this == &instance() && - "If this assert fails, you likely have a buggy compiler! 
Change the preprocessor conditions such that " - "MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is no longer defined."); - std::lock_guard guard(mutex()); - for (auto ptr = tail; ptr != nullptr; ptr = ptr->next) { - ptr->chain = nullptr; - ptr->callback(ptr->userData); - } - } - - // Thread-local - static inline ThreadExitNotifier& instance() - { - static thread_local ThreadExitNotifier notifier; - return notifier; - } - - static inline std::mutex& mutex() - { - // Must be static because the ThreadExitNotifier could be destroyed while unsubscribe is called - static std::mutex mutex; - return mutex; - } - - private: - ThreadExitListener* tail; -}; -#endif -#endif - -template struct static_is_lock_free_num { - enum { value = 0 }; -}; -template <> struct static_is_lock_free_num { - enum { value = ATOMIC_CHAR_LOCK_FREE }; -}; -template <> struct static_is_lock_free_num { - enum { value = ATOMIC_SHORT_LOCK_FREE }; -}; -template <> struct static_is_lock_free_num { - enum { value = ATOMIC_INT_LOCK_FREE }; -}; -template <> struct static_is_lock_free_num { - enum { value = ATOMIC_LONG_LOCK_FREE }; -}; -template <> struct static_is_lock_free_num { - enum { value = ATOMIC_LLONG_LOCK_FREE }; -}; -template struct static_is_lock_free : static_is_lock_free_num::type> {}; -template <> struct static_is_lock_free { - enum { value = ATOMIC_BOOL_LOCK_FREE }; -}; -template struct static_is_lock_free { - enum { value = ATOMIC_POINTER_LOCK_FREE }; -}; -} // namespace details - -struct ProducerToken { - template explicit ProducerToken(ConcurrentQueue& queue); - - template explicit ProducerToken(BlockingConcurrentQueue& queue); - - ProducerToken(ProducerToken&& other) MOODYCAMEL_NOEXCEPT : producer(other.producer) - { - other.producer = nullptr; - if (producer != nullptr) { - producer->token = this; - } - } - - inline ProducerToken& operator=(ProducerToken&& other) MOODYCAMEL_NOEXCEPT - { - swap(other); - return *this; - } - - void swap(ProducerToken& other) MOODYCAMEL_NOEXCEPT - { - std::swap(producer, other.producer); - if (producer != nullptr) { - producer->token = this; - } - if (other.producer != nullptr) { - other.producer->token = &other; - } - } - - // A token is always valid unless: - // 1) Memory allocation failed during construction - // 2) It was moved via the move constructor - // (Note: assignment does a swap, leaving both potentially valid) - // 3) The associated queue was destroyed - // Note that if valid() returns true, that only indicates - // that the token is valid for use with a specific queue, - // but not which one; that's up to the user to track. 
- inline bool valid() const { return producer != nullptr; } - - ~ProducerToken() - { - if (producer != nullptr) { - producer->token = nullptr; - producer->inactive.store(true, std::memory_order_release); - } - } - - // Disable copying and assignment - ProducerToken(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION; - ProducerToken& operator=(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION; - - private: - template friend class ConcurrentQueue; - friend class ConcurrentQueueTests; - - protected: - details::ConcurrentQueueProducerTypelessBase* producer; -}; - -struct ConsumerToken { - template explicit ConsumerToken(ConcurrentQueue& q); - - template explicit ConsumerToken(BlockingConcurrentQueue& q); - - ConsumerToken(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT : initialOffset(other.initialOffset), - lastKnownGlobalOffset(other.lastKnownGlobalOffset), - itemsConsumedFromCurrent(other.itemsConsumedFromCurrent), - currentProducer(other.currentProducer), - desiredProducer(other.desiredProducer) - {} - - inline ConsumerToken& operator=(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT - { - swap(other); - return *this; - } - - void swap(ConsumerToken& other) MOODYCAMEL_NOEXCEPT - { - std::swap(initialOffset, other.initialOffset); - std::swap(lastKnownGlobalOffset, other.lastKnownGlobalOffset); - std::swap(itemsConsumedFromCurrent, other.itemsConsumedFromCurrent); - std::swap(currentProducer, other.currentProducer); - std::swap(desiredProducer, other.desiredProducer); - } - - // Disable copying and assignment - ConsumerToken(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION; - ConsumerToken& operator=(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION; - - private: - template friend class ConcurrentQueue; - friend class ConcurrentQueueTests; - - private: // but shared with ConcurrentQueue - std::uint32_t initialOffset; - std::uint32_t lastKnownGlobalOffset; - std::uint32_t itemsConsumedFromCurrent; - details::ConcurrentQueueProducerTypelessBase* currentProducer; - details::ConcurrentQueueProducerTypelessBase* desiredProducer; -}; - -// Need to forward-declare this swap because it's in a namespace. -// See -// http://stackoverflow.com/questions/4492062/why-does-a-c-friend-class-need-a-forward-declaration-only-in-other-namespaces -template -inline void swap(typename ConcurrentQueue::ImplicitProducerKVP& a, - typename ConcurrentQueue::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT; - -template class ConcurrentQueue { - public: - typedef ::moodycamel::ProducerToken producer_token_t; - typedef ::moodycamel::ConsumerToken consumer_token_t; - - typedef typename Traits::index_t index_t; - typedef typename Traits::size_t size_t; - - static const size_t BLOCK_SIZE = static_cast(Traits::BLOCK_SIZE); - static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = - static_cast(Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD); - static const size_t EXPLICIT_INITIAL_INDEX_SIZE = static_cast(Traits::EXPLICIT_INITIAL_INDEX_SIZE); - static const size_t IMPLICIT_INITIAL_INDEX_SIZE = static_cast(Traits::IMPLICIT_INITIAL_INDEX_SIZE); - static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = - static_cast(Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE); - static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = - static_cast(Traits::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4307) // + integral constant overflow (that's what the ternary expression is for!) 
-#pragma warning(disable : 4309) // static_cast: Truncation of constant value -#endif - static const size_t MAX_SUBQUEUE_SIZE = - (details::const_numeric_max::value - static_cast(Traits::MAX_SUBQUEUE_SIZE) < BLOCK_SIZE) - ? details::const_numeric_max::value - : ((static_cast(Traits::MAX_SUBQUEUE_SIZE) + (BLOCK_SIZE - 1)) / BLOCK_SIZE * BLOCK_SIZE); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - - static_assert(!std::numeric_limits::is_signed && std::is_integral::value, - "Traits::size_t must be an unsigned integral type"); - static_assert(!std::numeric_limits::is_signed && std::is_integral::value, - "Traits::index_t must be an unsigned integral type"); - static_assert(sizeof(index_t) >= sizeof(size_t), "Traits::index_t must be at least as wide as Traits::size_t"); - static_assert((BLOCK_SIZE > 1) && !(BLOCK_SIZE & (BLOCK_SIZE - 1)), - "Traits::BLOCK_SIZE must be a power of 2 (and at least 2)"); - static_assert((EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD > 1) && - !(EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD & (EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD - 1)), - "Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD must be a power of 2 (and greater than 1)"); - static_assert((EXPLICIT_INITIAL_INDEX_SIZE > 1) && - !(EXPLICIT_INITIAL_INDEX_SIZE & (EXPLICIT_INITIAL_INDEX_SIZE - 1)), - "Traits::EXPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)"); - static_assert((IMPLICIT_INITIAL_INDEX_SIZE > 1) && - !(IMPLICIT_INITIAL_INDEX_SIZE & (IMPLICIT_INITIAL_INDEX_SIZE - 1)), - "Traits::IMPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)"); - static_assert((INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) || - !(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE & (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - 1)), - "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be a power of 2"); - static_assert( - INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0 || INITIAL_IMPLICIT_PRODUCER_HASH_SIZE >= 1, - "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be at least 1 (or 0 to disable implicit enqueueing)"); - - public: - // Creates a queue with at least `capacity` element slots; note that the - // actual number of elements that can be inserted without additional memory - // allocation depends on the number of producers and the block size (e.g. if - // the block size is equal to `capacity`, only a single block will be allocated - // up-front, which means only a single producer will be able to enqueue elements - // without an extra allocation -- blocks aren't shared between producers). - // This method is not thread safe -- it is up to the user to ensure that the - // queue is fully constructed before it starts being used by other threads (this - // includes making the memory effects of construction visible, possibly with a - // memory barrier). - explicit ConcurrentQueue(size_t capacity = 32 * BLOCK_SIZE) - : producerListTail(nullptr) - , producerCount(0) - , initialBlockPoolIndex(0) - , nextExplicitConsumerId(0) - , globalExplicitConsumerOffset(0) - { - implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); - populate_initial_implicit_producer_hash(); - populate_initial_block_list(capacity / BLOCK_SIZE + ((capacity & (BLOCK_SIZE - 1)) == 0 ? 0 : 1)); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - // Track all the producers using a fully-resolved typed list for - // each kind; this makes it possible to debug them starting from - // the root queue object (otherwise wacky casts are needed that - // don't compile in the debugger's expression evaluator). 
- explicitProducers.store(nullptr, std::memory_order_relaxed); - implicitProducers.store(nullptr, std::memory_order_relaxed); -#endif - } - - // Computes the correct amount of pre-allocated blocks for you based - // on the minimum number of elements you want available at any given - // time, and the maximum concurrent number of each type of producer. - ConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers) - : producerListTail(nullptr) - , producerCount(0) - , initialBlockPoolIndex(0) - , nextExplicitConsumerId(0) - , globalExplicitConsumerOffset(0) - { - implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); - populate_initial_implicit_producer_hash(); - size_t blocks = (((minCapacity + BLOCK_SIZE - 1) / BLOCK_SIZE) - 1) * (maxExplicitProducers + 1) + - 2 * (maxExplicitProducers + maxImplicitProducers); - populate_initial_block_list(blocks); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - explicitProducers.store(nullptr, std::memory_order_relaxed); - implicitProducers.store(nullptr, std::memory_order_relaxed); -#endif - } - - // Note: The queue should not be accessed concurrently while it's - // being deleted. It's up to the user to synchronize this. - // This method is not thread safe. - ~ConcurrentQueue() - { - // Destroy producers - auto ptr = producerListTail.load(std::memory_order_relaxed); - while (ptr != nullptr) { - auto next = ptr->next_prod(); - if (ptr->token != nullptr) { - ptr->token->producer = nullptr; - } - destroy(ptr); - ptr = next; - } - - // Destroy implicit producer hash tables - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE != 0) - { - auto hash = implicitProducerHash.load(std::memory_order_relaxed); - while (hash != nullptr) { - auto prev = hash->prev; - if (prev != nullptr) { // The last hash is part of this object and was not allocated dynamically - for (size_t i = 0; i != hash->capacity; ++i) { - hash->entries[i].~ImplicitProducerKVP(); - } - hash->~ImplicitProducerHash(); - (Traits::free)(hash); - } - hash = prev; - } - } - - // Destroy global free list - auto block = freeList.head_unsafe(); - while (block != nullptr) { - auto next = block->freeListNext.load(std::memory_order_relaxed); - if (block->dynamicallyAllocated) { - destroy(block); - } - block = next; - } - - // Destroy initial free list - destroy_array(initialBlockPool, initialBlockPoolSize); - } - - // Disable copying and copy assignment - ConcurrentQueue(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; - ConcurrentQueue& operator=(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; - - // Moving is supported, but note that it is *not* a thread-safe operation. - // Nobody can use the queue while it's being moved, and the memory effects - // of that move must be propagated to other threads before they can use it. - // Note: When a queue is moved, its tokens are still valid but can only be - // used with the destination queue (i.e. semantically they are moved along - // with the queue itself). 
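As an aside for reviewers, the two constructors in this hunk size the initial block pool differently: one takes a raw element capacity, the other derives the pool from the expected number of producers of each kind. A minimal sketch (illustrative only, not part of the diff; the include path is an assumption):

```cpp
#include "concurrentqueue.h" // assumed path to this vendored header

int main()
{
    // At least 1024 element slots are pre-allocated up front; later enqueues
    // may still allocate new blocks on demand.
    moodycamel::ConcurrentQueue<int> byCapacity(1024);

    // Pre-allocate for: at least 4096 elements available at any time, with at
    // most 2 explicit (token) producers and 8 implicit (token-less) producer threads.
    moodycamel::ConcurrentQueue<int> byProducers(4096, 2, 8);

    // Construction is not thread-safe; publish each queue to other threads only
    // after its constructor has finished (and its memory effects are visible).
    byCapacity.enqueue(1);
    byProducers.enqueue(2);
    return 0;
}
```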
- ConcurrentQueue(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT - : producerListTail(other.producerListTail.load(std::memory_order_relaxed)), - producerCount(other.producerCount.load(std::memory_order_relaxed)), - initialBlockPoolIndex(other.initialBlockPoolIndex.load(std::memory_order_relaxed)), - initialBlockPool(other.initialBlockPool), - initialBlockPoolSize(other.initialBlockPoolSize), - freeList(std::move(other.freeList)), - nextExplicitConsumerId(other.nextExplicitConsumerId.load(std::memory_order_relaxed)), - globalExplicitConsumerOffset(other.globalExplicitConsumerOffset.load(std::memory_order_relaxed)) - { - // Move the other one into this, and leave the other one as an empty queue - implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); - populate_initial_implicit_producer_hash(); - swap_implicit_producer_hashes(other); - - other.producerListTail.store(nullptr, std::memory_order_relaxed); - other.producerCount.store(0, std::memory_order_relaxed); - other.nextExplicitConsumerId.store(0, std::memory_order_relaxed); - other.globalExplicitConsumerOffset.store(0, std::memory_order_relaxed); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - explicitProducers.store(other.explicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed); - other.explicitProducers.store(nullptr, std::memory_order_relaxed); - implicitProducers.store(other.implicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed); - other.implicitProducers.store(nullptr, std::memory_order_relaxed); -#endif - - other.initialBlockPoolIndex.store(0, std::memory_order_relaxed); - other.initialBlockPoolSize = 0; - other.initialBlockPool = nullptr; - - reown_producers(); - } - - inline ConcurrentQueue& operator=(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT { return swap_internal(other); } - - // Swaps this queue's state with the other's. Not thread-safe. - // Swapping two queues does not invalidate their tokens, however - // the tokens that were created for one queue must be used with - // only the swapped queue (i.e. the tokens are tied to the - // queue's movable state, not the object itself). - inline void swap(ConcurrentQueue& other) MOODYCAMEL_NOEXCEPT { swap_internal(other); } - - private: - ConcurrentQueue& swap_internal(ConcurrentQueue& other) - { - if (this == &other) { - return *this; - } - - details::swap_relaxed(producerListTail, other.producerListTail); - details::swap_relaxed(producerCount, other.producerCount); - details::swap_relaxed(initialBlockPoolIndex, other.initialBlockPoolIndex); - std::swap(initialBlockPool, other.initialBlockPool); - std::swap(initialBlockPoolSize, other.initialBlockPoolSize); - freeList.swap(other.freeList); - details::swap_relaxed(nextExplicitConsumerId, other.nextExplicitConsumerId); - details::swap_relaxed(globalExplicitConsumerOffset, other.globalExplicitConsumerOffset); - - swap_implicit_producer_hashes(other); - - reown_producers(); - other.reown_producers(); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - details::swap_relaxed(explicitProducers, other.explicitProducers); - details::swap_relaxed(implicitProducers, other.implicitProducers); -#endif - - return *this; - } - - public: - // Enqueues a single item (by copying it). - // Allocates memory if required. Only fails if memory allocation fails (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, - // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. 
- inline bool enqueue(T const& item) - { - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - else return inner_enqueue(item); - } - - // Enqueues a single item (by moving it, if possible). - // Allocates memory if required. Only fails if memory allocation fails (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, - // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(T&& item) - { - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - else return inner_enqueue(std::move(item)); - } - - // Enqueues a single item (by copying it) using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails (or - // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(producer_token_t const& token, T const& item) { return inner_enqueue(token, item); } - - // Enqueues a single item (by moving it, if possible) using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails (or - // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(producer_token_t const& token, T&& item) - { - return inner_enqueue(token, std::move(item)); - } - - // Enqueues several items. - // Allocates memory if required. Only fails if memory allocation fails (or - // implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - // is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Note: Use std::make_move_iterator if the elements should be moved instead of copied. - // Thread-safe. - template bool enqueue_bulk(It itemFirst, size_t count) - { - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - else return inner_enqueue_bulk(itemFirst, count); - } - - // Enqueues several items using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails - // (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - return inner_enqueue_bulk(token, itemFirst, count); - } - - // Enqueues a single item (by copying it). - // Does not allocate memory. Fails if not enough room to enqueue (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - // is 0). - // Thread-safe. - inline bool try_enqueue(T const& item) - { - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - else return inner_enqueue(item); - } - - // Enqueues a single item (by moving it, if possible). - // Does not allocate memory (except for one-time implicit producer). - // Fails if not enough room to enqueue (or implicit production is - // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). - // Thread-safe. - inline bool try_enqueue(T&& item) - { - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - else return inner_enqueue(std::move(item)); - } - - // Enqueues a single item (by copying it) using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Thread-safe. 
- inline bool try_enqueue(producer_token_t const& token, T const& item) - { - return inner_enqueue(token, item); - } - - // Enqueues a single item (by moving it, if possible) using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Thread-safe. - inline bool try_enqueue(producer_token_t const& token, T&& item) - { - return inner_enqueue(token, std::move(item)); - } - - // Enqueues several items. - // Does not allocate memory (except for one-time implicit producer). - // Fails if not enough room to enqueue (or implicit production is - // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template bool try_enqueue_bulk(It itemFirst, size_t count) - { - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - else return inner_enqueue_bulk(itemFirst, count); - } - - // Enqueues several items using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - return inner_enqueue_bulk(token, itemFirst, count); - } - - // Attempts to dequeue from the queue. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template bool try_dequeue(U& item) - { - // Instead of simply trying each producer in turn (which could cause needless contention on the first - // producer), we score them heuristically. - size_t nonEmptyCount = 0; - ProducerBase* best = nullptr; - size_t bestSize = 0; - for (auto ptr = producerListTail.load(std::memory_order_acquire); nonEmptyCount < 3 && ptr != nullptr; - ptr = ptr->next_prod()) { - auto size = ptr->size_approx(); - if (size > 0) { - if (size > bestSize) { - bestSize = size; - best = ptr; - } - ++nonEmptyCount; - } - } - - // If there was at least one non-empty queue but it appears empty at the time - // we try to dequeue from it, we need to make sure every queue's been tried - if (nonEmptyCount > 0) { - if ((details::likely)(best->dequeue(item))) { - return true; - } - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - if (ptr != best && ptr->dequeue(item)) { - return true; - } - } - } - return false; - } - - // Attempts to dequeue from the queue. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // This differs from the try_dequeue(item) method in that this one does - // not attempt to reduce contention by interleaving the order that producer - // streams are dequeued from. So, using this method can reduce overall throughput - // under contention, but will give more predictable results in single-threaded - // consumer scenarios. This is mostly only useful for internal unit tests. - // Never allocates. Thread-safe. - template bool try_dequeue_non_interleaved(U& item) - { - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - if (ptr->dequeue(item)) { - return true; - } - } - return false; - } - - // Attempts to dequeue from the queue using an explicit consumer token. 
- // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template bool try_dequeue(consumer_token_t& token, U& item) - { - // The idea is roughly as follows: - // Every 256 items from one producer, make everyone rotate (increase the global offset) -> this means the - // highest efficiency consumer dictates the rotation speed of everyone else, more or less If you see that the - // global offset has changed, you must reset your consumption counter and move to your designated place If - // there's no items where you're supposed to be, keep moving until you find a producer with some items If the - // global offset has not changed but you've run out of items to consume, move over from your current position - // until you find an producer with something in it - - if (token.desiredProducer == nullptr || - token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { - if (!update_current_producer_after_rotation(token)) { - return false; - } - } - - // If there was at least one non-empty queue but it appears empty at the time - // we try to dequeue from it, we need to make sure every queue's been tried - if (static_cast(token.currentProducer)->dequeue(item)) { - if (++token.itemsConsumedFromCurrent == EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) { - globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed); - } - return true; - } - - auto tail = producerListTail.load(std::memory_order_acquire); - auto ptr = static_cast(token.currentProducer)->next_prod(); - if (ptr == nullptr) { - ptr = tail; - } - while (ptr != static_cast(token.currentProducer)) { - if (ptr->dequeue(item)) { - token.currentProducer = ptr; - token.itemsConsumedFromCurrent = 1; - return true; - } - ptr = ptr->next_prod(); - if (ptr == nullptr) { - ptr = tail; - } - } - return false; - } - - // Attempts to dequeue several elements from the queue. - // Returns the number of items actually dequeued. - // Returns 0 if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template size_t try_dequeue_bulk(It itemFirst, size_t max) - { - size_t count = 0; - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - count += ptr->dequeue_bulk(itemFirst, max - count); - if (count == max) { - break; - } - } - return count; - } - - // Attempts to dequeue several elements from the queue using an explicit consumer token. - // Returns the number of items actually dequeued. - // Returns 0 if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. 
- template size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) - { - if (token.desiredProducer == nullptr || - token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { - if (!update_current_producer_after_rotation(token)) { - return 0; - } - } - - size_t count = static_cast(token.currentProducer)->dequeue_bulk(itemFirst, max); - if (count == max) { - if ((token.itemsConsumedFromCurrent += static_cast(max)) >= - EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) { - globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed); - } - return max; - } - token.itemsConsumedFromCurrent += static_cast(count); - max -= count; - - auto tail = producerListTail.load(std::memory_order_acquire); - auto ptr = static_cast(token.currentProducer)->next_prod(); - if (ptr == nullptr) { - ptr = tail; - } - while (ptr != static_cast(token.currentProducer)) { - auto dequeued = ptr->dequeue_bulk(itemFirst, max); - count += dequeued; - if (dequeued != 0) { - token.currentProducer = ptr; - token.itemsConsumedFromCurrent = static_cast(dequeued); - } - if (dequeued == max) { - break; - } - max -= dequeued; - ptr = ptr->next_prod(); - if (ptr == nullptr) { - ptr = tail; - } - } - return count; - } - - // Attempts to dequeue from a specific producer's inner queue. - // If you happen to know which producer you want to dequeue from, this - // is significantly faster than using the general-case try_dequeue methods. - // Returns false if the producer's queue appeared empty at the time it - // was checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template inline bool try_dequeue_from_producer(producer_token_t const& producer, U& item) - { - return static_cast(producer.producer)->dequeue(item); - } - - // Attempts to dequeue several elements from a specific producer's inner queue. - // Returns the number of items actually dequeued. - // If you happen to know which producer you want to dequeue from, this - // is significantly faster than using the general-case try_dequeue methods. - // Returns 0 if the producer's queue appeared empty at the time it - // was checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - inline size_t try_dequeue_bulk_from_producer(producer_token_t const& producer, It itemFirst, size_t max) - { - return static_cast(producer.producer)->dequeue_bulk(itemFirst, max); - } - - // Returns an estimate of the total number of elements currently in the queue. This - // estimate is only accurate if the queue has completely stabilized before it is called - // (i.e. all enqueue and dequeue operations have completed and their memory effects are - // visible on the calling thread, and no further operations start while this method is - // being called). - // Thread-safe. - size_t size_approx() const - { - size_t size = 0; - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - size += ptr->size_approx(); - } - return size; - } - - // Returns true if the underlying atomic variables used by - // the queue are lock-free (they should be on most platforms). - // Thread-safe. 
- static constexpr bool is_lock_free() - { - return details::static_is_lock_free::value == 2 && details::static_is_lock_free::value == 2 && - details::static_is_lock_free::value == 2 && - details::static_is_lock_free::value == 2 && details::static_is_lock_free::value == 2 && - details::static_is_lock_free< - typename details::thread_id_converter::thread_id_numeric_size_t>::value == 2; - } - - private: - friend struct ProducerToken; - friend struct ConsumerToken; - struct ExplicitProducer; - friend struct ExplicitProducer; - struct ImplicitProducer; - friend struct ImplicitProducer; - friend class ConcurrentQueueTests; - - enum AllocationMode { CanAlloc, CannotAlloc }; - - /////////////////////////////// - // Queue methods - /////////////////////////////// - - template inline bool inner_enqueue(producer_token_t const& token, U&& element) - { - return static_cast(token.producer) - ->ConcurrentQueue::ExplicitProducer::template enqueue(std::forward(element)); - } - - template inline bool inner_enqueue(U&& element) - { - auto producer = get_or_add_implicit_producer(); - return producer == nullptr - ? false - : producer->ConcurrentQueue::ImplicitProducer::template enqueue(std::forward(element)); - } - - template - inline bool inner_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - return static_cast(token.producer) - ->ConcurrentQueue::ExplicitProducer::template enqueue_bulk(itemFirst, count); - } - - template inline bool inner_enqueue_bulk(It itemFirst, size_t count) - { - auto producer = get_or_add_implicit_producer(); - return producer == nullptr - ? false - : producer->ConcurrentQueue::ImplicitProducer::template enqueue_bulk(itemFirst, count); - } - - inline bool update_current_producer_after_rotation(consumer_token_t& token) - { - // Ah, there's been a rotation, figure out where we should be! - auto tail = producerListTail.load(std::memory_order_acquire); - if (token.desiredProducer == nullptr && tail == nullptr) { - return false; - } - auto prodCount = producerCount.load(std::memory_order_relaxed); - auto globalOffset = globalExplicitConsumerOffset.load(std::memory_order_relaxed); - if ((details::unlikely)(token.desiredProducer == nullptr)) { - // Aha, first time we're dequeueing anything. - // Figure out our local position - // Note: offset is from start, not end, but we're traversing from end -- subtract from count first - std::uint32_t offset = prodCount - 1 - (token.initialOffset % prodCount); - token.desiredProducer = tail; - for (std::uint32_t i = 0; i != offset; ++i) { - token.desiredProducer = static_cast(token.desiredProducer)->next_prod(); - if (token.desiredProducer == nullptr) { - token.desiredProducer = tail; - } - } - } - - std::uint32_t delta = globalOffset - token.lastKnownGlobalOffset; - if (delta >= prodCount) { - delta = delta % prodCount; - } - for (std::uint32_t i = 0; i != delta; ++i) { - token.desiredProducer = static_cast(token.desiredProducer)->next_prod(); - if (token.desiredProducer == nullptr) { - token.desiredProducer = tail; - } - } - - token.lastKnownGlobalOffset = globalOffset; - token.currentProducer = token.desiredProducer; - token.itemsConsumedFromCurrent = 0; - return true; - } - - /////////////////////////// - // Free list - /////////////////////////// - - template struct FreeListNode { - FreeListNode() - : freeListRefs(0) - , freeListNext(nullptr) - {} - - std::atomic freeListRefs; - std::atomic freeListNext; - }; - - // A simple CAS-based lock-free free list. 
Not the fastest thing in the world under heavy contention, but - // simple and correct (assuming nodes are never freed until after the free list is destroyed), and fairly - // speedy under low contention. - template // N must inherit FreeListNode or have the same fields (and initialization of them) - struct FreeList { - FreeList() - : freeListHead(nullptr) - {} - FreeList(FreeList&& other) - : freeListHead(other.freeListHead.load(std::memory_order_relaxed)) - { - other.freeListHead.store(nullptr, std::memory_order_relaxed); - } - void swap(FreeList& other) { details::swap_relaxed(freeListHead, other.freeListHead); } - - FreeList(FreeList const&) MOODYCAMEL_DELETE_FUNCTION; - FreeList& operator=(FreeList const&) MOODYCAMEL_DELETE_FUNCTION; - - inline void add(N* node) - { -#ifdef MCDBGQ_NOLOCKFREE_FREELIST - debug::DebugLock lock(mutex); -#endif - // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to - // set it using a fetch_add - if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) { - // Oh look! We were the last ones referencing this node, and we know - // we want to add it to the free list, so let's do it! - add_knowing_refcount_is_zero(node); - } - } - - inline N* try_get() - { -#ifdef MCDBGQ_NOLOCKFREE_FREELIST - debug::DebugLock lock(mutex); -#endif - auto head = freeListHead.load(std::memory_order_acquire); - while (head != nullptr) { - auto prevHead = head; - auto refs = head->freeListRefs.load(std::memory_order_relaxed); - if ((refs & REFS_MASK) == 0 || - !head->freeListRefs.compare_exchange_strong( - refs, refs + 1, std::memory_order_acquire, std::memory_order_relaxed)) { - head = freeListHead.load(std::memory_order_acquire); - continue; - } - - // Good, reference count has been incremented (it wasn't at zero), which means we can read the - // next and not worry about it changing between now and the time we do the CAS - auto next = head->freeListNext.load(std::memory_order_relaxed); - if (freeListHead.compare_exchange_strong( - head, next, std::memory_order_acquire, std::memory_order_relaxed)) { - // Yay, got the node. This means it was on the list, which means shouldBeOnFreeList must be false no - // matter the refcount (because nobody else knows it's been taken off yet, it can't have been put - // back on). - assert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0); - - // Decrease refcount twice, once for our ref, and once for the list's ref - head->freeListRefs.fetch_sub(2, std::memory_order_release); - return head; - } - - // OK, the head must have changed on us, but we still need to decrease the refcount we increased. - // Note that we don't need to release any memory effects, but we do need to ensure that the reference - // count decrement happens-after the CAS on the head. - refs = prevHead->freeListRefs.fetch_sub(1, std::memory_order_acq_rel); - if (refs == SHOULD_BE_ON_FREELIST + 1) { - add_knowing_refcount_is_zero(prevHead); - } - } - - return nullptr; - } - - // Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes) - N* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); } - - private: - inline void add_knowing_refcount_is_zero(N* node) - { - // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we run - // only one copy of this method per node at a time, i.e. 
the single thread case), then we know - // we can safely change the next pointer of the node; however, once the refcount is back above - // zero, then other threads could increase it (happens under heavy contention, when the refcount - // goes to zero in between a load and a refcount increment of a node in try_get, then back up to - // something non-zero, then the refcount increment is done by the other thread) -- so, if the CAS - // to add the node to the actual list fails, decrease the refcount and leave the add operation to - // the next thread who puts the refcount back at zero (which could be us, hence the loop). - auto head = freeListHead.load(std::memory_order_relaxed); - while (true) { - node->freeListNext.store(head, std::memory_order_relaxed); - node->freeListRefs.store(1, std::memory_order_release); - if (!freeListHead.compare_exchange_strong( - head, node, std::memory_order_release, std::memory_order_relaxed)) { - // Hmm, the add failed, but we can only try again when the refcount goes back to zero - if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) { - continue; - } - } - return; - } - } - - private: - // Implemented like a stack, but where node order doesn't matter (nodes are inserted out of order under - // contention) - std::atomic freeListHead; - - static const std::uint32_t REFS_MASK = 0x7FFFFFFF; - static const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000; - -#ifdef MCDBGQ_NOLOCKFREE_FREELIST - debug::DebugMutex mutex; -#endif - }; - - /////////////////////////// - // Block - /////////////////////////// - - enum InnerQueueContext { implicit_context = 0, explicit_context = 1 }; - - struct Block { - Block() - : next(nullptr) - , elementsCompletelyDequeued(0) - , freeListRefs(0) - , freeListNext(nullptr) - , dynamicallyAllocated(true) - { -#ifdef MCDBGQ_TRACKMEM - owner = nullptr; -#endif - } - - template inline bool is_empty() const - { - MOODYCAMEL_CONSTEXPR_IF(context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) - { - // Check flags - for (size_t i = 0; i < BLOCK_SIZE; ++i) { - if (!emptyFlags[i].load(std::memory_order_relaxed)) { - return false; - } - } - - // Aha, empty; make sure we have all other memory effects that happened before the empty flags were set - std::atomic_thread_fence(std::memory_order_acquire); - return true; - } - else - { - // Check counter - if (elementsCompletelyDequeued.load(std::memory_order_relaxed) == BLOCK_SIZE) { - std::atomic_thread_fence(std::memory_order_acquire); - return true; - } - assert(elementsCompletelyDequeued.load(std::memory_order_relaxed) <= BLOCK_SIZE); - return false; - } - } - - // Returns true if the block is now empty (does not apply in explicit context) - template inline bool set_empty(MOODYCAMEL_MAYBE_UNUSED index_t i) - { - MOODYCAMEL_CONSTEXPR_IF(context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) - { - // Set flag - assert(!emptyFlags[BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1))].load( - std::memory_order_relaxed)); - emptyFlags[BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1))].store( - true, std::memory_order_release); - return false; - } - else - { - // Increment counter - auto prevVal = elementsCompletelyDequeued.fetch_add(1, std::memory_order_release); - assert(prevVal < BLOCK_SIZE); - return prevVal == BLOCK_SIZE - 1; - } - } - - // Sets multiple contiguous item statuses to 'empty' (assumes no wrapping and count > 0). 
- // Returns true if the block is now empty (does not apply in explicit context). - template inline bool set_many_empty(MOODYCAMEL_MAYBE_UNUSED index_t i, size_t count) - { - MOODYCAMEL_CONSTEXPR_IF(context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) - { - // Set flags - std::atomic_thread_fence(std::memory_order_release); - i = BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1)) - count + 1; - for (size_t j = 0; j != count; ++j) { - assert(!emptyFlags[i + j].load(std::memory_order_relaxed)); - emptyFlags[i + j].store(true, std::memory_order_relaxed); - } - return false; - } - else - { - // Increment counter - auto prevVal = elementsCompletelyDequeued.fetch_add(count, std::memory_order_release); - assert(prevVal + count <= BLOCK_SIZE); - return prevVal + count == BLOCK_SIZE; - } - } - - template inline void set_all_empty() - { - MOODYCAMEL_CONSTEXPR_IF(context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) - { - // Set all flags - for (size_t i = 0; i != BLOCK_SIZE; ++i) { - emptyFlags[i].store(true, std::memory_order_relaxed); - } - } - else - { - // Reset counter - elementsCompletelyDequeued.store(BLOCK_SIZE, std::memory_order_relaxed); - } - } - - template inline void reset_empty() - { - MOODYCAMEL_CONSTEXPR_IF(context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) - { - // Reset flags - for (size_t i = 0; i != BLOCK_SIZE; ++i) { - emptyFlags[i].store(false, std::memory_order_relaxed); - } - } - else - { - // Reset counter - elementsCompletelyDequeued.store(0, std::memory_order_relaxed); - } - } - - inline T* operator[](index_t idx) MOODYCAMEL_NOEXCEPT - { - return static_cast(static_cast(elements)) + - static_cast(idx & static_cast(BLOCK_SIZE - 1)); - } - inline T const* operator[](index_t idx) const MOODYCAMEL_NOEXCEPT - { - return static_cast(static_cast(elements)) + - static_cast(idx & static_cast(BLOCK_SIZE - 1)); - } - - private: - static_assert(std::alignment_of::value <= sizeof(T), - "The queue does not support types with an alignment greater than their size at this time"); - MOODYCAMEL_ALIGNED_TYPE_LIKE(char[sizeof(T) * BLOCK_SIZE], T) elements; - - public: - Block* next; - std::atomic elementsCompletelyDequeued; - std::atomic emptyFlags[BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD ? 
BLOCK_SIZE : 1]; - - public: - std::atomic freeListRefs; - std::atomic freeListNext; - bool dynamicallyAllocated; // Perhaps a better name for this would be 'isNotPartOfInitialBlockPool' - -#ifdef MCDBGQ_TRACKMEM - void* owner; -#endif - }; - static_assert(std::alignment_of::value >= std::alignment_of::value, - "Internal error: Blocks must be at least as aligned as the type they are wrapping"); - -#ifdef MCDBGQ_TRACKMEM - public: - struct MemStats; - - private: -#endif - - /////////////////////////// - // Producer base - /////////////////////////// - - struct ProducerBase : public details::ConcurrentQueueProducerTypelessBase { - ProducerBase(ConcurrentQueue* parent_, bool isExplicit_) - : tailIndex(0) - , headIndex(0) - , dequeueOptimisticCount(0) - , dequeueOvercommit(0) - , tailBlock(nullptr) - , isExplicit(isExplicit_) - , parent(parent_) - {} - - virtual ~ProducerBase() {} - - template inline bool dequeue(U& element) - { - if (isExplicit) { - return static_cast(this)->dequeue(element); - } else { - return static_cast(this)->dequeue(element); - } - } - - template inline size_t dequeue_bulk(It& itemFirst, size_t max) - { - if (isExplicit) { - return static_cast(this)->dequeue_bulk(itemFirst, max); - } else { - return static_cast(this)->dequeue_bulk(itemFirst, max); - } - } - - inline ProducerBase* next_prod() const { return static_cast(next); } - - inline size_t size_approx() const - { - auto tail = tailIndex.load(std::memory_order_relaxed); - auto head = headIndex.load(std::memory_order_relaxed); - return details::circular_less_than(head, tail) ? static_cast(tail - head) : 0; - } - - inline index_t getTail() const { return tailIndex.load(std::memory_order_relaxed); } - - protected: - std::atomic tailIndex; // Where to enqueue to next - std::atomic headIndex; // Where to dequeue from next - - std::atomic dequeueOptimisticCount; - std::atomic dequeueOvercommit; - - Block* tailBlock; - - public: - bool isExplicit; - ConcurrentQueue* parent; - - protected: -#ifdef MCDBGQ_TRACKMEM - friend struct MemStats; -#endif - }; - - /////////////////////////// - // Explicit queue - /////////////////////////// - - struct ExplicitProducer : public ProducerBase { - explicit ExplicitProducer(ConcurrentQueue* parent_) - : ProducerBase(parent_, true) - , blockIndex(nullptr) - , pr_blockIndexSlotsUsed(0) - , pr_blockIndexSize(EXPLICIT_INITIAL_INDEX_SIZE >> 1) - , pr_blockIndexFront(0) - , pr_blockIndexEntries(nullptr) - , pr_blockIndexRaw(nullptr) - { - size_t poolBasedIndexSize = details::ceil_to_pow_2(parent_->initialBlockPoolSize) >> 1; - if (poolBasedIndexSize > pr_blockIndexSize) { - pr_blockIndexSize = poolBasedIndexSize; - } - - new_block_index( - 0); // This creates an index with double the number of current entries, i.e. EXPLICIT_INITIAL_INDEX_SIZE - } - - ~ExplicitProducer() - { - // Destruct any elements not yet dequeued. - // Since we're in the destructor, we can assume all elements - // are either completely dequeued or completely not (no halfways). 
- if (this->tailBlock != nullptr) { // Note this means there must be a block index too - // First find the block that's partially dequeued, if any - Block* halfDequeuedBlock = nullptr; - if ((this->headIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)) != 0) { - // The head's not on a block boundary, meaning a block somewhere is partially dequeued - // (or the head block is the tail block and was fully dequeued, but the head/tail are still not on a - // boundary) - size_t i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & (pr_blockIndexSize - 1); - while (details::circular_less_than(pr_blockIndexEntries[i].base + BLOCK_SIZE, - this->headIndex.load(std::memory_order_relaxed))) { - i = (i + 1) & (pr_blockIndexSize - 1); - } - assert(details::circular_less_than(pr_blockIndexEntries[i].base, - this->headIndex.load(std::memory_order_relaxed))); - halfDequeuedBlock = pr_blockIndexEntries[i].block; - } - - // Start at the head block (note the first line in the loop gives us the head from the tail on the first - // iteration) - auto block = this->tailBlock; - do { - block = block->next; - if (block->ConcurrentQueue::Block::template is_empty()) { - continue; - } - - size_t i = 0; // Offset into block - if (block == halfDequeuedBlock) { - i = static_cast(this->headIndex.load(std::memory_order_relaxed) & - static_cast(BLOCK_SIZE - 1)); - } - - // Walk through all the items in the block; if this is the tail block, we need to stop when we reach - // the tail index - auto lastValidIndex = - (this->tailIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)) == 0 - ? BLOCK_SIZE - : static_cast(this->tailIndex.load(std::memory_order_relaxed) & - static_cast(BLOCK_SIZE - 1)); - while (i != BLOCK_SIZE && (block != this->tailBlock || i != lastValidIndex)) { - (*block)[i++]->~T(); - } - } while (block != this->tailBlock); - } - - // Destroy all blocks that we own - if (this->tailBlock != nullptr) { - auto block = this->tailBlock; - do { - auto nextBlock = block->next; - this->parent->add_block_to_free_list(block); - block = nextBlock; - } while (block != this->tailBlock); - } - - // Destroy the block indices - auto header = static_cast(pr_blockIndexRaw); - while (header != nullptr) { - auto prev = static_cast(header->prev); - header->~BlockIndexHeader(); - (Traits::free)(header); - header = prev; - } - } - - template inline bool enqueue(U&& element) - { - index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed); - index_t newTailIndex = 1 + currentTailIndex; - if ((currentTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { - // We reached the end of a block, start a new one - auto startBlock = this->tailBlock; - auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed; - if (this->tailBlock != nullptr && - this->tailBlock->next->ConcurrentQueue::Block::template is_empty()) { - // We can re-use the block ahead of us, it's empty! - this->tailBlock = this->tailBlock->next; - this->tailBlock->ConcurrentQueue::Block::template reset_empty(); - - // We'll put the block on the block index (guaranteed to be room since we're conceptually removing - // the last block from it first -- except instead of removing then adding, we can just overwrite). - // Note that there must be a valid block index here, since even if allocation failed in the ctor, - // it would have been re-attempted when adding the first block to the queue; since there is such - // a block, a block index must have been successfully allocated. 
- } else { - // Whatever head value we see here is >= the last value we saw here (relatively), - // and <= its current value. Since we have the most recent tail, the head must be - // <= to it. - auto head = this->headIndex.load(std::memory_order_relaxed); - assert(!details::circular_less_than(currentTailIndex, head)); - if (!details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || - (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && - (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) { - // We can't enqueue in another block because there's not enough leeway -- the - // tail could surpass the head by the time the block fills up! (Or we'll exceed - // the size limit, if the second part of the condition was true.) - return false; - } - // We're going to need a new block; check that the block index has room - if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize) { - // Hmm, the circular block index is already full -- we'll need - // to allocate a new index. Note pr_blockIndexRaw can only be nullptr if - // the initial allocation failed in the constructor. - - MOODYCAMEL_CONSTEXPR_IF(allocMode == CannotAlloc) - { - return false; - } - else if (!new_block_index(pr_blockIndexSlotsUsed)) - { - return false; - } - } - - // Insert a new block in the circular linked list - auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); - if (newBlock == nullptr) { - return false; - } -#ifdef MCDBGQ_TRACKMEM - newBlock->owner = this; -#endif - newBlock->ConcurrentQueue::Block::template reset_empty(); - if (this->tailBlock == nullptr) { - newBlock->next = newBlock; - } else { - newBlock->next = this->tailBlock->next; - this->tailBlock->next = newBlock; - } - this->tailBlock = newBlock; - ++pr_blockIndexSlotsUsed; - } - - MOODYCAMEL_CONSTEXPR_IF( - !MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast(nullptr)) T(std::forward(element)))) - { - // The constructor may throw. We want the element not to appear in the queue in - // that case (without corrupting the queue): - MOODYCAMEL_TRY - { - new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); - } - MOODYCAMEL_CATCH(...) - { - // Revert change to the current block, but leave the new block available - // for next time - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? 
this->tailBlock : startBlock; - MOODYCAMEL_RETHROW; - } - } - else - { - (void)startBlock; - (void)originalBlockIndexSlotsUsed; - } - - // Add block to block index - auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; - entry.base = currentTailIndex; - entry.block = this->tailBlock; - blockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release); - pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); - - MOODYCAMEL_CONSTEXPR_IF( - !MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast(nullptr)) T(std::forward(element)))) - { - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - } - - // Enqueue - new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); - - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - - template bool dequeue(U& element) - { - auto tail = this->tailIndex.load(std::memory_order_relaxed); - auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); - if (details::circular_less_than( - this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) { - // Might be something to dequeue, let's give it a try - - // Note that this if is purely for performance purposes in the common case when the queue is - // empty and the values are eventually consistent -- we may enter here spuriously. - - // Note that whatever the values of overcommit and tail are, they are not going to change (unless we - // change them) and must be the same value at this point (inside the if) as when the if condition was - // evaluated. - - // We insert an acquire fence here to synchronize-with the release upon incrementing dequeueOvercommit - // below. This ensures that whatever the value we got loaded into overcommit, the load of - // dequeueOptisticCount in the fetch_add below will result in a value at least as recent as that (and - // therefore at least as large). Note that I believe a compiler (signal) fence here would be sufficient - // due to the nature of fetch_add (all read-modify-write operations are guaranteed to work on the latest - // value in the modification order), but unfortunately that can't be shown to be correct using only the - // C++11 standard. See - // http://stackoverflow.com/questions/18223161/what-are-the-c11-memory-ordering-guarantees-in-this-corner-case - std::atomic_thread_fence(std::memory_order_acquire); - - // Increment optimistic counter, then check if it went over the boundary - auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed); - - // Note that since dequeueOvercommit must be <= dequeueOptimisticCount (because dequeueOvercommit is - // only ever incremented after dequeueOptimisticCount -- this is enforced in the `else` block below), - // and since we now have a version of dequeueOptimisticCount that is at least as recent as overcommit - // (due to the release upon incrementing dequeueOvercommit and the acquire above that synchronizes with - // it), overcommit <= myDequeueCount. However, we can't assert this since both dequeueOptimisticCount - // and dequeueOvercommit may (independently) overflow; in such a case, though, the logic still holds - // since the difference between the two is maintained. - - // Note that we reload tail here in case it changed; it will be the same value as before or greater, - // since this load is sequenced after (happens after) the earlier load above. 
This is supported by - // read-read coherency (as defined in the standard), explained here: - // http://en.cppreference.com/w/cpp/atomic/memory_order - tail = this->tailIndex.load(std::memory_order_acquire); - if ((details::likely)(details::circular_less_than(myDequeueCount - overcommit, tail))) { - // Guaranteed to be at least one element to dequeue! - - // Get the index. Note that since there's guaranteed to be at least one element, this - // will never exceed tail. We need to do an acquire-release fence here since it's possible - // that whatever condition got us to this point was for an earlier enqueued element (that - // we already see the memory effects for), but that by the time we increment somebody else - // has incremented it, and we need to see the memory effects for *that* element, which is - // in such a case is necessarily visible on the thread that incremented it in the first - // place with the more current condition (they must have acquired a tail that is at least - // as recent). - auto index = this->headIndex.fetch_add(1, std::memory_order_acq_rel); - - // Determine which block the element is in - - auto localBlockIndex = blockIndex.load(std::memory_order_acquire); - auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire); - - // We need to be careful here about subtracting and dividing because of index wrap-around. - // When an index wraps, we need to preserve the sign of the offset when dividing it by the - // block size (in order to get a correct signed block count offset in all cases): - auto headBase = localBlockIndex->entries[localBlockIndexHead].base; - auto blockBaseIndex = index & ~static_cast(BLOCK_SIZE - 1); - auto offset = static_cast( - static_cast::type>(blockBaseIndex - headBase) / - static_cast::type>(BLOCK_SIZE)); - auto block = - localBlockIndex->entries[(localBlockIndexHead + offset) & (localBlockIndex->size - 1)].block; - - // Dequeue - auto& el = *((*block)[index]); - if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) { - // Make sure the element is still fully dequeued and destroyed even if the assignment - // throws - struct Guard { - Block* block; - index_t index; - - ~Guard() - { - (*block)[index]->~T(); - block->ConcurrentQueue::Block::template set_empty(index); - } - } guard = { block, index }; - - element = std::move(el); // NOLINT - } else { - element = std::move(el); // NOLINT - el.~T(); // NOLINT - block->ConcurrentQueue::Block::template set_empty(index); - } - - return true; - } else { - // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent - this->dequeueOvercommit.fetch_add( - 1, std::memory_order_release); // Release so that the fetch_add on dequeueOptimisticCount is - // guaranteed to happen before this write - } - } - - return false; - } - - template - bool MOODYCAMEL_NO_TSAN enqueue_bulk(It itemFirst, size_t count) - { - // First, we need to make sure we have enough room to enqueue all of the elements; - // this means pre-allocating blocks and putting them in the block index (but only if - // all the allocations succeeded). 
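The block lookup in dequeue() above masks the element's index down to its block base and divides the signed distance from the head entry's base by BLOCK_SIZE, casting to a signed type first so a wrapped (negative) offset keeps its sign. A small worked example with made-up numbers (BLOCK_SIZE, headBase and index are illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const std::uint64_t BLOCK_SIZE = 32;                  // power of two
        std::uint64_t headBase  = 64;                         // base index stored in the block index head entry
        std::uint64_t index     = 70;                         // element we want to locate
        std::uint64_t blockBase = index & ~(BLOCK_SIZE - 1);  // 64: base index of the element's block
        // Cast to signed before dividing so that a wrapped-around (negative) offset keeps its sign.
        std::int64_t offset = static_cast<std::int64_t>(blockBase - headBase) /
                              static_cast<std::int64_t>(BLOCK_SIZE);
        std::printf("element %llu lives %lld block(s) after the head entry\n",
                    static_cast<unsigned long long>(index), static_cast<long long>(offset));
        return 0;
    }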
- index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed); - auto startBlock = this->tailBlock; - auto originalBlockIndexFront = pr_blockIndexFront; - auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed; - - Block* firstAllocatedBlock = nullptr; - - // Figure out how many blocks we'll need to allocate, and do so - size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast(BLOCK_SIZE - 1)) - - ((startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1)); - index_t currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); - if (blockBaseDiff > 0) { - // Allocate as many blocks as possible from ahead - while (blockBaseDiff > 0 && this->tailBlock != nullptr && - this->tailBlock->next != firstAllocatedBlock && - this->tailBlock->next->ConcurrentQueue::Block::template is_empty()) { - blockBaseDiff -= static_cast(BLOCK_SIZE); - currentTailIndex += static_cast(BLOCK_SIZE); - - this->tailBlock = this->tailBlock->next; - firstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock; - - auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; - entry.base = currentTailIndex; - entry.block = this->tailBlock; - pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); - } - - // Now allocate as many blocks as necessary from the block pool - while (blockBaseDiff > 0) { - blockBaseDiff -= static_cast(BLOCK_SIZE); - currentTailIndex += static_cast(BLOCK_SIZE); - - auto head = this->headIndex.load(std::memory_order_relaxed); - assert(!details::circular_less_than(currentTailIndex, head)); - bool full = !details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || - (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && - (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head)); - if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize || full) { - MOODYCAMEL_CONSTEXPR_IF(allocMode == CannotAlloc) - { - // Failed to allocate, undo changes (but keep injected blocks) - pr_blockIndexFront = originalBlockIndexFront; - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock; - return false; - } - else if (full || !new_block_index(originalBlockIndexSlotsUsed)) - { - // Failed to allocate, undo changes (but keep injected blocks) - pr_blockIndexFront = originalBlockIndexFront; - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock; - return false; - } - - // pr_blockIndexFront is updated inside new_block_index, so we need to - // update our fallback value too (since we keep the new index even if we - // later fail) - originalBlockIndexFront = originalBlockIndexSlotsUsed; - } - - // Insert a new block in the circular linked list - auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); - if (newBlock == nullptr) { - pr_blockIndexFront = originalBlockIndexFront; - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock; - return false; - } - -#ifdef MCDBGQ_TRACKMEM - newBlock->owner = this; -#endif - newBlock->ConcurrentQueue::Block::template set_all_empty(); - if (this->tailBlock == nullptr) { - newBlock->next = newBlock; - } else { - newBlock->next = this->tailBlock->next; - this->tailBlock->next = newBlock; - } - this->tailBlock = newBlock; - firstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock; - - ++pr_blockIndexSlotsUsed; - - auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; - entry.base = currentTailIndex; - entry.block = this->tailBlock; - pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); - } - - // Excellent, all allocations succeeded. Reset each block's emptiness before we fill them up, and - // publish the new block index front - auto block = firstAllocatedBlock; - while (true) { - block->ConcurrentQueue::Block::template reset_empty(); - if (block == this->tailBlock) { - break; - } - block = block->next; - } - - MOODYCAMEL_CONSTEXPR_IF(MOODYCAMEL_NOEXCEPT_CTOR( - T, decltype(*itemFirst), new (static_cast(nullptr)) T(details::deref_noexcept(itemFirst)))) - { - blockIndex.load(std::memory_order_relaxed) - ->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release); - } - } - - // Enqueue, one block at a time - index_t newTailIndex = startTailIndex + static_cast(count); - currentTailIndex = startTailIndex; - auto endBlock = this->tailBlock; - this->tailBlock = startBlock; - assert((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || - count == 0); - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) { - this->tailBlock = firstAllocatedBlock; - } - while (true) { - index_t stopIndex = - (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - if (details::circular_less_than(newTailIndex, stopIndex)) { - stopIndex = newTailIndex; - } - MOODYCAMEL_CONSTEXPR_IF(MOODYCAMEL_NOEXCEPT_CTOR( - T, decltype(*itemFirst), new (static_cast(nullptr)) T(details::deref_noexcept(itemFirst)))) - { - while (currentTailIndex != stopIndex) { - new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++); - } - } - else - { - MOODYCAMEL_TRY - { - while (currentTailIndex != stopIndex) { - // Must use copy constructor even if move constructor is available - // because we may have to revert if there's an exception. - // Sorry about the horrible templated next line, but it was the only way - // to disable moving *at compile time*, which is important because a type - // may only define a (noexcept) move constructor, and so calls to the - // cctor will not compile, even if they are in an if branch that will never - // be executed - new ((*this->tailBlock)[currentTailIndex]) - T(details::nomove_if(nullptr)) - T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst)); - ++currentTailIndex; - ++itemFirst; - } - } - MOODYCAMEL_CATCH(...) - { - // Oh dear, an exception's been thrown -- destroy the elements that - // were enqueued so far and revert the entire bulk operation (we'll keep - // any allocated blocks in our linked list for later, though). - auto constructedStopIndex = currentTailIndex; - auto lastBlockEnqueued = this->tailBlock; - - pr_blockIndexFront = originalBlockIndexFront; - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock; - - if (!details::is_trivially_destructible::value) { - auto block = startBlock; - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { - block = firstAllocatedBlock; - } - currentTailIndex = startTailIndex; - while (true) { - stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + - static_cast(BLOCK_SIZE); - if (details::circular_less_than(constructedStopIndex, stopIndex)) { - stopIndex = constructedStopIndex; - } - while (currentTailIndex != stopIndex) { - (*block)[currentTailIndex++]->~T(); - } - if (block == lastBlockEnqueued) { - break; - } - block = block->next; - } - } - MOODYCAMEL_RETHROW; - } - } - - if (this->tailBlock == endBlock) { - assert(currentTailIndex == newTailIndex); - break; - } - this->tailBlock = this->tailBlock->next; - } - - MOODYCAMEL_CONSTEXPR_IF(!MOODYCAMEL_NOEXCEPT_CTOR( - T, decltype(*itemFirst), new (static_cast(nullptr)) T(details::deref_noexcept(itemFirst)))) - { - if (firstAllocatedBlock != nullptr) - blockIndex.load(std::memory_order_relaxed) - ->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release); - } - - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - - template size_t dequeue_bulk(It& itemFirst, size_t max) - { - auto tail = this->tailIndex.load(std::memory_order_relaxed); - auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); - auto desiredCount = - static_cast(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit)); - if (details::circular_less_than(0, desiredCount)) { - desiredCount = desiredCount < max ? desiredCount : max; - std::atomic_thread_fence(std::memory_order_acquire); - - auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed); - - tail = this->tailIndex.load(std::memory_order_acquire); - auto actualCount = static_cast(tail - (myDequeueCount - overcommit)); - if (details::circular_less_than(0, actualCount)) { - actualCount = desiredCount < actualCount ? desiredCount : actualCount; - if (actualCount < desiredCount) { - this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release); - } - - // Get the first index. Note that since there's guaranteed to be at least actualCount elements, this - // will never exceed tail. - auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel); - - // Determine which block the first element is in - auto localBlockIndex = blockIndex.load(std::memory_order_acquire); - auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire); - - auto headBase = localBlockIndex->entries[localBlockIndexHead].base; - auto firstBlockBaseIndex = firstIndex & ~static_cast(BLOCK_SIZE - 1); - auto offset = static_cast( - static_cast::type>(firstBlockBaseIndex - headBase) / - static_cast::type>(BLOCK_SIZE)); - auto indexIndex = (localBlockIndexHead + offset) & (localBlockIndex->size - 1); - - // Iterate the blocks and dequeue - auto index = firstIndex; - do { - auto firstIndexInBlock = index; - index_t endIndex = - (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), - endIndex) - ? 
firstIndex + static_cast(actualCount) - : endIndex; - auto block = localBlockIndex->entries[indexIndex].block; - if (MOODYCAMEL_NOEXCEPT_ASSIGN( - T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) { - while (index != endIndex) { - auto& el = *((*block)[index]); - *itemFirst++ = std::move(el); - el.~T(); - ++index; - } - } else { - MOODYCAMEL_TRY - { - while (index != endIndex) { - auto& el = *((*block)[index]); - *itemFirst = std::move(el); - ++itemFirst; - el.~T(); - ++index; - } - } - MOODYCAMEL_CATCH(...) - { - // It's too late to revert the dequeue, but we can make sure that all - // the dequeued objects are properly destroyed and the block index - // (and empty count) are properly updated before we propagate the exception - do { - block = localBlockIndex->entries[indexIndex].block; - while (index != endIndex) { - (*block)[index++]->~T(); - } - block->ConcurrentQueue::Block::template set_many_empty( - firstIndexInBlock, static_cast(endIndex - firstIndexInBlock)); - indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1); - - firstIndexInBlock = index; - endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + - static_cast(BLOCK_SIZE); - endIndex = details::circular_less_than( - firstIndex + static_cast(actualCount), endIndex) - ? firstIndex + static_cast(actualCount) - : endIndex; - } while (index != firstIndex + actualCount); - - MOODYCAMEL_RETHROW; - } - } - block->ConcurrentQueue::Block::template set_many_empty( - firstIndexInBlock, static_cast(endIndex - firstIndexInBlock)); - indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1); - } while (index != firstIndex + actualCount); - - return actualCount; - } else { - // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent - this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release); - } - } - - return 0; - } - - private: - struct BlockIndexEntry { - index_t base; - Block* block; - }; - - struct BlockIndexHeader { - size_t size; - std::atomic front; // Current slot (not next, like pr_blockIndexFront) - BlockIndexEntry* entries; - void* prev; - }; - - bool new_block_index(size_t numberOfFilledSlotsToExpose) - { - auto prevBlockSizeMask = pr_blockIndexSize - 1; - - // Create the new block - pr_blockIndexSize <<= 1; - auto newRawPtr = static_cast((Traits::malloc)(sizeof(BlockIndexHeader) + - std::alignment_of::value - 1 + - sizeof(BlockIndexEntry) * pr_blockIndexSize)); - if (newRawPtr == nullptr) { - pr_blockIndexSize >>= 1; // Reset to allow graceful retry - return false; - } - - auto newBlockIndexEntries = reinterpret_cast( - details::align_for(newRawPtr + sizeof(BlockIndexHeader))); - - // Copy in all the old indices, if any - size_t j = 0; - if (pr_blockIndexSlotsUsed != 0) { - auto i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & prevBlockSizeMask; - do { - newBlockIndexEntries[j++] = pr_blockIndexEntries[i]; - i = (i + 1) & prevBlockSizeMask; - } while (i != pr_blockIndexFront); - } - - // Update everything - auto header = new (newRawPtr) BlockIndexHeader; - header->size = pr_blockIndexSize; - header->front.store(numberOfFilledSlotsToExpose - 1, std::memory_order_relaxed); - header->entries = newBlockIndexEntries; - header->prev = pr_blockIndexRaw; // we link the new block to the old one so we can free it later - - pr_blockIndexFront = j; - pr_blockIndexEntries = newBlockIndexEntries; - pr_blockIndexRaw = newRawPtr; - blockIndex.store(header, std::memory_order_release); - - return true; - } - - private: - std::atomic blockIndex; - - // To be 
used by producer only -- consumer must use the ones in referenced by blockIndex - size_t pr_blockIndexSlotsUsed; - size_t pr_blockIndexSize; - size_t pr_blockIndexFront; // Next slot (not current) - BlockIndexEntry* pr_blockIndexEntries; - void* pr_blockIndexRaw; - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - public: - ExplicitProducer* nextExplicitProducer; - - private: -#endif - -#ifdef MCDBGQ_TRACKMEM - friend struct MemStats; -#endif - }; - - ////////////////////////////////// - // Implicit queue - ////////////////////////////////// - - struct ImplicitProducer : public ProducerBase { - ImplicitProducer(ConcurrentQueue* parent_) - : ProducerBase(parent_, false) - , nextBlockIndexCapacity(IMPLICIT_INITIAL_INDEX_SIZE) - , blockIndex(nullptr) - { - new_block_index(); - } - - ~ImplicitProducer() - { - // Note that since we're in the destructor we can assume that all enqueue/dequeue operations - // completed already; this means that all undequeued elements are placed contiguously across - // contiguous blocks, and that only the first and last remaining blocks can be only partially - // empty (all other remaining blocks must be completely full). - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - // Unregister ourselves for thread termination notification - if (!this->inactive.load(std::memory_order_relaxed)) { - details::ThreadExitNotifier::unsubscribe(&threadExitListener); - } -#endif - - // Destroy all remaining elements! - auto tail = this->tailIndex.load(std::memory_order_relaxed); - auto index = this->headIndex.load(std::memory_order_relaxed); - Block* block = nullptr; - assert(index == tail || details::circular_less_than(index, tail)); - bool forceFreeLastBlock = - index != tail; // If we enter the loop, then the last (tail) block will not be freed - while (index != tail) { - if ((index & static_cast(BLOCK_SIZE - 1)) == 0 || block == nullptr) { - if (block != nullptr) { - // Free the old block - this->parent->add_block_to_free_list(block); - } - - block = get_block_index_entry_for_index(index)->value.load(std::memory_order_relaxed); - } - - ((*block)[index])->~T(); - ++index; - } - // Even if the queue is empty, there's still one block that's not on the free list - // (unless the head index reached the end of it, in which case the tail will be poised - // to create a new block). 
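A recurring idiom in this file, used above to decide whether the head and tail sit on a block boundary, is (index & (BLOCK_SIZE - 1)); it works only because BLOCK_SIZE is a power of two, so the mask extracts the offset within a block. Minimal illustration (the constant is illustrative):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const std::uint64_t BLOCK_SIZE = 32;           // must be a power of two for the mask to work
        std::uint64_t onBoundary = 64, inside = 70;
        assert((onBoundary & (BLOCK_SIZE - 1)) == 0);  // index 64 sits exactly on a block boundary
        assert((inside & (BLOCK_SIZE - 1)) == 6);      // index 70 is 6 slots into its block
        return 0;
    }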
- if (this->tailBlock != nullptr && - (forceFreeLastBlock || (tail & static_cast(BLOCK_SIZE - 1)) != 0)) { - this->parent->add_block_to_free_list(this->tailBlock); - } - - // Destroy block index - auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); - if (localBlockIndex != nullptr) { - for (size_t i = 0; i != localBlockIndex->capacity; ++i) { - localBlockIndex->index[i]->~BlockIndexEntry(); - } - do { - auto prev = localBlockIndex->prev; - localBlockIndex->~BlockIndexHeader(); - (Traits::free)(localBlockIndex); - localBlockIndex = prev; - } while (localBlockIndex != nullptr); - } - } - - template inline bool enqueue(U&& element) - { - index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed); - index_t newTailIndex = 1 + currentTailIndex; - if ((currentTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { - // We reached the end of a block, start a new one - auto head = this->headIndex.load(std::memory_order_relaxed); - assert(!details::circular_less_than(currentTailIndex, head)); - if (!details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || - (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && - (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) { - return false; - } -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - // Find out where we'll be inserting this block in the block index - BlockIndexEntry* idxEntry; - if (!insert_block_index_entry(idxEntry, currentTailIndex)) { - return false; - } - - // Get ahold of a new block - auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); - if (newBlock == nullptr) { - rewind_block_index_tail(); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - return false; - } -#ifdef MCDBGQ_TRACKMEM - newBlock->owner = this; -#endif - newBlock->ConcurrentQueue::Block::template reset_empty(); - - MOODYCAMEL_CONSTEXPR_IF( - !MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast(nullptr)) T(std::forward(element)))) - { - // May throw, try to insert now before we publish the fact that we have this new block - MOODYCAMEL_TRY - { - new ((*newBlock)[currentTailIndex]) T(std::forward(element)); - } - MOODYCAMEL_CATCH(...) 
- { - rewind_block_index_tail(); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - this->parent->add_block_to_free_list(newBlock); - MOODYCAMEL_RETHROW; - } - } - - // Insert the new block into the index - idxEntry->value.store(newBlock, std::memory_order_relaxed); - - this->tailBlock = newBlock; - - MOODYCAMEL_CONSTEXPR_IF( - !MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast(nullptr)) T(std::forward(element)))) - { - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - } - - // Enqueue - new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); - - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - - template bool dequeue(U& element) - { - // See ExplicitProducer::dequeue for rationale and explanation - index_t tail = this->tailIndex.load(std::memory_order_relaxed); - index_t overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); - if (details::circular_less_than( - this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) { - std::atomic_thread_fence(std::memory_order_acquire); - - index_t myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed); - tail = this->tailIndex.load(std::memory_order_acquire); - if ((details::likely)(details::circular_less_than(myDequeueCount - overcommit, tail))) { - index_t index = this->headIndex.fetch_add(1, std::memory_order_acq_rel); - - // Determine which block the element is in - auto entry = get_block_index_entry_for_index(index); - - // Dequeue - auto block = entry->value.load(std::memory_order_relaxed); - auto& el = *((*block)[index]); - - if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) { -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - // Note: Acquiring the mutex with every dequeue instead of only when a block - // is released is very sub-optimal, but it is, after all, purely debug code. - debug::DebugLock lock(producer->mutex); -#endif - struct Guard { - Block* block; - index_t index; - BlockIndexEntry* entry; - ConcurrentQueue* parent; - - ~Guard() - { - (*block)[index]->~T(); - if (block->ConcurrentQueue::Block::template set_empty(index)) { - entry->value.store(nullptr, std::memory_order_relaxed); - parent->add_block_to_free_list(block); - } - } - } guard = { block, index, entry, this->parent }; - - element = std::move(el); // NOLINT - } else { - element = std::move(el); // NOLINT - el.~T(); // NOLINT - - if (block->ConcurrentQueue::Block::template set_empty(index)) { - { -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - // Add the block back into the global free pool (and remove from block index) - entry->value.store(nullptr, std::memory_order_relaxed); - } - this->parent->add_block_to_free_list(block); // releases the above store - } - } - - return true; - } else { - this->dequeueOvercommit.fetch_add(1, std::memory_order_release); - } - } - - return false; - } - -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4706) // assignment within conditional expression -#endif - template bool enqueue_bulk(It itemFirst, size_t count) - { - // First, we need to make sure we have enough room to enqueue all of the elements; - // this means pre-allocating blocks and putting them in the block index (but only if - // all the allocations succeeded). 
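The Guard struct in the dequeue paths above is a small RAII helper: even if the move assignment into the caller's reference throws, the slot's destructor (and, in the implicit case, the block's empty bookkeeping) still runs exactly once. Below is a compact editorial sketch of the same pattern with hypothetical names (ScopeGuard, move_out) and none of the queue's bookkeeping:

    #include <functional>
    #include <utility>

    // Runs the stored callable when the scope exits, normally or via an exception.
    struct ScopeGuard {
        std::function<void()> fn;
        ~ScopeGuard() { fn(); }
    };

    // Moves *slot into element; the slot is destroyed exactly once even if the move throws.
    template <typename T>
    void move_out(T* slot, T& element)
    {
        ScopeGuard guard{ [slot] { slot->~T(); } };
        element = std::move(*slot);
    }

    int main()
    {
        int storage = 7; // stand-in for a constructed slot (trivially destructible here)
        int out = 0;
        move_out(&storage, out);
        return out == 7 ? 0 : 1;
    }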
- - // Note that the tailBlock we start off with may not be owned by us any more; - // this happens if it was filled up exactly to the top (setting tailIndex to - // the first index of the next block which is not yet allocated), then dequeued - // completely (putting it on the free list) before we enqueue again. - - index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed); - auto startBlock = this->tailBlock; - Block* firstAllocatedBlock = nullptr; - auto endBlock = this->tailBlock; - - // Figure out how many blocks we'll need to allocate, and do so - size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast(BLOCK_SIZE - 1)) - - ((startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1)); - index_t currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); - if (blockBaseDiff > 0) { -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - do { - blockBaseDiff -= static_cast(BLOCK_SIZE); - currentTailIndex += static_cast(BLOCK_SIZE); - - // Find out where we'll be inserting this block in the block index - BlockIndexEntry* idxEntry = - nullptr; // initialization here unnecessary but compiler can't always tell - Block* newBlock; - bool indexInserted = false; - auto head = this->headIndex.load(std::memory_order_relaxed); - assert(!details::circular_less_than(currentTailIndex, head)); - bool full = !details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || - (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && - (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head)); - - if (full || !(indexInserted = insert_block_index_entry(idxEntry, currentTailIndex)) || - (newBlock = this->parent->ConcurrentQueue::template requisition_block()) == - nullptr) { - // Index allocation or block allocation failed; revert any other allocations - // and index insertions done so far for this operation - if (indexInserted) { - rewind_block_index_tail(); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - } - currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); - for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) { - currentTailIndex += static_cast(BLOCK_SIZE); - idxEntry = get_block_index_entry_for_index(currentTailIndex); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - rewind_block_index_tail(); - } - this->parent->add_blocks_to_free_list(firstAllocatedBlock); - this->tailBlock = startBlock; - - return false; - } - -#ifdef MCDBGQ_TRACKMEM - newBlock->owner = this; -#endif - newBlock->ConcurrentQueue::Block::template reset_empty(); - newBlock->next = nullptr; - - // Insert the new block into the index - idxEntry->value.store(newBlock, std::memory_order_relaxed); - - // Store the chain of blocks so that we can undo if later allocations fail, - // and so that we can find the blocks when we do the actual enqueueing - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || - firstAllocatedBlock != nullptr) { - assert(this->tailBlock != nullptr); - this->tailBlock->next = newBlock; - } - this->tailBlock = newBlock; - endBlock = newBlock; - firstAllocatedBlock = firstAllocatedBlock == nullptr ? 
newBlock : firstAllocatedBlock; - } while (blockBaseDiff > 0); - } - - // Enqueue, one block at a time - index_t newTailIndex = startTailIndex + static_cast(count); - currentTailIndex = startTailIndex; - this->tailBlock = startBlock; - assert((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || - count == 0); - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) { - this->tailBlock = firstAllocatedBlock; - } - while (true) { - index_t stopIndex = - (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - if (details::circular_less_than(newTailIndex, stopIndex)) { - stopIndex = newTailIndex; - } - MOODYCAMEL_CONSTEXPR_IF(MOODYCAMEL_NOEXCEPT_CTOR( - T, decltype(*itemFirst), new (static_cast(nullptr)) T(details::deref_noexcept(itemFirst)))) - { - while (currentTailIndex != stopIndex) { - new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++); - } - } - else - { - MOODYCAMEL_TRY - { - while (currentTailIndex != stopIndex) { - new ((*this->tailBlock)[currentTailIndex]) - T(details::nomove_if(nullptr)) - T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst)); - ++currentTailIndex; - ++itemFirst; - } - } - MOODYCAMEL_CATCH(...) - { - auto constructedStopIndex = currentTailIndex; - auto lastBlockEnqueued = this->tailBlock; - - if (!details::is_trivially_destructible::value) { - auto block = startBlock; - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { - block = firstAllocatedBlock; - } - currentTailIndex = startTailIndex; - while (true) { - stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + - static_cast(BLOCK_SIZE); - if (details::circular_less_than(constructedStopIndex, stopIndex)) { - stopIndex = constructedStopIndex; - } - while (currentTailIndex != stopIndex) { - (*block)[currentTailIndex++]->~T(); - } - if (block == lastBlockEnqueued) { - break; - } - block = block->next; - } - } - - currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); - for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) { - currentTailIndex += static_cast(BLOCK_SIZE); - auto idxEntry = get_block_index_entry_for_index(currentTailIndex); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - rewind_block_index_tail(); - } - this->parent->add_blocks_to_free_list(firstAllocatedBlock); - this->tailBlock = startBlock; - MOODYCAMEL_RETHROW; - } - } - - if (this->tailBlock == endBlock) { - assert(currentTailIndex == newTailIndex); - break; - } - this->tailBlock = this->tailBlock->next; - } - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } -#ifdef _MSC_VER -#pragma warning(pop) -#endif - - template size_t dequeue_bulk(It& itemFirst, size_t max) - { - auto tail = this->tailIndex.load(std::memory_order_relaxed); - auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); - auto desiredCount = - static_cast(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit)); - if (details::circular_less_than(0, desiredCount)) { - desiredCount = desiredCount < max ? desiredCount : max; - std::atomic_thread_fence(std::memory_order_acquire); - - auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed); - - tail = this->tailIndex.load(std::memory_order_acquire); - auto actualCount = static_cast(tail - (myDequeueCount - overcommit)); - if (details::circular_less_than(0, actualCount)) { - actualCount = desiredCount < actualCount ? 
desiredCount : actualCount; - if (actualCount < desiredCount) { - this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release); - } - - // Get the first index. Note that since there's guaranteed to be at least actualCount elements, this - // will never exceed tail. - auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel); - - // Iterate the blocks and dequeue - auto index = firstIndex; - BlockIndexHeader* localBlockIndex; - auto indexIndex = get_block_index_index_for_index(index, localBlockIndex); - do { - auto blockStartIndex = index; - index_t endIndex = - (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), - endIndex) - ? firstIndex + static_cast(actualCount) - : endIndex; - - auto entry = localBlockIndex->index[indexIndex]; - auto block = entry->value.load(std::memory_order_relaxed); - if (MOODYCAMEL_NOEXCEPT_ASSIGN( - T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) { - while (index != endIndex) { - auto& el = *((*block)[index]); - *itemFirst++ = std::move(el); - el.~T(); - ++index; - } - } else { - MOODYCAMEL_TRY - { - while (index != endIndex) { - auto& el = *((*block)[index]); - *itemFirst = std::move(el); - ++itemFirst; - el.~T(); - ++index; - } - } - MOODYCAMEL_CATCH(...) - { - do { - entry = localBlockIndex->index[indexIndex]; - block = entry->value.load(std::memory_order_relaxed); - while (index != endIndex) { - (*block)[index++]->~T(); - } - - if (block->ConcurrentQueue::Block::template set_many_empty( - blockStartIndex, static_cast(endIndex - blockStartIndex))) { -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - entry->value.store(nullptr, std::memory_order_relaxed); - this->parent->add_block_to_free_list(block); - } - indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1); - - blockStartIndex = index; - endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + - static_cast(BLOCK_SIZE); - endIndex = details::circular_less_than( - firstIndex + static_cast(actualCount), endIndex) - ? firstIndex + static_cast(actualCount) - : endIndex; - } while (index != firstIndex + actualCount); - - MOODYCAMEL_RETHROW; - } - } - if (block->ConcurrentQueue::Block::template set_many_empty( - blockStartIndex, static_cast(endIndex - blockStartIndex))) { - { -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - // Note that the set_many_empty above did a release, meaning that anybody who acquires - // the block we're about to free can use it safely since our writes (and reads!) will - // have happened-before then. 
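The comment above is the standard release/acquire contract: the release performed by set_many_empty pairs with the acquire done by whoever later takes the block off the free list, so all earlier writes to the block happen-before its reuse. A standalone refresher, unrelated to the queue's own types:

    #include <atomic>
    #include <cassert>
    #include <thread>

    int payload = 0;
    std::atomic<bool> ready{false};

    int main()
    {
        std::thread producer([] {
            payload = 42;                                  // ordinary write...
            ready.store(true, std::memory_order_release);  // ...published by the release store
        });
        std::thread consumer([] {
            while (!ready.load(std::memory_order_acquire)) { /* spin */ }
            assert(payload == 42);                         // the acquire load makes the write visible
        });
        producer.join();
        consumer.join();
        return 0;
    }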
- entry->value.store(nullptr, std::memory_order_relaxed); - } - this->parent->add_block_to_free_list(block); // releases the above store - } - indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1); - } while (index != firstIndex + actualCount); - - return actualCount; - } else { - this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release); - } - } - - return 0; - } - - private: - // The block size must be > 1, so any number with the low bit set is an invalid block base index - static const index_t INVALID_BLOCK_BASE = 1; - - struct BlockIndexEntry { - std::atomic key; - std::atomic value; - }; - - struct BlockIndexHeader { - size_t capacity; - std::atomic tail; - BlockIndexEntry* entries; - BlockIndexEntry** index; - BlockIndexHeader* prev; - }; - - template - inline bool insert_block_index_entry(BlockIndexEntry*& idxEntry, index_t blockStartIndex) - { - auto localBlockIndex = - blockIndex.load(std::memory_order_relaxed); // We're the only writer thread, relaxed is OK - if (localBlockIndex == nullptr) { - return false; // this can happen if new_block_index failed in the constructor - } - size_t newTail = - (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1); - idxEntry = localBlockIndex->index[newTail]; - if (idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE || - idxEntry->value.load(std::memory_order_relaxed) == nullptr) { - - idxEntry->key.store(blockStartIndex, std::memory_order_relaxed); - localBlockIndex->tail.store(newTail, std::memory_order_release); - return true; - } - - // No room in the old block index, try to allocate another one! - MOODYCAMEL_CONSTEXPR_IF(allocMode == CannotAlloc) - { - return false; - } - else if (!new_block_index()) - { - return false; - } - else - { - localBlockIndex = blockIndex.load(std::memory_order_relaxed); - newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1); - idxEntry = localBlockIndex->index[newTail]; - assert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE); - idxEntry->key.store(blockStartIndex, std::memory_order_relaxed); - localBlockIndex->tail.store(newTail, std::memory_order_release); - return true; - } - } - - inline void rewind_block_index_tail() - { - auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); - localBlockIndex->tail.store((localBlockIndex->tail.load(std::memory_order_relaxed) - 1) & - (localBlockIndex->capacity - 1), - std::memory_order_relaxed); - } - - inline BlockIndexEntry* get_block_index_entry_for_index(index_t index) const - { - BlockIndexHeader* localBlockIndex; - auto idx = get_block_index_index_for_index(index, localBlockIndex); - return localBlockIndex->index[idx]; - } - - inline size_t get_block_index_index_for_index(index_t index, BlockIndexHeader*& localBlockIndex) const - { -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - index &= ~static_cast(BLOCK_SIZE - 1); - localBlockIndex = blockIndex.load(std::memory_order_acquire); - auto tail = localBlockIndex->tail.load(std::memory_order_acquire); - auto tailBase = localBlockIndex->index[tail]->key.load(std::memory_order_relaxed); - assert(tailBase != INVALID_BLOCK_BASE); - // Note: Must use division instead of shift because the index may wrap around, causing a negative - // offset, whose negativity we want to preserve - auto offset = static_cast(static_cast::type>(index - tailBase) / - static_cast::type>(BLOCK_SIZE)); - size_t idx = (tail + offset) & 
(localBlockIndex->capacity - 1); - assert(localBlockIndex->index[idx]->key.load(std::memory_order_relaxed) == index && - localBlockIndex->index[idx]->value.load(std::memory_order_relaxed) != nullptr); - return idx; - } - - bool new_block_index() - { - auto prev = blockIndex.load(std::memory_order_relaxed); - size_t prevCapacity = prev == nullptr ? 0 : prev->capacity; - auto entryCount = prev == nullptr ? nextBlockIndexCapacity : prevCapacity; - auto raw = static_cast( - (Traits::malloc)(sizeof(BlockIndexHeader) + std::alignment_of::value - 1 + - sizeof(BlockIndexEntry) * entryCount + std::alignment_of::value - 1 + - sizeof(BlockIndexEntry*) * nextBlockIndexCapacity)); - if (raw == nullptr) { - return false; - } - - auto header = new (raw) BlockIndexHeader; - auto entries = - reinterpret_cast(details::align_for(raw + sizeof(BlockIndexHeader))); - auto index = reinterpret_cast(details::align_for( - reinterpret_cast(entries) + sizeof(BlockIndexEntry) * entryCount)); - if (prev != nullptr) { - auto prevTail = prev->tail.load(std::memory_order_relaxed); - auto prevPos = prevTail; - size_t i = 0; - do { - prevPos = (prevPos + 1) & (prev->capacity - 1); - index[i++] = prev->index[prevPos]; - } while (prevPos != prevTail); - assert(i == prevCapacity); - } - for (size_t i = 0; i != entryCount; ++i) { - new (entries + i) BlockIndexEntry; - entries[i].key.store(INVALID_BLOCK_BASE, std::memory_order_relaxed); - index[prevCapacity + i] = entries + i; - } - header->prev = prev; - header->entries = entries; - header->index = index; - header->capacity = nextBlockIndexCapacity; - header->tail.store((prevCapacity - 1) & (nextBlockIndexCapacity - 1), std::memory_order_relaxed); - - blockIndex.store(header, std::memory_order_release); - - nextBlockIndexCapacity <<= 1; - - return true; - } - - private: - size_t nextBlockIndexCapacity; - std::atomic blockIndex; - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - public: - details::ThreadExitListener threadExitListener; - - private: -#endif - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - public: - ImplicitProducer* nextImplicitProducer; - - private: -#endif - -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - mutable debug::DebugMutex mutex; -#endif -#ifdef MCDBGQ_TRACKMEM - friend struct MemStats; -#endif - }; - - ////////////////////////////////// - // Block pool manipulation - ////////////////////////////////// - - void populate_initial_block_list(size_t blockCount) - { - initialBlockPoolSize = blockCount; - if (initialBlockPoolSize == 0) { - initialBlockPool = nullptr; - return; - } - - initialBlockPool = create_array(blockCount); - if (initialBlockPool == nullptr) { - initialBlockPoolSize = 0; - } - for (size_t i = 0; i < initialBlockPoolSize; ++i) { - initialBlockPool[i].dynamicallyAllocated = false; - } - } - - inline Block* try_get_block_from_initial_pool() - { - if (initialBlockPoolIndex.load(std::memory_order_relaxed) >= initialBlockPoolSize) { - return nullptr; - } - - auto index = initialBlockPoolIndex.fetch_add(1, std::memory_order_relaxed); - - return index < initialBlockPoolSize ? 
(initialBlockPool + index) : nullptr; - } - - inline void add_block_to_free_list(Block* block) - { -#ifdef MCDBGQ_TRACKMEM - block->owner = nullptr; -#endif - if (!Traits::RECYCLE_ALLOCATED_BLOCKS && block->dynamicallyAllocated) { - destroy(block); - } else { - freeList.add(block); - } - } - - inline void add_blocks_to_free_list(Block* block) - { - while (block != nullptr) { - auto next = block->next; - add_block_to_free_list(block); - block = next; - } - } - - inline Block* try_get_block_from_free_list() { return freeList.try_get(); } - - // Gets a free block from one of the memory pools, or allocates a new one (if applicable) - template Block* requisition_block() - { - auto block = try_get_block_from_initial_pool(); - if (block != nullptr) { - return block; - } - - block = try_get_block_from_free_list(); - if (block != nullptr) { - return block; - } - - MOODYCAMEL_CONSTEXPR_IF(canAlloc == CanAlloc) - { - return create(); - } - else - { - return nullptr; - } - } - -#ifdef MCDBGQ_TRACKMEM - public: - struct MemStats { - size_t allocatedBlocks; - size_t usedBlocks; - size_t freeBlocks; - size_t ownedBlocksExplicit; - size_t ownedBlocksImplicit; - size_t implicitProducers; - size_t explicitProducers; - size_t elementsEnqueued; - size_t blockClassBytes; - size_t queueClassBytes; - size_t implicitBlockIndexBytes; - size_t explicitBlockIndexBytes; - - friend class ConcurrentQueue; - - private: - static MemStats getFor(ConcurrentQueue* q) - { - MemStats stats = { 0 }; - - stats.elementsEnqueued = q->size_approx(); - - auto block = q->freeList.head_unsafe(); - while (block != nullptr) { - ++stats.allocatedBlocks; - ++stats.freeBlocks; - block = block->freeListNext.load(std::memory_order_relaxed); - } - - for (auto ptr = q->producerListTail.load(std::memory_order_acquire); ptr != nullptr; - ptr = ptr->next_prod()) { - bool implicit = dynamic_cast(ptr) != nullptr; - stats.implicitProducers += implicit ? 1 : 0; - stats.explicitProducers += implicit ? 
0 : 1; - - if (implicit) { - auto prod = static_cast(ptr); - stats.queueClassBytes += sizeof(ImplicitProducer); - auto head = prod->headIndex.load(std::memory_order_relaxed); - auto tail = prod->tailIndex.load(std::memory_order_relaxed); - auto hash = prod->blockIndex.load(std::memory_order_relaxed); - if (hash != nullptr) { - for (size_t i = 0; i != hash->capacity; ++i) { - if (hash->index[i]->key.load(std::memory_order_relaxed) != - ImplicitProducer::INVALID_BLOCK_BASE && - hash->index[i]->value.load(std::memory_order_relaxed) != nullptr) { - ++stats.allocatedBlocks; - ++stats.ownedBlocksImplicit; - } - } - stats.implicitBlockIndexBytes += - hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry); - for (; hash != nullptr; hash = hash->prev) { - stats.implicitBlockIndexBytes += - sizeof(typename ImplicitProducer::BlockIndexHeader) + - hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry*); - } - } - for (; details::circular_less_than(head, tail); head += BLOCK_SIZE) { - // auto block = prod->get_block_index_entry_for_index(head); - ++stats.usedBlocks; - } - } else { - auto prod = static_cast(ptr); - stats.queueClassBytes += sizeof(ExplicitProducer); - auto tailBlock = prod->tailBlock; - bool wasNonEmpty = false; - if (tailBlock != nullptr) { - auto block = tailBlock; - do { - ++stats.allocatedBlocks; - if (!block->ConcurrentQueue::Block::template is_empty() || wasNonEmpty) { - ++stats.usedBlocks; - wasNonEmpty = wasNonEmpty || block != tailBlock; - } - ++stats.ownedBlocksExplicit; - block = block->next; - } while (block != tailBlock); - } - auto index = prod->blockIndex.load(std::memory_order_relaxed); - while (index != nullptr) { - stats.explicitBlockIndexBytes += - sizeof(typename ExplicitProducer::BlockIndexHeader) + - index->size * sizeof(typename ExplicitProducer::BlockIndexEntry); - index = static_cast(index->prev); - } - } - } - - auto freeOnInitialPool = - q->initialBlockPoolIndex.load(std::memory_order_relaxed) >= q->initialBlockPoolSize - ? 0 - : q->initialBlockPoolSize - q->initialBlockPoolIndex.load(std::memory_order_relaxed); - stats.allocatedBlocks += freeOnInitialPool; - stats.freeBlocks += freeOnInitialPool; - - stats.blockClassBytes = sizeof(Block) * stats.allocatedBlocks; - stats.queueClassBytes += sizeof(ConcurrentQueue); - - return stats; - } - }; - - // For debugging only. Not thread-safe. - MemStats getMemStats() { return MemStats::getFor(this); } - - private: - friend struct MemStats; -#endif - - ////////////////////////////////// - // Producer list manipulation - ////////////////////////////////// - - ProducerBase* recycle_or_create_producer(bool isExplicit) - { -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH - debug::DebugLock lock(implicitProdMutex); -#endif - // Try to re-use one first - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - if (ptr->inactive.load(std::memory_order_relaxed) && ptr->isExplicit == isExplicit) { - bool expected = true; - if (ptr->inactive.compare_exchange_strong( - expected, /* desired */ false, std::memory_order_acquire, std::memory_order_relaxed)) { - // We caught one! It's been marked as activated, the caller can have it - return ptr; - } - } - } - - return add_producer(isExplicit ? 
static_cast(create(this)) - : create(this)); - } - - ProducerBase* add_producer(ProducerBase* producer) - { - // Handle failed memory allocation - if (producer == nullptr) { - return nullptr; - } - - producerCount.fetch_add(1, std::memory_order_relaxed); - - // Add it to the lock-free list - auto prevTail = producerListTail.load(std::memory_order_relaxed); - do { - producer->next = prevTail; - } while (!producerListTail.compare_exchange_weak( - prevTail, producer, std::memory_order_release, std::memory_order_relaxed)); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - if (producer->isExplicit) { - auto prevTailExplicit = explicitProducers.load(std::memory_order_relaxed); - do { - static_cast(producer)->nextExplicitProducer = prevTailExplicit; - } while (!explicitProducers.compare_exchange_weak(prevTailExplicit, - static_cast(producer), - std::memory_order_release, - std::memory_order_relaxed)); - } else { - auto prevTailImplicit = implicitProducers.load(std::memory_order_relaxed); - do { - static_cast(producer)->nextImplicitProducer = prevTailImplicit; - } while (!implicitProducers.compare_exchange_weak(prevTailImplicit, - static_cast(producer), - std::memory_order_release, - std::memory_order_relaxed)); - } -#endif - - return producer; - } - - void reown_producers() - { - // After another instance is moved-into/swapped-with this one, all the - // producers we stole still think their parents are the other queue. - // So fix them up! - for (auto ptr = producerListTail.load(std::memory_order_relaxed); ptr != nullptr; ptr = ptr->next_prod()) { - ptr->parent = this; - } - } - - ////////////////////////////////// - // Implicit producer hash - ////////////////////////////////// - - struct ImplicitProducerKVP { - std::atomic key; - ImplicitProducer* - value; // No need for atomicity since it's only read by the thread that sets it in the first place - - ImplicitProducerKVP() - : value(nullptr) - {} - - ImplicitProducerKVP(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT - { - key.store(other.key.load(std::memory_order_relaxed), std::memory_order_relaxed); - value = other.value; - } - - inline ImplicitProducerKVP& operator=(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT - { - swap(other); - return *this; - } - - inline void swap(ImplicitProducerKVP& other) MOODYCAMEL_NOEXCEPT - { - if (this != &other) { - details::swap_relaxed(key, other.key); - std::swap(value, other.value); - } - } - }; - - template - friend void moodycamel::swap(typename ConcurrentQueue::ImplicitProducerKVP&, - typename ConcurrentQueue::ImplicitProducerKVP&) MOODYCAMEL_NOEXCEPT; - - struct ImplicitProducerHash { - size_t capacity; - ImplicitProducerKVP* entries; - ImplicitProducerHash* prev; - }; - - inline void populate_initial_implicit_producer_hash() - { - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) - { - return; - } - else - { - implicitProducerHashCount.store(0, std::memory_order_relaxed); - auto hash = &initialImplicitProducerHash; - hash->capacity = INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; - hash->entries = &initialImplicitProducerHashEntries[0]; - for (size_t i = 0; i != INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; ++i) { - initialImplicitProducerHashEntries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed); - } - hash->prev = nullptr; - implicitProducerHash.store(hash, std::memory_order_relaxed); - } - } - - void swap_implicit_producer_hashes(ConcurrentQueue& other) - { - MOODYCAMEL_CONSTEXPR_IF(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) - { - return; - } - else - { - // Swap (assumes our implicit 
producer hash is initialized) - initialImplicitProducerHashEntries.swap(other.initialImplicitProducerHashEntries); - initialImplicitProducerHash.entries = &initialImplicitProducerHashEntries[0]; - other.initialImplicitProducerHash.entries = &other.initialImplicitProducerHashEntries[0]; - - details::swap_relaxed(implicitProducerHashCount, other.implicitProducerHashCount); - - details::swap_relaxed(implicitProducerHash, other.implicitProducerHash); - if (implicitProducerHash.load(std::memory_order_relaxed) == &other.initialImplicitProducerHash) { - implicitProducerHash.store(&initialImplicitProducerHash, std::memory_order_relaxed); - } else { - ImplicitProducerHash* hash; - for (hash = implicitProducerHash.load(std::memory_order_relaxed); - hash->prev != &other.initialImplicitProducerHash; - hash = hash->prev) { - continue; - } - hash->prev = &initialImplicitProducerHash; - } - if (other.implicitProducerHash.load(std::memory_order_relaxed) == &initialImplicitProducerHash) { - other.implicitProducerHash.store(&other.initialImplicitProducerHash, std::memory_order_relaxed); - } else { - ImplicitProducerHash* hash; - for (hash = other.implicitProducerHash.load(std::memory_order_relaxed); - hash->prev != &initialImplicitProducerHash; - hash = hash->prev) { - continue; - } - hash->prev = &other.initialImplicitProducerHash; - } - } - } - - // Only fails (returns nullptr) if memory allocation fails - ImplicitProducer* get_or_add_implicit_producer() - { - // Note that since the data is essentially thread-local (key is thread ID), - // there's a reduced need for fences (memory ordering is already consistent - // for any individual thread), except for the current table itself. - - // Start by looking for the thread ID in the current and all previous hash tables. - // If it's not found, it must not be in there yet, since this same thread would - // have added it previously to one of the tables that we traversed. - - // Code and algorithm adapted from http://preshing.com/20130605/the-worlds-simplest-lock-free-hash-table - -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH - debug::DebugLock lock(implicitProdMutex); -#endif - - auto id = details::thread_id(); - auto hashedId = details::hash_thread_id(id); - - auto mainHash = implicitProducerHash.load(std::memory_order_acquire); - assert(mainHash != nullptr); // silence clang-tidy and MSVC warnings (hash cannot be null) - for (auto hash = mainHash; hash != nullptr; hash = hash->prev) { - // Look for the id in this hash - auto index = hashedId; - while (true) { // Not an infinite loop because at least one slot is free in the hash table - index &= hash->capacity - 1u; - - auto probedKey = hash->entries[index].key.load(std::memory_order_relaxed); - if (probedKey == id) { - // Found it! If we had to search several hashes deep, though, we should lazily add it - // to the current main hash table to avoid the extended search next time. - // Note there's guaranteed to be room in the current hash table since every subsequent - // table implicitly reserves space for all previous tables (there's only one - // implicitProducerHashCount). 
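get_or_add_implicit_producer below uses the fixed-size, CAS-based hash table technique from the Preshing article linked in the code: each slot's key is claimed atomically and collisions are resolved by linear probing. The following is a stripped-down editorial sketch of that claim step; the capacity, key type and EMPTY sentinel are illustrative, and the memory orderings are simplified to the seq_cst defaults.

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t CAPACITY = 16;   // power of two so probing can wrap with a mask
    constexpr std::uint64_t EMPTY = 0;     // sentinel meaning "slot not claimed yet"

    struct Slot {
        std::atomic<std::uint64_t> key{EMPTY};
        void* value{nullptr};              // written only by the thread that claimed the slot
    };
    Slot table[CAPACITY];

    // Finds the slot for 'key', claiming a free one with CAS if necessary.
    // Assumes the table never fills up (the real code allocates a bigger table first).
    Slot* find_or_claim(std::uint64_t key)
    {
        for (std::size_t i = static_cast<std::size_t>(key);; ++i) {
            Slot& slot = table[i & (CAPACITY - 1)];
            std::uint64_t probed = slot.key.load(std::memory_order_relaxed);
            if (probed == key) {
                return &slot;              // our entry already exists
            }
            if (probed == EMPTY) {
                std::uint64_t expected = EMPTY;
                if (slot.key.compare_exchange_strong(expected, key)) {
                    return &slot;          // claimed the free slot atomically
                }
                // Lost the race for this slot; keep probing.
            }
        }
    }

    int main()
    {
        static int payload = 0;
        Slot* s = find_or_claim(42);
        s->value = &payload;
        return find_or_claim(42) == s ? 0 : 1;
    }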
- auto value = hash->entries[index].value; - if (hash != mainHash) { - index = hashedId; - while (true) { - index &= mainHash->capacity - 1u; - auto empty = details::invalid_thread_id; -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - auto reusable = details::invalid_thread_id2; - if (mainHash->entries[index].key.compare_exchange_strong( - empty, id, std::memory_order_seq_cst, std::memory_order_relaxed) || - mainHash->entries[index].key.compare_exchange_strong( - reusable, id, std::memory_order_seq_cst, std::memory_order_relaxed)) { -#else - if (mainHash->entries[index].key.compare_exchange_strong( - empty, id, std::memory_order_seq_cst, std::memory_order_relaxed)) { -#endif - mainHash->entries[index].value = value; - break; - } - ++index; - } - } - - return value; - } - if (probedKey == details::invalid_thread_id) { - break; // Not in this hash table - } - ++index; - } - } - - // Insert! - auto newCount = 1 + implicitProducerHashCount.fetch_add(1, std::memory_order_relaxed); - while (true) { - // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) - if (newCount >= (mainHash->capacity >> 1) && - !implicitProducerHashResizeInProgress.test_and_set(std::memory_order_acquire)) { - // We've acquired the resize lock, try to allocate a bigger hash table. - // Note the acquire fence synchronizes with the release fence at the end of this block, and hence when - // we reload implicitProducerHash it must be the most recent version (it only gets changed within this - // locked block). - mainHash = implicitProducerHash.load(std::memory_order_acquire); - if (newCount >= (mainHash->capacity >> 1)) { - size_t newCapacity = mainHash->capacity << 1; - while (newCount >= (newCapacity >> 1)) { - newCapacity <<= 1; - } - auto raw = static_cast((Traits::malloc)(sizeof(ImplicitProducerHash) + - std::alignment_of::value - 1 + - sizeof(ImplicitProducerKVP) * newCapacity)); - if (raw == nullptr) { - // Allocation failed - implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed); - implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); - return nullptr; - } - - auto newHash = new (raw) ImplicitProducerHash; - newHash->capacity = static_cast(newCapacity); - newHash->entries = reinterpret_cast( - details::align_for(raw + sizeof(ImplicitProducerHash))); - for (size_t i = 0; i != newCapacity; ++i) { - new (newHash->entries + i) ImplicitProducerKVP; - newHash->entries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed); - } - newHash->prev = mainHash; - implicitProducerHash.store(newHash, std::memory_order_release); - implicitProducerHashResizeInProgress.clear(std::memory_order_release); - mainHash = newHash; - } else { - implicitProducerHashResizeInProgress.clear(std::memory_order_release); - } - } - - // If it's < three-quarters full, add to the old one anyway so that we don't have to wait for the next table - // to finish being allocated by another thread (and if we just finished allocating above, the condition will - // always be true) - if (newCount < (mainHash->capacity >> 1) + (mainHash->capacity >> 2)) { - auto producer = static_cast(recycle_or_create_producer(false)); - if (producer == nullptr) { - implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed); - return nullptr; - } - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - producer->threadExitListener.callback = &ConcurrentQueue::implicit_producer_thread_exited_callback; - producer->threadExitListener.userData = producer; - details::ThreadExitNotifier::subscribe(&producer->threadExitListener); -#endif - - 
auto index = hashedId; - while (true) { - index &= mainHash->capacity - 1u; - auto empty = details::invalid_thread_id; -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - auto reusable = details::invalid_thread_id2; - if (mainHash->entries[index].key.compare_exchange_strong( - reusable, id, std::memory_order_seq_cst, std::memory_order_relaxed)) { - implicitProducerHashCount.fetch_sub( - 1, std::memory_order_relaxed); // already counted as a used slot - mainHash->entries[index].value = producer; - break; - } -#endif - if (mainHash->entries[index].key.compare_exchange_strong( - empty, id, std::memory_order_seq_cst, std::memory_order_relaxed)) { - mainHash->entries[index].value = producer; - break; - } - ++index; - } - return producer; - } - - // Hmm, the old hash is quite full and somebody else is busy allocating a new one. - // We need to wait for the allocating thread to finish (if it succeeds, we add, if not, - // we try to allocate ourselves). - mainHash = implicitProducerHash.load(std::memory_order_acquire); - } - } - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - void implicit_producer_thread_exited(ImplicitProducer* producer) - { - // Remove from hash -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH - debug::DebugLock lock(implicitProdMutex); -#endif - auto hash = implicitProducerHash.load(std::memory_order_acquire); - assert(hash != - nullptr); // The thread exit listener is only registered if we were added to a hash in the first place - auto id = details::thread_id(); - auto hashedId = details::hash_thread_id(id); - details::thread_id_t probedKey; - - // We need to traverse all the hashes just in case other threads aren't on the current one yet and are - // trying to add an entry thinking there's a free slot (because they reused a producer) - for (; hash != nullptr; hash = hash->prev) { - auto index = hashedId; - do { - index &= hash->capacity - 1u; - probedKey = id; - if (hash->entries[index].key.compare_exchange_strong( - probedKey, details::invalid_thread_id2, std::memory_order_seq_cst, std::memory_order_relaxed)) { - break; - } - ++index; - } while (probedKey != - details::invalid_thread_id); // Can happen if the hash has changed but we weren't put back in it - // yet, or if we weren't added to this hash in the first place - } - - // Mark the queue as being recyclable - producer->inactive.store(true, std::memory_order_release); - } - - static void implicit_producer_thread_exited_callback(void* userData) - { - auto producer = static_cast(userData); - auto queue = producer->parent; - queue->implicit_producer_thread_exited(producer); - } -#endif - - ////////////////////////////////// - // Utility functions - ////////////////////////////////// - - template static inline void* aligned_malloc(size_t size) - { - MOODYCAMEL_CONSTEXPR_IF(std::alignment_of::value <= std::alignment_of::value) - return (Traits::malloc)(size); - else - { - size_t alignment = std::alignment_of::value; - void* raw = (Traits::malloc)(size + alignment - 1 + sizeof(void*)); - if (!raw) - return nullptr; - char* ptr = details::align_for(reinterpret_cast(raw) + sizeof(void*)); - *(reinterpret_cast(ptr) - 1) = raw; - return ptr; - } - } - - template static inline void aligned_free(void* ptr) - { - MOODYCAMEL_CONSTEXPR_IF(std::alignment_of::value <= std::alignment_of::value) - return (Traits::free)(ptr); - else(Traits::free)(ptr ? 
*(reinterpret_cast(ptr) - 1) : nullptr); - } - - template static inline U* create_array(size_t count) - { - assert(count > 0); - U* p = static_cast(aligned_malloc(sizeof(U) * count)); - if (p == nullptr) - return nullptr; - - for (size_t i = 0; i != count; ++i) - new (p + i) U(); - return p; - } - - template static inline void destroy_array(U* p, size_t count) - { - if (p != nullptr) { - assert(count > 0); - for (size_t i = count; i != 0;) - (p + --i)->~U(); - } - aligned_free(p); - } - - template static inline U* create() - { - void* p = aligned_malloc(sizeof(U)); - return p != nullptr ? new (p) U : nullptr; - } - - template static inline U* create(A1&& a1) - { - void* p = aligned_malloc(sizeof(U)); - return p != nullptr ? new (p) U(std::forward(a1)) : nullptr; - } - - template static inline void destroy(U* p) - { - if (p != nullptr) - p->~U(); - aligned_free(p); - } - - private: - std::atomic producerListTail; - std::atomic producerCount; - - std::atomic initialBlockPoolIndex; - Block* initialBlockPool; - size_t initialBlockPoolSize; - -#ifndef MCDBGQ_USEDEBUGFREELIST - FreeList freeList; -#else - debug::DebugFreeList freeList; -#endif - - std::atomic implicitProducerHash; - std::atomic implicitProducerHashCount; // Number of slots logically used - ImplicitProducerHash initialImplicitProducerHash; - std::array initialImplicitProducerHashEntries; - std::atomic_flag implicitProducerHashResizeInProgress; - - std::atomic nextExplicitConsumerId; - std::atomic globalExplicitConsumerOffset; - -#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH - debug::DebugMutex implicitProdMutex; -#endif - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - std::atomic explicitProducers; - std::atomic implicitProducers; -#endif -}; - -template -ProducerToken::ProducerToken(ConcurrentQueue& queue) - : producer(queue.recycle_or_create_producer(true)) -{ - if (producer != nullptr) { - producer->token = this; - } -} - -template -ProducerToken::ProducerToken(BlockingConcurrentQueue& queue) - : producer(reinterpret_cast*>(&queue)->recycle_or_create_producer(true)) -{ - if (producer != nullptr) { - producer->token = this; - } -} - -template -ConsumerToken::ConsumerToken(ConcurrentQueue& queue) - : itemsConsumedFromCurrent(0) - , currentProducer(nullptr) - , desiredProducer(nullptr) -{ - initialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release); - lastKnownGlobalOffset = static_cast(-1); -} - -template -ConsumerToken::ConsumerToken(BlockingConcurrentQueue& queue) - : itemsConsumedFromCurrent(0) - , currentProducer(nullptr) - , desiredProducer(nullptr) -{ - initialOffset = reinterpret_cast*>(&queue)->nextExplicitConsumerId.fetch_add( - 1, std::memory_order_release); - lastKnownGlobalOffset = static_cast(-1); -} - -template -inline void swap(ConcurrentQueue& a, ConcurrentQueue& b) MOODYCAMEL_NOEXCEPT -{ - a.swap(b); -} - -inline void swap(ProducerToken& a, ProducerToken& b) MOODYCAMEL_NOEXCEPT -{ - a.swap(b); -} - -inline void swap(ConsumerToken& a, ConsumerToken& b) MOODYCAMEL_NOEXCEPT -{ - a.swap(b); -} - -template -inline void swap(typename ConcurrentQueue::ImplicitProducerKVP& a, - typename ConcurrentQueue::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT -{ - a.swap(b); -} - -} // namespace moodycamel - -#if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17) -#pragma warning(pop) -#endif - -#if defined(__GNUC__) && !defined(__INTEL_COMPILER) -#pragma GCC diagnostic pop -#endif \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/common/moody/lightweightsemaphore.h 
b/barretenberg/cpp/src/barretenberg/common/moody/lightweightsemaphore.h deleted file mode 100644 index d0ee0e45253..00000000000 --- a/barretenberg/cpp/src/barretenberg/common/moody/lightweightsemaphore.h +++ /dev/null @@ -1,396 +0,0 @@ -// Provides an efficient implementation of a semaphore (LightweightSemaphore). -// This is an extension of Jeff Preshing's sempahore implementation (licensed -// under the terms of its separate zlib license) that has been adapted and -// extended by Cameron Desrochers. - -#pragma once - -#include // For std::size_t -#include -#include // For std::make_signed - -#if defined(_WIN32) -// Avoid including windows.h in a header; we only need a handful of -// items, so we'll redeclare them here (this is relatively safe since -// the API generally has to remain stable between Windows versions). -// I know this is an ugly hack but it still beats polluting the global -// namespace with thousands of generic names or adding a .cpp for nothing. -extern "C" { -struct _SECURITY_ATTRIBUTES; -__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, - long lInitialCount, - long lMaximumCount, - const wchar_t* lpName); -__declspec(dllimport) int __stdcall CloseHandle(void* hObject); -__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds); -__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount); -} -#elif defined(__MACH__) -#include -#elif defined(__unix__) || defined(__wasm__) -#include - -#if defined(__GLIBC_PREREQ) && defined(_GNU_SOURCE) -#if __GLIBC_PREREQ(2, 30) -#define MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC -#endif -#endif -#endif - -namespace moodycamel { -namespace details { - -// Code in the mpmc_sema namespace below is an adaptation of Jeff Preshing's -// portable + lightweight semaphore implementations, originally from -// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h -// LICENSE: -// Copyright (c) 2015 Jeff Preshing -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgement in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. 
-#if defined(_WIN32) -class Semaphore { - private: - void* m_hSema; - - Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - - public: - Semaphore(int initialCount = 0) - { - assert(initialCount >= 0); - const long maxLong = 0x7fffffff; - m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr); - assert(m_hSema); - } - - ~Semaphore() { CloseHandle(m_hSema); } - - bool wait() - { - const unsigned long infinite = 0xffffffff; - return WaitForSingleObject(m_hSema, infinite) == 0; - } - - bool try_wait() { return WaitForSingleObject(m_hSema, 0) == 0; } - - bool timed_wait(std::uint64_t usecs) { return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0; } - - void signal(int count = 1) - { - while (!ReleaseSemaphore(m_hSema, count, nullptr)) - ; - } -}; -#elif defined(__MACH__) -//--------------------------------------------------------- -// Semaphore (Apple iOS and OSX) -// Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html -//--------------------------------------------------------- -class Semaphore { - private: - semaphore_t m_sema; - - Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - - public: - Semaphore(int initialCount = 0) - { - assert(initialCount >= 0); - kern_return_t rc = semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount); - assert(rc == KERN_SUCCESS); - (void)rc; - } - - ~Semaphore() { semaphore_destroy(mach_task_self(), m_sema); } - - bool wait() { return semaphore_wait(m_sema) == KERN_SUCCESS; } - - bool try_wait() { return timed_wait(0); } - - bool timed_wait(std::uint64_t timeout_usecs) - { - mach_timespec_t ts; - ts.tv_sec = static_cast(timeout_usecs / 1000000); - ts.tv_nsec = static_cast((timeout_usecs % 1000000) * 1000); - - // added in OSX 10.10: - // https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html - kern_return_t rc = semaphore_timedwait(m_sema, ts); - return rc == KERN_SUCCESS; - } - - void signal() - { - while (semaphore_signal(m_sema) != KERN_SUCCESS) - ; - } - - void signal(int count) - { - while (count-- > 0) { - while (semaphore_signal(m_sema) != KERN_SUCCESS) - ; - } - } -}; -#elif defined(__unix__) || defined(__wasm__) -//--------------------------------------------------------- -// Semaphore (POSIX, Linux) -//--------------------------------------------------------- -class Semaphore { - private: - sem_t m_sema; - - Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - - public: - Semaphore(int initialCount = 0) - { - assert(initialCount >= 0); - int rc = sem_init(&m_sema, 0, static_cast(initialCount)); - assert(rc == 0); - (void)rc; - } - - ~Semaphore() { sem_destroy(&m_sema); } - - bool wait() - { - // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error - int rc; - do { - rc = sem_wait(&m_sema); - } while (rc == -1 && errno == EINTR); - return rc == 0; - } - - bool try_wait() - { - int rc; - do { - rc = sem_trywait(&m_sema); - } while (rc == -1 && errno == EINTR); - return rc == 0; - } - - bool timed_wait(std::uint64_t usecs) - { - struct timespec ts; - const int usecs_in_1_sec = 1000000; - const int nsecs_in_1_sec = 1000000000; -#ifdef MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC - 
clock_gettime(CLOCK_MONOTONIC, &ts); -#else - clock_gettime(CLOCK_REALTIME, &ts); -#endif - ts.tv_sec += (time_t)(usecs / usecs_in_1_sec); - ts.tv_nsec += (long)(usecs % usecs_in_1_sec) * 1000; - // sem_timedwait bombs if you have more than 1e9 in tv_nsec - // so we have to clean things up before passing it in - if (ts.tv_nsec >= nsecs_in_1_sec) { - ts.tv_nsec -= nsecs_in_1_sec; - ++ts.tv_sec; - } - - int rc; - do { -#ifdef MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC - rc = sem_clockwait(&m_sema, CLOCK_MONOTONIC, &ts); -#else - rc = sem_timedwait(&m_sema, &ts); -#endif - } while (rc == -1 && errno == EINTR); - return rc == 0; - } - - void signal() - { - while (sem_post(&m_sema) == -1) - ; - } - - void signal(int count) - { - while (count-- > 0) { - while (sem_post(&m_sema) == -1) - ; - } - } -}; -#else -#error Unsupported platform! (No semaphore wrapper available) -#endif - -} // end namespace details - -//--------------------------------------------------------- -// LightweightSemaphore -//--------------------------------------------------------- -class LightweightSemaphore { - public: - typedef std::make_signed::type ssize_t; - - private: - std::atomic m_count; - details::Semaphore m_sema; - int m_maxSpins; - - bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) - { - ssize_t oldCount; - int spin = m_maxSpins; - while (--spin >= 0) { - oldCount = m_count.load(std::memory_order_relaxed); - if ((oldCount > 0) && m_count.compare_exchange_strong( - oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) - return true; - std::atomic_signal_fence(std::memory_order_acquire); // Prevent the compiler from collapsing the loop. - } - oldCount = m_count.fetch_sub(1, std::memory_order_acquire); - if (oldCount > 0) - return true; - if (timeout_usecs < 0) { - if (m_sema.wait()) - return true; - } - if (timeout_usecs > 0 && m_sema.timed_wait((std::uint64_t)timeout_usecs)) - return true; - // At this point, we've timed out waiting for the semaphore, but the - // count is still decremented indicating we may still be waiting on - // it. So we have to re-adjust the count, but only if the semaphore - // wasn't signaled enough times for us too since then. If it was, we - // need to release the semaphore too. - while (true) { - oldCount = m_count.load(std::memory_order_acquire); - if (oldCount >= 0 && m_sema.try_wait()) - return true; - if (oldCount < 0 && m_count.compare_exchange_strong( - oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed)) - return false; - } - } - - ssize_t waitManyWithPartialSpinning(ssize_t max, std::int64_t timeout_usecs = -1) - { - assert(max > 0); - ssize_t oldCount; - int spin = m_maxSpins; - while (--spin >= 0) { - oldCount = m_count.load(std::memory_order_relaxed); - if (oldCount > 0) { - ssize_t newCount = oldCount > max ? 
oldCount - max : 0; - if (m_count.compare_exchange_strong( - oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) - return oldCount - newCount; - } - std::atomic_signal_fence(std::memory_order_acquire); - } - oldCount = m_count.fetch_sub(1, std::memory_order_acquire); - if (oldCount <= 0) { - if ((timeout_usecs == 0) || (timeout_usecs < 0 && !m_sema.wait()) || - (timeout_usecs > 0 && !m_sema.timed_wait((std::uint64_t)timeout_usecs))) { - while (true) { - oldCount = m_count.load(std::memory_order_acquire); - if (oldCount >= 0 && m_sema.try_wait()) - break; - if (oldCount < 0 && - m_count.compare_exchange_strong( - oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed)) - return 0; - } - } - } - if (max > 1) - return 1 + tryWaitMany(max - 1); - return 1; - } - - public: - LightweightSemaphore(ssize_t initialCount = 0, int maxSpins = 10000) - : m_count(initialCount) - , m_maxSpins(maxSpins) - { - assert(initialCount >= 0); - assert(maxSpins >= 0); - } - - bool tryWait() - { - ssize_t oldCount = m_count.load(std::memory_order_relaxed); - while (oldCount > 0) { - if (m_count.compare_exchange_weak( - oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) - return true; - } - return false; - } - - bool wait() { return tryWait() || waitWithPartialSpinning(); } - - bool wait(std::int64_t timeout_usecs) { return tryWait() || waitWithPartialSpinning(timeout_usecs); } - - // Acquires between 0 and (greedily) max, inclusive - ssize_t tryWaitMany(ssize_t max) - { - assert(max >= 0); - ssize_t oldCount = m_count.load(std::memory_order_relaxed); - while (oldCount > 0) { - ssize_t newCount = oldCount > max ? oldCount - max : 0; - if (m_count.compare_exchange_weak(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) - return oldCount - newCount; - } - return 0; - } - - // Acquires at least one, and (greedily) at most max - ssize_t waitMany(ssize_t max, std::int64_t timeout_usecs) - { - assert(max >= 0); - ssize_t result = tryWaitMany(max); - if (result == 0 && max > 0) - result = waitManyWithPartialSpinning(max, timeout_usecs); - return result; - } - - ssize_t waitMany(ssize_t max) - { - ssize_t result = waitMany(max, -1); - assert(result > 0); - return result; - } - - void signal(ssize_t count = 1) - { - assert(count >= 0); - ssize_t oldCount = m_count.fetch_add(count, std::memory_order_release); - ssize_t toRelease = -oldCount < count ? -oldCount : count; - if (toRelease > 0) { - m_sema.signal((int)toRelease); - } - } - - std::size_t availableApprox() const - { - ssize_t count = m_count.load(std::memory_order_relaxed); - return count > 0 ? 
static_cast(count) : 0; - } -}; - -} // end namespace moodycamel \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/common/parallel_for_atomic_pool.cpp b/barretenberg/cpp/src/barretenberg/common/parallel_for_atomic_pool.cpp index 29c45e34896..f71522f9f02 100644 --- a/barretenberg/cpp/src/barretenberg/common/parallel_for_atomic_pool.cpp +++ b/barretenberg/cpp/src/barretenberg/common/parallel_for_atomic_pool.cpp @@ -1,3 +1,4 @@ +#ifndef NO_MULTITHREADING #include "log.hpp" #include "thread.hpp" #include @@ -113,4 +114,5 @@ void parallel_for_atomic_pool(size_t num_iterations, const std::function -#include -#include -#include -#include -#include -#include - -namespace { -class ThreadPool { - public: - ThreadPool(size_t num_threads) - : tasks(1024) - , complete_queue_(1) - { - workers.reserve(num_threads); - for (size_t i = 0; i < num_threads; ++i) { - workers.emplace_back(&ThreadPool::worker_loop, this, i); - } - } - - ~ThreadPool() - { - stop = true; - for (size_t i = 0; i < workers.size(); ++i) { - tasks.enqueue([]() {}); - } - for (auto& worker : workers) { - worker.join(); - } - } - - ThreadPool(const ThreadPool& other) = delete; - ThreadPool(ThreadPool&& other) = delete; - ThreadPool& operator=(const ThreadPool& other) = delete; - ThreadPool& operator=(ThreadPool&& other) = delete; - - void start_tasks(const std::function& task, size_t num_iterations) - { - std::atomic complete_counter; - // 3rd party library expects c-style array as input. Boo. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) - std::function funcs[num_iterations]; - for (size_t i = 0; i < num_iterations; ++i) { - funcs[i] = [&, i]() { - // Timer t; - task(i); - // info("task took: ", t.nanoseconds()); - if (complete_counter.fetch_add(1, std::memory_order_relaxed) == num_iterations - 1) { - // info("iteration ", i, " was the last"); - complete_queue_.enqueue(true); - } - }; - } - tasks.enqueue_bulk(funcs, num_iterations); - - { - std::function task; - while (tasks.try_dequeue(task)) { - task(); - } - } - - bool complete = false; - complete_queue_.wait_dequeue(complete); - // info("all done!"); - } - - private: - std::vector workers; - moodycamel::BlockingConcurrentQueue> tasks; - moodycamel::BlockingConcurrentQueue complete_queue_; - std::atomic stop = false; - - void worker_loop(size_t /*unused*/) - { - // info("worker started"); - while (!stop) { - std::function task; - tasks.wait_dequeue(task); - task(); - } - } -}; -} // namespace - -namespace bb { -/** - * A Thread pooled strategy that uses a popular lock-free multiple-producer multiple-consume queue library by - * "moodycamel" as the underlying mechanism to distribute work and join on completion. - */ -void parallel_for_moody(size_t num_iterations, const std::function& func) -{ - // -1 because main thread works. 
- static ThreadPool pool(get_num_cpus() - 1); - - pool.start_tasks(func, num_iterations); -} -} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/common/parallel_for_mutex_pool.cpp b/barretenberg/cpp/src/barretenberg/common/parallel_for_mutex_pool.cpp index 24479dec9c1..16b922b99c8 100644 --- a/barretenberg/cpp/src/barretenberg/common/parallel_for_mutex_pool.cpp +++ b/barretenberg/cpp/src/barretenberg/common/parallel_for_mutex_pool.cpp @@ -1,3 +1,4 @@ +#ifndef NO_MULTITHREADING #include "log.hpp" #include "thread.hpp" #include @@ -128,4 +129,5 @@ void parallel_for_mutex_pool(size_t num_iterations, const std::function #include @@ -11,4 +12,5 @@ void parallel_for_omp(size_t num_iterations, const std::function& func(i); } } -} // namespace bb \ No newline at end of file +} // namespace bb +#endif \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/common/parallel_for_queued.cpp b/barretenberg/cpp/src/barretenberg/common/parallel_for_queued.cpp index 33dc7af9c29..acaf920fd03 100644 --- a/barretenberg/cpp/src/barretenberg/common/parallel_for_queued.cpp +++ b/barretenberg/cpp/src/barretenberg/common/parallel_for_queued.cpp @@ -1,3 +1,4 @@ +#ifndef NO_MULTITHREADING #include "log.hpp" #include "thread.hpp" #include @@ -123,4 +124,5 @@ void parallel_for_queued(size_t num_iterations, const std::function namespace bb { /** @@ -41,3 +43,4 @@ void parallel_for_spawning(size_t num_iterations, const std::function inline void read(B& it, std::optional& opt_ } template -concept HasGetAll = requires(T t) { t.get_all(); } && ! -msgpack_concepts::HasMsgPack; +concept HasGetAll = requires(T t) { t.get_all(); } && !msgpack_concepts::HasMsgPack; // Write out a struct that defines get_all() template inline void write(B& buf, T const& value) diff --git a/barretenberg/cpp/src/barretenberg/common/thread.hpp b/barretenberg/cpp/src/barretenberg/common/thread.hpp index 723d2834fa5..77309cd9098 100644 --- a/barretenberg/cpp/src/barretenberg/common/thread.hpp +++ b/barretenberg/cpp/src/barretenberg/common/thread.hpp @@ -4,18 +4,13 @@ #include #include #include -#include #include namespace bb { inline size_t get_num_cpus() { -#ifdef NO_MULTITHREADING - return 1; -#else return env_hardware_concurrency(); -#endif } // For algorithms that need to be divided amongst power of 2 threads. 
@@ -117,4 +112,4 @@ size_t calculate_num_threads(size_t num_iterations, size_t min_iterations_per_th size_t calculate_num_threads_pow2(size_t num_iterations, size_t min_iterations_per_thread = DEFAULT_MIN_ITERS_PER_THREAD); -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/env/hardware_concurrency.cpp b/barretenberg/cpp/src/barretenberg/env/hardware_concurrency.cpp index 9217e014eb9..5fe706d527f 100644 --- a/barretenberg/cpp/src/barretenberg/env/hardware_concurrency.cpp +++ b/barretenberg/cpp/src/barretenberg/env/hardware_concurrency.cpp @@ -4,10 +4,19 @@ #include #include #include + +#ifndef NO_MULTITHREADING #include +#endif extern "C" { +#ifdef NO_MULTITHREADING +uint32_t env_hardware_concurrency() +{ + return 1; +} +#else uint32_t env_hardware_concurrency() { #ifndef __wasm__ @@ -22,4 +31,5 @@ uint32_t env_hardware_concurrency() } #endif } +#endif } \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/relations/relation_types.hpp b/barretenberg/cpp/src/barretenberg/relations/relation_types.hpp index 20e55502c61..0a1cdbed54d 100644 --- a/barretenberg/cpp/src/barretenberg/relations/relation_types.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/relation_types.hpp @@ -23,10 +23,10 @@ using GetParameterView = std::conditional_t, template concept HasSubrelationLinearlyIndependentMember = requires(T) { - { - std::get(T::SUBRELATION_LINEARLY_INDEPENDENT) - } -> std::convertible_to; - }; + { + std::get(T::SUBRELATION_LINEARLY_INDEPENDENT) + } -> std::convertible_to; +}; template concept HasParameterLengthAdjustmentsMember = requires { T::TOTAL_LENGTH_ADJUSTMENTS; }; @@ -121,10 +121,10 @@ consteval std::array compute_composed_subrelation_part */ template concept isSkippable = requires(const AllEntities& input) { - { - Relation::skip(input) - } -> std::same_as; - }; + { + Relation::skip(input) + } -> std::same_as; +}; /** * @brief A wrapper for Relations to expose methods used by the Sumcheck prover or verifier to add the diff --git a/barretenberg/cpp/src/barretenberg/relations/utils.hpp b/barretenberg/cpp/src/barretenberg/relations/utils.hpp index 0eced14c195..1a777af8fdc 100644 --- a/barretenberg/cpp/src/barretenberg/relations/utils.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/utils.hpp @@ -113,8 +113,7 @@ template class RelationUtils { template static constexpr void add_tuples(std::tuple& tuple_1, const std::tuple& tuple_2) { - auto add_tuples_helper = [&](std::index_sequence) - { + auto add_tuples_helper = [&](std::index_sequence) { ((std::get(tuple_1) += std::get(tuple_2)), ...); }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp index 2285d18e97d..b16e5c636d8 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp @@ -19,8 +19,7 @@ concept IsUltraBuilder = bb::IsAnyOf concept IsGoblinBuilder = bb::IsAnyOf; template -concept IsNotGoblinBuilder = ! 
-IsGoblinBuilder; +concept IsNotGoblinBuilder = !IsGoblinBuilder; template concept IsSimulator = bb::IsAnyOf; diff --git a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp index 655641144b3..d5d7347229d 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp +++ b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp @@ -16,9 +16,9 @@ namespace bb { template -concept Loggable = (std::same_as || std::same_as || - std::same_as || std::same_as || - std::same_as); +concept Loggable = + (std::same_as || std::same_as || std::same_as || + std::same_as || std::same_as); // class TranscriptManifest; class TranscriptManifest { diff --git a/barretenberg/cpp/srs_db/Earthfile b/barretenberg/cpp/srs_db/Earthfile index 9bb57f1d5c6..5ccba0b6d67 100644 --- a/barretenberg/cpp/srs_db/Earthfile +++ b/barretenberg/cpp/srs_db/Earthfile @@ -1,5 +1,5 @@ VERSION 0.8 -FROM ubuntu:lunar +FROM ubuntu:noble RUN apt-get update && apt-get install -y curl diff --git a/barretenberg/exports.json b/barretenberg/exports.json index 08ddefa86a5..b0ed6310fb3 100644 --- a/barretenberg/exports.json +++ b/barretenberg/exports.json @@ -111,13 +111,13 @@ "functionName": "poseidon2_permutation", "inArgs": [ { - "name": "input_state", + "name": "inputs_buffer", "type": "fr::vec_in_buf" } ], "outArgs": [ { - "name": "output_state", + "name": "output", "type": "fr::vec_out_buf" } ], diff --git a/barretenberg/scripts/bindgen.sh b/barretenberg/scripts/bindgen.sh index e3d5b9b5a70..29ee5eb0778 100755 --- a/barretenberg/scripts/bindgen.sh +++ b/barretenberg/scripts/bindgen.sh @@ -1,6 +1,16 @@ #!/usr/bin/env bash set -eu +if ! dpkg -l python3-clang-18 &> /dev/null; then + echo "You need to install python clang 18 e.g.: apt install python3-clang-18" + exit 1 +fi + #find ./cpp/src -type f -name "c_bind*.hpp" | ./scripts/decls_json.py > exports.json cat ./scripts/c_bind_files.txt | ./scripts/decls_json.py > exports.json -(cd ./ts && yarn node --loader ts-node/esm ./src/bindgen/index.ts ../exports.json > ./src/barretenberg_api/index.ts) \ No newline at end of file +( + cd ./ts && \ + yarn install && \ + yarn node --loader ts-node/esm ./src/bindgen/index.ts ../exports.json > ./src/barretenberg_api/index.ts && \ + yarn prettier -w ./src/barretenberg_api/index.ts +) \ No newline at end of file diff --git a/barretenberg/scripts/decls_json.py b/barretenberg/scripts/decls_json.py index 2b1753220ba..a27edd0401e 100755 --- a/barretenberg/scripts/decls_json.py +++ b/barretenberg/scripts/decls_json.py @@ -4,7 +4,7 @@ import clang.cindex from typing import List -clang.cindex.Config.set_library_file('/usr/lib/llvm-16/lib/libclang-16.so.1') +clang.cindex.Config.set_library_file('/usr/lib/llvm-18/lib/libclang-18.so.1') def has_annotation(node, annotation): for child in node.get_children(): diff --git a/barretenberg/ts/Earthfile b/barretenberg/ts/Earthfile index a11afe022ee..fce0ab36807 100644 --- a/barretenberg/ts/Earthfile +++ b/barretenberg/ts/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -FROM node:18.19.0 +FROM ../../build-images+build WORKDIR /usr/src/barretenberg/ts-build # minimum files to download yarn packages @@ -12,7 +12,7 @@ RUN yarn --immutable # other source files COPY --dir src *.json *.js *.cjs . 
-# copy over wasm build from cpp folder +# copy over wasm builds from cpp folder COPY ../cpp/+preset-wasm-threads/bin/barretenberg.wasm src/barretenberg_wasm/barretenberg-threads.wasm COPY ../cpp/+preset-wasm/bin/barretenberg.wasm src/barretenberg_wasm/barretenberg.wasm COPY ../cpp/+preset-wasm-threads/bin/barretenberg.wasm dest/node/barretenberg_wasm/barretenberg-threads.wasm diff --git a/barretenberg/ts/src/barretenberg_api/index.ts b/barretenberg/ts/src/barretenberg_api/index.ts index 967fc1b03a2..bdd35960e7e 100644 --- a/barretenberg/ts/src/barretenberg_api/index.ts +++ b/barretenberg/ts/src/barretenberg_api/index.ts @@ -87,8 +87,8 @@ export class BarretenbergApi { return out[0]; } - async poseidon2Permutation(inputState: Fr[]): Promise<Fr[]> { - const inArgs = [inputState].map(serializeBufferable); + async poseidon2Permutation(inputsBuffer: Fr[]): Promise<Fr[]> { + const inArgs = [inputsBuffer].map(serializeBufferable); const outTypes: OutputType[] = [VectorDeserializer(Fr)]; const result = await this.wasm.callWasmExport( 'poseidon2_permutation', @@ -643,8 +643,8 @@ export class BarretenbergApiSync { return out[0]; } - poseidon2Permutation(inputState: Fr[]): Fr[] { - const inArgs = [inputState].map(serializeBufferable); + poseidon2Permutation(inputsBuffer: Fr[]): Fr[] { + const inArgs = [inputsBuffer].map(serializeBufferable); const outTypes: OutputType[] = [VectorDeserializer(Fr)]; const result = this.wasm.callWasmExport( 'poseidon2_permutation', diff --git a/bootstrap.sh b/bootstrap.sh index cdecd4a065a..83216dc0470 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -1,11 +1,9 @@ #!/usr/bin/env bash -# Usage: -# Bootstraps the repo. End to end tests should be runnable after a bootstrap: -# ./bootstrap.sh -# Run a second time to perform a "light bootstrap", rebuilds code that's changed: -# ./bootstrap.sh -# Force a clean of the repo before performing a full bootstrap, erases untracked files, be careful! -# ./bootstrap.sh clean +# Usage: ./bootstrap.sh <full|fast|check|clean> +# full: Bootstrap the repo from scratch. +# fast: Bootstrap the repo using CI cache where possible to save time building. +# check: Check required toolchains and versions are installed. +# clean: Force a complete clean of the repo. Erases untracked files, be careful! set -eu cd "$(dirname "$0")" @@ -13,11 +11,89 @@ cd "$(dirname "$0")" CMD=${1:-} YELLOW="\033[93m" +RED="\033[31m" BOLD="\033[1m" RESET="\033[0m" source ./build-system/scripts/setup_env '' '' '' > /dev/null +function encourage_dev_container { + echo -e "${BOLD}${RED}ERROR: Toolchain incompatibility. We encourage use of our dev container. See build-images/README.md.${RESET}" +} + +# Checks for required utilities, toolchains and their versions. +# Developers should probably use the dev container in /build-images to ensure the smoothest experience. +function check_toolchains { + # Check for various required utilities. + for util in jq parallel awk git curl; do + if ! command -v $util > /dev/null; then + encourage_dev_container + echo "Utility $util not found." + exit 1 + fi + done + # Check cmake version. + CMAKE_MIN_VERSION="3.24" + CMAKE_INSTALLED_VERSION=$(cmake --version | head -n1 | awk '{print $3}') + if [[ "$(printf '%s\n' "$CMAKE_MIN_VERSION" "$CMAKE_INSTALLED_VERSION" | sort -V | head -n1)" != "$CMAKE_MIN_VERSION" ]]; then + encourage_dev_container + echo "Minimum cmake version 3.24 not found." + exit 1 + fi + # Check clang version. + if ! clang++-16 --version > /dev/null; then + encourage_dev_container + echo "clang 16 not installed."
+ echo "Installation: sudo apt install clang-16" + exit 1 + fi + # Check rust version. + if ! rustup show | grep "1.74" > /dev/null; then + encourage_dev_container + echo "Rust version 1.74 not installed." + echo "Installation:" + echo " curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.74.1" + exit 1 + fi + # Check wasi-sdk version. + if ! cat /opt/wasi-sdk/VERSION 2> /dev/null | grep 22.0 > /dev/null; then + encourage_dev_container + echo "wasi-sdk-22 not found at /opt/wasi-sdk." + echo "Use dev container, build from source, or you can install linux x86 version with:" + echo " curl -s -L https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-22/wasi-sdk-22.0-linux.tar.gz | tar zxf - && sudo mv wasi-sdk-22.0 /opt/wasi-sdk" + exit 1 + fi + # Check foundry version. + for tool in forge anvil; do + if ! $tool --version 2> /dev/null | grep de33b6a > /dev/null; then + encourage_dev_container + echo "$tool not in PATH or incorrect version (requires de33b6af53005037b463318d2628b5cfcaf39916)." + echo "Installation: https://book.getfoundry.sh/getting-started/installation (requires rust 1.75)" + echo " curl -L https://foundry.paradigm.xyz | bash" + echo " foundryup -b de33b6af53005037b463318d2628b5cfcaf39916" + exit 1 + fi + done + # Check Node.js version. + NODE_MIN_VERSION="18.19.0" + NODE_INSTALLED_VERSION=$(node --version | cut -d 'v' -f 2) + if [[ "$(printf '%s\n' "$NODE_MIN_VERSION" "$NODE_INSTALLED_VERSION" | sort -V | head -n1)" != "$NODE_MIN_VERSION" ]]; then + encourage_dev_container + echo "Minimum Node.js version 18.19.0 not found." + echo "Installation: nvm install 18" + exit 1 + fi + # Check for required npm globals. + for util in yarn solhint; do + if ! command -v $util > /dev/null; then + encourage_dev_container + echo "$util not found." + echo "Installation: npm install --global $util" + exit 1 + fi + done +} + if [ "$CMD" = "clean" ]; then echo "WARNING: This will erase *all* untracked files, including hooks and submodules." echo -n "Continue? [y/n] " @@ -48,8 +124,12 @@ elif [ "$CMD" = "fast" ]; then echo -e "${BOLD}${YELLOW}WARNING: Either docker or aws credentials are missing. Install docker and request credentials. Note this is for internal aztec devs only.${RESET}" exit 1 fi +elif [ "$CMD" = "check" ]; then + check_toolchains + echo "Toolchains look good! 🎉" + exit 0 else - echo "usage: $0 " + echo "usage: $0 " exit 1 fi @@ -60,10 +140,11 @@ chmod +x $HOOKS_DIR/pre-commit git submodule update --init --recursive +check_toolchains + PROJECTS=( barretenberg noir - foundry l1-contracts avm-transpiler noir-projects diff --git a/build-images/Dockerfile b/build-images/Dockerfile new file mode 100644 index 00000000000..e7b4c7c76fa --- /dev/null +++ b/build-images/Dockerfile @@ -0,0 +1,289 @@ +######################################################################################################################## +# Build wasi-sdk. 
+FROM ubuntu:noble AS wasi-sdk-build +RUN apt update && apt install -y \ + clang \ + cmake \ + ninja-build \ + git \ + cargo +RUN git clone --depth 1 --recursive --branch wasi-sdk-22 \ + https://github.com/WebAssembly/wasi-sdk.git +RUN mkdir -p /wasi-sdk/build/install/opt/wasi-sdk +WORKDIR /wasi-sdk +ENV MAKEFLAGS="-j$(nproc)" +RUN make build/llvm.BUILT +RUN make build/wasi-libc.BUILT +RUN make build/compiler-rt.BUILT +RUN make build/libcxx.BUILT +RUN make build/config.BUILT +RUN make build/version.BUILT +RUN mv build/install/opt/wasi-sdk /opt/wasi-sdk +FROM ubuntu:noble AS wasi-sdk +COPY --from=wasi-sdk-build /opt/wasi-sdk /opt/wasi-sdk + +######################################################################################################################## +# Build foundry. +FROM ubuntu:noble AS foundry +RUN apt update && apt install -y git cargo +RUN ulimit -n 65535 && \ + git clone --depth 1 --branch nightly-de33b6af53005037b463318d2628b5cfcaf39916 \ + https://github.com/foundry-rs/foundry.git && \ + cd foundry && cargo build --profile local && \ + mkdir -p /opt/foundry/bin && \ + for t in forge cast anvil chisel; do \ + mv ./target/local/$t /opt/foundry/bin/$t; \ + strip /opt/foundry/bin/$t; \ + done + +######################################################################################################################## +# This image contains *just* what's needed to perform a full build of the aztec project. +# It acts as the base image for all CI builds, and we build on it to produce a developer box. +FROM ubuntu:noble as build +RUN apt update && \ + apt install -y \ + # Utils + curl \ + git \ + curl \ + wget \ + jq \ + gawk \ + unzip \ + netcat-openbsd \ + parallel \ + # C++ (clang=18, which we will move to. 16 is for current build.) + build-essential \ + cmake \ + ninja-build \ + clang \ + clang-16 \ + clang-format-16 \ + libc++-dev \ + libomp-dev \ + doxygen \ + # Node (18.19.1) + nodejs \ + npm \ + # Python (clang bindings for wasm bindgen.) + python3 \ + python3-clang \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# Install wasi-sdk. +COPY --from=aztecprotocol/wasi-sdk:22.0 /opt/wasi-sdk /opt/wasi-sdk + +# Install foundry. +COPY --from=foundry /opt/foundry /opt/foundry +ENV PATH="/opt/foundry/bin:$PATH" + +# Install rust and cross-compilers. Noir specifically uses 1.74.1. +# We add everyone write ownership so downstream boxes can write. +ENV RUSTUP_HOME=/opt/rust/rustup \ + CARGO_HOME=/opt/rust/cargo \ + PATH="/opt/rust/cargo/bin:$PATH" +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.74.1 && \ + rustup target add wasm32-unknown-unknown wasm32-wasi aarch64-apple-darwin && \ + chmod -R a+w /opt/rust + +# Install yq +RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.42.1/yq_linux_$(dpkg --print-architecture) \ + -o /usr/local/bin/yq && chmod +x /usr/local/bin/yq + +# Install yarn +RUN npm install --global yarn + +# Install solhint +RUN npm install --global solhint + +######################################################################################################################## +# We want to produce downstream images: codespace, devbox and sysbox. This image is the base image for each. +# It contains a suite of tools that developers might use to develop aztec. +FROM build as basebox +RUN yes | unminimize + +# Install stuff devs need. 
+RUN apt update && \ + apt install -y \ + zsh \ + fzf \ + libfuse2 \ + iproute2 \ + iputils-ping \ + telnet \ + lsb-release \ + tmux \ + vim \ + software-properties-common \ + gnupg \ + htop \ + cgroup-tools \ + neovim \ + sudo \ + clangd-16 \ + man \ + python3-blessed \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# Install earthly. +RUN wget https://github.com/earthly/earthly/releases/latest/download/earthly-linux-$(dpkg --print-architecture) -O /usr/local/bin/earthly && \ + chmod +x /usr/local/bin/earthly + +# Install gh (github cli). +RUN mkdir -p -m 755 /etc/apt/keyrings && wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg > /etc/apt/keyrings/githubcli-archive-keyring.gpg \ + && chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ + && apt update \ + && apt install gh -y + +# Install gt (graphite). +RUN npm install -g @withgraphite/graphite-cli@stable + +# Install aws cli. +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip" && \ + unzip awscliv2.zip && \ + ./aws/install --bin-dir /usr/local/bin --install-dir /usr/local/aws-cli --update && \ + rm -rf aws awscliv2.zip + +# Install terraform. +RUN curl -fsSL https://releases.hashicorp.com/terraform/1.7.5/terraform_1.7.5_linux_$(dpkg --print-architecture).zip -o terraform.zip \ + && unzip terraform.zip -d /usr/local/bin \ + && chmod +x /usr/local/bin/terraform \ + && rm terraform.zip + +# fzf seems to not install this file for some reason. +COPY ./key-bindings.zsh /usr/share/doc/fzf/examples/key-bindings.zsh + +# Sets LANG explicitly. Ensures tmux shows unicode symbols. +# Sets RUSTUP_HOME. +# Adds foundry and cargo bin dirs to PATH. +COPY environment /etc/environment + +# Cargo home and bin path should be set within users home dir at login. +RUN echo 'export CARGO_HOME="$HOME/.cargo"' >> /etc/zsh/zshenv +RUN echo 'export PATH="$HOME/.cargo/bin:$PATH"' >> /etc/zsh/zshenv + +# sudo group can sudo without password. +RUN echo '%sudo ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers + +######################################################################################################################## +# This devbox container can be used to provide a full development environment. +# +# It can be used as a dev container: +# - Configuration in .devcontainer/devcontainer.json. +# - To run locally install "Dev Containers" plugin in vscode. +# - To run in GitHub codespaces, visit the repo in github, press '.', and open the terminal. +# +# It can be used independently: +# - The user should use the ./run.sh script to launch. +# - A persistent volume will be mounted to /home/aztec-dev. +# - It provides docker via the hosts docker instance, mounted at /var/lib/docker.sock. +# - It uses an entrypoint script at runtime to perform uid/gid alignment with the host and drop into user account. +FROM basebox as devbox +RUN apt install -y gosu +ENV TERM=xterm-256color +# Detect if the host machine is Mac, if so set an env var, and disable prompts vcs info for performance. +RUN <> /etc/zsh/zshrc +EOF +# Create the user we'll run as and become the user. +RUN useradd --shell /bin/zsh -G sudo -m aztec-dev +USER aztec-dev +WORKDIR /home/aztec-dev +# Add dotfiles. +COPY --chown=aztec-dev:aztec-dev home . +# The .npmrc config is set to install global bins here, update PATH. 
+ENV PATH=/home/aztec-dev/.npm-global/bin:$PATH +# Need to ensure correct permissions, under some conditions these would otherwise be created by root. +RUN mkdir .vscode-server .npm-global .ssh +# Switch back to root. Gives option for root runtime adjustments before becoming aztec-dev. +USER root +# Use as entrypoint when running in an environment that requires uid/gid alignment (e.g. vanilla linux docker). +COPY ./entrypoint.sh /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD ["/bin/zsh"] + +######################################################################################################################## +# This sysbox container can be used to provide a full development environment. +# It's more advanced than devbox in that it uses nestybox's sysbox container runtime to provide more of a vm experience. +# It's used primarily by internal aztec developers who have sysboxes running on a powerful underlying mainframe. +# It provides better isolation and security guarantees than a plain devbox. +FROM basebox AS sysbox + +###################### START OF STOCK NESTYBOX SYSTEMD CONTAINER ############################### +# +# Systemd installation +# +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + systemd \ + systemd-sysv \ + libsystemd0 \ + ca-certificates \ + dbus \ + iptables \ + iproute2 \ + kmod \ + locales \ + sudo \ + udev && \ + \ + # Prevents journald from reading kernel messages from /dev/kmsg + echo "ReadKMsg=no" >> /etc/systemd/journald.conf && \ + \ + # Housekeeping + apt-get clean -y && \ + rm -rf \ + /var/cache/debconf/* \ + /var/lib/apt/lists/* \ + /var/log/* \ + /tmp/* \ + /var/tmp/* \ + /usr/share/local/* && \ + \ + # Create default 'ubuntu/ubuntu' user + echo "ubuntu:ubuntu" | chpasswd && adduser ubuntu sudo + +# Disable systemd services/units that are unnecessary within a container. +RUN systemctl mask systemd-udevd.service \ + systemd-udevd-kernel.socket \ + systemd-udevd-control.socket \ + systemd-modules-load.service \ + sys-kernel-config.mount \ + sys-kernel-debug.mount \ + sys-kernel-tracing.mount \ + e2scrub_reap.service + +# Make use of stopsignal (instead of sigterm) to stop systemd containers. +STOPSIGNAL SIGRTMIN+3 + +# Set systemd as entrypoint. +ENTRYPOINT [ "/sbin/init", "--log-level=err" ] + +###################### END OF STOCK NESTYBOX SYSTEMD CONTAINER ############################### + +# Install docker. +RUN curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh \ + # Add user "ubuntu" to the Docker group + && usermod -a -G docker ubuntu +ADD https://raw.githubusercontent.com/docker/docker-ce/master/components/cli/contrib/completion/bash/docker /etc/bash_completion.d/docker.sh + +# Install sshd. 
+RUN apt install --no-install-recommends -y openssh-server \ + && rm -rf /var/lib/apt/lists/* \ + && mkdir /home/ubuntu/.ssh \ + && chown ubuntu:ubuntu /home/ubuntu/.ssh \ + && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDagCvr/+CA1jmFaJf+e9+Kw6iwfhvaKOpfbGEl5zLgB+rum5L4Kga6Jow1gLQeMnAHfqc2IgpsU4t04c8PYApAt8AWNDL+KxMiFytfjKfJ2DZJA73CYkFnkfnMtU+ki+JG9dAHd6m7ShtCSzE5n6EDO2yWCVWQfqE3dcnpwrymSWkJYrbxzeOixiNZ4f1nD9ddvFvTWGB4l+et5SWgeIaYgJYDqTI2teRt9ytJiDGrCWXs9olHsCZOL6TEJPUQmNekwBkjMAZ4TmbBMjwbUlIxOpW2UxzlONcNn7IlRcGQg0Gdbkpo/zOlCNXsvacvnphDk5vKKaQj+aQiG916LU5P charlie@aztecprotocol.com' >> /home/ubuntu/.ssh/authorized_keys \ + && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDb5OVc+9S9nXx3/34F7eLVXjoPgQ3YHSdlfhTb8WflAGmpKJTLxtAYngtDBvhKofH5HrjPPkBWxOHP9KOTo0jxUQSr0suMpggLLOHuIrCszJKXIVi7whnQ4p2RHyzyS2ANwmpxWZmYxfgamzYst9JIvQYJgAPjTFweKBsG/Lc03knJ/qgz9BHqDSZHweMTnhv1dJNhZRKy1Lxyl/CjXKF374i8qbzVWJMeDgLEH6C84vCeaH89KMmM9J0+T31uEqxzIhZxNmRz9v+x6cQAVJtGi9OIveGT9qUQwKXZsk6/zorzxV+NiIvTWHxIn9epX/FUjgUmb/jFvpbEjDkbIngj adomurad@localhost.localdomain' >> /home/ubuntu/.ssh/authorized_keys \ + && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFKlUeOh9DyAL85NJ10LE+nyfi8oYm+CwxQ9JMaB6H+t root@mainframe' >> /home/ubuntu/.ssh/authorized_keys \ + && chown ubuntu:ubuntu /home/ubuntu/.ssh/authorized_keys + +# Install google authenticator for setting up 2fa. +RUN apt update && apt install -y libpam-google-authenticator + +EXPOSE 22 diff --git a/build-images/Earthfile b/build-images/Earthfile new file mode 100644 index 00000000000..1429b5a6cd1 --- /dev/null +++ b/build-images/Earthfile @@ -0,0 +1,5 @@ +VERSION 0.8 + +build: + FROM DOCKERFILE --target build . + SAVE ARTIFACT /opt/foundry/bin/anvil \ No newline at end of file diff --git a/build-images/Makefile b/build-images/Makefile new file mode 100755 index 00000000000..3e64904b3f2 --- /dev/null +++ b/build-images/Makefile @@ -0,0 +1,19 @@ +wasi-sdk: + docker build -t aztecprotocol/wasi-sdk:$$(uname -m | sed 's/aarch64/arm64/')-22.0 --target wasi-sdk --push . + docker manifest create aztecprotocol/wasi-sdk:22.0 \ + --amend aztecprotocol/wasi-sdk:x86_64-22.0 \ + --amend aztecprotocol/wasi-sdk:arm64-22.0 + docker manifest push aztecprotocol/wasi-sdk:22.0 + +build: + docker build -t aztecprotocol/build --target build . + +devbox: + docker build -t aztecprotocol/devbox --target devbox . + +sysbox: + docker build -t aztecprotocol/sysbox --target sysbox . + +all: build devbox sysbox + +.PHONY: all build devbox sysbox diff --git a/build-images/README.md b/build-images/README.md new file mode 100644 index 00000000000..2ff02e1393f --- /dev/null +++ b/build-images/README.md @@ -0,0 +1,27 @@ +# Build Image + +To ensure a consistent environment for developers, and ease of getting started, we provide a development container. + +## Install Docker + +If you don't already have docker installed, follow this guide: https://docs.docker.com/engine/install + +## Visual Studio Code + +If you use vscode, the simplest thing to do is install the "Dev Containers" plugin, and open the repo. +You'll be prompted to reload in a dev container, at which point you can open a terminal and bootstrap. +You can connect to your container from outside vscode with e.g.: `docker exec -ti /bin/zsh` + +Your repo will be mounted at `/workspaces/aztec-packages`, and your home directory is persisted in a docker volume. + +## Running Independently + +If you don't use vscode, you can simply run `./run.sh` to create and drop into the container. 
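
Under the hood this is just a `docker run` against the `aztecprotocol/devbox` image. As a rough sketch (run from the repository root; the real `run.sh` shown later in this diff additionally aligns uid/gid with the host on Linux and mounts your SSH key and the docker socket):

```sh
# Launch the devbox by hand: mount the repo at the expected workspace path and
# persist the home directory in the devbox-home volume.
docker run -ti --rm \
  --hostname devbox \
  -w /workspaces/aztec-packages \
  -v "$(pwd)":/workspaces/aztec-packages \
  -v devbox-home:/home/aztec-dev \
  aztecprotocol/devbox
```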
+ +Your repo will be mounted at `/workspaces/aztec-packages`, and your home directory is persisted in a docker volume. + +## GitHub Codespaces + +This is also compatible with GitHub codespaces. Visit the repo at `http://github.com/aztecprotocol/aztec-packages`. +Press `.`, and open a terminal window. You will be prompted to create a new machine. +You can then continue to work within the browser, or reopen the codespace in your local vscode. diff --git a/build-images/entrypoint.sh b/build-images/entrypoint.sh new file mode 100755 index 00000000000..d6f36b79dd0 --- /dev/null +++ b/build-images/entrypoint.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +# Modify the uid and gid of aztec-dev to match that of the host users. +[ -n "$LOCAL_GROUP_ID" ] && groupmod -g $LOCAL_GROUP_ID aztec-dev +[ -n "$LOCAL_USER_ID" ] && usermod -u $LOCAL_USER_ID aztec-dev &> /dev/null + +# Find the group id of the docker socket, add aztec-dev to that group, or create the group and add aztec-dev. +if [ -S /var/run/docker.sock ]; then + SOCKET_GID=$(stat -c %g /var/run/docker.sock) + EXISTING_GROUP=$(getent group $SOCKET_GID | cut -d: -f1) + if [ -z "$EXISTING_GROUP" ]; then + # No existing group with that gid, so create one called 'docker' and add the user to it. + groupadd -g $SOCKET_GID docker + usermod -aG docker aztec-dev + else + # A group with the desired gid already exists, add the user to it. + usermod -aG $EXISTING_GROUP aztec-dev + fi +fi + +exec /usr/sbin/gosu aztec-dev "$@" \ No newline at end of file diff --git a/build-images/environment b/build-images/environment new file mode 100644 index 00000000000..6c67b72a1af --- /dev/null +++ b/build-images/environment @@ -0,0 +1,3 @@ +PATH="/opt/foundry/bin:/opt/rust/cargo/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin" +LANG=C.UTF-8 +RUSTUP_HOME=/opt/rust/rustup \ No newline at end of file diff --git a/build-images/home/.gitconfig b/build-images/home/.gitconfig new file mode 100644 index 00000000000..e3fc331a438 --- /dev/null +++ b/build-images/home/.gitconfig @@ -0,0 +1,2 @@ +[safe] + directory = /workspaces/aztec-packages \ No newline at end of file diff --git a/build-images/home/.npmrc b/build-images/home/.npmrc new file mode 100644 index 00000000000..7d79625eaf7 --- /dev/null +++ b/build-images/home/.npmrc @@ -0,0 +1 @@ +prefix=/home/aztec-dev/.npm-global \ No newline at end of file diff --git a/build-images/home/.tmux.conf b/build-images/home/.tmux.conf new file mode 100644 index 00000000000..4b0be338e7e --- /dev/null +++ b/build-images/home/.tmux.conf @@ -0,0 +1,67 @@ +set-window-option -g mode-keys vi + +# Increase scrollback buffer. +set-option -g history-limit 50000 + +# Clear the screen, and the scrollback buffer. +bind -n M-u send-keys C-l \; run-shell "sleep .3s" \; clear-history + +# Enable mouse. +set -g mouse on + +# Rebind prefix key to backtick. +unbind C-b +set -g prefix ` +bind ` send-prefix + +# Allows for faster key repetition. Essential for vim to work properly. +set-option -g escape-time 0 + +# Send focus events for vim autoread, etc. +set -g focus-events on + +# Smart pane switching with awareness of Vim splits. 
+# See: https://github.com/christoomey/vim-tmux-navigator +is_vim="ps -o state= -o comm= -t '#{pane_tty}' | grep -iqE '^[^TXZ ]+ +(\\S+\\/)?g?(view|n?vim?x?)(diff)?$'" +bind-key -n C-h if-shell "$is_vim" "send-keys C-h" "select-pane -L" +bind-key -n C-j if-shell "$is_vim" "send-keys C-j" "select-pane -D" +bind-key -n C-k if-shell "$is_vim" "send-keys C-k" "select-pane -U" +bind-key -n C-l if-shell "$is_vim" "send-keys C-l" "select-pane -R" +bind-key -n Pageup if-shell "$is_vim" "send-keys Pageup" "copy-mode -u" + +bind-key -T copy-mode-vi -n C-h if-shell "$is_vim" "send-keys C-h" "select-pane -L" +bind-key -T copy-mode-vi -n C-j if-shell "$is_vim" "send-keys C-j" "select-pane -D" +bind-key -T copy-mode-vi -n C-k if-shell "$is_vim" "send-keys C-k" "select-pane -U" +bind-key -T copy-mode-vi -n C-l if-shell "$is_vim" "send-keys C-l" "select-pane -R" + +# Window navigation. +bind-key -n M-h previous-window +bind-key -n M-l next-window +bind-key -n M-Left previous-window +bind-key -n M-Right next-window +bind-key -T copy-mode-vi -n M-h previous-window +bind-key -T copy-mode-vi -n M-l next-window +bind-key -T copy-mode-vi -n M-Left previous-window +bind-key -T copy-mode-vi -n M-Right next-window + +# Window and pane creation. +bind c new-window -c "#{pane_current_path}" +bind s split-window -v -c "#{pane_current_path}" +bind v split-window -h -c "#{pane_current_path}" + +# Window sizing. +bind j resize-pane -D 5 +bind k resize-pane -U 5 + +# Vertically zoom current column of panes. +bind x if -F '#{@layout_save}' \ + {run 'tmux select-layout "#{@layout_save}" \; set -up @layout_save'} \ + {set -Fp @layout_save "#{window_layout}" ; run 'tmux resizep -y "#{window_height}"'} + +# Reload configuration shortcut. +bind R source-file ~/.tmux.conf \; display-message "Config reloaded..." + +# Disable status bar. +set -g status off + +set -g -a terminal-overrides ',xterm-256color:Tc' diff --git a/build-images/home/.zshrc b/build-images/home/.zshrc new file mode 100644 index 00000000000..122ced9ed53 --- /dev/null +++ b/build-images/home/.zshrc @@ -0,0 +1,106 @@ +# zgen +if [ ! -d "$HOME/.zgen" ]; then + git clone https://github.com/tarjoilija/zgen.git "${HOME}/.zgen" +fi + +source "${HOME}/.zgen/zgen.zsh" + +if ! zgen saved; then + # specify plugins here + zgen oh-my-zsh + zgen oh-my-zsh plugins/git + zgen oh-my-zsh plugins/vi-mode + zgen oh-my-zsh plugins/fzf + zgen load miekg/lean + + # generate the init script from plugins above + zgen save +fi + +function zle-keymap-select zle-line-init +{ + # change cursor shape in iTerm2 + case $KEYMAP in + vicmd) echo -ne '\e[1 q';; + viins|main) echo -ne '\e[5 q';; + esac + + zle reset-prompt + zle -R +} + +function zle-line-finish +{ + print -n -- "\E]50;CursorShape=0\C-G" # block cursor +} + +zle -N zle-line-init +zle -N zle-line-finish +zle -N zle-keymap-select + +setopt no_share_history +setopt rm_star_silent +setopt auto_pushd +setopt +o nomatch +set +o histexpand + +bindkey "^[[3~" delete-char +bindkey "^[3;5~" delete-char +bindkey "^[[A" history-search-backward +bindkey "^[[B" history-search-forward + +# Prevent mad background colors on permissive permissions. +export LS_COLORS="di=34:ln=36:so=35:pi=33:ex=32:bd=1;33:cd=1;33:su=31:sg=32:tw=34:ow=34:st=37" + +# Colorize completions using default `ls` colors. 
+zstyle ':completion:*' list-colors '' +zstyle ':completion:*' list-colors "${(s.:.)LS_COLORS}" + +export MAKEFLAGS=-j$(nproc) + +alias dr="docker run -ti --rm" +alias drs="docker run -ti --rm --entrypoint /bin/sh" +alias vim=nvim + +# Graphite aliases +alias gtl="gt log" +alias gtd="gt down" +alias gtu="gt up" +alias gts="gt sync" +alias gto="gt checkout" + +# Fuzzy git rooted dir change on ctrl-f. +gitcd() { + git_root=$(git rev-parse --show-toplevel 2> /dev/null) + if [[ $? -eq 0 ]]; then + local selected_dir=$(cd "$git_root" && find * -type d -not -path '*node_modules*' -not -path '.git*' | fzf) + if [[ -n "$selected_dir" ]]; then + # Instead of changing directory, prepare a cd command + BUFFER="cd \"$git_root/$selected_dir\"" + zle accept-line + fi + fi +} +zle -N gitcd_widget gitcd +bindkey '^F' gitcd_widget + +# Graphite autocomplete. +#compdef gt +###-begin-gt-completions-### +# +# yargs command completion script +# +# Installation: gt completion >> ~/.zshrc +# or gt completion >> ~/.zprofile on OSX. +# +_gt_yargs_completions() +{ + local reply + local si=$IFS + IFS=$' +' reply=($(COMP_CWORD="$((CURRENT-1))" COMP_LINE="$BUFFER" COMP_POINT="$CURSOR" gt --get-yargs-completions "${words[@]}")) + IFS=$si + _describe 'values' reply +} +compdef _gt_yargs_completions gt +###-end-gt-completions-### diff --git a/build-images/key-bindings.zsh b/build-images/key-bindings.zsh new file mode 100644 index 00000000000..dfb473cc8be --- /dev/null +++ b/build-images/key-bindings.zsh @@ -0,0 +1,120 @@ +# ____ ____ +# / __/___ / __/ +# / /_/_ / / /_ +# / __/ / /_/ __/ +# /_/ /___/_/ key-bindings.zsh +# +# - $FZF_TMUX_OPTS +# - $FZF_CTRL_T_COMMAND +# - $FZF_CTRL_T_OPTS +# - $FZF_CTRL_R_OPTS +# - $FZF_ALT_C_COMMAND +# - $FZF_ALT_C_OPTS + +# Key bindings +# ------------ + +# The code at the top and the bottom of this file is the same as in completion.zsh. +# Refer to that file for explanation. +if 'zmodload' 'zsh/parameter' 2>'/dev/null' && (( ${+options} )); then + __fzf_key_bindings_options="options=(${(j: :)${(kv)options[@]}})" +else + () { + __fzf_key_bindings_options="setopt" + 'local' '__fzf_opt' + for __fzf_opt in "${(@)${(@f)$(set -o)}%% *}"; do + if [[ -o "$__fzf_opt" ]]; then + __fzf_key_bindings_options+=" -o $__fzf_opt" + else + __fzf_key_bindings_options+=" +o $__fzf_opt" + fi + done + } +fi + +'emulate' 'zsh' '-o' 'no_aliases' + +{ + +[[ -o interactive ]] || return 0 + +# CTRL-T - Paste the selected file path(s) into the command line +__fsel() { + local cmd="${FZF_CTRL_T_COMMAND:-"command find -L . -mindepth 1 \\( -path '*/\\.*' -o -fstype 'sysfs' -o -fstype 'devfs' -o -fstype 'devtmpfs' -o -fstype 'proc' \\) -prune \ + -o -type f -print \ + -o -type d -print \ + -o -type l -print 2> /dev/null | cut -b3-"}" + setopt localoptions pipefail no_aliases 2> /dev/null + local item + eval "$cmd" | FZF_DEFAULT_OPTS="--height ${FZF_TMUX_HEIGHT:-40%} --reverse --bind=ctrl-z:ignore ${FZF_DEFAULT_OPTS-} ${FZF_CTRL_T_OPTS-}" $(__fzfcmd) -m "$@" | while read item; do + echo -n "${(q)item} " + done + local ret=$? + echo + return $ret +} + +__fzfcmd() { + [ -n "${TMUX_PANE-}" ] && { [ "${FZF_TMUX:-0}" != 0 ] || [ -n "${FZF_TMUX_OPTS-}" ]; } && + echo "fzf-tmux ${FZF_TMUX_OPTS:--d${FZF_TMUX_HEIGHT:-40%}} -- " || echo "fzf" +} + +fzf-file-widget() { + LBUFFER="${LBUFFER}$(__fsel)" + local ret=$? 
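The `gitcd` widget above stuffs a `cd` command into the zsh line editor; the same fuzzy "jump to a directory under the git root" idea as a plain shell function (assuming `fzf` is installed, as it is in the devbox) looks like this:

```bash
# Fuzzy-cd to any directory under the current git repository root.
gitcd() {
  local root dir
  root=$(git rev-parse --show-toplevel 2>/dev/null) || {
    echo "not inside a git repository" >&2
    return 1
  }
  dir=$(cd "$root" && find . -type d \
          -not -path '*/node_modules/*' -not -path './.git*' | fzf) || return 1
  cd "$root/$dir"
}
```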
+ zle reset-prompt + return $ret +} +zle -N fzf-file-widget +bindkey -M emacs '^T' fzf-file-widget +bindkey -M vicmd '^T' fzf-file-widget +bindkey -M viins '^T' fzf-file-widget + +# ALT-C - cd into the selected directory +fzf-cd-widget() { + local cmd="${FZF_ALT_C_COMMAND:-"command find -L . -mindepth 1 \\( -path '*/\\.*' -o -fstype 'sysfs' -o -fstype 'devfs' -o -fstype 'devtmpfs' -o -fstype 'proc' \\) -prune \ + -o -type d -print 2> /dev/null | cut -b3-"}" + setopt localoptions pipefail no_aliases 2> /dev/null + local dir="$(eval "$cmd" | FZF_DEFAULT_OPTS="--height ${FZF_TMUX_HEIGHT:-40%} --reverse --bind=ctrl-z:ignore ${FZF_DEFAULT_OPTS-} ${FZF_ALT_C_OPTS-}" $(__fzfcmd) +m)" + if [[ -z "$dir" ]]; then + zle redisplay + return 0 + fi + zle push-line # Clear buffer. Auto-restored on next prompt. + BUFFER="builtin cd -- ${(q)dir}" + zle accept-line + local ret=$? + unset dir # ensure this doesn't end up appearing in prompt expansion + zle reset-prompt + return $ret +} +zle -N fzf-cd-widget +bindkey -M emacs '\ec' fzf-cd-widget +bindkey -M vicmd '\ec' fzf-cd-widget +bindkey -M viins '\ec' fzf-cd-widget + +# CTRL-R - Paste the selected command from history into the command line +fzf-history-widget() { + local selected num + setopt localoptions noglobsubst noposixbuiltins pipefail no_aliases 2> /dev/null + selected=( $(fc -rl 1 | awk '{ cmd=$0; sub(/^[ \t]*[0-9]+\**[ \t]+/, "", cmd); if (!seen[cmd]++) print $0 }' | + FZF_DEFAULT_OPTS="--height ${FZF_TMUX_HEIGHT:-40%} ${FZF_DEFAULT_OPTS-} -n2..,.. --scheme=history --bind=ctrl-r:toggle-sort,ctrl-z:ignore ${FZF_CTRL_R_OPTS-} --query=${(qqq)LBUFFER} +m" $(__fzfcmd)) ) + local ret=$? + if [ -n "$selected" ]; then + num=$selected[1] + if [ -n "$num" ]; then + zle vi-fetch-history -n $num + fi + fi + zle reset-prompt + return $ret +} +zle -N fzf-history-widget +bindkey -M emacs '^R' fzf-history-widget +bindkey -M vicmd '^R' fzf-history-widget +bindkey -M viins '^R' fzf-history-widget + +} always { + eval $__fzf_key_bindings_options + 'unset' '__fzf_key_bindings_options' +} diff --git a/build-images/run.sh b/build-images/run.sh new file mode 100755 index 00000000000..97132414f76 --- /dev/null +++ b/build-images/run.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -eu + +cd $(dirname $0) + +# On linux we need to perform uid/gid alignment to ensure files modified on the host have the correct owner. +# The entrypoint.sh script picks up these environment variables and adjusts the aztec-dev user accordingly. +# This isn't necessary on mac. +if [[ "$OSTYPE" == "linux"* ]]; then + ID_ARGS="-e LOCAL_USER_ID=$(id -u) -e LOCAL_GROUP_ID=$(id -g)" +fi + +docker run \ + -ti --rm \ + --hostname devbox \ + -e SSH_CONNECTION=' ' \ + ${ID_ARGS:-} \ + -w/workspaces/aztec-packages \ + -v$PWD/..:/workspaces/aztec-packages \ + -vdevbox-home:/home/aztec-dev \ + -v$HOME/.ssh/id_rsa:/home/aztec-dev/.ssh/id_rsa:ro \ + -v/var/run/docker.sock:/var/run/docker.sock \ + aztecprotocol/devbox diff --git a/build-system/scripts/setup_env b/build-system/scripts/setup_env index 7280711ed1b..b98486cef0d 100755 --- a/build-system/scripts/setup_env +++ b/build-system/scripts/setup_env @@ -130,8 +130,10 @@ if [ -n "$COMMIT_HASH" ]; then # Install and ensure correct permissions on build instance key. 
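`run.sh` above is the manual way into the devbox: on Linux it maps your uid/gid into the container, and it mounts the repo, the persistent home volume and the host docker socket. A quick sanity check that the uid/gid alignment and the socket group handling worked — this assumes the `aztecprotocol/devbox` image has already been built locally and ships the docker CLI (it mounts the host socket for exactly that purpose):

```bash
# Run from the repo root. `id` should report your host uid/gid, files created
# on the bind mount should be owned by you on the host, and the docker CLI
# should reach the host daemon through the mounted socket.
docker run --rm \
  -e LOCAL_USER_ID=$(id -u) -e LOCAL_GROUP_ID=$(id -g) \
  -v "$PWD":/workspaces/aztec-packages \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -w /workspaces/aztec-packages \
  aztecprotocol/devbox \
  bash -c 'id && touch .devbox-perm-check && docker ps >/dev/null && echo "docker socket OK"'
ls -ln .devbox-perm-check && rm .devbox-perm-check
```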
mkdir -p ~/.ssh - echo ${BUILD_INSTANCE_KEY:-} | base64 -d > ~/.ssh/build_instance_key - chmod 600 ~/.ssh/build_instance_key + if [ -n "${BUILD_INSTANCE_KEY:-}" ]; then + echo $BUILD_INSTANCE_KEY | base64 -d > ~/.ssh/build_instance_key + chmod 600 ~/.ssh/build_instance_key + fi cp $BUILD_SYSTEM_PATH/remote/ssh_config ~/.ssh/config if [[ "$COMMIT_MESSAGE" == *"[ci debug]"* ]]; then diff --git a/foundry/.gitignore b/foundry/.gitignore deleted file mode 100644 index 532fb7016f2..00000000000 --- a/foundry/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -bin -foundry-repo \ No newline at end of file diff --git a/foundry/Earthfile b/foundry/Earthfile deleted file mode 100644 index 14262b0b3fb..00000000000 --- a/foundry/Earthfile +++ /dev/null @@ -1,17 +0,0 @@ -VERSION 0.8 - -builder: - FROM rust:bookworm - ARG TARGETARCH - WORKDIR /usr/src/foundry - COPY bootstrap.sh bootstrap.sh - RUN ./bootstrap.sh && rm -rf foundry-repo - SAVE ARTIFACT bin /usr/src/foundry/bin - -build: - FROM ubuntu:lunar - ARG TARGETARCH - COPY --dir +builder/usr/src/foundry/bin /usr/src/foundry/bin - SAVE ARTIFACT /usr/src/foundry/bin /usr/src/foundry/bin - ENV PATH="${PATH}:/usr/src/foundry/bin" - SAVE IMAGE --push aztecprotocol/foundry-nightly-de33b6af53005037b463318d2628b5cfcaf3991-$TARGETARCH:latest \ No newline at end of file diff --git a/foundry/bootstrap.sh b/foundry/bootstrap.sh deleted file mode 100755 index 446ddd6228e..00000000000 --- a/foundry/bootstrap.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -set -eu - -cd $(dirname $0) - -rm -rf bin share - -[ "${TARGETARCH:-$(uname -m)}" = "arm64" ] && echo "export CFLAGS=-mno-outline-atomics" >> $HOME/.profile || true - -if [ ! -d './foundry-repo' ]; then - git clone --depth 1 --branch nightly-de33b6af53005037b463318d2628b5cfcaf39916 \ - https://github.com/foundry-rs/foundry.git foundry-repo -fi - -(cd foundry-repo && cargo build --profile local) - -mkdir bin \ - && mv foundry-repo/target/local/forge bin/forge \ - && mv foundry-repo/target/local/cast bin/cast \ - && mv foundry-repo/target/local/anvil bin/anvil \ - && mv foundry-repo/target/local/chisel bin/chisel \ - && strip bin/forge \ - && strip bin/cast \ - && strip bin/chisel \ - && strip bin/anvil; \ No newline at end of file diff --git a/l1-contracts/Dockerfile b/l1-contracts/Dockerfile index 75568db49f8..2cfabf3ca1b 100644 --- a/l1-contracts/Dockerfile +++ b/l1-contracts/Dockerfile @@ -14,11 +14,11 @@ RUN foundryup --version nightly-de33b6af53005037b463318d2628b5cfcaf39916 WORKDIR /usr/src/l1-contracts COPY . . -RUN git init +#RUN git init RUN forge clean && forge fmt --check && forge build && forge test --no-match-contract UniswapPortalTest -RUN npm install --global yarn -RUN yarn && yarn lint +RUN npm install --global solhint +RUN solhint --config ./.solhint.json --fix "src/**/*.sol" # RUN git add . && yarn slither && yarn slither-has-diff RUN forge build diff --git a/l1-contracts/Earthfile b/l1-contracts/Earthfile index efa14c84819..57d0a1c8a51 100644 --- a/l1-contracts/Earthfile +++ b/l1-contracts/Earthfile @@ -1,23 +1,12 @@ VERSION 0.8 build: - FROM ubuntu:lunar - RUN apt update && apt install curl git jq bash nodejs npm python3.11-full python3-pip -y - - # Use virtualenv, do not try to use pipx, it's not working. - RUN python3 -m venv /root/.venv - RUN /root/.venv/bin/pip3 install slither-analyzer==0.10.0 slitherin==0.5.0 - ENV PATH="${PATH}:/root/.venv/bin" - - # Install yarn and solhint. - RUN npm install --global yarn solhint - - # Install our build of foundry. 
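The `setup_env` change above makes the build-instance SSH key optional: the file is only written (and locked down) when the secret is actually present, instead of always writing the output of an empty decode. The same guard, plus the one-liner for producing the expected value, as a standalone sketch (the source key path is illustrative; `base64 -w0` is GNU coreutils):

```bash
# Encoding side (done once, wherever the secret is configured):
#   base64 -w0 < ~/.ssh/build_instance_key_source
# Decoding side, as in setup_env: only materialize the key when provided.
mkdir -p ~/.ssh
if [ -n "${BUILD_INSTANCE_KEY:-}" ]; then
  echo "$BUILD_INSTANCE_KEY" | base64 -d > ~/.ssh/build_instance_key
  chmod 600 ~/.ssh/build_instance_key
else
  echo "BUILD_INSTANCE_KEY not set; skipping build instance key setup"
fi
```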
- COPY --dir ../foundry/+build/usr/src/foundry/bin /usr/src/foundry/bin - ENV PATH="${PATH}:/usr/src/foundry/bin" - + FROM ../build-images+build WORKDIR /usr/src/l1-contracts COPY --dir lib scripts src terraform test *.json *.toml *.sh . - RUN git init && git add . && yarn lint && yarn slither && yarn slither-has-diff + #RUN git init && git add . && yarn lint && yarn slither && yarn slither-has-diff + # "slither": "forge clean && forge build --build-info --skip '*/test/**' --force && slither . --checklist --ignore-compile --show-ignored-findings --config-file ./slither.config.json | tee slither_output.md", + # "slither-has-diff": "./slither_has_diff.sh" + RUN solhint --config ./.solhint.json --fix "src/**/*.sol" RUN forge clean && forge fmt --check && forge build && forge test --no-match-contract UniswapPortalTest SAVE ARTIFACT /usr/src/l1-contracts /usr/src/l1-contracts \ No newline at end of file diff --git a/l1-contracts/bootstrap.sh b/l1-contracts/bootstrap.sh index 7274d3dac7d..75d22eea902 100755 --- a/l1-contracts/bootstrap.sh +++ b/l1-contracts/bootstrap.sh @@ -23,10 +23,10 @@ fi rm -rf broadcast cache out serve # Install -../foundry/bin/forge install --no-commit +forge install --no-commit # Ensure libraries are at the correct version git submodule update --init --recursive ./lib # Compile contracts -../foundry/bin/forge build +forge build diff --git a/l1-contracts/package.json b/l1-contracts/package.json deleted file mode 100644 index bd9aab6899d..00000000000 --- a/l1-contracts/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "@aztec/l1-contracts", - "version": "0.1.0", - "license": "Apache-2.0", - "description": "Aztec contracts for the Ethereum mainnet and testnets", - "devDependencies": { - "solhint": "https://github.com/LHerskind/solhint#master" - }, - "scripts": { - "format": "../foundry/bin/forge fmt", - "lint": "solhint --config ./.solhint.json --fix \"src/**/*.sol\"", - "slither": "../foundry/bin/forge clean && ../foundry/bin/forge build --build-info --skip '*/test/**' --force && slither . --checklist --ignore-compile --show-ignored-findings --config-file ./slither.config.json | tee slither_output.md", - "slither-has-diff": "./slither_has_diff.sh" - } -} diff --git a/noir-projects/Earthfile b/noir-projects/Earthfile index ccdfe81acf1..85fe5130a92 100644 --- a/noir-projects/Earthfile +++ b/noir-projects/Earthfile @@ -1,18 +1,18 @@ VERSION 0.8 -FROM ubuntu:lunar -RUN apt-get update && apt-get install -y parallel -# Install nargo -COPY ../noir/+nargo/nargo /usr/bin/nargo -# Install transpiler -COPY ../avm-transpiler/+build/avm-transpiler /usr/bin/avm-transpiler +source: + FROM ../build-images+build -WORKDIR /usr/src/noir-projects + # Install nargo + COPY ../noir/+nargo/nargo /usr/bin/nargo + # Install transpiler + COPY ../avm-transpiler/+build/avm-transpiler /usr/bin/avm-transpiler -# Copy source. -COPY --dir aztec-nr noir-contracts noir-protocol-circuits . + WORKDIR /usr/src/noir-projects + + # Copy source. + COPY --dir aztec-nr noir-contracts noir-protocol-circuits . -source: # for debugging rebuilds RUN echo CONTENT HASH $(find . 
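With the dedicated `foundry/` image and the `l1-contracts/package.json` gone, lint and build now assume `forge` and `solhint` are on the PATH (both come from the build image; `/opt/foundry/bin` is added via the `environment` file earlier in this diff). Locally, the equivalent of what the Dockerfile and Earthfile above run is roughly:

```bash
cd l1-contracts
solhint --config ./.solhint.json --fix "src/**/*.sol"   # lint (replaces `yarn lint`)
forge install --no-commit                               # fetch dependencies
git submodule update --init --recursive ./lib           # pin library versions
forge clean && forge fmt --check && forge build         # format check + compile
forge test --no-match-contract UniswapPortalTest        # tests, skipping UniswapPortalTest
```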
-type f -exec sha256sum {} ';' | sort | sha256sum | awk '{print $1}') | tee .content-hash diff --git a/noir-projects/bootstrap.sh b/noir-projects/bootstrap.sh index 2a46c6e26f2..0d800c336a6 100755 --- a/noir-projects/bootstrap.sh +++ b/noir-projects/bootstrap.sh @@ -27,7 +27,7 @@ AVAILABLE_MEMORY=0 case "$(uname)" in Linux*) # Check available memory on Linux - AVAILABLE_MEMORY=$(awk '/MemFree/ { printf $2 }' /proc/meminfo) + AVAILABLE_MEMORY=$(awk '/MemTotal/ { printf $2 }' /proc/meminfo) ;; *) echo "Parallel builds not supported on this operating system" @@ -35,7 +35,7 @@ case "$(uname)" in esac # This value may be too low. # If builds fail with an amount of free memory greater than this value then it should be increased. -MIN_PARALLEL_BUILD_MEMORY=32000000 +MIN_PARALLEL_BUILD_MEMORY=32854492 if [[ AVAILABLE_MEMORY -lt MIN_PARALLEL_BUILD_MEMORY ]]; then echo "System does not have enough memory for parallel builds, falling back to sequential" diff --git a/noir/Earthfile b/noir/Earthfile index 63f37774607..18faae05e2e 100644 --- a/noir/Earthfile +++ b/noir/Earthfile @@ -1,8 +1,7 @@ VERSION 0.8 nargo: - FROM rust:bullseye - RUN apt update && apt install -y libc++1 + FROM ../build-images/+build WORKDIR /usr/src # Relevant source (TODO finer-grained 'tooling') COPY --dir \ @@ -28,11 +27,7 @@ nargo: packages: BUILD ../barretenberg/ts/+build # prefetch - FROM node:20 - - RUN curl https://sh.rustup.rs -sSf | bash -s -- -y - RUN echo 'source $HOME/.cargo/env' >> $HOME/.bashrc - RUN apt update && apt install -y jq libc++1 + FROM ../build-images/+build # `noir-repo` is nested inside of `noir` so we copy `bb.js` as such to account # for the extra nested folder specified in portalled package paths @@ -69,13 +64,13 @@ packages: # TODO(AD) is this OK as a content hash? ENV COMMIT_HASH=$(find . -type f -exec sha256sum {} ';' | sort | sha256sum | awk '{print $1}') RUN echo CONTENT HASH $COMMIT_HASH | tee .content-hash - RUN PATH="/root/.cargo/bin:$PATH" ./scripts/bootstrap_packages.sh + RUN ./scripts/bootstrap_packages.sh SAVE ARTIFACT packages SAVE IMAGE --cache-hint run: # When running the container, mount the users home directory to same location. - FROM ubuntu:lunar + FROM ubuntu:noble # Install Tini as nargo doesn't handle signals properly. # Install git as nargo needs it to clone. 
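The `noir-projects/bootstrap.sh` change above switches the parallel-build gate from `MemFree` to `MemTotal` (presumably because free memory fluctuates too much between runs to be a stable signal) and adjusts the threshold. Extracted into a standalone sketch, the check is:

```bash
# Values are in kB, as reported by /proc/meminfo; the threshold is the one from the diff.
MIN_PARALLEL_BUILD_MEMORY=32854492

AVAILABLE_MEMORY=0
if [ "$(uname)" = "Linux" ]; then
  AVAILABLE_MEMORY=$(awk '/MemTotal/ { print $2 }' /proc/meminfo)
fi

if [ "$AVAILABLE_MEMORY" -lt "$MIN_PARALLEL_BUILD_MEMORY" ]; then
  echo "Not enough total memory (${AVAILABLE_MEMORY} kB) for parallel builds; building sequentially"
else
  echo "Parallel builds enabled (${AVAILABLE_MEMORY} kB total memory)"
fi
```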
RUN apt-get update && apt-get install -y git tini && rm -rf /var/lib/apt/lists/* && apt-get clean diff --git a/noir/scripts/bootstrap_native.sh b/noir/scripts/bootstrap_native.sh index 67fcc6f6d92..7504a74f50d 100755 --- a/noir/scripts/bootstrap_native.sh +++ b/noir/scripts/bootstrap_native.sh @@ -19,4 +19,8 @@ if [ -n "${DEBUG:-}" ]; then cargo build else cargo build --release -fi \ No newline at end of file +fi + +if [ -x ../scripts/fix_incremental_ts.sh ]; then + ../scripts/fix_incremental_ts.sh +fi diff --git a/noir/scripts/bootstrap_packages.sh b/noir/scripts/bootstrap_packages.sh index 939735060e2..f8292428055 100755 --- a/noir/scripts/bootstrap_packages.sh +++ b/noir/scripts/bootstrap_packages.sh @@ -31,4 +31,8 @@ rm -rf packages && mkdir -p packages for PROJECT in "${PROJECTS[@]}"; do PPATH=$(cd noir-repo && yarn workspaces list --json | jq -r "select(.name==\"$PROJECT\").location") tar zxfv noir-repo/$PPATH/package.tgz -C packages && mv packages/package packages/${PROJECT#*/} -done \ No newline at end of file +done + +if [ -x $ROOT/scripts/fix_incremental_ts.sh ]; then + $ROOT/scripts/fix_incremental_ts.sh +fi diff --git a/noir/scripts/fix_incremental_ts.sh b/noir/scripts/fix_incremental_ts.sh new file mode 100755 index 00000000000..ab54606420a --- /dev/null +++ b/noir/scripts/fix_incremental_ts.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Hack to workaround incremental builds not working on mac when mounted into dev container. +# Drops the fractional part of file timestamps. +set -eu + +cd $(dirname $0)/../noir-repo + +if [ "${HOST_OSTYPE:-}" == "darwin" ]; then + echo -n "Fixing incremental timestamps... " + find target -type f -print0 | xargs -0 -P $(nproc) -I {} sh -c 'touch -d @$(stat --format="%Y" {}) {}' + echo "Done." +fi \ No newline at end of file diff --git a/yarn-project/Earthfile b/yarn-project/Earthfile index 00bd6f6e9f0..6da55e013d9 100644 --- a/yarn-project/Earthfile +++ b/yarn-project/Earthfile @@ -4,7 +4,7 @@ deps: LOCALLY LET packages = $(git ls-files "**/package*.json" package*.json) LET tsconfigs = $(git ls-files "**/tsconfig*.json" tsconfig*.json) - FROM node:18.19.0 + FROM ../build-images+build # copy bb-js and noir-packages COPY ../barretenberg/ts/+build/build /usr/src/barretenberg/ts COPY ../noir/+packages/packages /usr/src/noir/packages @@ -35,7 +35,6 @@ build: BUILD ../noir-projects/+build BUILD ../l1-contracts/+build FROM +deps - RUN apt update && apt install -y jq curl perl && rm -rf /var/lib/apt/lists/* && apt-get clean COPY ../barretenberg/cpp/+preset-release/bin/bb /usr/src/barretenberg/cpp/build/bin/bb COPY ../noir/+nargo/acvm /usr/src/noir/noir-repo/target/release/acvm @@ -54,29 +53,53 @@ build-dev: aztec-prod: FROM +build RUN yarn workspaces focus @aztec/aztec --production && yarn cache clean + # Remove a bunch of stuff that we don't need that takes up space. 
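`fix_incremental_ts.sh` above works around incremental builds misbehaving when the repo is mounted into the dev container from macOS: it strips the sub-second part of every mtime under `target/`, since `stat --format=%Y` yields whole seconds and `touch -d @<seconds>` rewrites the timestamp without a fractional component. Demonstrated on a single file (the path is illustrative; requires GNU stat/touch, as in the script):

```bash
f=noir-repo/target/release/some-artifact   # illustrative path
stat --format='before: %y' "$f"            # e.g. 2024-04-12 10:15:03.123456789
touch -d "@$(stat --format=%Y "$f")" "$f"  # rewrite mtime with whole seconds only
stat --format='after:  %y' "$f"            # e.g. 2024-04-12 10:15:03.000000000
```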
+ RUN rm -rf \ + ../noir-projects \ + ../l1-contracts \ + ../barretenberg/ts/src \ + ../barretenberg/ts/dest/node-cjs \ + ../barretenberg/ts/dest/browser \ + aztec.js/dest/main.js \ + end-to-end \ + **/src \ + **/artifacts SAVE ARTIFACT /usr/src /usr/src -aztec-prod-slim: - FROM node:18.19.1-slim - COPY +aztec-prod/usr/src /usr/src - aztec: - FROM +aztec-prod-slim + FROM ubuntu:noble + RUN apt update && apt install nodejs -y && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + COPY +aztec-prod/usr/src /usr/src ENTRYPOINT ["node", "--no-warnings", "/usr/src/yarn-project/aztec/dest/bin/index.js"] EXPOSE 8080 +# We care about creating a slimmed down e2e image because we have to serialize it from earthly to docker for running. end-to-end-prod: FROM +build RUN yarn workspaces focus @aztec/end-to-end --production && yarn cache clean + # Remove a bunch of stuff that we don't need that takes up space. + RUN rm -rf \ + ../noir-projects \ + ../l1-contracts \ + ../barretenberg/ts/src \ + ../barretenberg/ts/dest/node-cjs \ + ../barretenberg/ts/dest/browser \ + **/artifacts SAVE ARTIFACT /usr/src /usr/src end-to-end: - FROM node:18.19.1-slim - RUN apt-get update && apt-get install jq chromium netcat-openbsd -y - ENV CHROME_BIN="/usr/bin/chromium" - COPY ../foundry/+build/usr/src/foundry/bin/anvil /usr/src/foundry/bin/anvil + FROM ubuntu:noble + RUN apt-get update && apt-get install -y wget gnupg \ + && wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ + && echo "deb [arch=$(dpkg --print-architecture)] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ + && apt update && apt install nodejs jq google-chrome-stable netcat-openbsd -y \ + && rm -rf /var/lib/apt/lists/* + ENV CHROME_BIN="/usr/bin/google-chrome-stable" + ENV PATH=/opt/foundry/bin:$PATH + COPY ../build-images+build/anvil /opt/foundry/bin/anvil COPY +end-to-end-prod/usr/src /usr/src WORKDIR /usr/src/yarn-project/end-to-end + RUN ln -s /usr/src/yarn-project/.yarn/releases/yarn-3.6.3.cjs /usr/local/bin/yarn ENTRYPOINT ["yarn", "test"] scripts-prod: @@ -86,22 +109,17 @@ scripts-prod: all: BUILD +aztec - BUILD +cli BUILD +end-to-end -# for use with yarn-project/end-to-end and its e2e_mode=cache option +export-aztec: + ARG EARTHLY_GIT_HASH + FROM +aztec + SAVE IMAGE aztecprotocol/aztec:$EARTHLY_GIT_HASH + export-end-to-end: - # Prefetch targets to build in parallel. - BUILD +end-to-end - BUILD +aztec ARG EARTHLY_GIT_HASH - # pushes the foundry image to local docker images - FROM ../foundry/+build - SAVE IMAGE aztecprotocol/foundry-nightly-de33b6af53005037b463318d2628b5cfcaf3991:latest FROM +end-to-end SAVE IMAGE aztecprotocol/end-to-end:$EARTHLY_GIT_HASH - FROM +aztec - SAVE IMAGE aztecprotocol/aztec:$EARTHLY_GIT_HASH format-check: FROM +build diff --git a/yarn-project/bootstrap.sh b/yarn-project/bootstrap.sh index a4ac9c35137..9d78eb6b1c1 100755 --- a/yarn-project/bootstrap.sh +++ b/yarn-project/bootstrap.sh @@ -2,23 +2,12 @@ [ -n "${BUILD_SYSTEM_DEBUG:-}" ] && set -x # conditionally trace set -eu -# Check node version. -node_version=$(node -v | tr -d 'v') -major=${node_version%%.*} -rest=${node_version#*.} -minor=${rest%%.*} - YELLOW="\033[93m" BLUE="\033[34m" GREEN="\033[32m" BOLD="\033[1m" RESET="\033[0m" -if ((major < 18 || (major == 18 && minor < 19))); then - echo "Node.js version is less than 18.19. Exiting." 
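The new `export-aztec` / `export-end-to-end` targets above replace the old combined export and tag the images with the current git hash (Earthly's built-in `EARTHLY_GIT_HASH`). To build them locally and check what the `rm -rf` pruning buys in image size — assuming `earthly` is installed and that `EARTHLY_GIT_HASH` resolves to the same value as `git rev-parse HEAD`, which is its usual behaviour:

```bash
earthly -P ./yarn-project+export-aztec
earthly -P ./yarn-project+export-end-to-end
docker images | grep -E 'aztecprotocol/(aztec|end-to-end)'
docker image inspect "aztecprotocol/aztec:$(git rev-parse HEAD)" --format '{{.Size}} bytes'
```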
- exit 1 -fi - cd "$(dirname "$0")" CMD=${1:-} diff --git a/yarn-project/end-to-end/Earthfile b/yarn-project/end-to-end/Earthfile index d59b9222d0e..8ff7a7f9aa1 100644 --- a/yarn-project/end-to-end/Earthfile +++ b/yarn-project/end-to-end/Earthfile @@ -1,95 +1,30 @@ VERSION 0.8 -# requires first saving the images locally with ../+export-end-to-end - -# run locally and build -E2E_TEST_LOCAL: - FUNCTION - ARG test - ARG compose_file=./scripts/docker-compose.yml - ARG enable_gas="" - ARG debug="aztec:*" - ARG EARTHLY_TARGET_NAME - LOCALLY - ENV ENABLE_GAS=$enable_gas - ENV TEST=$test - ENV DEBUG=$debug - ENV JOB_NAME=$EARTHLY_TARGET_NAME - WITH DOCKER \ - --load aztecprotocol/aztec:latest=../+aztec \ - --load aztecprotocol/end-to-end:latest=../+end-to-end \ - --load aztecprotocol/foundry-nightly-de33b6af53005037b463318d2628b5cfcaf3991:latest=../../foundry/+build - # Run our docker compose, ending whenever sandbox ends, filtering out noisy eth_getLogs - RUN docker compose -f $compose_file up --exit-code-from=end-to-end --force-recreate - END - -# run locally and take from cache, used for our mainly x86 jobs -E2E_TEST_FROM_CACHE: +E2E_COMPOSE_TEST: FUNCTION ARG test ARG compose_file=./scripts/docker-compose.yml - ARG enable_gas="" ARG debug="aztec:*" - ARG aztec_docker_tag ARG EARTHLY_TARGET_NAME LOCALLY - ENV ENABLE_GAS=$enable_gas ENV TEST=$test ENV DEBUG=$debug - ENV AZTEC_DOCKER_TAG=$aztec_docker_tag - ENV JOB_NAME=$EARTHLY_TARGET_NAME - # need a different project name for each to run in parallel - LET project_name=$(echo $test | sed 's/[./]/_/g') + LET project_name=$(echo $test | sed 's/\./_/g') IF docker compose > /dev/null 2>&1 LET CMD="docker compose" ELSE LET CMD="docker-compose" END - # In CI we do not use WITH DOCKER as we have had issues with earthly copying big images - RUN $CMD -p $project_name -f $compose_file up --exit-code-from=end-to-end --force-recreate - -# run on satellite and build, used for our few ARM jobs (means github runner doesn't need to be ARM) -E2E_TEST_FROM_BUILD: - FUNCTION - ARG test - ARG compose_file=./scripts/docker-compose.yml - ARG enable_gas="" - ARG debug="aztec:*" - ARG EARTHLY_TARGET_NAME - FROM earthly/dind:alpine-3.19-docker-25.0.2-r0 - ENV ENABLE_GAS=$enable_gas - ENV TEST=$test - ENV DEBUG=$debug - ENV JOB_NAME=$EARTHLY_TARGET_NAME - COPY $compose_file $compose_file - # For ARM, we do use WITH DOCKER as we don't have many e2e tests, but note E2E_TEST_FROM_CACHE WITH DOCKER \ --load aztecprotocol/aztec:latest=../+aztec \ --load aztecprotocol/end-to-end:latest=../+end-to-end \ - --load aztecprotocol/foundry-nightly-de33b6af53005037b463318d2628b5cfcaf3991:latest=../../foundry/+build + --load aztecprotocol/foundry-nightly-de33b6af53005037b463318d2628b5cfcaf3991:latest=../../build-images+build # Run our docker compose, ending whenever sandbox ends, filtering out noisy eth_getLogs - RUN docker compose -f $compose_file up --exit-code-from=end-to-end --force-recreate - END - -E2E_TEST: - FUNCTION - ARG test - ARG compose_file=./scripts/docker-compose.yml - ARG enable_gas="" - ARG e2e_mode=local - ARG debug="aztec:*" - LOCALLY - IF [ $e2e_mode = local ] - DO +E2E_TEST_LOCAL --test=$test --compose_file=$compose_file --enable_gas=$enable_gas --debug=$debug - ELSE IF [ $e2e_mode = cache ] - DO +E2E_TEST_FROM_CACHE --test=$test --aztec_docker_tag=$(git rev-parse HEAD) --compose_file=$compose_file --enable_gas=$enable_gas --debug=$debug - ELSE - DO +E2E_TEST_FROM_BUILD --test=$test --compose_file=$compose_file --enable_gas=$enable_gas --debug=$debug + RUN $CMD -p 
$project_name -f $compose_file up --exit-code-from=end-to-end --force-recreate END UPLOAD_LOGS: FUNCTION - ARG e2e_mode=cache ARG PULL_REQUEST ARG BRANCH ARG COMMIT_HASH @@ -100,7 +35,6 @@ UPLOAD_LOGS: ENV PULL_REQUEST=$PULL_REQUEST ENV BRANCH=$BRANCH ENV COMMIT_HASH=$COMMIT_HASH - ENV E2E_MODE=$e2e_mode RUN --secret AWS_ACCESS_KEY_ID --secret AWS_SECRET_ACCESS_KEY /usr/src/scripts/logs/upload_logs_to_s3.sh /usr/var/log # Define e2e tests @@ -113,67 +47,54 @@ flakey-e2e-tests: RUN yarn test --passWithNoTests ./src/flakey || true e2e-sandbox-example: - ARG e2e_mode=local - DO +E2E_TEST --test=e2e_sandbox_example.test.ts --e2e_mode=$e2e_mode + DO +E2E_COMPOSE_TEST --test=e2e_sandbox_example.test.ts uniswap-trade-on-l1-from-l2: - ARG e2e_mode=local - DO +E2E_TEST --test=uniswap_trade_on_l1_from_l2.test.ts --e2e_mode=$e2e_mode + DO +E2E_COMPOSE_TEST --test=uniswap_trade_on_l1_from_l2.test.ts integration-l1-publisher: - ARG e2e_mode=local - DO +E2E_TEST --test=integration_l1_publisher.test.ts --e2e_mode=$e2e_mode + DO +E2E_COMPOSE_TEST --test=integration_l1_publisher.test.ts e2e-browser: - ARG e2e_mode=local - DO +E2E_TEST --test=e2e_aztec_js_browser.test.ts --e2e_mode=$e2e_mode + DO +E2E_COMPOSE_TEST --test=e2e_aztec_js_browser.test.ts pxe: - ARG e2e_mode=local - DO +E2E_TEST --test=pxe.test.ts --e2e_mode=$e2e_mode + DO +E2E_COMPOSE_TEST --test=pxe.test.ts e2e-docs-examples: - ARG e2e_mode=local - DO +E2E_TEST --test=docs_examples.test.ts --e2e_mode=$e2e_mode + DO +E2E_COMPOSE_TEST --test=docs_examples.test.ts guides-writing-an-account-contract: - ARG e2e_mode=local - DO +E2E_TEST --test=guides/writing_an_account_contract.test.ts --e2e_mode=$e2e_mode + DO +E2E_COMPOSE_TEST --test=guides/writing_an_account_contract.test.ts guides-dapp-testing: - ARG e2e_mode=local - DO +E2E_TEST --test=guides/dapp_testing.test.ts --e2e_mode=$e2e_mode + DO +E2E_COMPOSE_TEST --test=guides/dapp_testing.test.ts # TODO intermittent failure # guides-sample-dapp: -# ARG e2e_mode=local -# DO +E2E_TEST --test=sample-dapp --e2e_mode=$e2e_mode +# DO +E2E_COMPOSE_TEST --test=sample-dapp # TODO currently hangs for hour+ # guides-up-quick-start: -# ARG e2e_mode=local -# DO +E2E_TEST --test=guides/up_quick_start.test.ts --e2e_mode=$e2e_mode +# DO +E2E_COMPOSE_TEST --test=guides/up_quick_start.test.ts bench-publish-rollup: - ARG e2e_mode=local ARG PULL_REQUEST ARG BRANCH ARG COMMIT_HASH - DO +E2E_TEST --test=benchmarks/bench_publish_rollup.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --e2e_mode=$e2e_mode --compose_file=./scripts/docker-compose-no-sandbox.yml + DO +E2E_COMPOSE_TEST --test=benchmarks/bench_publish_rollup.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --compose_file=./scripts/docker-compose-no-sandbox.yml DO +UPLOAD_LOGS --e2e_mode=$e2e_mode --PULL_REQUEST=$PULL_REQUEST --BRANCH=$BRANCH --COMMIT_HASH=$COMMIT_HASH bench-process-history: - ARG e2e_mode=local ARG PULL_REQUEST ARG BRANCH ARG COMMIT_HASH - DO +E2E_TEST --test=benchmarks/bench_process_history.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --e2e_mode=$e2e_mode --compose_file=./scripts/docker-compose-no-sandbox.yml + DO +E2E_COMPOSE_TEST --test=benchmarks/bench_process_history.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --compose_file=./scripts/docker-compose-no-sandbox.yml DO +UPLOAD_LOGS --e2e_mode=$e2e_mode 
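`E2E_COMPOSE_TEST` above collapses the three old variants into one: Earthly loads the freshly built images, picks whichever compose binary is available, and gives each test its own compose project so parallel runs don't collide. Outside Earthly the same invocation looks roughly like the sketch below. It assumes the `aztecprotocol/aztec` and `aztecprotocol/end-to-end` images are already present locally (e.g. via the `export-*` targets above; the compose file is not shown here, so you may need to retag them as `latest` depending on which tag it references), and it uses `docker compose version` as a slightly more explicit availability probe than the bare `docker compose` check in the Earthfile:

```bash
cd yarn-project/end-to-end

TEST=${1:-e2e_sandbox_example.test.ts}
DEBUG=${DEBUG:-aztec:*}
export TEST DEBUG

if docker compose version >/dev/null 2>&1; then CMD="docker compose"; else CMD="docker-compose"; fi

# One compose project per test; replace both dots and slashes so nested
# test paths (e.g. guides/...) still yield a valid project name.
project_name=$(echo "$TEST" | sed 's/[./]/_/g')

$CMD -p "$project_name" -f ./scripts/docker-compose.yml up \
  --exit-code-from=end-to-end --force-recreate
```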
--PULL_REQUEST=$PULL_REQUEST --BRANCH=$BRANCH --COMMIT_HASH=$COMMIT_HASH bench-tx-size: - ARG e2e_mode=local ARG PULL_REQUEST ARG BRANCH ARG COMMIT_HASH - DO +E2E_TEST --test=benchmarks/bench_tx_size_fees.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --e2e_mode=$e2e_mode --enable_gas=1 --compose_file=./scripts/docker-compose-no-sandbox.yml + DO +E2E_COMPOSE_TEST --test=benchmarks/bench_tx_size_fees.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --enable_gas=1 --compose_file=./scripts/docker-compose-no-sandbox.yml DO +UPLOAD_LOGS --e2e_mode=$e2e_mode --PULL_REQUEST=$PULL_REQUEST --BRANCH=$BRANCH --COMMIT_HASH=$COMMIT_HASH diff --git a/yarn-project/end-to-end/package.json b/yarn-project/end-to-end/package.json index e5e5c0ba6ba..fcc621d59ad 100644 --- a/yarn-project/end-to-end/package.json +++ b/yarn-project/end-to-end/package.json @@ -15,7 +15,7 @@ "clean": "rm -rf ./dest .tsbuildinfo", "formatting": "run -T prettier --check ./src \"!src/web/main.js\" && run -T eslint ./src", "formatting:fix": "run -T eslint --fix ./src && run -T prettier -w ./src", - "test": "LOG_LEVEL=${LOG_LEVEL:-silent} DEBUG_COLORS=1 NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --testTimeout=120000 --forceExit", + "test": "LOG_LEVEL=${LOG_LEVEL:-silent} DEBUG_COLORS=1 NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --testTimeout=300000 --forceExit", "test:integration": "concurrently -k -s first -c reset,dim -n test,anvil \"yarn test:integration:run\" \"anvil\"", "test:integration:run": "NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --no-cache --runInBand --config jest.integration.config.json" }, diff --git a/yarn-project/end-to-end/package.local.json b/yarn-project/end-to-end/package.local.json index 089811e03e9..6e3666e9fa6 100644 --- a/yarn-project/end-to-end/package.local.json +++ b/yarn-project/end-to-end/package.local.json @@ -2,6 +2,6 @@ "scripts": { "build": "yarn clean && tsc -b && webpack", "formatting": "run -T prettier --check ./src \"!src/web/main.js\" && run -T eslint ./src", - "test": "LOG_LEVEL=${LOG_LEVEL:-silent} DEBUG_COLORS=1 NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --testTimeout=120000 --forceExit" + "test": "LOG_LEVEL=${LOG_LEVEL:-silent} DEBUG_COLORS=1 NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --testTimeout=300000 --forceExit" } } diff --git a/yarn-project/end-to-end/scripts/anvil_kill_wrapper.sh b/yarn-project/end-to-end/scripts/anvil_kill_wrapper.sh index ddeea389ef2..8eec17e179f 100755 --- a/yarn-project/end-to-end/scripts/anvil_kill_wrapper.sh +++ b/yarn-project/end-to-end/scripts/anvil_kill_wrapper.sh @@ -4,7 +4,7 @@ PARENT_PID=$(awk '{print $4}' /proc/$$/stat) # Start anvil in the background. -../../foundry/bin/anvil $@ & +anvil $@ & CHILD_PID=$! 
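The `package.json` change above raises jest's global `--testTimeout` from 120 s to 300 s, which is why the per-hook and per-test timeouts are dropped from the individual specs in the rest of this diff. Running a single e2e spec locally with the same settings is just a matter of passing the file to the existing `test` script, for example:

```bash
cd yarn-project/end-to-end
# Depending on the spec this needs a running sandbox and/or anvil; LOG_LEVEL
# defaults to silent in the test script, so override it to see output.
LOG_LEVEL=verbose yarn test e2e_escrow_contract.test.ts
```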
cleanup() { diff --git a/yarn-project/end-to-end/src/benchmarks/bench_process_history.test.ts b/yarn-project/end-to-end/src/benchmarks/bench_process_history.test.ts index 22d11429873..b34053842f5 100644 --- a/yarn-project/end-to-end/src/benchmarks/bench_process_history.test.ts +++ b/yarn-project/end-to-end/src/benchmarks/bench_process_history.test.ts @@ -30,7 +30,7 @@ describe('benchmarks/process_history', () => { beforeEach(async () => { ({ context, contract, sequencer } = await benchmarkSetup({ maxTxsPerBlock: BLOCK_SIZE })); - }, 60_000); + }); it( `processes chain history of ${MAX_CHAIN_LENGTH} with ${BLOCK_SIZE}-tx blocks`, diff --git a/yarn-project/end-to-end/src/benchmarks/bench_publish_rollup.test.ts b/yarn-project/end-to-end/src/benchmarks/bench_publish_rollup.test.ts index 542bbfeeb5e..b3bb55d6489 100644 --- a/yarn-project/end-to-end/src/benchmarks/bench_publish_rollup.test.ts +++ b/yarn-project/end-to-end/src/benchmarks/bench_publish_rollup.test.ts @@ -14,7 +14,7 @@ describe('benchmarks/publish_rollup', () => { beforeEach(async () => { ({ context, contract, sequencer } = await benchmarkSetup({ maxTxsPerBlock: 1024 })); - }, 60_000); + }); it.each(BENCHMARK_BLOCK_SIZES)( `publishes a rollup with %d txs`, diff --git a/yarn-project/end-to-end/src/composed/docs_examples.test.ts b/yarn-project/end-to-end/src/composed/docs_examples.test.ts index 42aada0f881..e764a048fa6 100644 --- a/yarn-project/end-to-end/src/composed/docs_examples.test.ts +++ b/yarn-project/end-to-end/src/composed/docs_examples.test.ts @@ -47,5 +47,5 @@ describe('docs_examples', () => { const balance = await contract.methods.balance_of_public(wallet.getAddress()).simulate(); expect(balance).toEqual(1n); // docs:end:simulate_function - }, 120_000); + }); }); diff --git a/yarn-project/end-to-end/src/composed/e2e_persistence.test.ts b/yarn-project/end-to-end/src/composed/e2e_persistence.test.ts index 14fa82a73fe..656fd6fe367 100644 --- a/yarn-project/end-to-end/src/composed/e2e_persistence.test.ts +++ b/yarn-project/end-to-end/src/composed/e2e_persistence.test.ts @@ -85,7 +85,7 @@ describe('Aztec persistence', () => { await contract.methods.redeem_shield(ownerAddress.address, 1000n, secret).send().wait(); await initialContext.teardown(); - }, 100_000); + }); describe.each([ [ @@ -160,7 +160,7 @@ describe('Aztec persistence', () => { expect(ownerBalance).toEqual(initialOwnerBalance - 500n); expect(targetBalance).toEqual(500n); - }, 30_000); + }); }); describe.each([ @@ -278,7 +278,7 @@ describe('Aztec persistence', () => { // shut everything down await temporaryContext.teardown(); - }, 100_000); + }); let ownerWallet: AccountWallet; let contract: TokenContract; @@ -290,7 +290,7 @@ describe('Aztec persistence', () => { contract = await TokenContract.at(contractAddress, ownerWallet); await waitForAccountSynch(context.pxe, ownerAddress, { interval: 0.1, timeout: 5 }); - }, 5000); + }); afterEach(async () => { await context.teardown(); diff --git a/yarn-project/end-to-end/src/composed/e2e_sandbox_example.test.ts b/yarn-project/end-to-end/src/composed/e2e_sandbox_example.test.ts index 1ae3c9362f6..1bc0f9b3334 100644 --- a/yarn-project/end-to-end/src/composed/e2e_sandbox_example.test.ts +++ b/yarn-project/end-to-end/src/composed/e2e_sandbox_example.test.ts @@ -175,7 +175,7 @@ describe('e2e_sandbox_example', () => { expect(aliceBalance).toBe(initialSupply - transferQuantity); expect(bobBalance).toBe(transferQuantity + mintQuantity); - }, 120_000); + }); it('can create accounts on the sandbox', async () => { const logger = 
createDebugLogger('token'); @@ -231,5 +231,5 @@ describe('e2e_sandbox_example', () => { // check that alice and bob are in registeredAccounts expect(registeredAccounts.find(acc => acc.equals(alice))).toBeTruthy(); expect(registeredAccounts.find(acc => acc.equals(bob))).toBeTruthy(); - }, 60_000); + }); }); diff --git a/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts b/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts index 29432464114..8f3658a1907 100644 --- a/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts +++ b/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts @@ -159,7 +159,7 @@ describe('L1Publisher integration', () => { feeRecipient = config.feeRecipient || AztecAddress.random(); prevHeader = await builderDb.buildInitialHeader(false); - }, 100_000); + }); const makeEmptyProcessedTx = () => { const tx = makeEmptyProcessedTxFromHistoricalTreeRoots(prevHeader, new Fr(chainId), new Fr(config.version)); @@ -463,7 +463,7 @@ describe('L1Publisher integration', () => { // We wipe the messages from previous iteration nextL1ToL2Messages = []; } - }, 360_000); + }); it(`Build ${numberOfConsecutiveBlocks} blocks of 2 empty txs building on each other`, async () => { const archiveInRollup_ = await rollup.read.archive(); @@ -523,7 +523,7 @@ describe('L1Publisher integration', () => { }); expect(ethTx.input).toEqual(expectedData); } - }, 60_000); + }); }); /** diff --git a/yarn-project/end-to-end/src/e2e_2_pxes.test.ts b/yarn-project/end-to-end/src/e2e_2_pxes.test.ts index 95dcb88a2f4..4f7f20cbe87 100644 --- a/yarn-project/end-to-end/src/e2e_2_pxes.test.ts +++ b/yarn-project/end-to-end/src/e2e_2_pxes.test.ts @@ -45,7 +45,7 @@ describe('e2e_2_pxes', () => { wallets: [walletB], teardown: teardownB, } = await setupPXEService(1, aztecNode!, {}, undefined, true)); - }, 100_000); + }); afterEach(async () => { await teardownB(); @@ -163,7 +163,7 @@ describe('e2e_2_pxes', () => { ); await expectTokenBalance(walletB, tokenAddress, walletB.getAddress(), transferAmount1 - transferAmount2); await expectsNumOfEncryptedLogsInTheLastBlockToBe(aztecNode, 2); - }, 120_000); + }); const deployChildContractViaServerA = async () => { logger.info(`Deploying Child contract...`); diff --git a/yarn-project/end-to-end/src/e2e_account_contracts.test.ts b/yarn-project/end-to-end/src/e2e_account_contracts.test.ts index 93fee9d8589..e5f8f0743a5 100644 --- a/yarn-project/end-to-end/src/e2e_account_contracts.test.ts +++ b/yarn-project/end-to-end/src/e2e_account_contracts.test.ts @@ -40,21 +40,21 @@ function itShouldBehaveLikeAnAccountContract( wallet = await walletSetup(pxe, secretKey, getAccountContract(signingKey)); child = await ChildContract.deploy(wallet).send().deployed(); - }, 60_000); + }); afterEach(() => teardown()); it('calls a private function', async () => { logger.info('Calling private function...'); await child.methods.value(42).send().wait({ interval: 0.1 }); - }, 60_000); + }); it('calls a public function', async () => { logger.info('Calling public function...'); await child.methods.pub_inc_value(42).send().wait({ interval: 0.1 }); const storedValue = await pxe.getPublicStorageAt(child.address, new Fr(1)); expect(storedValue).toEqual(new Fr(42n)); - }, 60_000); + }); // TODO(#5830): re-enable this test it.skip('fails to call a function using an invalid signature', async () => { diff --git a/yarn-project/end-to-end/src/e2e_authwit.test.ts b/yarn-project/end-to-end/src/e2e_authwit.test.ts index b882374745c..42865d4793a 100644 --- 
a/yarn-project/end-to-end/src/e2e_authwit.test.ts +++ b/yarn-project/end-to-end/src/e2e_authwit.test.ts @@ -22,7 +22,7 @@ describe('e2e_authwit_tests', () => { const nodeInfo = await wallets[0].getNodeInfo(); chainId = new Fr(nodeInfo.chainId); version = new Fr(nodeInfo.protocolVersion); - }, 100_000); + }); describe('Private', () => { describe('arbitrary data', () => { diff --git a/yarn-project/end-to-end/src/e2e_avm_simulator.test.ts b/yarn-project/end-to-end/src/e2e_avm_simulator.test.ts index c4d153b3bed..12cd2508c2f 100644 --- a/yarn-project/end-to-end/src/e2e_avm_simulator.test.ts +++ b/yarn-project/end-to-end/src/e2e_avm_simulator.test.ts @@ -22,7 +22,7 @@ describe('e2e_avm_simulator', () => { beforeAll(async () => { ({ teardown, wallet } = await setup()); await publicDeployAccounts(wallet, [wallet]); - }, 100_000); + }); afterAll(() => teardown()); @@ -31,7 +31,7 @@ describe('e2e_avm_simulator', () => { beforeEach(async () => { avmContract = await AvmTestContract.deploy(wallet).send().deployed(); - }, 50_000); + }); describe('Gas metering', () => { it('Tracks L2 gas usage on simulation', async () => { @@ -93,7 +93,7 @@ describe('e2e_avm_simulator', () => { beforeEach(async () => { avmContract = await AvmAcvmInteropTestContract.deploy(wallet).send().deployed(); - }, 50_000); + }); it('Can execute ACVM function among AVM functions', async () => { expect(await avmContract.methods.constant_field_acvm().simulate()).toEqual(123456n); @@ -159,7 +159,7 @@ describe('e2e_avm_simulator', () => { beforeEach(async () => { avmContract = await AvmInitializerTestContract.deploy(wallet).send().deployed(); - }, 50_000); + }); describe('Storage', () => { it('Read immutable (initialized) storage (Field)', async () => { @@ -176,7 +176,7 @@ describe('e2e_avm_simulator', () => { beforeEach(async () => { avmContract = await AvmNestedCallsTestContract.deploy(wallet).send().deployed(); secondAvmContract = await AvmNestedCallsTestContract.deploy(wallet).send().deployed(); - }, 50_000); + }); it('Should NOT be able to emit the same unsiloed nullifier from the same contract', async () => { const nullifier = new Fr(1); diff --git a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/burn.test.ts b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/burn.test.ts index ffe828172f4..cb521a0baef 100644 --- a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/burn.test.ts +++ b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/burn.test.ts @@ -14,7 +14,7 @@ describe('e2e_blacklist_token_contract burn', () => { await t.setup(); // Have to destructure again to ensure we have latest refs. ({ asset, tokenSim, wallets, blacklisted } = t); - }, 200_000); + }); afterAll(async () => { await t.teardown(); diff --git a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/minting.test.ts b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/minting.test.ts index b003195c393..0bef2150bae 100644 --- a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/minting.test.ts +++ b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/minting.test.ts @@ -14,7 +14,7 @@ describe('e2e_blacklist_token_contract mint', () => { await t.setup(); // Have to destructure again to ensure we have latest refs. 
({ asset, tokenSim, wallets, blacklisted } = t); - }, 200_000); + }); afterAll(async () => { await t.teardown(); diff --git a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/shielding.test.ts b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/shielding.test.ts index 0414e3c5742..4bffbc3a7ef 100644 --- a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/shielding.test.ts +++ b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/shielding.test.ts @@ -13,7 +13,7 @@ describe('e2e_blacklist_token_contract shield + redeem_shield', () => { await t.setup(); // Have to destructure again to ensure we have latest refs. ({ asset, tokenSim, wallets, blacklisted } = t); - }, 200_000); + }); afterAll(async () => { await t.teardown(); diff --git a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/transfer_private.test.ts b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/transfer_private.test.ts index 6ac2f3b6af1..ed78def1481 100644 --- a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/transfer_private.test.ts +++ b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/transfer_private.test.ts @@ -13,7 +13,7 @@ describe('e2e_blacklist_token_contract transfer private', () => { await t.setup(); // Have to destructure again to ensure we have latest refs. ({ asset, tokenSim, wallets, blacklisted } = t); - }, 200_000); + }); afterAll(async () => { await t.teardown(); diff --git a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/transfer_public.test.ts b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/transfer_public.test.ts index bc452f28f11..1459704e8aa 100644 --- a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/transfer_public.test.ts +++ b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/transfer_public.test.ts @@ -14,7 +14,7 @@ describe('e2e_blacklist_token_contract transfer public', () => { await t.setup(); // Have to destructure again to ensure we have latest refs. ({ asset, tokenSim, wallets, blacklisted } = t); - }, 200_000); + }); afterAll(async () => { await t.teardown(); diff --git a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/unshielding.test.ts b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/unshielding.test.ts index 4a26ad118b9..ba8f69e6f26 100644 --- a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/unshielding.test.ts +++ b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/unshielding.test.ts @@ -13,7 +13,7 @@ describe('e2e_blacklist_token_contract unshielding', () => { await t.setup(); // Have to destructure again to ensure we have latest refs. 
({ asset, tokenSim, wallets, blacklisted } = t); - }, 200_000); + }); afterAll(async () => { await t.teardown(); diff --git a/yarn-project/end-to-end/src/e2e_block_building.test.ts b/yarn-project/end-to-end/src/e2e_block_building.test.ts index b2f77702822..31698d5516b 100644 --- a/yarn-project/end-to-end/src/e2e_block_building.test.ts +++ b/yarn-project/end-to-end/src/e2e_block_building.test.ts @@ -39,7 +39,7 @@ describe('e2e_block_building', () => { aztecNode, wallets: [owner, minter], } = await setup(2)); - }, 100_000); + }); afterEach(() => aztecNode.setConfig({ minTxsPerBlock: 1 })); afterAll(() => teardown()); @@ -75,7 +75,7 @@ describe('e2e_block_building', () => { const isContractDeployed = async (address: AztecAddress) => !!(await pxe.getContractInstance(address)); const areDeployed = await Promise.all(receipts.map(r => isContractDeployed(r.contract.address))); expect(areDeployed).toEqual(times(TX_COUNT, () => true)); - }, 60_000); + }); it.skip('can call public function from different tx in same block', async () => { // Ensure both txs will land on the same block @@ -108,7 +108,7 @@ describe('e2e_block_building', () => { ]); expect(deployTxReceipt.blockNumber).toEqual(callTxReceipt.blockNumber); - }, 60_000); + }); }); describe('double-spends', () => { @@ -119,7 +119,7 @@ describe('e2e_block_building', () => { ({ teardown, pxe, logger, wallet: owner } = await setup(1)); contract = await TestContract.deploy(owner).send().deployed(); logger.info(`Test contract deployed at ${contract.address}`); - }, 100_000); + }); afterAll(() => teardown()); @@ -133,7 +133,7 @@ describe('e2e_block_building', () => { } const [tx1, tx2] = calls.map(call => call.send()); await expectXorTx(tx1, tx2); - }, 30_000); + }); it('drops tx with public nullifier already emitted on the same block', async () => { const secret = Fr.random(); @@ -143,13 +143,13 @@ describe('e2e_block_building', () => { } const [tx1, tx2] = calls.map(call => call.send()); await expectXorTx(tx1, tx2); - }, 30_000); + }); it('drops tx with two equal nullifiers', async () => { const nullifier = Fr.random(); const calls = times(2, () => contract.methods.emit_nullifier(nullifier).request()); await expect(new BatchCall(owner, calls).send().wait()).rejects.toThrow(/dropped/); - }, 30_000); + }); it('drops tx with private nullifier already emitted from public on the same block', async () => { const secret = Fr.random(); @@ -166,7 +166,7 @@ describe('e2e_block_building', () => { } const [tx1, tx2] = calls.map(call => call.send()); await expectXorTx(tx1, tx2); - }, 30_000); + }); }); describe('across blocks', () => { diff --git a/yarn-project/end-to-end/src/e2e_card_game.test.ts b/yarn-project/end-to-end/src/e2e_card_game.test.ts index 6342654d9ed..56f7a547f41 100644 --- a/yarn-project/end-to-end/src/e2e_card_game.test.ts +++ b/yarn-project/end-to-end/src/e2e_card_game.test.ts @@ -123,7 +123,7 @@ describe('e2e_card_game', () => { [firstPlayer, secondPlayer, thirdPlayer] = wallets.map(a => a.getAddress()); masterNullifierSecretKeys = PLAYER_SECRET_KEYS.map(sk => deriveMasterNullifierSecretKey(sk)); - }, 100_000); + }); beforeEach(async () => { await deployContract(); @@ -148,7 +148,7 @@ describe('e2e_card_game', () => { const collection = await contract.methods.view_collection_cards(firstPlayer, 0).simulate({ from: firstPlayer }); const expected = getPackedCards(0, seed); expect(unwrapOptions(collection)).toMatchObject(expected); - }, 30_000); + }); describe('game join', () => { const seed = 27n; @@ -162,7 +162,7 @@ describe('e2e_card_game', 
() => { firstPlayerCollection = unwrapOptions( await contract.methods.view_collection_cards(firstPlayer, 0).simulate({ from: firstPlayer }), ); - }, 30_000); + }); it('should be able to join games', async () => { await contract.methods @@ -199,7 +199,7 @@ describe('e2e_card_game', () => { claimed: false, current_player: 0n, }); - }, 30_000); + }); it('should start games', async () => { const secondPlayerCollection = unwrapOptions( @@ -239,7 +239,7 @@ describe('e2e_card_game', () => { claimed: false, current_player: 0n, }); - }, 360_000); + }); }); describe('game play', () => { @@ -266,7 +266,7 @@ describe('e2e_card_game', () => { thirdPlayerCOllection = unwrapOptions( await contract.methods.view_collection_cards(thirdPlayer, 0).simulate({ from: thirdPlayer }), ); - }, 60_000); + }); async function joinGame(playerWallet: Wallet, cards: Card[], id = GAME_ID) { await contract.withWallet(playerWallet).methods.join_game(id, cards.map(cardToField)).send().wait(); @@ -343,6 +343,6 @@ describe('e2e_card_game', () => { ); expect(game.finished).toBe(true); - }, 360_000); + }); }); }); diff --git a/yarn-project/end-to-end/src/e2e_cheat_codes.test.ts b/yarn-project/end-to-end/src/e2e_cheat_codes.test.ts index 4fd2e8d6d6f..562a7fef0da 100644 --- a/yarn-project/end-to-end/src/e2e_cheat_codes.test.ts +++ b/yarn-project/end-to-end/src/e2e_cheat_codes.test.ts @@ -47,7 +47,7 @@ describe('e2e_cheat_codes', () => { admin = wallet.getCompleteAddress(); token = await TokenContract.deploy(wallet, admin, 'TokenName', 'TokenSymbol', 18).send().deployed(); - }, 100_000); + }); afterAll(() => teardown()); @@ -186,7 +186,7 @@ describe('e2e_cheat_codes', () => { .wait({ interval: 0.1 }); // block is published at t >= newTimestamp + 1. expect(Number(await rollup.read.lastBlockTs())).toBeGreaterThanOrEqual(newTimestamp + 1); - }, 50_000); + }); it('should throw if setting L2 block time to a past timestamp', async () => { const timestamp = await cc.eth.timestamp(); @@ -244,6 +244,6 @@ describe('e2e_cheat_codes', () => { const balance = values.reduce((sum, current) => sum + current.toBigInt(), 0n); expect(balance).toEqual(mintAmount); // docs:end:load_private_cheatcode - }, 50_000); + }); }); }); diff --git a/yarn-project/end-to-end/src/e2e_counter_contract.test.ts b/yarn-project/end-to-end/src/e2e_counter_contract.test.ts index ec7394bb434..da41aa985ca 100644 --- a/yarn-project/end-to-end/src/e2e_counter_contract.test.ts +++ b/yarn-project/end-to-end/src/e2e_counter_contract.test.ts @@ -19,7 +19,7 @@ describe('e2e_counter_contract', () => { counterContract = await CounterContract.deploy(wallet, 0, owner).send().deployed(); logger.info(`Counter contract deployed at ${counterContract.address}`); - }, 45_000); + }); afterAll(() => teardown()); diff --git a/yarn-project/end-to-end/src/e2e_cross_chain_messaging.test.ts b/yarn-project/end-to-end/src/e2e_cross_chain_messaging.test.ts index 862c4a1ac09..ce136e9113c 100644 --- a/yarn-project/end-to-end/src/e2e_cross_chain_messaging.test.ts +++ b/yarn-project/end-to-end/src/e2e_cross_chain_messaging.test.ts @@ -61,7 +61,7 @@ describe('e2e_cross_chain_messaging', () => { aztecNode = aztecNode_; teardown = teardown_; logger.info('Successfully deployed contracts and initialized portal'); - }, 100_000); + }); afterEach(async () => { await teardown(); @@ -132,7 +132,7 @@ describe('e2e_cross_chain_messaging', () => { siblingPath, ); expect(await crossChainTestHarness.getL1BalanceOf(ethAccount)).toBe(l1TokenBalance - bridgeAmount + withdrawAmount); - }, 120_000); + }); // 
docs:end:e2e_private_cross_chain
    // Unit tests for TokenBridge's private methods.
@@ -191,7 +191,7 @@ describe('e2e_cross_chain_messaging', () => {
     );
     await crossChainTestHarness.redeemShieldPrivatelyOnL2(bridgeAmount, secretForRedeemingMintedNotes);
     await crossChainTestHarness.expectPrivateBalanceOnL2(ownerAddress, bridgeAmount);
-  }, 120_000);
+  });

   it("Bridge can't withdraw my funds if I don't give approval", async () => {
     const mintAmountToUser1 = 100n;
@@ -212,7 +212,7 @@ describe('e2e_cross_chain_messaging', () => {
         .methods.exit_to_l1_private(l2Token.address, ethAccount, withdrawAmount, EthAddress.ZERO, nonce)
         .prove(),
     ).rejects.toThrow(`Unknown auth witness for message hash ${expectedBurnMessageHash.toString()}`);
-  }, 120_000);
+  });

   it("Can't claim funds publicly if they were deposited privately", async () => {
     // 1. Mint tokens on L1
@@ -257,5 +257,5 @@ describe('e2e_cross_chain_messaging', () => {
         .methods.claim_public(ownerAddress, bridgeAmount, secretForL2MessageConsumption, messageLeafIndex)
         .prove(),
     ).rejects.toThrow(`No non-nullified L1 to L2 message found for message hash ${wrongMessage.hash().toString()}`);
-  }, 120_000);
+  });
 });
diff --git a/yarn-project/end-to-end/src/e2e_dapp_subscription.test.ts b/yarn-project/end-to-end/src/e2e_dapp_subscription.test.ts
index d496bfb8716..109c89c181d 100644
--- a/yarn-project/end-to-end/src/e2e_dapp_subscription.test.ts
+++ b/yarn-project/end-to-end/src/e2e_dapp_subscription.test.ts
@@ -121,7 +121,7 @@ describe('e2e_dapp_subscription', () => {
       [aliceAddress, sequencerAddress, subscriptionContract.address, bananaFPC.address],
       [0n, 0n, INITIAL_GAS_BALANCE, INITIAL_GAS_BALANCE],
     );
-  }, 180_000);
+  });

   it('should allow Alice to subscribe by paying privately with bananas', async () => {
     /**
diff --git a/yarn-project/end-to-end/src/e2e_delegate_calls/delegate.test.ts b/yarn-project/end-to-end/src/e2e_delegate_calls/delegate.test.ts
index 03246dcff0f..9f6890efb00 100644
--- a/yarn-project/end-to-end/src/e2e_delegate_calls/delegate.test.ts
+++ b/yarn-project/end-to-end/src/e2e_delegate_calls/delegate.test.ts
@@ -33,7 +33,7 @@ describe('e2e_delegate_calls', () => {
       expect(delegatedOnValue).toEqual(0n);
       expect(delegatorValue).toEqual(sentValue);
-    }, 100_000);
+    });

     it("runs another contract's enqueued public function on delegator's storage", async () => {
       const sentValue = 42n;
@@ -44,7 +44,7 @@ describe('e2e_delegate_calls', () => {
       expect(delegatedOnValue).toEqual(0n);
       expect(delegatorValue).toEqual(sentValue);
-    }, 100_000);
+    });

     it("runs another contract's public function on delegator's storage", async () => {
       const sentValue = 42n;
@@ -55,6 +55,6 @@ describe('e2e_delegate_calls', () => {
       expect(delegatedOnValue).toEqual(0n);
       expect(delegatorValue).toEqual(sentValue);
-    }, 100_000);
+    });
   });
 });
diff --git a/yarn-project/end-to-end/src/e2e_deploy_contract/contract_class_registration.test.ts b/yarn-project/end-to-end/src/e2e_deploy_contract/contract_class_registration.test.ts
index 5c6d418a535..aa92b4d2b56 100644
--- a/yarn-project/end-to-end/src/e2e_deploy_contract/contract_class_registration.test.ts
+++ b/yarn-project/end-to-end/src/e2e_deploy_contract/contract_class_registration.test.ts
@@ -47,7 +47,7 @@ describe('e2e_deploy_contract contract class registration', () => {
     artifact = StatefulTestContract.artifact;
     await registerContractClass(wallet, artifact).then(c => c.send().wait());
     contractClass = getContractClassFromArtifact(artifact);
-  }, 60_000);
+  });

   it('registers the contract class on the node', async () => {
     const registeredClass = await aztecNode.getContractClass(contractClass.id);
@@ -70,7 +70,7 @@ describe('e2e_deploy_contract contract class registration', () => {
     const fetchedFunction = fetchedClass!.privateFunctions[0]!;
     expect(fetchedFunction).toBeDefined();
     expect(fetchedFunction.selector).toEqual(selector);
-  }, 60_000);
+  });

   it('broadcasts an unconstrained function', async () => {
     const functionArtifact = artifact.functions.find(fn => fn.functionType === FunctionType.UNCONSTRAINED)!;
@@ -84,7 +84,7 @@ describe('e2e_deploy_contract contract class registration', () => {
     const fetchedFunction = fetchedClass!.unconstrainedFunctions[0]!;
     expect(fetchedFunction).toBeDefined();
     expect(fetchedFunction.selector).toEqual(selector);
-  }, 60_000);
+  });

   const testDeployingAnInstance = (how: string, deployFn: (toDeploy: ContractInstanceWithAddress) => Promise) =>
     describe(`deploying a contract instance ${how}`, () => {
@@ -131,7 +131,7 @@ describe('e2e_deploy_contract contract class registration', () => {
       describe('using a private constructor', () => {
         beforeAll(async () => {
           ({ instance, initArgs, contract } = await deployInstance());
-        }, 60_000);
+        });

         it('stores contract instance in the aztec node', async () => {
           const deployed = await aztecNode.getContract(instance.address);
@@ -152,7 +152,7 @@ describe('e2e_deploy_contract contract class registration', () => {
             .wait();
           const stored = await contract.methods.get_public_value(whom).simulate();
           expect(stored).toEqual(10n);
-        }, 30_000);
+        });

         it('refuses to call a public function with init check if the instance is not initialized', async () => {
           const whom = AztecAddress.random();
@@ -164,13 +164,13 @@ describe('e2e_deploy_contract contract class registration', () => {
           // Meanwhile we check we didn't increment the value
           expect(await contract.methods.get_public_value(whom).simulate()).toEqual(0n);
-        }, 30_000);
+        });

         it('refuses to initialize the instance with wrong args via a private function', async () => {
           await expect(contract.methods.constructor(AztecAddress.random(), 43).prove()).rejects.toThrow(
             /initialization hash does not match/i,
           );
-        }, 30_000);
+        });

         it('initializes the contract and calls a public function', async () => {
           await contract.methods
@@ -181,7 +181,7 @@ describe('e2e_deploy_contract contract class registration', () => {
           await contract.methods.increment_public_value(whom, 10).send({ skipPublicSimulation: true }).wait();
           const stored = await contract.methods.get_public_value(whom).simulate();
           expect(stored).toEqual(10n);
-        }, 30_000);
+        });

         it('refuses to reinitialize the contract', async () => {
           await expect(
@@ -190,13 +190,13 @@ describe('e2e_deploy_contract contract class registration', () => {
              .send({ skipPublicSimulation: true })
              .wait(),
           ).rejects.toThrow(/dropped/i);
-        }, 30_000);
+        });
       });

       describe('using a public constructor', () => {
         beforeAll(async () => {
           ({ instance, initArgs, contract } = await deployInstance({ constructorName: 'public_constructor' }));
-        }, 60_000);
+        });

         it('refuses to initialize the instance with wrong args via a public function', async () => {
           const whom = AztecAddress.random();
@@ -206,7 +206,7 @@ describe('e2e_deploy_contract contract class registration', () => {
             .wait({ dontThrowOnRevert: true });
           expect(receipt.status).toEqual(TxStatus.REVERTED);
           expect(await contract.methods.get_public_value(whom).simulate()).toEqual(0n);
-        }, 30_000);
+        });

         it('initializes the contract and calls a public function', async () => {
           await contract.methods
@@ -217,7 +217,7 @@ describe('e2e_deploy_contract contract class registration', () => {
           await contract.methods.increment_public_value(whom, 10).send({ skipPublicSimulation: true }).wait();
           const stored = await contract.methods.get_public_value(whom).simulate();
           expect(stored).toEqual(10n);
-        }, 30_000);
+        });

         it('refuses to reinitialize the contract', async () => {
           await expect(
@@ -226,7 +226,7 @@ describe('e2e_deploy_contract contract class registration', () => {
              .send({ skipPublicSimulation: true })
              .wait(),
           ).rejects.toThrow(/dropped/i);
-        }, 30_000);
+        });
       });
     });
diff --git a/yarn-project/end-to-end/src/e2e_deploy_contract/deploy_method.test.ts b/yarn-project/end-to-end/src/e2e_deploy_contract/deploy_method.test.ts
index f791681b1cb..a881d5a277b 100644
--- a/yarn-project/end-to-end/src/e2e_deploy_contract/deploy_method.test.ts
+++ b/yarn-project/end-to-end/src/e2e_deploy_contract/deploy_method.test.ts
@@ -26,7 +26,7 @@ describe('e2e_deploy_contract deploy method', () => {
     logger.debug(`Calling public method on stateful test contract at ${contract.address.toString()}`);
     await contract.methods.increment_public_value(owner, 84).send().wait();
     expect(await contract.methods.get_public_value(owner).simulate()).toEqual(84n);
-  }, 60_000);
+  });

   it('publicly universally deploys and initializes a contract', async () => {
     const owner = wallet.getAddress();
@@ -35,13 +35,13 @@ describe('e2e_deploy_contract deploy method', () => {
     expect(await contract.methods.summed_values(owner).simulate()).toEqual(42n);
     await contract.methods.increment_public_value(owner, 84).send().wait();
     expect(await contract.methods.get_public_value(owner).simulate()).toEqual(84n);
-  }, 60_000);
+  });

   it('publicly deploys and calls a public function from the constructor', async () => {
     const owner = wallet.getAddress();
     const token = await TokenContract.deploy(wallet, owner, 'TOKEN', 'TKN', 18).send().deployed();
     expect(await token.methods.is_minter(owner).simulate()).toEqual(true);
-  }, 60_000);
+  });

   it('publicly deploys and initializes via a public function', async () => {
     const owner = wallet.getAddress();
@@ -53,7 +53,7 @@ describe('e2e_deploy_contract deploy method', () => {
     logger.debug(`Calling a private function to ensure the contract was properly initialized`);
     await contract.methods.create_note(owner, 30).send().wait();
     expect(await contract.methods.summed_values(owner).simulate()).toEqual(30n);
-  }, 60_000);
+  });

   it('deploys a contract with a default initializer not named constructor', async () => {
     logger.debug(`Deploying contract with a default initializer named initialize`);
diff --git a/yarn-project/end-to-end/src/e2e_deploy_contract/legacy.test.ts b/yarn-project/end-to-end/src/e2e_deploy_contract/legacy.test.ts
index 8dda2e91815..403da38154c 100644
--- a/yarn-project/end-to-end/src/e2e_deploy_contract/legacy.test.ts
+++ b/yarn-project/end-to-end/src/e2e_deploy_contract/legacy.test.ts
@@ -44,7 +44,7 @@ describe('e2e_deploy_contract legacy', () => {
     expect(receipt.contract.address).toEqual(deploymentData.address);
     expect(await pxe.getContractInstance(deploymentData.address)).toBeDefined();
     expect(await pxe.isContractPubliclyDeployed(deploymentData.address)).toBeDefined();
-  }, 60_000);
+  });

   /**
    * Verify that we can produce multiple rollups.
@@ -56,7 +56,7 @@ describe('e2e_deploy_contract legacy', () => {
       logger.info(`Deploying contract ${index + 1}...`);
       await deployer.deploy().send({ contractAddressSalt: Fr.random() }).wait({ wallet });
     }
-  }, 60_000);
+  });

   /**
    * Verify that we can deploy multiple contracts and interact with all of them.
@@ -70,7 +70,7 @@ describe('e2e_deploy_contract legacy', () => {
       logger.info(`Sending TX to contract ${index + 1}...`);
       await receipt.contract.methods.get_public_key(wallet.getAddress()).send().wait();
     }
-  }, 90_000);
+  });

   /**
    * Milestone 1.2.
@@ -82,7 +82,7 @@ describe('e2e_deploy_contract legacy', () => {
     await deployer.deploy().send({ contractAddressSalt }).wait({ wallet });
     await expect(deployer.deploy().send({ contractAddressSalt }).wait()).rejects.toThrow(/dropped/);
-  }, 60_000);
+  });

   it('should not deploy a contract which failed the public part of the execution', async () => {
     // This test requires at least another good transaction to go through in the same block as the bad one.
@@ -114,5 +114,5 @@ describe('e2e_deploy_contract legacy', () => {
     // But the bad tx did not deploy
     await expect(pxe.isContractClassPubliclyRegistered(badDeploy.getInstance().address)).resolves.toBeFalsy();
-  }, 90_000);
+  });
 });
diff --git a/yarn-project/end-to-end/src/e2e_deploy_contract/private_initialization.test.ts b/yarn-project/end-to-end/src/e2e_deploy_contract/private_initialization.test.ts
index 7ea721c21dd..6567c8e5cc0 100644
--- a/yarn-project/end-to-end/src/e2e_deploy_contract/private_initialization.test.ts
+++ b/yarn-project/end-to-end/src/e2e_deploy_contract/private_initialization.test.ts
@@ -65,7 +65,7 @@ describe('e2e_deploy_contract private initialization', () => {
     await new BatchCall(wallet, calls).send().wait();
     expect(await contracts[0].methods.summed_values(owner).simulate()).toEqual(42n);
     expect(await contracts[1].methods.summed_values(owner).simulate()).toEqual(52n);
-  }, 30_000);
+  });

   // TODO(@spalladino): This won't work until we can read a nullifier in the same tx in which it was emitted.
   it.skip('initializes and calls a private function in a single tx', async () => {
diff --git a/yarn-project/end-to-end/src/e2e_encryption.test.ts b/yarn-project/end-to-end/src/e2e_encryption.test.ts
index 7734c66ee46..a8d4e7b34be 100644
--- a/yarn-project/end-to-end/src/e2e_encryption.test.ts
+++ b/yarn-project/end-to-end/src/e2e_encryption.test.ts
@@ -17,7 +17,7 @@ describe('e2e_encryption', () => {
   beforeAll(async () => {
     ({ teardown, wallet } = await setup());
     contract = await TestContract.deploy(wallet).send().deployed();
-  }, 45_000);
+  });

   afterAll(() => teardown());
diff --git a/yarn-project/end-to-end/src/e2e_escrow_contract.test.ts b/yarn-project/end-to-end/src/e2e_escrow_contract.test.ts
index 12017b84bb8..8d0639c2346 100644
--- a/yarn-project/end-to-end/src/e2e_escrow_contract.test.ts
+++ b/yarn-project/end-to-end/src/e2e_escrow_contract.test.ts
@@ -77,7 +77,7 @@ describe('e2e_escrow_contract', () => {
     await token.methods.redeem_shield(escrowContract.address, mintAmount, secret).send().wait();
     logger.info(`Token contract deployed at ${token.address}`);
-  }, 100_000);
+  });

   afterEach(() => teardown(), 30_000);
@@ -98,13 +98,13 @@ describe('e2e_escrow_contract', () => {
     await expectBalance(owner, 0n);
     await expectBalance(recipient, 30n);
     await expectBalance(escrowContract.address, 70n);
-  }, 60_000);
+  });

   it('refuses to withdraw funds as a non-owner', async () => {
     await expect(
       escrowContract.withWallet(recipientWallet).methods.withdraw(token.address, 30, recipient).prove(),
     ).rejects.toThrow();
-  }, 60_000);
+  });

   it('moves funds using multiple keys on the same tx (#1010)', async () => {
     logger.info(`Minting funds in token contract to ${owner}`);
@@ -136,5 +136,5 @@ describe('e2e_escrow_contract', () => {
     await new BatchCall(wallet, actions).send().wait();
     await expectBalance(recipient, 30n);
-  }, 120_000);
+  });
 });
diff --git a/yarn-project/end-to-end/src/e2e_key_registry.test.ts b/yarn-project/end-to-end/src/e2e_key_registry.test.ts
index b3dccd1b34b..97157db0657 100644
--- a/yarn-project/end-to-end/src/e2e_key_registry.test.ts
+++ b/yarn-project/end-to-end/src/e2e_key_registry.test.ts
@@ -28,7 +28,7 @@ describe('SharedMutablePrivateGetter', () => {
     testContract = await TestContract.deploy(wallets[0]).send().deployed();
     await publicDeployAccounts(wallets[0], wallets.slice(0, 2));
-  }, 120_000);
+  });

   const delay = async (blocks: number) => {
     for (let i = 0; i < blocks; i++) {
diff --git a/yarn-project/end-to-end/src/e2e_lending_contract.test.ts b/yarn-project/end-to-end/src/e2e_lending_contract.test.ts
index 2dd4614f80e..62acc1ab9c6 100644
--- a/yarn-project/end-to-end/src/e2e_lending_contract.test.ts
+++ b/yarn-project/end-to-end/src/e2e_lending_contract.test.ts
@@ -77,7 +77,7 @@ describe('e2e_lending_contract', () => {
       new TokenSimulator(collateralAsset, logger, [lendingContract.address, wallet.getAddress()]),
       new TokenSimulator(stableCoin, logger, [lendingContract.address, wallet.getAddress()]),
     );
-  }, 200_000);
+  });

   afterAll(() => teardown());
diff --git a/yarn-project/end-to-end/src/e2e_max_block_number.test.ts b/yarn-project/end-to-end/src/e2e_max_block_number.test.ts
index 56cc4e4f10e..16758ffc7ea 100644
--- a/yarn-project/end-to-end/src/e2e_max_block_number.test.ts
+++ b/yarn-project/end-to-end/src/e2e_max_block_number.test.ts
@@ -13,7 +13,7 @@ describe('e2e_max_block_number', () => {
   beforeAll(async () => {
     ({ teardown, wallet, pxe } = await setup());
     contract = await TestContract.deploy(wallet).send().deployed();
-  }, 45_000);
+  });

   afterAll(() => teardown());
diff --git a/yarn-project/end-to-end/src/e2e_multiple_accounts_1_enc_key.test.ts b/yarn-project/end-to-end/src/e2e_multiple_accounts_1_enc_key.test.ts
index cf239abd44f..a8b0ed53439 100644
--- a/yarn-project/end-to-end/src/e2e_multiple_accounts_1_enc_key.test.ts
+++ b/yarn-project/end-to-end/src/e2e_multiple_accounts_1_enc_key.test.ts
@@ -75,7 +75,7 @@ describe('e2e_multiple_accounts_1_enc_key', () => {
     await pxe.addNote(extendedNote);
     await token.methods.redeem_shield(accounts[0], initialBalance, secret).send().wait();
-  }, 100_000);
+  });

   afterEach(() => teardown());
@@ -142,5 +142,5 @@ describe('e2e_multiple_accounts_1_enc_key', () => {
       expectedBalancesAfterTransfer2[2] + transferAmount3,
     ];
     await transfer(1, 2, transferAmount3, expectedBalancesAfterTransfer3);
-  }, 180_000);
+  });
 });
diff --git a/yarn-project/end-to-end/src/e2e_nested_contract.test.ts b/yarn-project/end-to-end/src/e2e_nested_contract.test.ts
index 661978ce75c..9e878db7895 100644
--- a/yarn-project/end-to-end/src/e2e_nested_contract.test.ts
+++ b/yarn-project/end-to-end/src/e2e_nested_contract.test.ts
@@ -11,7 +11,7 @@ describe('e2e_nested_contract', () => {
   beforeEach(async () => {
     ({ teardown, pxe, wallet, logger } = await setup());
-  }, 100_000);
+  });

   afterEach(() => teardown());
@@ -22,7 +22,7 @@ describe('e2e_nested_contract', () => {
     beforeEach(async () => {
       parentContract = await ParentContract.deploy(wallet).send().deployed();
       childContract = await ChildContract.deploy(wallet).send().deployed();
-    }, 100_000);
+    });

     const getChildStoredValue = (child: { address: AztecAddress }) => pxe.getPublicStorageAt(child.address, new Fr(1));
@@ -31,7 +31,7 @@ describe('e2e_nested_contract', () => {
         .entry_point(childContract.address, childContract.methods.value.selector)
         .send()
         .wait();
-    }, 100_000);
+    });

     it('fails simulation if calling a function not allowed to be called externally', async () => {
       await expect(
@@ -39,14 +39,14 @@ describe('e2e_nested_contract', () => {
           .entry_point(childContract.address, (childContract.methods as any).value_internal.selector)
           .prove(),
       ).rejects.toThrow(/Assertion failed: Function value_internal can only be called internally/);
-    }, 100_000);
+    });

     it('performs public nested calls', async () => {
       await parentContract.methods
         .pub_entry_point(childContract.address, childContract.methods.pub_get_value.selector, 42n)
         .send()
         .wait();
-    }, 100_000);
+    });

     it('enqueues a single public call', async () => {
       await parentContract.methods
@@ -54,7 +54,7 @@ describe('e2e_nested_contract', () => {
         .send()
         .wait();
       expect(await getChildStoredValue(childContract)).toEqual(new Fr(42n));
-    }, 100_000);
+    });

     it('fails simulation if calling a public function not allowed to be called externally', async () => {
       await expect(
@@ -66,7 +66,7 @@ describe('e2e_nested_contract', () => {
           )
           .prove(),
       ).rejects.toThrow(/Assertion failed: Function pub_inc_value_internal can only be called internally/);
-    }, 100_000);
+    });

     it('enqueues multiple public calls', async () => {
       await parentContract.methods
@@ -74,7 +74,7 @@ describe('e2e_nested_contract', () => {
         .send()
         .wait();
       expect(await getChildStoredValue(childContract)).toEqual(new Fr(85n));
-    }, 100_000);
+    });

     it('enqueues a public call with nested public calls', async () => {
       await parentContract.methods
@@ -82,7 +82,7 @@ describe('e2e_nested_contract', () => {
         .send()
         .wait();
       expect(await getChildStoredValue(childContract)).toEqual(new Fr(42n));
-    }, 100_000);
+    });

     it('enqueues multiple public calls with nested public calls', async () => {
       await parentContract.methods
@@ -90,7 +90,7 @@ describe('e2e_nested_contract', () => {
         .send()
         .wait();
       expect(await getChildStoredValue(childContract)).toEqual(new Fr(85n));
-    }, 100_000);
+    });

     // Regression for https://github.com/AztecProtocol/aztec-packages/issues/640
     it('reads fresh value after write within the same tx', async () => {
@@ -99,7 +99,7 @@ describe('e2e_nested_contract', () => {
         .send()
         .wait();
       expect(await getChildStoredValue(childContract)).toEqual(new Fr(84n));
-    }, 100_000);
+    });

     // Regression for https://github.com/AztecProtocol/aztec-packages/issues/1645
     // Executes a public call first and then a private call (which enqueues another public call)
@@ -121,7 +121,7 @@ describe('e2e_nested_contract', () => {
       const processedLogs = extendedLogs.map(extendedLog => toBigIntBE(extendedLog.log.data));
       expect(processedLogs).toEqual([20n, 40n]);
       expect(await getChildStoredValue(childContract)).toEqual(new Fr(40n));
-    }, 100_000);
+    });
   });

   describe('importer uses autogenerated test contract interface', () => {
@@ -133,26 +133,26 @@ describe('e2e_nested_contract', () => {
       importerContract = await ImportTestContract.deploy(wallet).send().deployed();
       logger.info(`Deploying test contract`);
       testContract = await TestContract.deploy(wallet).send().deployed();
-    }, 30_000);
+    });

     it('calls a method with multiple arguments', async () => {
       logger.info(`Calling main on importer contract`);
       await importerContract.methods.main_contract(testContract.address).send().wait();
-    }, 30_000);
+    });

     it('calls a method no arguments', async () => {
       logger.info(`Calling noargs on importer contract`);
       await importerContract.methods.call_no_args(testContract.address).send().wait();
-    }, 30_000);
+    });

     it('calls an open function', async () => {
       logger.info(`Calling openfn on importer contract`);
       await importerContract.methods.call_open_fn(testContract.address).send().wait();
-    }, 30_000);
+    });

     it('calls an open function from an open function', async () => {
       logger.info(`Calling pub openfn on importer contract`);
       await importerContract.methods.pub_call_open_fn(testContract.address).send().wait();
-    }, 30_000);
+    });
   });
 });
diff --git a/yarn-project/end-to-end/src/e2e_non_contract_account.test.ts b/yarn-project/end-to-end/src/e2e_non_contract_account.test.ts
index 8b08c8fa4fc..878112b259a 100644
--- a/yarn-project/end-to-end/src/e2e_non_contract_account.test.ts
+++ b/yarn-project/end-to-end/src/e2e_non_contract_account.test.ts
@@ -30,7 +30,7 @@ describe('e2e_non_contract_account', () => {
     logger.debug(`Deploying L2 contract...`);
     contract = await TestContract.deploy(wallet).send().deployed();
     logger.info(`L2 contract deployed at ${contract.address}`);
-  }, 100_000);
+  });

   afterEach(() => teardown());
@@ -48,7 +48,7 @@ describe('e2e_non_contract_account', () => {
     const siloedNullifier = debugInfo!.nullifiers[1];
     expect(siloedNullifier.equals(expectedSiloedNullifier)).toBeTruthy();
-  }, 120_000);
+  });

   it('msg.sender is 0 when a non-contract account calls a private function on a contract', async () => {
     const contractWithNoContractWallet = await TestContract.at(contract.address, nonContractAccountWallet);
@@ -62,7 +62,7 @@ describe('e2e_non_contract_account', () => {
     const msgSender = toBigInt(logs[0].log.data);
     expect(msgSender).toBe(0n);
-  }, 120_000);
+  });

   // Note: This test doesn't really belong here as it doesn't have anything to do with non-contract accounts. I needed
   // to test the TestNote functionality and it doesn't really fit anywhere else. Creating a separate e2e test for this
diff --git a/yarn-project/end-to-end/src/e2e_note_getter.test.ts b/yarn-project/end-to-end/src/e2e_note_getter.test.ts
index c7deb286a92..b17cb3ed286 100644
--- a/yarn-project/end-to-end/src/e2e_note_getter.test.ts
+++ b/yarn-project/end-to-end/src/e2e_note_getter.test.ts
@@ -21,7 +21,7 @@ describe('e2e_note_getter', () => {
   beforeAll(async () => {
     ({ teardown, wallet } = await setup());
-  }, 25_000);
+  });

   afterAll(() => teardown());
@@ -32,7 +32,7 @@ describe('e2e_note_getter', () => {
       contract = await DocsExampleContract.deploy(wallet).send().deployed();
       // sets card value to 1 and leader to sender.
       await contract.methods.initialize_private(Fr.random(), 1).send().wait();
-    }, 25_000);
+    });

     it('inserts notes from 0-9, then makes multiple queries specifying the total suite of comparators', async () => {
       // ISSUE #4243
@@ -145,7 +145,7 @@ describe('e2e_note_getter', () => {
           { points: 8n, randomness: 1n },
         ].sort(sortFunc),
       );
-    }, 300_000);
+    });
   });

   describe('status filter', () => {
@@ -155,7 +155,7 @@ describe('e2e_note_getter', () => {
     beforeAll(async () => {
       contract = await TestContract.deploy(wallet).send().deployed();
       owner = wallet.getCompleteAddress().address;
-    }, 100_000);
+    });

     const VALUE = 5;
@@ -189,14 +189,14 @@ describe('e2e_note_getter', () => {
       it('returns active notes', async () => {
        await contract.methods.call_create_note(VALUE, owner, storageSlot).send().wait();
        await assertNoteIsReturned(storageSlot, VALUE, activeOrNullified);
-      }, 30_000);
+      });

       it('does not return nullified notes', async () => {
        await contract.methods.call_create_note(VALUE, owner, storageSlot).send().wait();
        await contract.methods.call_destroy_note(storageSlot).send().wait();
        await assertNoReturnValue(storageSlot, activeOrNullified);
-      }, 30_000);
+      });
     });

     describe('active and nullified notes', () => {
@@ -205,14 +205,14 @@ describe('e2e_note_getter', () => {
       it('returns active notes', async () => {
        await contract.methods.call_create_note(VALUE, owner, storageSlot).send().wait();
        await assertNoteIsReturned(storageSlot, VALUE, activeOrNullified);
-      }, 30_000);
+      });

       it('returns nullified notes', async () => {
        await contract.methods.call_create_note(VALUE, owner, storageSlot).send().wait();
        await contract.methods.call_destroy_note(storageSlot).send().wait();
        await assertNoteIsReturned(storageSlot, VALUE, activeOrNullified);
-      }, 30_000);
+      });

       it('returns both active and nullified notes', async () => {
        // We store two notes with two different values in the same storage slot, and then delete one of them. Note that
@@ -236,7 +236,7 @@ describe('e2e_note_getter', () => {
        // however that both view_notes and get_notes get the exact same result.
        expect(viewNotesManyResult).toEqual(getNotesManyResult);
        expect(viewNotesManyResult.sort()).toEqual([BigInt(VALUE), BigInt(VALUE + 1)]);
-      }, 45_000);
+      });
     });
   });
 });
diff --git a/yarn-project/end-to-end/src/e2e_ordering.test.ts b/yarn-project/end-to-end/src/e2e_ordering.test.ts
index 16ce4309f7d..66a37afe6e2 100644
--- a/yarn-project/end-to-end/src/e2e_ordering.test.ts
+++ b/yarn-project/end-to-end/src/e2e_ordering.test.ts
@@ -29,7 +29,7 @@ describe('e2e_ordering', () => {
   beforeEach(async () => {
     ({ teardown, pxe, wallet } = await setup());
-  }, 100_000);
+  });

   afterEach(() => teardown());
diff --git a/yarn-project/end-to-end/src/e2e_outbox.test.ts b/yarn-project/end-to-end/src/e2e_outbox.test.ts
index 9d613efa09f..4a8d8ede821 100644
--- a/yarn-project/end-to-end/src/e2e_outbox.test.ts
+++ b/yarn-project/end-to-end/src/e2e_outbox.test.ts
@@ -32,7 +32,7 @@ describe('E2E Outbox Tests', () => {
     const receipt = await TestContract.deploy(wallets[0]).send({ contractAddressSalt: Fr.ZERO }).wait();
     contract = receipt.contract;
-  }, 100_000);
+  });

   afterAll(() => teardown());
@@ -84,7 +84,7 @@ describe('E2E Outbox Tests', () => {
     expect(index2).toBe(1n);
     const expectedRoot2 = calculateExpectedRoot(l2ToL1Messages![1], siblingPath2 as SiblingPath<2>, index2);
     expect(expectedRoot2.toString('hex')).toEqual(block?.header.contentCommitment.outHash.toString('hex'));
-  }, 360_000);
+  });

   function calculateExpectedRoot(l2ToL1Message: Fr, siblingPath: SiblingPath<2>, index: bigint): Buffer {
     const firstLayerInput: [Buffer, Buffer] =
diff --git a/yarn-project/end-to-end/src/e2e_pending_note_hashes_contract.test.ts b/yarn-project/end-to-end/src/e2e_pending_note_hashes_contract.test.ts
index 47cbd7202ed..405fab1fab7 100644
--- a/yarn-project/end-to-end/src/e2e_pending_note_hashes_contract.test.ts
+++ b/yarn-project/end-to-end/src/e2e_pending_note_hashes_contract.test.ts
@@ -14,7 +14,7 @@ describe('e2e_pending_note_hashes_contract', () => {
   beforeEach(async () => {
     ({ teardown, aztecNode, wallet, logger } = await setup(2));
     owner = wallet.getAddress();
-  }, 100_000);
+  });

   afterEach(() => teardown());
@@ -63,7 +63,7 @@ describe('e2e_pending_note_hashes_contract', () => {
     const deployedContract = await deployContract();
     await deployedContract.methods.test_insert_then_get_then_nullify_flat(mintAmount, owner).send().wait();
-  }, 60_000);
+  });

   it('Squash! Aztec.nr function can "create" and "nullify" note in the same TX', async () => {
     // Kernel will squash the noteHash and its nullifier.
@@ -87,7 +87,7 @@ describe('e2e_pending_note_hashes_contract', () => {
     await expectNoteHashesSquashedExcept(0);
     await expectNullifiersSquashedExcept(0);
-  }, 60_000);
+  });

   it('Squash! Aztec.nr function can "create" 2 notes and "nullify" both in the same TX', async () => {
     // Kernel will squash both noteHashes and their nullifier.
@@ -108,7 +108,7 @@ describe('e2e_pending_note_hashes_contract', () => {
     await expectNoteHashesSquashedExcept(0);
     await expectNullifiersSquashedExcept(0);
-  }, 60_000);
+  });

   it('Squash! Aztec.nr function can "create" 2 notes and "nullify" 1 in the same TX (kernel will squash one note + nullifier)', async () => {
     // Kernel will squash one noteHash and its nullifier.
@@ -130,7 +130,7 @@ describe('e2e_pending_note_hashes_contract', () => {
     await expectNoteHashesSquashedExcept(1);
     await expectNullifiersSquashedExcept(0);
-  }, 60_000);
+  });

   it('Squash! Aztec.nr function can nullify a pending note and a persistent in the same TX', async () => {
     // Create 1 note in isolated TX.
@@ -167,7 +167,7 @@ describe('e2e_pending_note_hashes_contract', () => {
     // the nullifier corresponding to this transient note is squashed, but the
     // other nullifier corresponding to the persistent note becomes persistent itself.
     await expectNullifiersSquashedExcept(1);
-  }, 60_000);
+  });

   it('get_notes function filters a nullified note created in a previous transaction', async () => {
     // Create a note in an isolated transaction.
@@ -195,5 +195,5 @@ describe('e2e_pending_note_hashes_contract', () => {
     // There is a single new nullifier.
     await expectNullifiersSquashedExcept(1);
-  }, 60_000);
+  });
 });
diff --git a/yarn-project/end-to-end/src/e2e_private_voting_contract.test.ts b/yarn-project/end-to-end/src/e2e_private_voting_contract.test.ts
index 15f537643a1..aeda6f45ee8 100644
--- a/yarn-project/end-to-end/src/e2e_private_voting_contract.test.ts
+++ b/yarn-project/end-to-end/src/e2e_private_voting_contract.test.ts
@@ -20,7 +20,7 @@ describe('e2e_voting_contract', () => {
     votingContract = await EasyPrivateVotingContract.deploy(wallet, owner).send().deployed();
     logger.info(`Counter contract deployed at ${votingContract.address}`);
-  }, 45_000);
+  });

   afterAll(() => teardown());
diff --git a/yarn-project/end-to-end/src/e2e_public_to_private_messaging.test.ts b/yarn-project/end-to-end/src/e2e_public_to_private_messaging.test.ts
index 51077dc9de8..6b7ac9451ae 100644
--- a/yarn-project/end-to-end/src/e2e_public_to_private_messaging.test.ts
+++ b/yarn-project/end-to-end/src/e2e_public_to_private_messaging.test.ts
@@ -39,7 +39,7 @@ describe('e2e_public_to_private_messaging', () => {
     teardown = teardown_;
     logger = logger_;
     logger.info('Successfully deployed contracts and initialized portal');
-  }, 100_000);
+  });

   afterEach(async () => {
     await teardown();
@@ -79,5 +79,5 @@ describe('e2e_public_to_private_messaging', () => {
     await crossChainTestHarness.unshieldTokensOnL2(shieldAmount);
     await crossChainTestHarness.expectPublicBalanceOnL2(ownerAddress, bridgeAmount);
     await crossChainTestHarness.expectPrivateBalanceOnL2(ownerAddress, 0n);
-  }, 200_000);
+  });
 });
diff --git a/yarn-project/end-to-end/src/e2e_state_vars.test.ts b/yarn-project/end-to-end/src/e2e_state_vars.test.ts
index 8d63cafea4f..ae6a7281aee 100644
--- a/yarn-project/end-to-end/src/e2e_state_vars.test.ts
+++ b/yarn-project/end-to-end/src/e2e_state_vars.test.ts
@@ -22,7 +22,7 @@ describe('e2e_state_vars', () => {
   beforeAll(async () => {
     ({ teardown, wallet, pxe } = await setup(2));
     contract = await DocsExampleContract.deploy(wallet).send().deployed();
-  }, 60_000);
+  });

   afterAll(() => teardown());
@@ -84,7 +84,7 @@ describe('e2e_state_vars', () => {
       await expect(contract.methods.initialize_shared_immutable(1).send().wait()).rejects.toThrow(
         "Assertion failed: SharedImmutable already initialized 'fields_read[0] == 0'",
       );
-    }, 100_000);
+    });
   });

   describe('PublicImmutable', () => {
@@ -96,7 +96,7 @@ describe('e2e_state_vars', () => {
       expect(p.account).toEqual(wallet.getCompleteAddress().address);
       expect(p.points).toEqual(numPoints);
-    }, 200_000);
+    });

     it('initializing PublicImmutable the second time should fail', async () => {
       // Jest executes the tests sequentially and the first call to initialize_public_immutable was executed
@@ -104,7 +104,7 @@ describe('e2e_state_vars', () => {
       await expect(contract.methods.initialize_public_immutable(1).send().wait()).rejects.toThrow(
         "Assertion failed: PublicImmutable already initialized 'fields_read[0] == 0'",
       );
-    }, 100_000);
+    });
   });

   describe('PrivateMutable', () => {
@@ -246,7 +246,7 @@ describe('e2e_state_vars', () => {
       .methods.set_authorized(AztecAddress.fromField(new Fr(6969696969)))
       .send()
       .wait();
-  }, 30_000);
+  });

   it("checks authorized in auth contract from test contract and finds the old value because the change hasn't been applied yet", async () => {
     const { txHash } = await testContract.methods
diff --git a/yarn-project/end-to-end/src/e2e_static_calls.test.ts b/yarn-project/end-to-end/src/e2e_static_calls.test.ts
index 839e1628acc..2d5a9cbe4dc 100644
--- a/yarn-project/end-to-end/src/e2e_static_calls.test.ts
+++ b/yarn-project/end-to-end/src/e2e_static_calls.test.ts
@@ -11,14 +11,14 @@ describe('e2e_static_calls', () => {
   beforeEach(async () => {
     ({ teardown, wallet } = await setup());
-  }, 100_000);
+  });

   afterEach(() => teardown());

   beforeEach(async () => {
     parentContract = await ParentContract.deploy(wallet).send().deployed();
     childContract = await ChildContract.deploy(wallet).send().deployed();
-  }, 100_000);
+  });

   describe('parent calls child', () => {
     it('performs legal private to private static calls', async () => {
@@ -32,7 +32,7 @@ describe('e2e_static_calls', () => {
         ])
         .send()
         .wait();
-    }, 100_000);
+    });

     it('performs legal (nested) private to private static calls', async () => {
       // We create a note in the set, so...
@@ -45,28 +45,28 @@ describe('e2e_static_calls', () => {
         ])
         .send()
         .wait();
-    }, 100_000);
+    });

     it('performs legal public to public static calls', async () => {
       await parentContract.methods
         .public_static_call(childContract.address, childContract.methods.pub_get_value.selector, [42n])
         .send()
         .wait();
-    }, 100_000);
+    });

     it('performs legal (nested) public to public static calls', async () => {
       await parentContract.methods
         .public_nested_static_call(childContract.address, childContract.methods.pub_get_value.selector, [42n])
         .send()
         .wait();
-    }, 100_000);
+    });

     it('performs legal enqueued public static calls', async () => {
       await parentContract.methods
         .enqueue_static_call_to_pub_function(childContract.address, childContract.methods.pub_get_value.selector, [42n])
         .send()
         .wait();
-    }, 100_000);
+    });

     it('performs legal (nested) enqueued public static calls', async () => {
       await parentContract.methods
@@ -77,7 +77,7 @@ describe('e2e_static_calls', () => {
         )
         .send()
         .wait();
-    }, 100_000);
+    });

     it('fails when performing illegal private to private static calls', async () => {
       await expect(
@@ -89,7 +89,7 @@ describe('e2e_static_calls', () => {
           .send()
           .wait(),
       ).rejects.toThrow('Static call cannot create new notes, emit L2->L1 messages or generate logs');
-    }, 100_000);
+    });

     it('fails when performing illegal (nested) private to private static calls', async () => {
       await expect(
@@ -101,7 +101,7 @@ describe('e2e_static_calls', () => {
           .send()
           .wait(),
       ).rejects.toThrow('Static call cannot create new notes, emit L2->L1 messages or generate logs');
-    }, 100_000);
+    });

     it('fails when performing illegal public to public static calls', async () => {
       await expect(
@@ -110,7 +110,7 @@ describe('e2e_static_calls', () => {
           .send()
          .wait(),
       ).rejects.toThrow('Static call cannot update the state, emit L2->L1 messages or generate logs');
-    }, 100_000);
+    });

     it('fails when performing illegal (nested) public to public static calls', async () => {
       await expect(
@@ -119,7 +119,7 @@ describe('e2e_static_calls', () => {
           .send()
           .wait(),
       ).rejects.toThrow('Static call cannot update the state, emit L2->L1 messages or generate logs');
-    }, 100_000);
+    });

     it('fails when performing illegal enqueued public static calls', async () => {
       await expect(
@@ -130,7 +130,7 @@ describe('e2e_static_calls', () => {
          .send()
          .wait(),
       ).rejects.toThrow('Static call cannot update the state, emit L2->L1 messages or generate logs');
-    }, 100_000);
+    });

     it('fails when performing illegal (nested) enqueued public static calls', async () => {
       await expect(
@@ -143,6 +143,6 @@ describe('e2e_static_calls', () => {
          .send()
          .wait(),
       ).rejects.toThrow('Static call cannot update the state, emit L2->L1 messages or generate logs');
-    }, 100_000);
+    });
   });
 });
diff --git a/yarn-project/end-to-end/src/flakey_e2e_inclusion_proofs_contract.test.ts b/yarn-project/end-to-end/src/flakey_e2e_inclusion_proofs_contract.test.ts
index b82bb8153d9..6be433e2b2a 100644
--- a/yarn-project/end-to-end/src/flakey_e2e_inclusion_proofs_contract.test.ts
+++ b/yarn-project/end-to-end/src/flakey_e2e_inclusion_proofs_contract.test.ts
@@ -38,7 +38,7 @@ describe('e2e_inclusion_proofs_contract', () => {
     const receipt = await InclusionProofsContract.deploy(wallets[0], publicValue).send({ contractAddressSalt }).wait();
     contract = receipt.contract;
     deploymentBlockNumber = receipt.blockNumber!;
-  }, 100_000);
+  });

   afterAll(() => teardown());
diff --git a/yarn-project/end-to-end/src/flakey_e2e_p2p_network.test.ts b/yarn-project/end-to-end/src/flakey_e2e_p2p_network.test.ts
index 77e8382ad67..97fe61a71c0 100644
--- a/yarn-project/end-to-end/src/flakey_e2e_p2p_network.test.ts
+++ b/yarn-project/end-to-end/src/flakey_e2e_p2p_network.test.ts
@@ -37,7 +37,7 @@ describe('e2e_p2p_network', () => {
   beforeEach(async () => {
     ({ teardown, config, logger } = await setup(1));
-  }, 100_000);
+  });

   afterEach(() => teardown());
@@ -68,7 +68,7 @@ describe('e2e_p2p_network', () => {
       await context.pxeService.stop();
     }
     await bootstrapNode.stop();
-  }, 120_000);
+  });

   const createBootstrapNode = async () => {
     const peerId = await createLibP2PPeerId();
diff --git a/yarn-project/end-to-end/src/guides/dapp_testing.test.ts b/yarn-project/end-to-end/src/guides/dapp_testing.test.ts
index 78267af530f..db87d5f90e9 100644
--- a/yarn-project/end-to-end/src/guides/dapp_testing.test.ts
+++ b/yarn-project/end-to-end/src/guides/dapp_testing.test.ts
@@ -39,7 +39,7 @@ describe('guides/dapp/testing', () => {
       token = await TokenContract.deploy(owner, owner.getCompleteAddress(), 'TokenName', 'TokenSymbol', 18)
         .send()
         .deployed();
-    }, 60_000);
+    });

     it('increases recipient funds on mint', async () => {
       const recipientAddress = recipient.getAddress();
@@ -63,7 +63,7 @@ describe('guides/dapp/testing', () => {
       await token.methods.redeem_shield(recipientAddress, mintAmount, secret).send().wait();
       expect(await token.methods.balance_of_private(recipientAddress).simulate()).toEqual(20n);
-    }, 30_000);
+    });
   });
   // docs:end:sandbox-example
@@ -81,7 +81,7 @@ describe('guides/dapp/testing', () => {
         .send()
         .deployed();
       // docs:end:use-existing-wallets
-    }, 30_000);
+    });

     it('increases recipient funds on mint', async () => {
       expect(await token.methods.balance_of_private(recipient.getAddress()).simulate()).toEqual(0n);
@@ -104,7 +104,7 @@ describe('guides/dapp/testing', () => {
       await token.methods.redeem_shield(recipientAddress, mintAmount, secret).send().wait();
       expect(await token.methods.balance_of_private(recipientAddress).simulate()).toEqual(20n);
-    }, 30_000);
+    });
   });

   describe('cheats', () => {
@@ -118,7 +118,7 @@ describe('guides/dapp/testing', () => {
       owner = await createAccount(pxe);
       testContract = await TestContract.deploy(owner).send().deployed();
       cheats = CheatCodes.create(ETHEREUM_HOST, pxe);
-    }, 30_000);
+    });

     it('warps time to 1h into the future', async () => {
       // docs:start:warp
@@ -171,7 +171,7 @@ describe('guides/dapp/testing', () => {
       // The balances mapping is indexed by user address
       ownerSlot = cheats.aztec.computeSlotInMap(TokenContract.storage.balances.slot, ownerAddress);
       // docs:end:calc-slot
-    }, 90_000);
+    });

     it('checks private storage', async () => {
       // docs:start:private-storage
@@ -184,7 +184,7 @@ describe('guides/dapp/testing', () => {
       const balance = values.reduce((sum, current) => sum + current.toBigInt(), 0n);
       expect(balance).toEqual(100n);
       // docs:end:private-storage
-    }, 30_000);
+    });

     it('checks public storage', async () => {
       // docs:start:public-storage
@@ -193,7 +193,7 @@ describe('guides/dapp/testing', () => {
       const balance = await pxe.getPublicStorageAt(token.address, ownerPublicBalanceSlot);
       expect(balance.value).toEqual(100n);
       // docs:end:public-storage
-    }, 30_000);
+    });

     it('checks unencrypted logs, [Kinda broken with current implementation]', async () => {
       // docs:start:unencrypted-logs
@@ -206,21 +206,21 @@ describe('guides/dapp/testing', () => {
       const logs = (await pxe.getUnencryptedLogs(filter)).logs;
       expect(Fr.fromBuffer(logs[0].log.data)).toEqual(value);
       // docs:end:unencrypted-logs
-    }, 30_000);
+    });

     it('asserts a local transaction simulation fails by calling simulate', async () => {
       // docs:start:local-tx-fails
       const call = token.methods.transfer(owner.getAddress(), recipient.getAddress(), 200n, 0);
       await expect(call.prove()).rejects.toThrow(/Balance too low/);
       // docs:end:local-tx-fails
-    }, 30_000);
+    });

     it('asserts a local transaction simulation fails by calling send', async () => {
       // docs:start:local-tx-fails-send
       const call = token.methods.transfer(owner.getAddress(), recipient.getAddress(), 200n, 0);
       await expect(call.send().wait()).rejects.toThrow(/Balance too low/);
       // docs:end:local-tx-fails-send
-    }, 30_000);
+    });

     it('asserts a transaction is dropped', async () => {
       // docs:start:tx-dropped
@@ -233,14 +233,14 @@ describe('guides/dapp/testing', () => {
       await call1.send().wait();
       await expect(call2.send().wait()).rejects.toThrow(/dropped/);
       // docs:end:tx-dropped
-    }, 30_000);
+    });

     it('asserts a simulation for a public function call fails', async () => {
       // docs:start:local-pub-fails
       const call = token.methods.transfer_public(owner.getAddress(), recipient.getAddress(), 1000n, 0);
       await expect(call.prove()).rejects.toThrow(U128_UNDERFLOW_ERROR);
       // docs:end:local-pub-fails
-    }, 30_000);
+    });

     it('asserts a transaction with a failing public call is included (with no state changes)', async () => {
       // docs:start:pub-reverted
@@ -251,7 +251,7 @@ describe('guides/dapp/testing', () => {
       const balance = await pxe.getPublicStorageAt(token.address, ownerPublicBalanceSlot);
       expect(balance.value).toEqual(100n);
       // docs:end:pub-reverted
-    }, 30_000);
+    });
   });
 });
 });
diff --git a/yarn-project/end-to-end/src/guides/up_quick_start.test.ts b/yarn-project/end-to-end/src/guides/up_quick_start.test.ts
index 1dd55e6ea35..03c46a0ff43 100644
--- a/yarn-project/end-to-end/src/guides/up_quick_start.test.ts
+++ b/yarn-project/end-to-end/src/guides/up_quick_start.test.ts
@@ -16,5 +16,5 @@ describe('guides/up_quick_start', () => {
         stdio: 'inherit',
       },
     );
-  }, 90_000);
+  });
 });
diff --git a/yarn-project/end-to-end/src/guides/writing_an_account_contract.test.ts b/yarn-project/end-to-end/src/guides/writing_an_account_contract.test.ts
index 2072a7ddda5..1d4b7fc7007 100644
--- a/yarn-project/end-to-end/src/guides/writing_an_account_contract.test.ts
+++ b/yarn-project/end-to-end/src/guides/writing_an_account_contract.test.ts
@@ -49,7 +49,7 @@ describe('guides/writing_an_account_contract', () => {
   beforeEach(async () => {
     context = await setup(0);
-  }, 60_000);
+  });

   afterEach(() => context.teardown());
@@ -104,5 +104,5 @@ describe('guides/writing_an_account_contract', () => {
       logger.info(`Failed to send tx: ${err}`);
     }
     // docs:end:account-contract-fails
-  }, 60_000);
+  });
 });
diff --git a/yarn-project/end-to-end/src/shared/browser.ts b/yarn-project/end-to-end/src/shared/browser.ts
index 9a94d89db75..4463c27cd95 100644
--- a/yarn-project/end-to-end/src/shared/browser.ts
+++ b/yarn-project/end-to-end/src/shared/browser.ts
@@ -101,7 +101,7 @@ export const browserTestSuite = (
       pageLogger.verbose('Waiting for window.AztecJs...');
       await AztecJs.sleep(1000);
     }
-  }, 120_000);
+  });

   afterAll(async () => {
     await browser.close();
@@ -136,11 +136,11 @@ export const browserTestSuite = (
       const accounts = await testClient.getRegisteredAccounts();
      const stringAccounts = accounts.map(acc => acc.address.toString());
       expect(stringAccounts.includes(result)).toBeTruthy();
-    }, 15_000);
+    });

     it('Deploys Token contract', async () => {
       await deployTokenContract();
-    }, 60_000);
+    });

     it('Can access CompleteAddress class in browser', async () => {
       const result: string = await page.evaluate(() => {
@@ -207,7 +207,7 @@ export const browserTestSuite = (
         TokenContractArtifact,
       );
       expect(result).toEqual(transferAmount);
-    }, 60_000);
+    });

     const deployTokenContract = async () => {
       const [txHash, tokenAddress] = await page.evaluate(
diff --git a/yarn-project/end-to-end/src/shared/uniswap_l1_l2.ts b/yarn-project/end-to-end/src/shared/uniswap_l1_l2.ts
index d9944d99b54..7b434878eb9 100644
--- a/yarn-project/end-to-end/src/shared/uniswap_l1_l2.ts
+++ b/yarn-project/end-to-end/src/shared/uniswap_l1_l2.ts
@@ -623,7 +623,7 @@ export const uniswapL1L2TestSuite = (
     logger.info('***** 🧚‍♀️ SWAP L2 assets on L1 Uniswap 🧚‍♀️ *****');
     logger.info('WETH balance after swap : ', wethL2BalanceAfterSwap.toString());
     logger.info('DAI balance after swap : ', daiL2BalanceAfterSwap.toString());
-  }, 360_000);
+  });
   // docs:end:uniswap_public

   // Edge cases for the private flow: