diff --git a/.config/nextest.toml b/.config/nextest.toml
new file mode 100644
index 000000000..59508d7e9
--- /dev/null
+++ b/.config/nextest.toml
@@ -0,0 +1,3 @@
+[profile.default-miri]
+slow-timeout = { period = "30s", terminate-after = 1 }
+fail-fast = false
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 115296d26..232e3fd40 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -25,9 +25,9 @@ jobs:
       fail-fast: true
       matrix:
         os: [macos-latest, windows-latest, ubuntu-latest]
-        toolchain: [stable, beta, nightly, 1.65.0]
+        toolchain: [stable, beta, nightly, 1.70.0]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

      # install the toolchain we are going to compile and test with
      - name: install ${{ matrix.toolchain }} toolchain
@@ -77,3 +77,18 @@ jobs:

 #     - run: mdbook test -L ./target/debug/deps docs/book
 #       if: matrix.toolchain == 'stable' && matrix.os == 'ubuntu-latest'
+
+  miri:
+    name: "Miri"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install Miri
+        run: |
+          rustup toolchain install nightly --component miri
+          rustup override set nightly
+          cargo miri setup
+      - name: Install latest nextest release
+        uses: taiki-e/install-action@nextest
+      - name: Test with Miri
+        run: ./miri.sh
diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml
index 30b84e58b..270d4ed88 100644
--- a/.github/workflows/deny.yml
+++ b/.github/workflows/deny.yml
@@ -17,8 +17,9 @@ jobs:
     # Prevent sudden announcement of a new advisory from failing ci:
     continue-on-error: ${{ matrix.checks == 'advisories' }}
+
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v3
     - uses: EmbarkStudios/cargo-deny-action@v1
       with:
        command: check ${{ matrix.checks }}
diff --git a/.rustfmt.toml b/.rustfmt.toml
index b299429d8..77fdc7039 100644
--- a/.rustfmt.toml
+++ b/.rustfmt.toml
@@ -1,9 +1,8 @@
 hard_tabs = false
-imports_granularity = "Crate"
 reorder_impl_items = true
 use_field_init_shorthand = true
 use_try_shorthand = true
 format_code_in_doc_comments = true
 wrap_comments = true
-edition = "2018"
+edition = "2021"
 version = "Two"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e4318517e..19149b84e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,13 +1,23 @@
+# Unreleased
+
+* Bump MSRV to 1.70.0 ([#765])
+* Significant refactor of `Join` and related traits to alleviate soundness
+  issues. Includes introduction of a lending/streaming join via the `LendJoin`
+  trait which is the new common denominator implemented by joinable types.
+  ([#765])
+
+[#765]: https://github.com/amethyst/specs/pull/765
+
 # 0.19.0 (2023-06-10)

 * Bump MSRV to 1.65.0 ([#766])
 * Added index where entity deletion stopped to the error returned from
   `WorldExt::delete_entities` ([#766])
 * Fix bug where deleting an entity with the wrong generation could clear the
   components of an existing entity.
([#766]) -* Bump shred to version `0.14.1`, MSRV to 1.60.0 ([shred changelog][shred-changelog], [#756]) +* Bump shred to version `0.14.1`, MSRV to 1.60.0 ([shred changelog][shred_changelog], [#756]) +[shred_changelog]: https://github.com/amethyst/shred/blob/master/CHANGELOG.md#0141-2022-07-14 [#756]: https://github.com/amethyst/specs/pull/756 [#766]: https://github.com/amethyst/specs/pull/766 -[shred-changelog]: https://github.com/amethyst/shred/blob/6b754812e304cf6c63ba0364a82a7e0e5025aaa4/CHANGELOG.md#0140-2022-07-12 # 0.18.0 (2022-07-02) diff --git a/Cargo.toml b/Cargo.toml index a72d012ac..e08b14922 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ license = "MIT OR Apache-2.0" authors = ["slide-rs hackers"] include = ["/src", "/examples", "/benches", "/README.md", "/LICENSE-MIT", "/LICENSE-APACHE"] edition = "2021" -rust-version = "1.65.0" +rust-version = "1.70.0" # the `storage_cmp` and `storage_sparse` benches are called from `benches_main` autobenches = false @@ -22,11 +22,12 @@ autobenches = false [dependencies] ahash = "0.7.6" crossbeam-queue = "0.3" -hibitset = { version = "0.6.3", default-features = false } +hibitset = { version = "0.6.4", default-features = false } log = "0.4.8" -shred = { version = "0.14.1", default-features = false } +shred = { version = "0.15.0", default-features = false } shrev = "1.1.1" tuple_utils = "0.4.0" +nougat = "0.2.3" rayon = { version = "1.5.1", optional = true } serde = { version = "1.0.104", optional = true, features = ["serde_derive"] } @@ -40,7 +41,7 @@ uuid_entity = ["uuid", "serde"] stdweb = ["uuid/js"] storage-event-control = [] derive = ["shred-derive", "specs-derive"] -nightly = [] +nightly = ["shred/nightly"] shred-derive = ["shred/shred-derive"] @@ -53,30 +54,31 @@ criterion = "0.3.1" ron = "0.7.1" rand = "0.8" serde_json = "1.0.48" -shred = { version = "0.14.1", default-features = false, features = ["shred-derive"] } +shred = { version = "0.15.0", default-features = false, features = ["shred-derive"] } specs-derive = { path = "specs-derive", version = "0.4.1" } +[[example]] +name = "async" [[example]] name = "basic" - [[example]] -name = "full" - +name = "bitset" [[example]] name = "cluster_bomb" - [[example]] -name = "bitset" - +name = "full" [[example]] -name = "track" - +name = "lend_join" +test = true [[example]] name = "ordered_track" - [[example]] name = "saveload" required-features = ["serde"] +[[example]] +name = "slices" +[[example]] +name = "track" [[bench]] name = "benches_main" diff --git a/README.md b/README.md index a61bf18b9..a5caf0a4b 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ Unlike most other ECS libraries out there, it provides other and you can use barriers to force several stages in system execution * high performance for real-world applications -Minimum Rust version: 1.65 +Minimum Rust version: 1.70 ## [Link to the book][book] diff --git a/benches/parallel.rs b/benches/parallel.rs index 27a03b98f..588693c84 100644 --- a/benches/parallel.rs +++ b/benches/parallel.rs @@ -55,6 +55,7 @@ impl Component for Lifetime { } #[derive(Clone, Copy, Debug)] +#[allow(dead_code)] struct Ball { radius: f32, } @@ -64,6 +65,7 @@ impl Component for Ball { } #[derive(Clone, Copy, Debug)] +#[allow(dead_code)] struct Rect { a: f32, b: f32, @@ -91,6 +93,7 @@ impl Component for SpawnRequests { } #[derive(Clone, Copy, Debug)] +#[allow(dead_code)] struct Collision { a: Entity, b: Entity, @@ -102,6 +105,7 @@ impl Component for Collision { } #[derive(Clone, Copy, Debug)] +#[allow(dead_code)] struct Room { inner_width: 
f32,
     inner_height: f32,
diff --git a/docs/tutorials/src/12_tracked.md b/docs/tutorials/src/12_tracked.md
index a9d0a2df9..5e88b0405 100644
--- a/docs/tutorials/src/12_tracked.md
+++ b/docs/tutorials/src/12_tracked.md
@@ -117,8 +117,8 @@ fetch the component as mutable if/when needed.
 for (entity, mut comp) in (&entities, &mut comps.restrict_mut()).join() {
     // Check whether this component should be modified, without fetching it as
     // mutable.
-    if comp.get_unchecked().condition < 5 {
-        let mut comp = comp.get_mut_unchecked();
+    if comp.get().condition < 5 {
+        let mut comp = comp.get_mut();
         // ...
     }
 }
@@ -137,4 +137,4 @@ simply call `storage.set_event_emission(true)`.

 _See
 [FlaggedStorage Doc](https://docs.rs/specs/latest/specs/struct.FlaggedStorage.html)
-for more into._
\ No newline at end of file
+for more info._
diff --git a/examples/lend_join.rs b/examples/lend_join.rs
new file mode 100644
index 000000000..603e31009
--- /dev/null
+++ b/examples/lend_join.rs
@@ -0,0 +1,52 @@
+use specs::prelude::*;
+struct Pos(f32);
+
+impl Component for Pos {
+    type Storage = VecStorage<Self>;
+}
+
+fn main() {
+    let mut world = World::new();
+
+    world.register::<Pos>();
+
+    let entity0 = world.create_entity().with(Pos(0.0)).build();
+    world.create_entity().with(Pos(1.6)).build();
+    world.create_entity().with(Pos(5.4)).build();
+
+    let mut pos = world.write_storage::<Pos>();
+    let entities = world.entities();
+
+    // Unlike `join`, the type returned from `lend_join` does not implement
+    // `Iterator`. Instead, a `next` method is provided that only allows one
+    // element to be accessed at once.
+    let mut lending = (&mut pos).lend_join();
+
+    // We copy the value out here so the borrow of `lending` is released.
+    let a = lending.next().unwrap().0;
+    // Here we keep the reference from `lending.next()` alive, so `lending`
+    // remains exclusively borrowed for the lifetime of `b`.
+    let b = lending.next().unwrap();
+    // This rightly fails to compile since `b` is used below:
+    // let d = lending.next().unwrap();
+    b.0 = a;
+
+    // Items can be iterated with a `while let` loop:
+    let mut lending = (&mut pos).lend_join();
+    while let Some(pos) = lending.next() {
+        pos.0 *= 1.5;
+    }
+
+    // A `for_each` method is also available:
+    (&mut pos).lend_join().for_each(|pos| {
+        pos.0 += 1.0;
+    });
+
+    // Finally, there is one bonus feature which `.join()` can't soundly provide.
+    let mut lending = (&mut pos).lend_join();
+    // That is, there is a method to get the joined result for a particular
+    // entity:
+    if let Some(pos) = lending.get(entity0, &entities) {
+        pos.0 += 5.0;
+    }
+}
diff --git a/examples/slices.rs b/examples/slices.rs
index 6f81557f3..99a023731 100644
--- a/examples/slices.rs
+++ b/examples/slices.rs
@@ -42,7 +42,7 @@ impl<'a> System<'a> for SysA {
         // Both the `Pos` and `Vel` components use `DefaultVecStorage`, which supports
         // `as_slice()` and `as_mut_slice()`. This lets us access components without
         // indirection.
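+        // `DefaultVecStorage` differs from `VecStorage` in that it fills any
+        // gaps in the allocated range with `T::default()`, which is what makes
+        // handing out a contiguous slice sound here.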
-        let mut pos_slice = pos.as_mut_slice();
+        let pos_slice = pos.as_mut_slice();
         let vel_slice = vel.as_slice();

         // Note that an entity which has position but not velocity will still have
diff --git a/examples/track.rs b/examples/track.rs
index 1e231f39d..ef9f08d35 100644
--- a/examples/track.rs
+++ b/examples/track.rs
@@ -70,7 +70,7 @@ impl<'a> System<'a> for SysB {
     fn run(&mut self, (entities, mut tracked): Self::SystemData) {
         for (entity, mut restricted) in (&entities, &mut tracked.restrict_mut()).join() {
             if entity.id() % 2 == 0 {
-                let mut comp = restricted.get_mut_unchecked();
+                let comp = restricted.get_mut();
                 comp.0 += 1;
             }
         }
diff --git a/miri.sh b/miri.sh
new file mode 100755
index 000000000..a6bb69cca
--- /dev/null
+++ b/miri.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Convenience script for running Miri, also the same one that the CI runs!
+
+set -e
+
+# use half the available threads since miri can be a bit memory hungry
+test_threads=$((($(nproc) - 1) / 2 + 1))
+echo using $test_threads threads
+
+# filters out long running tests
+filter='not (test(100k) | test(map_test::wrap) | test(map_test::insert_same_key) | test(=mixed_create_merge)| test(=par_join_many_entities_and_systems) | test(=stillborn_entities))'
+echo "using filter: \"$filter\""
+
+# Miri currently reports leaks in some tests so we disable that check
+# here (might be due to ptr-int-ptr in crossbeam-epoch so might be
+# resolved in future versions of that crate).
+MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-ignore-leaks" \
+    cargo +nightly miri nextest run \
+    -E "$filter" \
+    --test-threads="$test_threads" \
+    # use nocapture or run miri directly to see warnings from miri
+    #--nocapture
+
+# Run tests only available when parallel feature is disabled.
+MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-ignore-leaks" \
+    cargo +nightly miri nextest run \
+    --no-default-features \
+    -E "binary(no_parallel)" \
+    --test-threads="$test_threads"
+
diff --git a/src/bitset.rs b/src/bitset.rs
index cbad9ed32..f0ac9f33f 100644
--- a/src/bitset.rs
+++ b/src/bitset.rs
@@ -2,39 +2,82 @@
 //!
 //! Normally used for `Join`s and filtering entities.

-#![cfg_attr(rustfmt, rustfmt_skip)]
+#![cfg_attr(rustfmt, rustfmt::skip)]

 use hibitset::{AtomicBitSet, BitSet, BitSetAnd, BitSetLike, BitSetNot, BitSetOr, BitSetXor};

-use crate::join::Join;
+#[nougat::gat(Type)]
+use crate::join::LendJoin;
 #[cfg(feature = "parallel")]
 use crate::join::ParJoin;
+use crate::join::{Join, RepeatableLendGet};
 use crate::world::Index;

 macro_rules! define_bit_join {
     ( impl < ( $( $lifetime:tt )* ) ( $( $arg:ident ),* ) > for $bitset:ty ) => {
-        impl<$( $lifetime, )* $( $arg ),*> Join for $bitset
+        // SAFETY: `get` just returns the provided `id` (`Self::Value` is `()`
+        // and corresponds with any mask instance).
+        #[nougat::gat]
+        unsafe impl<$( $lifetime, )* $( $arg ),*> LendJoin for $bitset
+            where $( $arg: BitSetLike ),*
+        {
+            type Type<'next> = Index;
+            type Value = ();
+            type Mask = $bitset;
+
+            unsafe fn open(self) -> (Self::Mask, Self::Value) {
+                (self, ())
+            }
+
+            unsafe fn get<'next>(_: &'next mut Self::Value, id: Index) -> Self::Type<'next> {
+                id
+            }
+        }
+
+        // SAFETY: `<$bitset as LendJoin>::get` does not rely on only being
+        // called once with a particular ID.
+        unsafe impl<$( $lifetime, )* $( $arg ),*> RepeatableLendGet for $bitset
+            where $( $arg: BitSetLike ),* {}
+
+        // SAFETY: `get` just returns the provided `id` (`Self::Value` is `()`
+        // and corresponds with any mask instance).
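+        // For example, joining on `BitSetAnd(&mask_a, &mask_b)` yields each
+        // `Index` present in both masks; the joined "component" for a plain
+        // bit set is the index itself (`mask_a`/`mask_b` are illustrative).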
+        unsafe impl<$( $lifetime, )* $( $arg ),*> Join for $bitset
             where $( $arg: BitSetLike ),*
         {
             type Type = Index;
             type Value = ();
             type Mask = $bitset;

-            // SAFETY: This just moves a `BitSet`; invariants of `Join` are fulfilled, since `Self::Value` cannot be mutated.
             unsafe fn open(self) -> (Self::Mask, Self::Value) {
                 (self, ())
             }

-            // SAFETY: No unsafe code and no invariants to meet.
             unsafe fn get(_: &mut Self::Value, id: Index) -> Self::Type {
                 id
             }
         }

+        // SAFETY: `get` is safe to call concurrently and just returns the
+        // provided `id` (`Self::Value` is `()` and corresponds with any mask
+        // instance).
         #[cfg(feature = "parallel")]
         unsafe impl<$( $lifetime, )* $( $arg ),*> ParJoin for $bitset
             where $( $arg: BitSetLike ),*
-        { }
+        {
+            type Type = Index;
+            type Value = ();
+            type Mask = $bitset;
+
+            unsafe fn open(self) -> (Self::Mask, Self::Value) {
+                (self, ())
+            }
+
+            unsafe fn get(_: &Self::Value, id: Index) -> Self::Type {
+                id
+            }
+        }
     }
 }
diff --git a/src/changeset.rs b/src/changeset.rs
index 3cbbe9b7e..d4e33505e 100644
--- a/src/changeset.rs
+++ b/src/changeset.rs
@@ -2,7 +2,12 @@

 use std::{iter::FromIterator, ops::AddAssign};

-use crate::{prelude::*, storage::UnprotectedStorage, world::Index};
+use crate::{
+    join::RepeatableLendGet,
+    prelude::*,
+    storage::{SharedGetMutOnly, UnprotectedStorage},
+    world::Index,
+};

 /// Change set that can be collected from an iterator, and joined on for easy
 /// application to components.
@@ -62,28 +67,28 @@ impl<T> ChangeSet<T> {
         T: AddAssign,
     {
         if self.mask.contains(entity.id()) {
-            // SAFETY: we checked the mask, thus it's safe to call
-            unsafe {
-                *self.inner.get_mut(entity.id()) += value;
-            }
+            // SAFETY: We have exclusive access (which ensures no aliasing or
+            // concurrent calls from other threads) and we checked the mask,
+            // thus it's safe to call.
+            unsafe { *self.inner.get_mut(entity.id()) += value };
         } else {
-            // SAFETY: we checked the mask, thus it's safe to call
-            unsafe {
-                self.inner.insert(entity.id(), value);
-            }
+            // SAFETY: We checked the mask, thus it's safe to call.
+            unsafe { self.inner.insert(entity.id(), value) };
             self.mask.add(entity.id());
         }
     }

     /// Clear the changeset
     pub fn clear(&mut self) {
-        for id in &self.mask {
-            // SAFETY: we checked the mask, thus it's safe to call
-            unsafe {
-                self.inner.remove(id);
-            }
-        }
-        self.mask.clear();
+        // NOTE: We replace with default empty mask temporarily to protect against
+        // unwinding from `Drop` of components.
+        let mut mask_temp = core::mem::take(&mut self.mask);
+        // SAFETY: `self.mask` is the correct mask as specified. We swap in a
+        // temporary empty mask to ensure if this unwinds that the mask will be
+        // cleared.
+        unsafe { self.inner.clean(&mask_temp) };
+        mask_temp.clear();
+        self.mask = mask_temp;
     }
 }

@@ -111,60 +116,158 @@ where
     }
 }

-impl<'a, T> Join for &'a mut ChangeSet<T> {
+// SAFETY: `open` returns references to a mask and storage which are contained
+// together in the `ChangeSet` and correspond. Iterating mask does not repeat
+// indices.
+#[nougat::gat]
+unsafe impl<'a, T> LendJoin for &'a mut ChangeSet<T> {
     type Mask = &'a BitSet;
-    type Type = &'a mut T;
+    type Type<'next> = &'next mut T;
     type Value = &'a mut DenseVecStorage<T>;

-    // SAFETY: No unsafe code and no invariants to meet.
     unsafe fn open(self) -> (Self::Mask, Self::Value) {
         (&self.mask, &mut self.inner)
     }

-    // SAFETY: No unsafe code and no invariants to meet.
-    // `DistinctStorage` invariants are also met, but no `ParJoin` implementation
-    // exists yet.
-    unsafe fn get(v: &mut Self::Value, id: Index) -> Self::Type {
-        let value: *mut Self::Value = v as *mut Self::Value;
-        (*value).get_mut(id)
+    unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> Self::Type<'next> {
+        // SAFETY: Since we require that the mask was checked, an element for
+        // `id` must have been inserted without being removed.
+        unsafe { value.get_mut(id) }
     }
 }

-impl<'a, T> Join for &'a ChangeSet<T> {
+// SAFETY: LendJoin::get impl for this type can safely be called multiple times
+// with the same ID.
+unsafe impl<'a, T> RepeatableLendGet for &'a mut ChangeSet<T> {}
+
+// SAFETY: `open` returns references to a mask and storage which are contained
+// together in the `ChangeSet` and correspond. Iterating mask does not repeat
+// indices.
+unsafe impl<'a, T> Join for &'a mut ChangeSet<T> {
+    type Mask = &'a BitSet;
+    type Type = &'a mut T;
+    type Value = SharedGetMutOnly<'a, T, DenseVecStorage<T>>;
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        (&self.mask, SharedGetMutOnly::new(&mut self.inner))
+    }
+
+    unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type {
+        // SAFETY:
+        // * Since we require that the mask was checked, an element for `id` must have
+        //   been inserted without being removed.
+        // * We also require that there are no subsequent calls with the same `id` for
+        //   this instance of the values from `open`, so there are no extant references
+        //   for the element corresponding to this `id`.
+        // * Since we have an exclusive reference to `Self::Value`, we know this isn't
+        //   being called from multiple threads at once.
+        unsafe { SharedGetMutOnly::get_mut(value, id) }
+    }
+}
+
+// NOTE: could implement ParJoin for `&'a mut ChangeSet`/`&'a ChangeSet`
+
+// SAFETY: `open` returns references to a mask and storage which are contained
+// together in the `ChangeSet` and correspond. Iterating mask does not repeat
+// indices.
+#[nougat::gat]
+unsafe impl<'a, T> LendJoin for &'a ChangeSet<T> {
+    type Mask = &'a BitSet;
+    type Type<'next> = &'a T;
+    type Value = &'a DenseVecStorage<T>;
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        (&self.mask, &self.inner)
+    }
+
+    unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> Self::Type<'next> {
+        // SAFETY: Since we require that the mask was checked, an element for
+        // `id` must have been inserted without being removed.
+        unsafe { value.get(id) }
+    }
+}
+
+// SAFETY: LendJoin::get impl for this type can safely be called multiple times
+// with the same ID.
+unsafe impl<'a, T> RepeatableLendGet for &'a ChangeSet<T> {}
+
+// SAFETY: `open` returns references to a mask and storage which are contained
+// together in the `ChangeSet` and correspond. Iterating mask does not repeat
+// indices.
+unsafe impl<'a, T> Join for &'a ChangeSet<T> {
     type Mask = &'a BitSet;
     type Type = &'a T;
     type Value = &'a DenseVecStorage<T>;

-    // SAFETY: No unsafe code and no invariants to meet.
     unsafe fn open(self) -> (Self::Mask, Self::Value) {
         (&self.mask, &self.inner)
     }

-    // SAFETY: No unsafe code and no invariants to meet.
-    // `DistinctStorage` invariants are also met, but no `ParJoin` implementation
-    // exists yet.
     unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type {
-        value.get(id)
+        // SAFETY: Since we require that the mask was checked, an element for
+        // `id` must have been inserted without being removed.
+        unsafe { value.get(id) }
     }
 }

+/// A `Join` implementation for `ChangeSet` that simply removes all the entries
+/// on a call to `get`.
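+///
+/// A sketch of the intended use, assuming a `scores` write storage and an
+/// iterator of `(Entity, u32)` deltas (illustrative names):
+///
+/// ```ignore
+/// let changes: ChangeSet<u32> = deltas.into_iter().collect();
+/// for (score, delta) in (&mut scores, changes).join() {
+///     score.0 += delta; // each delta is moved out of the set by `get`
+/// }
+/// ```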
+// SAFETY: `open` returns references to a mask and storage which are contained
+// together in the `ChangeSet` and correspond. Iterating mask does not repeat
+// indices.
+#[nougat::gat]
+// This is a trait impl so method safety documentation is on the trait
+// definition, maybe nougat confuses clippy? But this is the only spot that has
+// this issue.
+#[allow(clippy::missing_safety_doc)]
+unsafe impl<T> LendJoin for ChangeSet<T> {
+    type Mask = BitSet;
+    type Type<'next> = T;
+    type Value = DenseVecStorage<T>;
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        (self.mask, self.inner)
+    }
+
+    unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> Self::Type<'next> {
+        // NOTE: This impl is the main reason that `RepeatableLendGet` exists
+        // since it moves the value out of the backing storage and thus can't
+        // be called multiple times with the same ID!
+        //
+        // SAFETY: Since we require that the mask was checked, an element for
+        // `id` must have been inserted without being removed. Note, this
+        // removes the element without affecting the mask. However, the caller
+        // is also required to not call this multiple times with the same `id`
+        // value and mask instance. Because `open` takes ownership we don't have
+        // to update the mask for future uses since the `ChangeSet` is
+        // consumed.
+        unsafe { value.remove(id) }
+    }
+}

 /// A `Join` implementation for `ChangeSet` that simply removes all the entries
 /// on a call to `get`.
-impl<T> Join for ChangeSet<T> {
+// SAFETY: `open` returns references to a mask and storage which are contained
+// together in the `ChangeSet` and correspond. Iterating mask does not repeat
+// indices.
+unsafe impl<T> Join for ChangeSet<T> {
     type Mask = BitSet;
     type Type = T;
     type Value = DenseVecStorage<T>;

-    // SAFETY: No unsafe code and no invariants to meet.
     unsafe fn open(self) -> (Self::Mask, Self::Value) {
         (self.mask, self.inner)
     }

-    // SAFETY: No unsafe code and no invariants to meet.
-    // `DistinctStorage` invariants are also met, but no `ParJoin` implementation
-    // exists yet.
     unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type {
-        value.remove(id)
+        // SAFETY: Since we require that the mask was checked, an element for
+        // `id` must have been inserted without being removed. Note, this
+        // removes the element without affecting the mask. However, the caller
+        // is also required to not call this multiple times with the same `id`
+        // value and mask instance. Because `open` takes ownership we don't have
+        // to update the mask for future uses since the `ChangeSet` is
+        // consumed.
+        unsafe { value.remove(id) }
     }
 }
diff --git a/src/join/bit_and.rs b/src/join/bit_and.rs
new file mode 100644
index 000000000..26d8ec425
--- /dev/null
+++ b/src/join/bit_and.rs
@@ -0,0 +1,57 @@
+use hibitset::{BitSetAnd, BitSetLike};
+use tuple_utils::Split;
+
+/// `BitAnd` is a helper method to & bitsets together resulting in a tree.
+pub trait BitAnd {
+    /// The combined bitsets.
+    type Value: BitSetLike;
+    /// Combines `Self` into a single `BitSetLike` through `BitSetAnd`.
+    fn and(self) -> Self::Value;
+}
+
+/// This needs to be special cased
+impl<A> BitAnd for (A,)
+where
+    A: BitSetLike,
+{
+    type Value = A;
+
+    fn and(self) -> Self::Value {
+        self.0
+    }
+}
+
+macro_rules! bitset_and {
+    // use variables to indicate the arity of the tuple
+    ($($from:ident),*) => {
+        impl<$($from),*> BitAnd for ($($from),*)
+            where $($from: BitSetLike),*
+        {
+            type Value = BitSetAnd<
+                <<Self as Split>::Left as BitAnd>::Value,
+                <<Self as Split>::Right as BitAnd>::Value
+            >;
+
+            fn and(self) -> Self::Value {
+                let (l, r) = self.split();
+                BitSetAnd(l.and(), r.and())
+            }
+        }
+    }
+}
+
+bitset_and! {A, B}
+bitset_and! {A, B, C}
+bitset_and! {A, B, C, D}
+bitset_and! {A, B, C, D, E}
+bitset_and! {A, B, C, D, E, F}
+bitset_and! {A, B, C, D, E, F, G}
+bitset_and! {A, B, C, D, E, F, G, H}
+bitset_and! {A, B, C, D, E, F, G, H, I}
+bitset_and! {A, B, C, D, E, F, G, H, I, J}
+bitset_and! {A, B, C, D, E, F, G, H, I, J, K}
+bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L}
+bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M}
+bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M, N}
+bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M, N, O}
+bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P}
diff --git a/src/join/lend_join.rs b/src/join/lend_join.rs
new file mode 100644
index 000000000..a909eb7ba
--- /dev/null
+++ b/src/join/lend_join.rs
@@ -0,0 +1,320 @@
+use super::MaybeJoin;
+use hibitset::{BitIter, BitSetLike};
+
+use crate::world::{Entities, Entity, Index};
+
+/// Like the [`Join`](super::Join) trait except this is similar to a [lending
+/// iterator](https://blog.rust-lang.org/2021/08/03/GATs-stabilization-push.html#so-what-are-gats)
+/// in that only one item can be accessed at once.
+///
+/// The type returned from [`.lend_join()`](LendJoin::lend_join),
+/// [`JoinLendIter`], does not implement `Iterator` like
+/// [`JoinIter`](super::JoinIter) does. Instead, it provides a
+/// [`next`](JoinLendIter::next) method that exclusively borrows the
+/// `JoinLendIter` for the lifetime of the returned value.
+///
+/// This limitation allows freedom for more patterns to be soundly implemented.
+/// Thus, `LendJoin` acts as the "lowest common denominator" of the
+/// `Join`-like traits (i.e. if something can implement `Join` it can also
+/// implement `LendJoin`).
+///
+/// In particular, [`Entries`](crate::storage::Entries) only implements
+/// `LendJoin`. As another example,
+/// [`RestrictedStorage`](crate::storage::RestrictedStorage) implements both
+/// `Join` and `LendJoin`. However, for joining mutably, the lend join variant
+/// produces
+/// [`PairedStorageWriteExclusive`](crate::storage::PairedStorageWriteExclusive)
+/// values which have `get_other`/`get_other_mut` methods that aren't provided
+/// by [`PairedStorageWriteShared`](crate::storage::PairedStorageWriteShared).
+///
+/// Finally, these limitations allow providing the [`JoinLendIter::get`] method
+/// which can be useful to get a set of components from an entity without
+/// calling `get` individually on each storage (see the example in that
+/// method's docs).
+///
+/// Also see the `lend_join` example.
+///
+/// # Safety
+///
+/// The `Self::Mask` value returned with the `Self::Value` must correspond such
+/// that it is safe to retrieve items from `Self::Value` whose presence is
+/// indicated in the mask. As part of this, `BitSetLike::iter` must not produce
+/// an iterator that repeats an `Index` value if the `LendJoin::get` impl relies
+/// on not being called twice with the same `Index`.
+#[nougat::gat]
+pub unsafe trait LendJoin {
+    /// Type of joined components.
+    ///
+    /// # Note
+    ///
+    /// This type is using macro magic to emulate GATs on stable.
+    /// So to refer to it you need to use the
+    /// [`LendJoinType<'next, J>`](LendJoinType) type alias.
+    type Type<'next>
+    where
+        Self: 'next;
+    /// Type of joined storages.
+    type Value;
+    /// Type of joined bit mask.
+    type Mask: BitSetLike;
+
+    /// Create a joined lending iterator over the contents.
+    fn lend_join(self) -> JoinLendIter<Self>
+    where
+        Self: Sized,
+    {
+        JoinLendIter::new(self)
+    }
+
+    /// Returns a structure that implements `Join`/`LendJoin`/`ParJoin` if the
+    /// contained `T` does and that yields all indices, returning `None` for all
+    /// missing elements and `Some(T)` for found elements.
+    ///
+    /// To join over an optional component mutably this pattern can be used:
+    /// `(&mut storage).maybe()`.
+    ///
+    /// WARNING: Do not have a join of only `MaybeJoin`s. Otherwise the join
+    /// will iterate over every single index of the bitset. If you want a
+    /// join with all `MaybeJoin`s, add an `EntitiesRes` to the join as well
+    /// to bound the join to all entities that are alive.
+    ///
+    /// ```
+    /// # use specs::prelude::*;
+    /// # #[derive(Debug, PartialEq)]
+    /// # struct Pos { x: i32, y: i32 } impl Component for Pos { type Storage = VecStorage<Self>; }
+    /// # #[derive(Debug, PartialEq)]
+    /// # struct Vel { x: i32, y: i32 } impl Component for Vel { type Storage = VecStorage<Self>; }
+    /// struct ExampleSystem;
+    /// impl<'a> System<'a> for ExampleSystem {
+    ///     type SystemData = (
+    ///         WriteStorage<'a, Pos>,
+    ///         ReadStorage<'a, Vel>,
+    ///     );
+    ///     fn run(&mut self, (mut positions, velocities): Self::SystemData) {
+    ///         let mut join = (&mut positions, velocities.maybe()).lend_join();
+    ///         while let Some((mut position, maybe_velocity)) = join.next() {
+    ///             if let Some(velocity) = maybe_velocity {
+    ///                 position.x += velocity.x;
+    ///                 position.y += velocity.y;
+    ///             }
+    ///         }
+    ///     }
+    /// }
+    ///
+    /// fn main() {
+    ///     let mut world = World::new();
+    ///     let mut dispatcher = DispatcherBuilder::new()
+    ///         .with(ExampleSystem, "example_system", &[])
+    ///         .build();
+    ///
+    ///     dispatcher.setup(&mut world);
+    ///
+    ///     let e1 = world.create_entity()
+    ///         .with(Pos { x: 0, y: 0 })
+    ///         .with(Vel { x: 5, y: 2 })
+    ///         .build();
+    ///
+    ///     let e2 = world.create_entity()
+    ///         .with(Pos { x: 0, y: 0 })
+    ///         .build();
+    ///
+    ///     dispatcher.dispatch(&mut world);
+    ///
+    ///     let positions = world.read_storage::<Pos>();
+    ///     assert_eq!(positions.get(e1), Some(&Pos { x: 5, y: 2 }));
+    ///     assert_eq!(positions.get(e2), Some(&Pos { x: 0, y: 0 }));
+    /// }
+    /// ```
+    fn maybe(self) -> MaybeJoin<Self>
+    where
+        Self: Sized,
+    {
+        MaybeJoin(self)
+    }
+
+    /// Open this join by returning the mask and the storages.
+    ///
+    /// # Safety
+    ///
+    /// This is unsafe because implementations of this trait can permit the
+    /// `Value` to be mutated independently of the `Mask`. If the `Mask` does
+    /// not correctly report the status of the `Value` then illegal memory
+    /// access can occur.
+    unsafe fn open(self) -> (Self::Mask, Self::Value);
+
+    /// Get a joined component value by a given index.
+    ///
+    /// # Safety
+    ///
+    /// * A call to `get` must be preceded by a check if `id` is part of
+    ///   `Self::Mask`.
+    /// * Multiple calls with the same `id` are not allowed, for a particular
+    ///   instance of the values from [`open`](LendJoin::open), unless this type
+    ///   implements the unsafe trait [`RepeatableLendGet`].
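+    ///
+    /// A sketch of how callers typically uphold this, mirroring what
+    /// `JoinLendIter::next` does (`keys`/`values` come from iterating the
+    /// mask and from `open`):
+    ///
+    /// ```ignore
+    /// let idx = keys.next()?; // the mask iterator never repeats an index
+    /// let item = unsafe { J::get(&mut values, idx) }; // `idx` is in the mask
+    /// ```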
+    unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> Self::Type<'next>;
+
+    /// If this `LendJoin` typically returns all indices in the mask, then
+    /// iterating over only it or combined with other joins that are also
+    /// dangerous will cause the `JoinLendIter` to go through all indices which
+    /// is usually not what is wanted and will kill performance.
+    #[inline]
+    fn is_unconstrained() -> bool {
+        false
+    }
+}
+
+/// # Safety
+///
+/// Implementing this trait guarantees that `<Self as LendJoin>::get` can
+/// soundly be called multiple times with the same ID.
+pub unsafe trait RepeatableLendGet: LendJoin {}
+
+/// Type alias to refer to the `<J as LendJoin>::Type<'next>` associated type
+/// (except this doesn't actually exist in this form so the `nougat::Gat!`
+/// macro is needed).
+pub type LendJoinType<'next, J> = nougat::Gat!(<J as LendJoin>::Type<'next>);
+
+/// `JoinLendIter` is a lending/streaming iterator over components from a
+/// group of storages.
+#[must_use]
+pub struct JoinLendIter<J: LendJoin> {
+    keys: BitIter<J::Mask>,
+    values: J::Value,
+}
+
+impl<J: LendJoin> JoinLendIter<J> {
+    /// Create a new lending join iterator.
+    pub fn new(j: J) -> Self {
+        if <J as LendJoin>::is_unconstrained() {
+            log::warn!(
+                "`LendJoin` possibly iterating through all indices, \
+                you might've made a join with all `MaybeJoin`s, \
+                which is unbounded in length."
+            );
+        }
+
+        // SAFETY: We do not swap out the mask or the values, nor do we allow it
+        // by exposing them.
+        let (keys, values) = unsafe { j.open() };
+        JoinLendIter {
+            keys: keys.iter(),
+            values,
+        }
+    }
+}
+
+impl<J: LendJoin> JoinLendIter<J> {
+    /// Lending `next`.
+    ///
+    /// Can be used to iterate with this pattern:
+    ///
+    /// `while let Some(components) = join_lending_iter.next() {`
+    #[allow(clippy::should_implement_trait)] // we want this to look like iterator
+    pub fn next(&mut self) -> Option<LendJoinType<'_, J>> {
+        // SAFETY: Since `idx` is yielded from `keys` (the mask), it is
+        // necessarily a part of it. `LendJoin` requires that the iterator
+        // doesn't repeat indices and we advance the iterator for each `get`
+        // call in all methods that don't require `RepeatableLendGet`.
+        self.keys
+            .next()
+            .map(|idx| unsafe { J::get(&mut self.values, idx) })
+    }
+
+    /// Calls a closure on each entity in the join.
+    pub fn for_each(mut self, mut f: impl FnMut(LendJoinType<'_, J>)) {
+        self.keys.for_each(|idx| {
+            // SAFETY: Since `idx` is yielded from `keys` (the mask), it is
+            // necessarily a part of it. `LendJoin` requires that the iterator
+            // doesn't repeat indices and we advance the iterator for each `get`
+            // call in all methods that don't require `RepeatableLendGet`.
+            let item = unsafe { J::get(&mut self.values, idx) };
+            f(item);
+        })
+    }
+
+    /// Allows getting joined values for specific entity.
+    ///
+    /// ## Example
+    ///
+    /// ```
+    /// # use specs::prelude::*;
+    /// # #[derive(Debug, PartialEq)]
+    /// # struct Pos; impl Component for Pos { type Storage = VecStorage<Self>; }
+    /// # #[derive(Debug, PartialEq)]
+    /// # struct Vel; impl Component for Vel { type Storage = VecStorage<Self>; }
+    /// let mut world = World::new();
+    ///
+    /// world.register::<Pos>();
+    /// world.register::<Vel>();
+    ///
+    /// // This entity could be stashed anywhere (into `Component`, `Resource`,
+    /// // `System`s data, etc.) as it's just a number.
+    /// let entity = world
+    ///     .create_entity()
+    ///     .with(Pos)
+    ///     .with(Vel)
+    ///     .build();
+    ///
+    /// // Later
+    /// {
+    ///     let mut pos = world.write_storage::<Pos>();
+    ///     let vel = world.read_storage::<Vel>();
+    ///
+    ///     assert_eq!(
+    ///         Some((&mut Pos, &Vel)),
+    ///         (&mut pos, &vel).lend_join().get(entity, &world.entities()),
+    ///         "The entity that was stashed still has the needed components and is alive."
+    ///     );
+    /// }
+    ///
+    /// // The entity has found a nice spot and doesn't need to move anymore.
+    /// world.write_storage::<Vel>().remove(entity);
+    ///
+    /// // Even later
+    /// {
+    ///     let mut pos = world.write_storage::<Pos>();
+    ///     let vel = world.read_storage::<Vel>();
+    ///
+    ///     assert_eq!(
+    ///         None,
+    ///         (&mut pos, &vel).lend_join().get(entity, &world.entities()),
+    ///         "The entity doesn't have velocity anymore."
+    ///     );
+    /// }
+    /// ```
+    pub fn get(&mut self, entity: Entity, entities: &Entities) -> Option<LendJoinType<'_, J>>
+    where
+        J: RepeatableLendGet,
+    {
+        if self.keys.contains(entity.id()) && entities.is_alive(entity) {
+            // SAFETY: the mask (`keys`) is checked as specified in the docs of
+            // `get`. We require `J: RepeatableLendGet` so this can safely be
+            // called multiple times with the same ID.
+            Some(unsafe { J::get(&mut self.values, entity.id()) })
+        } else {
+            None
+        }
+    }
+
+    /// Allows getting joined values for specific raw index.
+    ///
+    /// The raw index for an `Entity` can be retrieved using `Entity::id`
+    /// method.
+    ///
+    /// As this method operates on raw indices, there is no check to see if the
+    /// entity is still alive, so the caller should ensure it instead.
+    ///
+    /// Note: Not checking is still sound (thus this method is safe to call),
+    /// but this can return data from deleted entities!
+    pub fn get_unchecked(&mut self, index: Index) -> Option<LendJoinType<'_, J>>
+    where
+        J: RepeatableLendGet,
+    {
+        if self.keys.contains(index) {
+            // SAFETY: the mask (`keys`) is checked as specified in the docs of
+            // `get`. We require `J: RepeatableLendGet` so this can safely be
+            // called multiple times with the same ID.
+            Some(unsafe { J::get(&mut self.values, index) })
+        } else {
+            None
+        }
+    }
+}
diff --git a/src/join/maybe.rs b/src/join/maybe.rs
new file mode 100644
index 000000000..7ca6c5480
--- /dev/null
+++ b/src/join/maybe.rs
@@ -0,0 +1,134 @@
+#[nougat::gat(Type)]
+use super::LendJoin;
+#[cfg(feature = "parallel")]
+use super::ParJoin;
+use super::{Join, RepeatableLendGet};
+use hibitset::{BitSetAll, BitSetLike};
+
+use crate::world::Index;
+
+/// Returns a structure that implements `Join`/`LendJoin`/`ParJoin` if the
+/// contained `T` does and that yields all indices, returning `None` for all
+/// missing elements and `Some(T)` for found elements.
+///
+/// For usage see [`LendJoin::maybe()`](LendJoin::maybe).
+///
+/// WARNING: Do not have a join of only `MaybeJoin`s. Otherwise the join will
+/// iterate over every single index of the bitset. If you want a join with
+/// all `MaybeJoin`s, add an `EntitiesRes` to the join as well to bound the
+/// join to all entities that are alive.
+pub struct MaybeJoin<J>(pub J);
+
+// SAFETY: We return a mask containing all items, but check the original mask in
+// the `get` implementation. Iterating the mask does not repeat indices.
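+//
+// E.g. in `(&pos, vel.maybe()).lend_join()`, the `BitSetAll` mask means `vel`
+// never constrains which indices are visited, while `get` still consults
+// `vel`'s real mask to decide between `Some` and `None`.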
+#[nougat::gat]
+unsafe impl<T> LendJoin for MaybeJoin<T>
+where
+    T: LendJoin,
+{
+    type Mask = BitSetAll;
+    type Type<'next> = Option<<T as LendJoin>::Type<'next>>;
+    type Value = (<T as LendJoin>::Mask, <T as LendJoin>::Value);
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        // SAFETY: While we do expose the mask and the values and therefore
+        // would allow swapping them, this method is `unsafe` and relies on the
+        // same invariants.
+        let (mask, value) = unsafe { self.0.open() };
+        (BitSetAll, (mask, value))
+    }
+
+    unsafe fn get<'next>((mask, value): &'next mut Self::Value, id: Index) -> Self::Type<'next> {
+        if mask.contains(id) {
+            // SAFETY: The mask was just checked for `id`. Requirement to not
+            // call with the same ID more than once (unless `RepeatableLendGet`
+            // is implemented) is passed to the caller.
+            Some(unsafe { <T as LendJoin>::get(value, id) })
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    fn is_unconstrained() -> bool {
+        true
+    }
+}
+
+// SAFETY: `<T as LendJoin>::get` does not rely on only being called once
+// with a particular ID.
+unsafe impl<T> RepeatableLendGet for MaybeJoin<T> where T: RepeatableLendGet {}
+
+// SAFETY: We return a mask containing all items, but check the original mask in
+// the `get` implementation. Iterating the mask does not repeat indices.
+unsafe impl<T> Join for MaybeJoin<T>
+where
+    T: Join,
+{
+    type Mask = BitSetAll;
+    type Type = Option<<T as Join>::Type>;
+    type Value = (<T as Join>::Mask, <T as Join>::Value);
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        // SAFETY: While we do expose the mask and the values and therefore
+        // would allow swapping them, this method is `unsafe` and relies on the
+        // same invariants.
+        let (mask, value) = unsafe { self.0.open() };
+        (BitSetAll, (mask, value))
+    }
+
+    unsafe fn get((mask, value): &mut Self::Value, id: Index) -> Self::Type {
+        if mask.contains(id) {
+            // SAFETY: The mask was just checked for `id`. This has the same
+            // requirements on the caller to only call with the same `id` once.
+            Some(unsafe { <T as Join>::get(value, id) })
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    fn is_unconstrained() -> bool {
+        true
+    }
+}
+
+// SAFETY: This is safe as long as `T` implements `ParJoin` safely. The `get`
+// implementation here makes no assumptions about being called from a single
+// thread.
+//
+// We return a mask containing all items, but check the original mask in
+// the `get` implementation. Iterating the mask does not repeat indices.
+#[cfg(feature = "parallel")]
+unsafe impl<T> ParJoin for MaybeJoin<T>
+where
+    T: ParJoin,
+{
+    type Mask = BitSetAll;
+    type Type = Option<<T as ParJoin>::Type>;
+    type Value = (<T as ParJoin>::Mask, <T as ParJoin>::Value);
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        // SAFETY: While we do expose the mask and the values and therefore
+        // would allow swapping them, this method is `unsafe` and relies on the
+        // same invariants.
+        let (mask, value) = unsafe { self.0.open() };
+        (BitSetAll, (mask, value))
+    }
+
+    unsafe fn get((mask, value): &Self::Value, id: Index) -> Self::Type {
+        if mask.contains(id) {
+            // SAFETY: The mask was just checked for `id`. This has the same
+            // requirements on the caller to not call with the same `id` until
+            // the previous value is no longer in use.
+            Some(unsafe { <T as ParJoin>::get(value, id) })
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    fn is_unconstrained() -> bool {
+        true
+    }
+}
diff --git a/src/join/mod.rs b/src/join/mod.rs
index 41eede95c..2a3ed6f75 100644
--- a/src/join/mod.rs
+++ b/src/join/mod.rs
@@ -1,72 +1,24 @@
 //! Joining of components for iteration over entities with specific components.
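+//!
+//! A sketch of typical usage (`pos` and `vel` standing in for any two
+//! component storages fetched from a `World`):
+//!
+//! ```ignore
+//! for (pos, vel) in (&mut pos, &vel).join() {
+//!     pos.0 += vel.0;
+//! }
+//! ```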
-use hibitset::{BitIter, BitSetAll, BitSetAnd, BitSetLike}; +use hibitset::{BitIter, BitSetLike}; use shred::{Fetch, FetchMut, Read, ReadExpect, Resource, Write, WriteExpect}; use std::ops::{Deref, DerefMut}; -use tuple_utils::Split; -use crate::world::{Entities, Entity, Index}; +use crate::world::Index; +mod bit_and; +mod lend_join; +mod maybe; #[cfg(feature = "parallel")] mod par_join; +pub use bit_and::BitAnd; +#[nougat::gat(Type)] +pub use lend_join::LendJoin; +pub use lend_join::{JoinLendIter, LendJoinType, RepeatableLendGet}; +pub use maybe::MaybeJoin; #[cfg(feature = "parallel")] -pub use self::par_join::{JoinParIter, ParJoin}; - -/// `BitAnd` is a helper method to & bitsets together resulting in a tree. -pub trait BitAnd { - /// The combined bitsets. - type Value: BitSetLike; - /// Combines `Self` into a single `BitSetLike` through `BitSetAnd`. - fn and(self) -> Self::Value; -} - -/// This needs to be special cased -impl BitAnd for (A,) -where - A: BitSetLike, -{ - type Value = A; - - fn and(self) -> Self::Value { - self.0 - } -} - -macro_rules! bitset_and { - // use variables to indicate the arity of the tuple - ($($from:ident),*) => { - impl<$($from),*> BitAnd for ($($from),*) - where $($from: BitSetLike),* - { - type Value = BitSetAnd< - <::Left as BitAnd>::Value, - <::Right as BitAnd>::Value - >; - - fn and(self) -> Self::Value { - let (l, r) = self.split(); - BitSetAnd(l.and(), r.and()) - } - } - } -} - -bitset_and! {A, B} -bitset_and! {A, B, C} -bitset_and! {A, B, C, D} -bitset_and! {A, B, C, D, E} -bitset_and! {A, B, C, D, E, F} -bitset_and! {A, B, C, D, E, F, G} -bitset_and! {A, B, C, D, E, F, G, H} -bitset_and! {A, B, C, D, E, F, G, H, I} -bitset_and! {A, B, C, D, E, F, G, H, I, J} -bitset_and! {A, B, C, D, E, F, G, H, I, J, K} -bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L} -bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M} -bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M, N} -bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M, N, O} -bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P} +pub use par_join::{JoinParIter, ParJoin}; /// The purpose of the `Join` trait is to provide a way /// to access multiple storages at the same time with @@ -137,7 +89,14 @@ bitset_and! {A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P} /// /// `Join` can also be used to iterate over a single /// storage, just by writing `(&storage).join()`. -pub trait Join { +/// +/// # Safety +/// +/// The `Self::Mask` value returned with the `Self::Value` must correspond such +/// that it is safe to retrieve items from `Self::Value` whose presence is +/// indicated in the mask. As part of this, `BitSetLike::iter` must not produce +/// an iterator that repeats an `Index` value. +pub unsafe trait Join { /// Type of joined components. type Type; /// Type of joined storages. @@ -153,149 +112,38 @@ pub trait Join { JoinIter::new(self) } - /// Returns a `Join`-able structure that yields all indices, returning - /// `None` for all missing elements and `Some(T)` for found elements. - /// - /// WARNING: Do not have a join of only `MaybeJoin`s. Otherwise the join - /// will iterate over every single index of the bitset. If you want a - /// join with all `MaybeJoin`s, add an `EntitiesRes` to the join as well - /// to bound the join to all entities that are alive. 
- /// - /// ``` - /// # use specs::prelude::*; - /// # #[derive(Debug, PartialEq)] - /// # struct Pos { x: i32, y: i32 } impl Component for Pos { type Storage = VecStorage; } - /// # #[derive(Debug, PartialEq)] - /// # struct Vel { x: i32, y: i32 } impl Component for Vel { type Storage = VecStorage; } - /// struct ExampleSystem; - /// impl<'a> System<'a> for ExampleSystem { - /// type SystemData = ( - /// WriteStorage<'a, Pos>, - /// ReadStorage<'a, Vel>, - /// ); - /// fn run(&mut self, (mut positions, velocities): Self::SystemData) { - /// for (mut position, maybe_velocity) in (&mut positions, velocities.maybe()).join() { - /// if let Some(velocity) = maybe_velocity { - /// position.x += velocity.x; - /// position.y += velocity.y; - /// } - /// } - /// } - /// } - /// - /// fn main() { - /// let mut world = World::new(); - /// let mut dispatcher = DispatcherBuilder::new() - /// .with(ExampleSystem, "example_system", &[]) - /// .build(); - /// - /// dispatcher.setup(&mut world); - /// - /// let e1 = world.create_entity() - /// .with(Pos { x: 0, y: 0 }) - /// .with(Vel { x: 5, y: 2 }) - /// .build(); - /// - /// let e2 = world.create_entity() - /// .with(Pos { x: 0, y: 0 }) - /// .build(); - /// - /// dispatcher.dispatch(&mut world); - /// - /// let positions = world.read_storage::(); - /// assert_eq!(positions.get(e1), Some(&Pos { x: 5, y: 2 })); - /// assert_eq!(positions.get(e2), Some(&Pos { x: 0, y: 0 })); - /// } - /// ``` - fn maybe(self) -> MaybeJoin - where - Self: Sized, - { - MaybeJoin(self) - } - /// Open this join by returning the mask and the storages. /// /// # Safety /// - /// This is unsafe because implementations of this trait can permit - /// the `Value` to be mutated independently of the `Mask`. - /// If the `Mask` does not correctly report the status of the `Value` - /// then illegal memory access can occur. + /// This is unsafe because implementations of this trait can permit the + /// `Value` to be mutated independently of the `Mask`. If the `Mask` does + /// not correctly report the status of the `Value` then illegal memory + /// access can occur. unsafe fn open(self) -> (Self::Mask, Self::Value); /// Get a joined component value by a given index. /// + /// /// # Safety /// /// * A call to `get` must be preceded by a check if `id` is part of - /// `Self::Mask` - /// * The implementation of this method may use unsafe code, but has no - /// invariants to meet + /// `Self::Mask`. + /// * Multiple calls with the same `id` are not allowed, for a particular + /// instance of the values from [`open`](Join::open). unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type; /// If this `Join` typically returns all indices in the mask, then iterating - /// over only it or combined with other joins that are also dangerous - /// will cause the `JoinIter`/`ParJoin` to go through all indices which - /// is usually not what is wanted and will kill performance. + /// over only it or combined with other joins that are also dangerous will + /// cause the `JoinIter` to go through all indices which is usually not what + /// is wanted and will kill performance. #[inline] fn is_unconstrained() -> bool { false } } -/// A `Join`-able structure that yields all indices, returning `None` for all -/// missing elements and `Some(T)` for found elements. -/// -/// For usage see [`Join::maybe()`]. -/// -/// WARNING: Do not have a join of only `MaybeJoin`s. Otherwise the join will -/// iterate over every single index of the bitset. 
If you want a join with -/// all `MaybeJoin`s, add an `EntitiesRes` to the join as well to bound the -/// join to all entities that are alive. -/// -/// [`Join::maybe()`]: ../join/trait.Join.html#method.maybe -pub struct MaybeJoin(pub J); - -impl Join for MaybeJoin -where - T: Join, -{ - type Mask = BitSetAll; - type Type = Option<::Type>; - type Value = (::Mask, ::Value); - - // SAFETY: This wraps another implementation of `open`, making it dependent on - // `J`'s correctness. We can safely assume `J` is valid, thus this must be - // valid, too. No invariants to meet. - unsafe fn open(self) -> (Self::Mask, Self::Value) { - let (mask, value) = self.0.open(); - (BitSetAll, (mask, value)) - } - - // SAFETY: No invariants to meet and the unsafe code checks the mask, thus - // fulfills the requirements for calling `get` - unsafe fn get((mask, value): &mut Self::Value, id: Index) -> Self::Type { - if mask.contains(id) { - Some(::get(value, id)) - } else { - None - } - } - - #[inline] - fn is_unconstrained() -> bool { - true - } -} - -// SAFETY: This is safe as long as `T` implements `ParJoin` safely. `MaybeJoin` -// relies on `T as Join` for all storage access and safely wraps the inner -// `Join` API, so it should also be able to implement `ParJoin`. -#[cfg(feature = "parallel")] -unsafe impl ParJoin for MaybeJoin where T: ParJoin {} - -/// `JoinIter` is an `Iterator` over a group of `Storages`. +/// `JoinIter` is an `Iterator` over a group of storages. #[must_use] pub struct JoinIter { keys: BitIter, @@ -307,12 +155,14 @@ impl JoinIter { pub fn new(j: J) -> Self { if ::is_unconstrained() { log::warn!( - "`Join` possibly iterating through all indices, you might've made a join with all `MaybeJoin`s, which is unbounded in length." + "`Join` possibly iterating through all indices, \ + you might've made a join with all `MaybeJoin`s, \ + which is unbounded in length." ); } - // SAFETY: We do not swap out the mask or the values, nor do we allow it by - // exposing them. + // SAFETY: We do not swap out the mask or the values, nor do we allow it + // by exposing them. let (keys, values) = unsafe { j.open() }; JoinIter { keys: keys.iter(), @@ -321,192 +171,112 @@ impl JoinIter { } } -impl JoinIter { - /// Allows getting joined values for specific entity. - /// - /// ## Example - /// - /// ``` - /// # use specs::prelude::*; - /// # #[derive(Debug, PartialEq)] - /// # struct Pos; impl Component for Pos { type Storage = VecStorage; } - /// # #[derive(Debug, PartialEq)] - /// # struct Vel; impl Component for Vel { type Storage = VecStorage; } - /// let mut world = World::new(); - /// - /// world.register::(); - /// world.register::(); - /// - /// // This entity could be stashed anywhere (into `Component`, `Resource`, `System`s data, etc.) as it's just a number. - /// let entity = world - /// .create_entity() - /// .with(Pos) - /// .with(Vel) - /// .build(); - /// - /// // Later - /// { - /// let mut pos = world.write_storage::(); - /// let vel = world.read_storage::(); - /// - /// assert_eq!( - /// Some((&mut Pos, &Vel)), - /// (&mut pos, &vel).join().get(entity, &world.entities()), - /// "The entity that was stashed still has the needed components and is alive." - /// ); - /// } - /// - /// // The entity has found nice spot and doesn't need to move anymore. 
- /// world.write_storage::().remove(entity); - /// - /// // Even later - /// { - /// let mut pos = world.write_storage::(); - /// let vel = world.read_storage::(); - /// - /// assert_eq!( - /// None, - /// (&mut pos, &vel).join().get(entity, &world.entities()), - /// "The entity doesn't have velocity anymore." - /// ); - /// } - /// ``` - pub fn get(&mut self, entity: Entity, entities: &Entities) -> Option { - if self.keys.contains(entity.id()) && entities.is_alive(entity) { - // SAFETY: the mask (`keys`) is checked as specified in the docs of `get`. - Some(unsafe { J::get(&mut self.values, entity.id()) }) - } else { - None - } - } - - /// Allows getting joined values for specific raw index. - /// - /// The raw index for an `Entity` can be retrieved using `Entity::id` - /// method. - /// - /// As this method operates on raw indices, there is no check to see if the - /// entity is still alive, so the caller should ensure it instead. - pub fn get_unchecked(&mut self, index: Index) -> Option { - if self.keys.contains(index) { - // SAFETY: the mask (`keys`) is checked as specified in the docs of `get`. - Some(unsafe { J::get(&mut self.values, index) }) - } else { - None - } - } -} - impl std::iter::Iterator for JoinIter { type Item = J::Type; fn next(&mut self) -> Option { - // SAFETY: since `idx` is yielded from `keys` (the mask), it is necessarily a - // part of it. Thus, requirements are fulfilled for calling `get`. + // SAFETY: Since `idx` is yielded from `keys` (the mask), it is + // necessarily a part of it. `Join` requires that the iterator doesn't + // repeat indices and we advance the iterator for each `get` call. self.keys .next() .map(|idx| unsafe { J::get(&mut self.values, idx) }) } } -/// Clones the `JoinIter`. -/// -/// # Examples -/// -/// ``` -/// # use specs::prelude::*; -/// # #[derive(Debug)] -/// # struct Position; impl Component for Position { type Storage = VecStorage; } -/// # #[derive(Debug)] -/// # struct Collider; impl Component for Collider { type Storage = VecStorage; } -/// let mut world = World::new(); -/// -/// world.register::(); -/// world.register::(); -/// -/// // add some entities to our world -/// for _ in 0..10 { -/// let entity = world.create_entity().with(Position).with(Collider).build(); -/// } -/// -/// // check for collisions between entities -/// let positions = world.read_storage::(); -/// let colliders = world.read_storage::(); -/// -/// let mut join_iter = (&positions, &colliders).join(); -/// while let Some(a) = join_iter.next() { -/// for b in join_iter.clone() { -/// # let check_collision = |a, b| true; -/// if check_collision(a, b) { -/// // do stuff -/// } -/// } -/// } -/// ``` -/// -/// It is *not* possible to clone a `JoinIter` which allows for -/// mutation of its content, as this would lead to shared mutable -/// access. -/// -/// ```compile_fail -/// # use specs::prelude::*; -/// # #[derive(Debug)] -/// # struct Position; impl Component for Position { type Storage = VecStorage; } -/// # let mut world = World::new(); -/// # world.register::(); -/// # let entity = world.create_entity().with(Position).build(); -/// // .. previous example -/// -/// let mut positions = world.write_storage::(); -/// -/// let mut join_iter = (&mut positions).join(); -/// // this must not compile, as the following line would cause -/// // undefined behavior! 
-/// let mut cloned_iter = join_iter.clone(); -/// let (mut alias_one, mut alias_two) = (join_iter.next(), cloned_iter.next()); -/// ``` -impl Clone for JoinIter -where - J::Mask: Clone, - J::Value: Clone, -{ - fn clone(&self) -> Self { - Self { - keys: self.keys.clone(), - values: self.values.clone(), - } - } -} +// Implementations of `LendJoin`, `Join`, and `ParJoin` for tuples, `Fetch`, +// `Read`, `ReadExpect`, `FetchMut`, `Write`, and `WriteExpect`. macro_rules! define_open { // use variables to indicate the arity of the tuple ($($from:ident),*) => { - impl<$($from,)*> Join for ($($from),*,) + // SAFETY: The returned mask in `open` is the intersection of the masks + // from each type in this tuple. So if an `id` is present in the + // combined mask, it will be safe to retrieve the corresponding items. + // Iterating the mask does not repeat indices. + #[nougat::gat] + unsafe impl<$($from,)*> LendJoin for ($($from),*,) + where $($from: LendJoin),*, + ($(<$from as LendJoin>::Mask,)*): BitAnd, + { + type Type<'next> = ($(<$from as LendJoin>::Type<'next>),*,); + type Value = ($($from::Value),*,); + type Mask = <($($from::Mask,)*) as BitAnd>::Value; + + #[allow(non_snake_case)] + unsafe fn open(self) -> (Self::Mask, Self::Value) { + let ($($from,)*) = self; + // SAFETY: While we do expose the mask and the values and + // therefore would allow swapping them, this method is `unsafe` + // and relies on the same invariants. + let ($($from,)*) = unsafe { ($($from.open(),)*) }; + ( + ($($from.0),*,).and(), + ($($from.1),*,) + ) + } + + #[allow(non_snake_case)] + unsafe fn get<'next>(v: &'next mut Self::Value, i: Index) -> Self::Type<'next> + + { + let &mut ($(ref mut $from,)*) = v; + // SAFETY: `get` is safe to call as the caller must have checked + // the mask, which only has a key that exists in all of the + // storages. Requirement to not call with the same ID more than + // once (unless `RepeatableLendGet` is implemented) is passed to + // the caller. + unsafe { ($($from::get($from, i),)*) } + } + + #[inline] + fn is_unconstrained() -> bool { + let mut unconstrained = true; + $( unconstrained = unconstrained && $from::is_unconstrained(); )* + unconstrained + } + } + + // SAFETY: Tuple impls of `LendJoin` simply defer to the individual + // storages. Thus, if all of them implement this, it is safe to call + // `LendJoin::get` multiple times with the same ID. + unsafe impl<$($from,)*> RepeatableLendGet for ($($from),*,) + where $($from: RepeatableLendGet),*, + ($(<$from as LendJoin>::Mask,)*): BitAnd, {} + + // SAFETY: The returned mask in `open` is the intersection of the masks + // from each type in this tuple. So if an `id` is present in the + // combined mask, it will be safe to retrieve the corresponding items. + // Iterating the mask does not repeat indices. + unsafe impl<$($from,)*> Join for ($($from),*,) where $($from: Join),*, ($(<$from as Join>::Mask,)*): BitAnd, { type Type = ($($from::Type),*,); type Value = ($($from::Value),*,); type Mask = <($($from::Mask,)*) as BitAnd>::Value; - #[allow(non_snake_case)] - // SAFETY: While we do expose the mask and the values and therefore would allow swapping them, - // this method is `unsafe` and relies on the same invariants. + #[allow(non_snake_case)] unsafe fn open(self) -> (Self::Mask, Self::Value) { let ($($from,)*) = self; - let ($($from,)*) = ($($from.open(),)*); + // SAFETY: While we do expose the mask and the values and + // therefore would allow swapping them, this method is `unsafe` + // and relies on the same invariants. 
+ let ($($from,)*) = unsafe { ($($from.open(),)*) }; ( ($($from.0),*,).and(), ($($from.1),*,) ) } - // SAFETY: No invariants to meet and `get` is safe to call as the caller must have checked the mask, - // which only has a key that exists in all of the storages. #[allow(non_snake_case)] unsafe fn get(v: &mut Self::Value, i: Index) -> Self::Type { let &mut ($(ref mut $from,)*) = v; - ($($from::get($from, i),)*) + // SAFETY: `get` is safe to call as the caller must have checked + // the mask, which only has a key that exists in all of the + // storages. Requirement to not use the same ID multiple times + // is also passed to the caller. + unsafe { ($($from::get($from, i),)*) } } #[inline] @@ -517,15 +287,54 @@ macro_rules! define_open { } } - // SAFETY: This is safe to implement since all components implement `ParJoin`. - // If the access of every individual `get` leads to disjoint memory access, calling - // all of them after another does in no case lead to access of common memory. + // SAFETY: This is safe to implement since all components implement + // `ParJoin`. If the access of every individual `get` is callable from + // multiple threads, then this `get` method will be as well. + // + // The returned mask in `open` is the intersection of the masks + // from each type in this tuple. So if an `id` is present in the + // combined mask, it will be safe to retrieve the corresponding items. + // Iterating the mask does not repeat indices. #[cfg(feature = "parallel")] unsafe impl<$($from,)*> ParJoin for ($($from),*,) where $($from: ParJoin),*, - ($(<$from as Join>::Mask,)*): BitAnd, - {} + ($(<$from as ParJoin>::Mask,)*): BitAnd, + { + type Type = ($($from::Type),*,); + type Value = ($($from::Value),*,); + type Mask = <($($from::Mask,)*) as BitAnd>::Value; + #[allow(non_snake_case)] + unsafe fn open(self) -> (Self::Mask, Self::Value) { + let ($($from,)*) = self; + // SAFETY: While we do expose the mask and the values and + // therefore would allow swapping them, this method is `unsafe` + // and relies on the same invariants. + let ($($from,)*) = unsafe { ($($from.open(),)*) }; + ( + ($($from.0),*,).and(), + ($($from.1),*,) + ) + } + + #[allow(non_snake_case)] + unsafe fn get(v: &Self::Value, i: Index) -> Self::Type { + let &($(ref $from,)*) = v; + // SAFETY: `get` is safe to call as the caller must have checked + // the mask, which only has a key that exists in all of the + // storages. Requirement for the return value to no longer be + // alive before subsequent calls with the same `id` is passed to + // the caller. + unsafe { ($($from::get($from, i),)*) } + } + + #[inline] + fn is_unconstrained() -> bool { + let mut unconstrained = true; + $( unconstrained = unconstrained && $from::is_unconstrained(); )* + unconstrained + } + } } } @@ -559,7 +368,54 @@ define_open!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R); macro_rules! immutable_resource_join { ($($ty:ty),*) => { $( - impl<'a, 'b, T> Join for &'a $ty + // SAFETY: Since `T` implements `LendJoin` it is safe to deref and defer + // to its implementation. + #[nougat::gat] + unsafe impl<'a, 'b, T> LendJoin for &'a $ty + where + &'a T: LendJoin, + T: Resource, + { + type Type<'next> = <&'a T as LendJoin>::Type<'next>; + type Value = <&'a T as LendJoin>::Value; + type Mask = <&'a T as LendJoin>::Mask; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + // SAFETY: This only wraps `T` and, while exposing the mask and + // the values, requires the same invariants as the original + // implementation and is thus safe. 
+ unsafe { self.deref().open() } + } + + unsafe fn get<'next>(v: &'next mut Self::Value, i: Index) -> Self::Type<'next> + + { + // SAFETY: The mask of `Self` and `T` are identical, thus a + // check to `Self`'s mask (which is required) is equal to a + // check of `T`'s mask, which makes `get` safe to call. + // Requirement to not call with the same ID more than once + // (unless `RepeatableLendGet` is implemented) is passed to the + // caller. + unsafe { <&'a T as LendJoin>::get(v, i) } + } + + #[inline] + fn is_unconstrained() -> bool { + <&'a T as LendJoin>::is_unconstrained() + } + } + + // SAFETY: <&'a $ty as LendJoin>::get does not rely on only being called + // once with a particular ID as long as `&'a T` does not rely on this. + unsafe impl<'a, 'b, T> RepeatableLendGet for &'a $ty + where + &'a T: RepeatableLendGet, + T: Resource, + {} + + // SAFETY: Since `T` implements `Join` it is safe to deref and defer to + // its implementation. + unsafe impl<'a, 'b, T> Join for &'a $ty where &'a T: Join, T: Resource, @@ -568,16 +424,20 @@ macro_rules! immutable_resource_join { type Value = <&'a T as Join>::Value; type Mask = <&'a T as Join>::Mask; - // SAFETY: This only wraps `T` and, while exposing the mask and the values, - // requires the same invariants as the original implementation and is thus safe. unsafe fn open(self) -> (Self::Mask, Self::Value) { - self.deref().open() + // SAFETY: This only wraps `T` and, while exposing the mask and + // the values, requires the same invariants as the original + // implementation and is thus safe. + unsafe { self.deref().open() } } - // SAFETY: The mask of `Self` and `T` are identical, thus a check to `Self`'s mask (which is required) - // is equal to a check of `T`'s mask, which makes `get` safe to call. unsafe fn get(v: &mut Self::Value, i: Index) -> Self::Type { - <&'a T as Join>::get(v, i) + // SAFETY: The mask of `Self` and `T` are identical, thus a + // check to `Self`'s mask (which is required) is equal to a + // check of `T`'s mask, which makes `get` safe to call. + // Requirement to not use the same ID multiple times is passed + // to the caller. + unsafe { <&'a T as Join>::get(v, i) } } #[inline] @@ -586,14 +446,40 @@ macro_rules! immutable_resource_join { } } - // SAFETY: This is just a wrapper of `T`'s implementation for `ParJoin` and can - // in no case lead to other memory access patterns. + // SAFETY: Since `T` implements `ParJoin` it is safe to deref and defer to + // its implementation. S-TODO we can rely on errors if $ty is not sync? + // Iterating the mask does not repeat indices. #[cfg(feature = "parallel")] unsafe impl<'a, 'b, T> ParJoin for &'a $ty where &'a T: ParJoin, - T: Resource - {} + T: Resource, + { + type Type = <&'a T as ParJoin>::Type; + type Value = <&'a T as ParJoin>::Value; + type Mask = <&'a T as ParJoin>::Mask; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + // SAFETY: This only wraps `T` and, while exposing the mask and + // the values, requires the same invariants as the original + // implementation and is thus safe. + unsafe { self.deref().open() } + } + + unsafe fn get(v: &Self::Value, i: Index) -> Self::Type { + // SAFETY: The mask of `Self` and `T` are identical, thus a + // check to `Self`'s mask (which is required) is equal to a + // check of `T`'s mask, which makes `get` safe to call. + // Requirement for the return value to no longer be alive before + // subsequent calls with the same ID is passed to the caller. 
+ unsafe { <&'a T as ParJoin>::get(v, i) } + } + + #[inline] + fn is_unconstrained() -> bool { + <&'a T as ParJoin>::is_unconstrained() + } + } )* }; } @@ -601,7 +487,55 @@ macro_rules! immutable_resource_join { macro_rules! mutable_resource_join { ($($ty:ty),*) => { $( - impl<'a, 'b, T> Join for &'a mut $ty + // SAFETY: Since `T` implements `LendJoin` it is safe to deref and defer + // to its implementation. + #[nougat::gat] + unsafe impl<'a, 'b, T> LendJoin for &'a mut $ty + where + &'a mut T: LendJoin, + T: Resource, + { + type Type<'next> = <&'a mut T as LendJoin>::Type<'next>; + type Value = <&'a mut T as LendJoin>::Value; + type Mask = <&'a mut T as LendJoin>::Mask; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + // SAFETY: This only wraps `T` and, while exposing the mask and + // the values, requires the same invariants as the original + // implementation and is thus safe. + unsafe { self.deref_mut().open() } + } + + unsafe fn get<'next>(v: &'next mut Self::Value, i: Index) -> Self::Type<'next> + + { + // SAFETY: The mask of `Self` and `T` are identical, thus a + // check to `Self`'s mask (which is required) is equal to a + // check of `T`'s mask, which makes `get_mut` safe to call. + // Requirement to not call with the same ID more than once + // (unless `RepeatableLendGet` is implemented) is passed to the + // caller. + unsafe { <&'a mut T as LendJoin>::get(v, i) } + } + + #[inline] + fn is_unconstrained() -> bool { + <&'a mut T as LendJoin>::is_unconstrained() + } + } + + // SAFETY: <&'a mut $ty as LendJoin>::get does not rely on only being + // called once with a particular ID as long as `&'a mut T` does not rely + // on this. + unsafe impl<'a, 'b, T> RepeatableLendGet for &'a mut $ty + where + &'a mut T: RepeatableLendGet, + T: Resource, + {} + + // SAFETY: Since `T` implements `Join` it is safe to deref and defer to + // its implementation. + unsafe impl<'a, 'b, T> Join for &'a mut $ty where &'a mut T: Join, T: Resource, @@ -610,16 +544,20 @@ macro_rules! mutable_resource_join { type Value = <&'a mut T as Join>::Value; type Mask = <&'a mut T as Join>::Mask; - // SAFETY: This only wraps `T` and, while exposing the mask and the values, - // requires the same invariants as the original implementation and is thus safe. unsafe fn open(self) -> (Self::Mask, Self::Value) { - self.deref_mut().open() + // SAFETY: This only wraps `T` and, while exposing the mask and + // the values, requires the same invariants as the original + // implementation and is thus safe. + unsafe { self.deref_mut().open() } } - // SAFETY: The mask of `Self` and `T` are identical, thus a check to `Self`'s mask (which is required) - // is equal to a check of `T`'s mask, which makes `get_mut` safe to call. unsafe fn get(v: &mut Self::Value, i: Index) -> Self::Type { - <&'a mut T as Join>::get(v, i) + // SAFETY: The mask of `Self` and `T` are identical, thus a + // check to `Self`'s mask (which is required) is equal to a + // check of `T`'s mask, which makes `get_mut` safe to call. + // Requirement to not use the same ID multiple times is passed + // to the caller. + unsafe { <&'a mut T as Join>::get(v, i) } } #[inline] @@ -628,14 +566,39 @@ macro_rules! mutable_resource_join { } } - // SAFETY: This is just a wrapper of `T`'s implementation for `ParJoin` and can - // in no case lead to other memory access patterns. + // SAFETY: Since `T` implements `ParJoin` it is safe to deref and defer + // its implementation. S-TODO we can rely on errors if $ty is not sync? 
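+        // Together with the immutable wrappers above, these impls are what
+        // let resource handles participate in joins directly. A hedged
+        // sketch, assuming a registered `Pos` component and the usual
+        // `Entities` system data (an alias for `Read<'_, EntitiesRes>`):
+        //
+        //     for (_entity, pos) in (&entities, &positions).join() {
+        //         // `&entities` defers to `&EntitiesRes` via these impls.
+        //     }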
#[cfg(feature = "parallel")] unsafe impl<'a, 'b, T> ParJoin for &'a mut $ty where &'a mut T: ParJoin, - T: Resource - {} + T: Resource, + { + type Type = <&'a mut T as ParJoin>::Type; + type Value = <&'a mut T as ParJoin>::Value; + type Mask = <&'a mut T as ParJoin>::Mask; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + // SAFETY: This only wraps `T` and, while exposing the mask and + // the values, requires the same invariants as the original + // implementation and is thus safe. + unsafe { self.deref_mut().open() } + } + + unsafe fn get(v: &Self::Value, i: Index) -> Self::Type { + // SAFETY: The mask of `Self` and `T` are identical, thus a + // check to `Self`'s mask (which is required) is equal to a + // check of `T`'s mask, which makes `get_mut` safe to call. + // Requirement for the return value to no longer be alive before + // subsequent calls with the same ID is passed to the caller. + unsafe { <&'a mut T as ParJoin>::get(v, i) } + } + + #[inline] + fn is_unconstrained() -> bool { + <&'a mut T as ParJoin>::is_unconstrained() + } + } )* }; } diff --git a/src/join/par_join.rs b/src/join/par_join.rs index 40159d833..9a1a7f9c3 100644 --- a/src/join/par_join.rs +++ b/src/join/par_join.rs @@ -1,51 +1,87 @@ -use std::cell::UnsafeCell; - use hibitset::{BitProducer, BitSetLike}; - -use crate::join::Join; use rayon::iter::{ plumbing::{bridge_unindexed, Folder, UnindexedConsumer, UnindexedProducer}, ParallelIterator, }; +use crate::world::Index; + /// The purpose of the `ParJoin` trait is to provide a way /// to access multiple storages in parallel at the same time with /// the merged bit set. /// /// # Safety /// -/// The implementation of `ParallelIterator` for `ParJoin` makes multiple -/// assumptions on the structure of `Self`. In particular, `::get` -/// must be callable from multiple threads, simultaneously, without mutating -/// values not exclusively associated with `id`. -// NOTE: This is currently unspecified behavior. It seems very unlikely that it -// breaks in the future, but technically it's not specified as valid Rust code. -pub unsafe trait ParJoin: Join { +/// `ParJoin::get` must be callable from multiple threads, simultaneously. +/// +/// The `Self::Mask` value returned with the `Self::Value` must correspond such +/// that it is safe to retrieve items from `Self::Value` whose presence is +/// indicated in the mask. As part of this, `BitSetLike::iter` must not produce +/// an iterator that repeats an `Index` value. +pub unsafe trait ParJoin { + /// Type of joined components. + type Type; + /// Type of joined storages. + type Value; + /// Type of joined bit mask. + type Mask: BitSetLike; + /// Create a joined parallel iterator over the contents. fn par_join(self) -> JoinParIter where Self: Sized, { - if ::is_unconstrained() { + if Self::is_unconstrained() { log::warn!( - "`ParJoin` possibly iterating through all indices, you might've made a join with all `MaybeJoin`s, which is unbounded in length." + "`ParJoin` possibly iterating through all indices, \ + you might've made a join with all `MaybeJoin`s, \ + which is unbounded in length." ); } JoinParIter(self) } + + /// Open this join by returning the mask and the storages. + /// + /// # Safety + /// + /// This is unsafe because implementations of this trait can permit the + /// `Value` to be mutated independently of the `Mask`. If the `Mask` does + /// not correctly report the status of the `Value` then illegal memory + /// access can occur. 
+ unsafe fn open(self) -> (Self::Mask, Self::Value); + + /// Get a joined component value by a given index. + /// + /// # Safety + /// + /// * A call to `get` must be preceded by a check if `id` is part of + /// `Self::Mask`. + /// * The value returned from this method must no longer be alive before + /// subsequent calls with the same `id`. + unsafe fn get(value: &Self::Value, id: Index) -> Self::Type; + + /// If this `LendJoin` typically returns all indices in the mask, then + /// iterating over only it or combined with other joins that are also + /// dangerous will cause the `JoinLendIter` to go through all indices which + /// is usually not what is wanted and will kill performance. + #[inline] + fn is_unconstrained() -> bool { + false + } } -/// `JoinParIter` is a `ParallelIterator` over a group of `Storages`. +/// `JoinParIter` is a `ParallelIterator` over a group of storages. #[must_use] pub struct JoinParIter(J); impl ParallelIterator for JoinParIter where - J: Join + Send, + J: ParJoin + Send, J::Mask: Send + Sync, J::Type: Send, - J::Value: Send, + J::Value: Send + Sync, { type Item = J::Type; @@ -53,12 +89,11 @@ where where C: UnindexedConsumer, { + // SAFETY: `keys` and `values` are not exposed outside this module and + // we only use `values` for calling `ParJoin::get`. let (keys, values) = unsafe { self.0.open() }; // Create a bit producer which splits on up to three levels let producer = BitProducer((&keys).iter(), 3); - // HACK: use `UnsafeCell` to share `values` between threads; - // this is the unspecified behavior referred to above. - let values = UnsafeCell::new(values); bridge_unindexed(JoinProducer::::new(producer, &values), consumer) } @@ -66,49 +101,32 @@ where struct JoinProducer<'a, J> where - J: Join + Send, + J: ParJoin + Send, J::Mask: Send + Sync + 'a, J::Type: Send, - J::Value: Send + 'a, + J::Value: Send + Sync + 'a, { keys: BitProducer<'a, J::Mask>, - values: &'a UnsafeCell, + values: &'a J::Value, } impl<'a, J> JoinProducer<'a, J> where - J: Join + Send, + J: ParJoin + Send, J::Type: Send, - J::Value: 'a + Send, + J::Value: 'a + Send + Sync, J::Mask: 'a + Send + Sync, { - fn new(keys: BitProducer<'a, J::Mask>, values: &'a UnsafeCell) -> Self { + fn new(keys: BitProducer<'a, J::Mask>, values: &'a J::Value) -> Self { JoinProducer { keys, values } } } -// SAFETY: `Send` is safe to implement if all components of `Self` are logically -// `Send`. `keys` already has `Send` implemented, thus no reasoning is required. -// `values` is a reference to an `UnsafeCell` wrapping `J::Value`; -// `J::Value` is constrained to implement `Send`. -// `UnsafeCell` provides interior mutability, but the specification of it allows -// sharing as long as access does not happen simultaneously; this makes it -// generally safe to `Send`, but we are accessing it simultaneously, which is -// technically not allowed. Also see https://github.com/slide-rs/specs/issues/220 -unsafe impl<'a, J> Send for JoinProducer<'a, J> -where - J: Join + Send, - J::Type: Send, - J::Value: 'a + Send, - J::Mask: 'a + Send + Sync, -{ -} - impl<'a, J> UnindexedProducer for JoinProducer<'a, J> where - J: Join + Send, + J: ParJoin + Send, J::Type: Send, - J::Value: 'a + Send, + J::Value: 'a + Send + Sync, J::Mask: 'a + Send + Sync, { type Item = J::Type; @@ -127,14 +145,11 @@ where F: Folder, { let JoinProducer { values, keys, .. } = self; - let iter = keys.0.map(|idx| unsafe { - // This unsafe block should be safe if the `J::get` - // can be safely called from different threads with distinct indices. 
- - // The indices here are guaranteed to be distinct because of the fact - // that the bit set is split. - J::get(&mut *values.get(), idx) - }); + // SAFETY: `idx` is obtained from the `Mask` returned by + // `ParJoin::open`. The indices here are guaranteed to be distinct + // because of the fact that the bit set is split and because `ParJoin` + // requires that the bit set iterator doesn't repeat indices. + let iter = keys.0.map(|idx| unsafe { J::get(values, idx) }); folder.consume_iter(iter) } diff --git a/src/lib.rs b/src/lib.rs index 65a876921..819e3af4a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,9 +1,6 @@ #![warn(missing_docs)] +#![deny(unsafe_op_in_unsafe_fn)] #![deny(clippy::disallowed_types)] -#![cfg_attr( - feature = "nightly", - feature(generic_associated_types, associated_type_defaults) -)] //! # SPECS Parallel ECS //! @@ -227,7 +224,7 @@ pub use specs_derive::{Component, ConvertSaveload}; pub use crate::join::ParJoin; pub use crate::{ changeset::ChangeSet, - join::Join, + join::{Join, LendJoin}, storage::{ DefaultVecStorage, DenseVecStorage, FlaggedStorage, HashMapStorage, NullStorage, ReadStorage, Storage, Tracked, VecStorage, WriteStorage, @@ -235,5 +232,4 @@ pub use crate::{ world::{Builder, Component, Entities, Entity, EntityBuilder, LazyUpdate, WorldExt}, }; -#[cfg(feature = "nightly")] pub use crate::storage::DerefFlaggedStorage; diff --git a/src/prelude.rs b/src/prelude.rs index 8865fec86..d13d6863a 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -3,6 +3,8 @@ //! Contains all of the most common traits, structures, pub use crate::join::Join; +#[nougat::gat(Type)] +pub use crate::join::LendJoin; #[cfg(feature = "parallel")] pub use crate::join::ParJoin; pub use hibitset::BitSet; diff --git a/src/saveload/marker.rs b/src/saveload/marker.rs index 5aebdd85c..203e7324c 100644 --- a/src/saveload/marker.rs +++ b/src/saveload/marker.rs @@ -11,6 +11,7 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ prelude::*, + storage::AccessMut, world::{EntitiesRes, EntityResBuilder, LazyBuilder}, }; @@ -324,7 +325,7 @@ pub trait MarkerAllocator: Resource { ) -> Entity { if let Some(entity) = self.retrieve_entity_internal(marker.id()) { if let Some(mut marker_comp) = storage.get_mut(entity) { - marker_comp.update(marker); + marker_comp.access_mut().update(marker); return entity; } diff --git a/src/storage/data.rs b/src/storage/data.rs index 08655ca07..3461e0109 100644 --- a/src/storage/data.rs +++ b/src/storage/data.rs @@ -124,7 +124,7 @@ where res.entry::>() .or_insert_with(|| MaskedStorage::new(::unwrap_default())); res.fetch_mut::>() - .register(&*res.fetch::>()); + .register::>(); } fn fetch(res: &'a World) -> Self { @@ -211,7 +211,7 @@ where res.entry::>() .or_insert_with(|| MaskedStorage::new(::unwrap_default())); res.fetch_mut::>() - .register(&*res.fetch::>()); + .register::>(); } fn fetch(res: &'a World) -> Self { diff --git a/src/storage/deref_flagged.rs b/src/storage/deref_flagged.rs index f4317fb71..f88bcac62 100644 --- a/src/storage/deref_flagged.rs +++ b/src/storage/deref_flagged.rs @@ -6,7 +6,9 @@ use std::{ use hibitset::BitSetLike; use crate::{ - storage::{ComponentEvent, DenseVecStorage, Tracked, TryDefault, UnprotectedStorage}, + storage::{ + AccessMut, ComponentEvent, DenseVecStorage, Tracked, TryDefault, UnprotectedStorage, + }, world::{Component, Index}, }; @@ -56,20 +58,20 @@ where } impl> UnprotectedStorage for DerefFlaggedStorage { - type AccessMut<'a> - where - T: 'a, - = FlaggedAccessMut<'a, >::AccessMut<'a>, C>; + type 
AccessMut<'a> = FlaggedAccessMut<'a, >::AccessMut<'a>, C> + where T: 'a; unsafe fn clean(&mut self, has: B) where B: BitSetLike, { - self.storage.clean(has); + // SAFETY: Requirements passed to caller. + unsafe { self.storage.clean(has) }; } unsafe fn get(&self, id: Index) -> &C { - self.storage.get(id) + // SAFETY: Requirements passed to caller. + unsafe { self.storage.get(id) } } unsafe fn get_mut(&mut self, id: Index) -> Self::AccessMut<'_> { @@ -78,7 +80,8 @@ impl> UnprotectedStorage for DerefFlag channel: &mut self.channel, emit, id, - access: self.storage.get_mut(id), + // SAFETY: Requirements passed to caller. + access: unsafe { self.storage.get_mut(id) }, phantom: PhantomData, } } @@ -87,14 +90,16 @@ impl> UnprotectedStorage for DerefFlag if self.emit_event() { self.channel.single_write(ComponentEvent::Inserted(id)); } - self.storage.insert(id, comp); + // SAFETY: Requirements passed to caller. + unsafe { self.storage.insert(id, comp) }; } unsafe fn remove(&mut self, id: Index) -> C { if self.emit_event() { self.channel.single_write(ComponentEvent::Removed(id)); } - self.storage.remove(id) + // SAFETY: Requirements passed to caller. + unsafe { self.storage.remove(id) } } } @@ -118,6 +123,8 @@ impl Tracked for DerefFlaggedStorage { } } +/// Wrapper type only emits modificaition events when the component is accessed +/// via mutably dereferencing. Also see [`DerefFlaggedStorage`] documentation. pub struct FlaggedAccessMut<'a, A, C> { channel: &'a mut EventChannel, emit: bool, @@ -139,12 +146,12 @@ where impl<'a, A, C> DerefMut for FlaggedAccessMut<'a, A, C> where - A: DerefMut, + A: AccessMut, { fn deref_mut(&mut self) -> &mut Self::Target { if self.emit { self.channel.single_write(ComponentEvent::Modified(self.id)); } - self.access.deref_mut() + self.access.access_mut() } } diff --git a/src/storage/drain.rs b/src/storage/drain.rs index e86b52a5d..9cb1dbaf4 100644 --- a/src/storage/drain.rs +++ b/src/storage/drain.rs @@ -1,7 +1,9 @@ use hibitset::BitSet; +#[nougat::gat(Type)] +use crate::join::LendJoin; use crate::{ - join::Join, + join::{Join, RepeatableLendGet}, storage::MaskedStorage, world::{Component, Index}, }; @@ -13,7 +15,38 @@ pub struct Drain<'a, T: Component> { pub data: &'a mut MaskedStorage, } -impl<'a, T> Join for Drain<'a, T> +// SAFETY: Calling `get` is always safe! Iterating the mask does not repeat +// indices. +#[nougat::gat] +unsafe impl<'a, T> LendJoin for Drain<'a, T> +where + T: Component, +{ + type Mask = BitSet; + type Type<'next> = T; + type Value = &'a mut MaskedStorage; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + // TODO: Cloning the whole bitset here seems expensive, and it is + // hidden from the user, but there is no obvious way to restructure + // things to avoid this with the way that bitsets are composed together + // for iteration. + let mask = self.data.mask.clone(); + + (mask, self.data) + } + + unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> T { + value.remove(id).expect("Tried to access same index twice") + } +} + +// SAFETY: Calling `get` is always safe! +unsafe impl<'a, T> RepeatableLendGet for Drain<'a, T> where T: Component {} + +// SAFETY: Calling `get` is always safe! Iterating the mask does not repeat +// indices. +unsafe impl<'a, T> Join for Drain<'a, T> where T: Component, { @@ -21,14 +54,16 @@ where type Type = T; type Value = &'a mut MaskedStorage; - // SAFETY: No invariants to meet and no unsafe code. 
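+    // Usage sketch (assuming `comps` is a `WriteStorage<Comp>`): a drain
+    // join removes each matched component as it is yielded, e.g.
+    // `for comp in comps.drain().join() { /* comp was removed */ }`.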
unsafe fn open(self) -> (Self::Mask, Self::Value) { + // TODO: Cloning the whole bitset here seems expensive, and it is + // hidden from the user, but there is no obvious way to restructure + // things to avoid this with the way that bitsets are composed together + // for iteration. let mask = self.data.mask.clone(); (mask, self.data) } - // SAFETY: No invariants to meet and no unsafe code. unsafe fn get(value: &mut Self::Value, id: Index) -> T { value.remove(id).expect("Tried to access same index twice") } diff --git a/src/storage/entry.rs b/src/storage/entry.rs index 55ee1c745..063619546 100644 --- a/src/storage/entry.rs +++ b/src/storage/entry.rs @@ -1,7 +1,8 @@ use hibitset::BitSetAll; use super::*; -use crate::join::Join; +use crate::join::LendJoin; +use crate::world::Generation; impl<'e, T, D> Storage<'e, T, D> where @@ -38,14 +39,7 @@ where 'e: 'a, { if self.entities.is_alive(e) { - let entries = self.entries(); - unsafe { - // SAFETY: This is safe since we're not swapping out the mask or the values. - let (_, mut value): (BitSetAll, _) = entries.open(); - // SAFETY: We did check the mask, because the mask is `BitSetAll` and every - // index is part of it. - Ok(Entries::get(&mut value, e.id())) - } + Ok(self.entry_inner(e.id())) } else { let gen = self .entities @@ -60,8 +54,8 @@ where } } - /// Returns a `Join`-able structure that yields all indices, returning - /// `Entry` for all elements + /// Returns a [`LendJoin`]-able structure that yields all indices, returning + /// [`StorageEntry`] for all elements /// /// WARNING: Do not have a join of only `Entries`s. Otherwise the join will /// iterate over every single index of the bitset. If you want a join with @@ -107,7 +101,8 @@ where /// # } /// # /// # world.exec(|(mut counters, marker): (WriteStorage, ReadStorage)| { - /// for (mut counter, _) in (counters.entries(), &marker).join() { + /// let mut join = (counters.entries(), &marker).lend_join(); + /// while let Some((mut counter, _)) = join.next() { /// let counter = counter.or_insert_with(Default::default); /// counter.increase(); /// @@ -121,43 +116,43 @@ where pub fn entries<'a>(&'a mut self) -> Entries<'a, 'e, T, D> { Entries(self) } + + /// Returns an entry to the component associated with the provided index. + /// + /// Does not check whether an entity is alive! + pub fn entry_inner<'a>(&'a mut self, id: Index) -> StorageEntry<'a, 'e, T, D> { + if self.data.mask.contains(id) { + StorageEntry::Occupied(OccupiedEntry { id, storage: self }) + } else { + StorageEntry::Vacant(VacantEntry { id, storage: self }) + } + } } -/// `Join`-able structure that yields all indices, returning `Entry` for all -/// elements +/// [`LendJoin`]-able structure that yields all indices, +/// returning [`StorageEntry`] for all elements. +/// +/// This can be constructed via [`Storage::entries`]. pub struct Entries<'a, 'b: 'a, T: 'a, D: 'a>(&'a mut Storage<'b, T, D>); -impl<'a, 'b: 'a, T: 'a, D: 'a> Join for Entries<'a, 'b, T, D> +// SAFETY: We return a mask containing all items, but check the original mask in +// the `get` implementation. Iterating the mask does not repeat indices. +#[nougat::gat] +unsafe impl<'a, 'b: 'a, T: 'a, D: 'a> LendJoin for Entries<'a, 'b, T, D> where T: Component, - D: Deref>, + D: DerefMut>, { type Mask = BitSetAll; - type Type = StorageEntry<'a, 'b, T, D>; + type Type<'next> = StorageEntry<'next, 'b, T, D>; type Value = &'a mut Storage<'b, T, D>; - // SAFETY: No invariants to meet and no unsafe code. 
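+    // `BitSetAll` reports every index as present; the real mask is only
+    // consulted later, in `entry_inner`, when `get` constructs the entry.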
unsafe fn open(self) -> (Self::Mask, Self::Value) { (BitSetAll, self.0) } - // SAFETY: We are lengthening the lifetime of `value` to `'a`; - // TODO: how to prove this is safe? - unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type { - // This is HACK. See implementation of Join for &'a mut Storage<'e, T, D> for - // details why it is necessary. - let storage: *mut Storage<'b, T, D> = *value as *mut Storage<'b, T, D>; - if (*storage).data.mask.contains(id) { - StorageEntry::Occupied(OccupiedEntry { - id, - storage: &mut *storage, - }) - } else { - StorageEntry::Vacant(VacantEntry { - id, - storage: &mut *storage, - }) - } + unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> Self::Type<'next> { + value.entry_inner(id) } #[inline] @@ -166,6 +161,15 @@ where } } +// SAFETY: LendJoin::get impl for this type is safe to call multiple times with +// the same ID. +unsafe impl<'a, 'b: 'a, T: 'a, D: 'a> RepeatableLendGet for Entries<'a, 'b, T, D> +where + T: Component, + D: DerefMut>, +{ +} + /// An entry to a storage which has a component associated to the entity. pub struct OccupiedEntry<'a, 'b: 'a, T: 'a, D: 'a> { id: Index, @@ -207,7 +211,7 @@ where /// Inserts a value into the storage and returns the old one. pub fn insert(&mut self, mut component: T) -> T { - std::mem::swap(&mut component, self.get_mut().deref_mut()); + core::mem::swap(&mut component, self.get_mut().access_mut()); component } @@ -231,12 +235,14 @@ where { /// Inserts a value into the storage. pub fn insert(self, component: T) -> AccessMutReturn<'a, T> { - self.storage.data.mask.add(self.id); - // SAFETY: This is safe since we added `self.id` to the mask. - unsafe { - self.storage.data.inner.insert(self.id, component); - self.storage.data.inner.get_mut(self.id) - } + // Note, this method adds `id` to the mask. + // SAFETY: `VacantEntry` is only constructed after checking that `id` is + // not present in the mask and we consume `VacantEntry` here. + unsafe { self.storage.not_present_insert(self.id, component) }; + // TODO (perf): We could potentially have an insert method that directly + // produces a reference to the just inserted value. + // SAFETY: We just inserted the component above. + unsafe { self.storage.data.inner.get_mut(self.id) } } } diff --git a/src/storage/flagged.rs b/src/storage/flagged.rs index 4adb86828..51bcf65c3 100644 --- a/src/storage/flagged.rs +++ b/src/storage/flagged.rs @@ -3,7 +3,10 @@ use std::marker::PhantomData; use hibitset::BitSetLike; use crate::{ - storage::{ComponentEvent, DenseVecStorage, Tracked, TryDefault, UnprotectedStorage}, + storage::{ + ComponentEvent, DenseVecStorage, SharedGetMutStorage, SyncUnsafeCell, Tracked, TryDefault, + UnprotectedStorage, + }, world::{Component, Index}, }; @@ -17,7 +20,7 @@ use shrev::EventChannel; /// /// What you want to instead is to use `restrict_mut()` to first /// get the entities which contain the component and then conditionally -/// modify the component after a call to `get_mut_unchecked()` or `get_mut()`. +/// modify the component after a call to `get_mut()` or `get_other_mut()`. /// /// # Examples /// @@ -99,7 +102,7 @@ use shrev::EventChannel; /// # let condition = true; /// for (entity, mut comps) in (&entities, &mut comps.restrict_mut()).join() { /// if condition { // check whether this component should be modified. -/// let mut comp = comps.get_mut_unchecked(); +/// let mut comp = comps.get_mut(); /// // ... 
/// } /// } @@ -165,7 +168,7 @@ use shrev::EventChannel; /// } /// ``` pub struct FlaggedStorage> { - channel: EventChannel, + channel: SyncUnsafeCell>, storage: T, #[cfg(feature = "storage-event-control")] event_emission: bool, @@ -190,7 +193,7 @@ where { fn default() -> Self { FlaggedStorage { - channel: EventChannel::::default(), + channel: SyncUnsafeCell::new(EventChannel::::default()), storage: T::unwrap_default(), #[cfg(feature = "storage-event-control")] event_emission: true, @@ -200,61 +203,79 @@ where } impl> UnprotectedStorage for FlaggedStorage { - #[cfg(feature = "nightly")] - type AccessMut<'a> - where - T: 'a, - = >::AccessMut<'a>; + type AccessMut<'a> = >::AccessMut<'a> where T: 'a; unsafe fn clean(&mut self, has: B) where B: BitSetLike, { - self.storage.clean(has); + // SAFETY: Requirements passed to caller. + unsafe { self.storage.clean(has) }; } unsafe fn get(&self, id: Index) -> &C { - self.storage.get(id) + // SAFETY: Requirements passed to caller. + unsafe { self.storage.get(id) } } - #[cfg(feature = "nightly")] unsafe fn get_mut(&mut self, id: Index) -> >::AccessMut<'_> { if self.emit_event() { - self.channel.single_write(ComponentEvent::Modified(id)); + self.channel + .get_mut() + .single_write(ComponentEvent::Modified(id)); } - self.storage.get_mut(id) + // SAFETY: Requirements passed to caller. + unsafe { self.storage.get_mut(id) } } - #[cfg(not(feature = "nightly"))] - unsafe fn get_mut(&mut self, id: Index) -> &mut C { + unsafe fn insert(&mut self, id: Index, comp: C) { if self.emit_event() { - self.channel.single_write(ComponentEvent::Modified(id)); + self.channel + .get_mut() + .single_write(ComponentEvent::Inserted(id)); } - self.storage.get_mut(id) + // SAFETY: Requirements passed to caller. + unsafe { self.storage.insert(id, comp) }; } - unsafe fn insert(&mut self, id: Index, comp: C) { + unsafe fn remove(&mut self, id: Index) -> C { if self.emit_event() { - self.channel.single_write(ComponentEvent::Inserted(id)); + self.channel + .get_mut() + .single_write(ComponentEvent::Removed(id)); } - self.storage.insert(id, comp); + // SAFETY: Requirements passed to caller. + unsafe { self.storage.remove(id) } } +} - unsafe fn remove(&mut self, id: Index) -> C { +impl> SharedGetMutStorage for FlaggedStorage { + unsafe fn shared_get_mut(&self, id: Index) -> >::AccessMut<'_> { if self.emit_event() { - self.channel.single_write(ComponentEvent::Removed(id)); + let channel_ptr = self.channel.get(); + // SAFETY: Caller required to ensure references returned from other + // safe methods such as Tracked::channel are no longer alive. This + // storage is not marked with a `DistinctStorage` impl. + unsafe { &mut *channel_ptr }.single_write(ComponentEvent::Modified(id)); } - self.storage.remove(id) + // SAFETY: Requirements passed to caller. + unsafe { self.storage.shared_get_mut(id) } } } impl Tracked for FlaggedStorage { fn channel(&self) -> &EventChannel { - &self.channel + let channel_ptr = self.channel.get(); + // SAFETY: The only place that mutably accesses the channel via a shared + // reference is the impl of `SharedGetMut::shared_get_mut` which + // requires callers to avoid calling other methods with `&self` while + // references returned there are still in use (and to ensure references + // from methods like this no longer exist). 
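+        // `SyncUnsafeCell` only makes this sharing expressible; soundness
+        // still rests entirely on those caller obligations.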
+ unsafe { &*channel_ptr } } fn channel_mut(&mut self) -> &mut EventChannel { - &mut self.channel + self.channel.get_mut() } #[cfg(feature = "storage-event-control")] diff --git a/src/storage/generic.rs b/src/storage/generic.rs index 68bfbefe6..84f81a196 100644 --- a/src/storage/generic.rs +++ b/src/storage/generic.rs @@ -1,11 +1,8 @@ -#[cfg(feature = "nightly")] -use crate::storage::UnprotectedStorage; +use crate::storage::{AccessMut, UnprotectedStorage}; use crate::{ storage::{AccessMutReturn, InsertResult, ReadStorage, WriteStorage}, world::{Component, Entity}, }; -#[cfg(feature = "nightly")] -use std::ops::DerefMut; pub struct Seal; @@ -87,8 +84,7 @@ pub trait GenericWriteStorage { /// The component type of the storage type Component: Component; /// The wrapper through with mutable access of a component is performed. - #[cfg(feature = "nightly")] - type AccessMut<'a>: DerefMut + type AccessMut<'a>: AccessMut where Self: 'a; @@ -120,11 +116,8 @@ impl<'a, T> GenericWriteStorage for WriteStorage<'a, T> where T: Component, { - #[cfg(feature = "nightly")] - type AccessMut<'b> - where - Self: 'b, - = <::Storage as UnprotectedStorage>::AccessMut<'b>; + type AccessMut<'b> = <::Storage as UnprotectedStorage>::AccessMut<'b> + where Self: 'b; type Component = T; fn get_mut(&mut self, entity: Entity) -> Option> { @@ -161,11 +154,8 @@ impl<'a: 'b, 'b, T> GenericWriteStorage for &'b mut WriteStorage<'a, T> where T: Component, { - #[cfg(feature = "nightly")] - type AccessMut<'c> - where - Self: 'c, - = <::Storage as UnprotectedStorage>::AccessMut<'c>; + type AccessMut<'c> = <::Storage as UnprotectedStorage>::AccessMut<'c> + where Self: 'c; type Component = T; fn get_mut(&mut self, entity: Entity) -> Option> { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index e08365ca2..0316cf76f 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -1,6 +1,5 @@ //! Component storage types, implementations for component joins, etc. 
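+// An overview sketch of the three join flavors implemented for the storage
+// types in this module (`pos` stands in for any `WriteStorage<Pos>`):
+//
+//     // Lending join: items borrow from the storage one at a time.
+//     let mut iter = (&mut pos).lend_join();
+//     while let Some(p) = iter.next() { /* ... */ }
+//
+//     // Plain join: a regular iterator.
+//     for p in (&mut pos).join() { /* ... */ }
+//
+//     // Parallel join (requires the `parallel` feature).
+//     (&mut pos).par_join().for_each(|p| { /* ... */ });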
-#[cfg(feature = "nightly")] pub use self::deref_flagged::{DerefFlaggedStorage, FlaggedAccessMut}; pub use self::{ data::{ReadStorage, WriteStorage}, @@ -8,17 +7,16 @@ pub use self::{ flagged::FlaggedStorage, generic::{GenericReadStorage, GenericWriteStorage}, restrict::{ - ImmutableParallelRestriction, MutableParallelRestriction, PairedStorage, RestrictedStorage, - SequentialRestriction, + PairedStorageRead, PairedStorageWriteExclusive, PairedStorageWriteShared, + RestrictedStorage, SharedGetOnly, }, storages::{ - BTreeStorage, DefaultVecStorage, DenseVecStorage, HashMapStorage, NullStorage, VecStorage, + BTreeStorage, DefaultVecStorage, DenseVecStorage, HashMapStorage, NullStorage, SliceAccess, + VecStorage, }, track::{ComponentEvent, Tracked}, }; -use self::storages::SliceAccess; - use std::{ self, marker::PhantomData, @@ -28,18 +26,20 @@ use std::{ use hibitset::{BitSet, BitSetLike, BitSetNot}; use shred::{CastFrom, Fetch}; +#[nougat::gat(Type)] +use crate::join::LendJoin; #[cfg(feature = "parallel")] use crate::join::ParJoin; use crate::{ error::{Error, WrongGeneration}, - join::Join, - world::{Component, EntitiesRes, Entity, Generation, Index}, + join::{Join, RepeatableLendGet}, + world::{Component, EntitiesRes, Entity, Index}, }; use self::drain::Drain; +use self::sync_unsafe_cell::SyncUnsafeCell; mod data; -#[cfg(feature = "nightly")] mod deref_flagged; mod drain; mod entry; @@ -47,39 +47,64 @@ mod flagged; mod generic; mod restrict; mod storages; +mod sync_unsafe_cell; #[cfg(test)] mod tests; mod track; -#[cfg(feature = "nightly")] type AccessMutReturn<'a, T> = <::Storage as UnprotectedStorage>::AccessMut<'a>; -#[cfg(not(feature = "nightly"))] -type AccessMutReturn<'a, T> = &'a mut T; /// An inverted storage type, only useful to iterate entities /// that do not have a particular component type. pub struct AntiStorage<'a>(pub &'a BitSet); -impl<'a> Join for AntiStorage<'a> { +// SAFETY: Items are just `()` and it is always safe to retrieve them regardless +// of the mask and value returned by `open`. +#[nougat::gat] +unsafe impl<'a> LendJoin for AntiStorage<'a> { + type Mask = BitSetNot<&'a BitSet>; + type Type<'next> = (); + type Value = (); + + unsafe fn open(self) -> (Self::Mask, ()) { + (BitSetNot(self.0), ()) + } + + unsafe fn get<'next>(_: &'next mut (), _: Index) {} +} + +// SAFETY: ::get does nothing. +unsafe impl RepeatableLendGet for AntiStorage<'_> {} + +// SAFETY: Items are just `()` and it is always safe to retrieve them regardless +// of the mask and value returned by `open`. +unsafe impl<'a> Join for AntiStorage<'a> { type Mask = BitSetNot<&'a BitSet>; type Type = (); type Value = (); - // SAFETY: No invariants to meet and no unsafe code. unsafe fn open(self) -> (Self::Mask, ()) { (BitSetNot(self.0), ()) } - // SAFETY: No invariants to meet and no unsafe code. unsafe fn get(_: &mut (), _: Index) {} } -// SAFETY: Since `get` does not do any memory access, this is safe to implement. -unsafe impl<'a> DistinctStorage for AntiStorage<'a> {} - -// SAFETY: Since `get` does not do any memory access, this is safe to implement. +// SAFETY: Since `get` does not do anything it is safe to concurrently call. +// Items are just `()` and it is always safe to retrieve them regardless of the +// mask and value returned by `open`. 
#[cfg(feature = "parallel")] -unsafe impl<'a> ParJoin for AntiStorage<'a> {} +unsafe impl<'a> ParJoin for AntiStorage<'a> { + type Mask = BitSetNot<&'a BitSet>; + type Type = (); + type Value = (); + + unsafe fn open(self) -> (Self::Mask, ()) { + (BitSetNot(self.0), ()) + } + + unsafe fn get(_: &(), _: Index) {} +} /// A dynamic storage. pub trait AnyStorage { @@ -87,15 +112,13 @@ pub trait AnyStorage { fn drop(&mut self, entities: &[Entity]); } +// SAFETY: Returned pointer has a vtable valid for `T` and retains the same +// address/provenance. unsafe impl CastFrom for dyn AnyStorage where T: AnyStorage + 'static, { - fn cast(t: &T) -> &Self { - t - } - - fn cast_mut(t: &mut T) -> &mut Self { + fn cast(t: *mut T) -> *mut Self { t } } @@ -113,8 +136,10 @@ where /// This is a marker trait which requires you to uphold the following guarantee: /// -/// > Multiple threads may call `get_mut()` with distinct indices without -/// causing > undefined behavior. +/// # Safety +/// +/// > Multiple threads may call `SharedGetMutStorage::shared_get_mut()` +/// with distinct indices without causing > undefined behavior. /// /// This is for example valid for `Vec`: /// @@ -122,15 +147,14 @@ where /// vec![1, 2, 3]; /// ``` /// -/// We may modify both element 1 and 2 at the same time; indexing the vector -/// mutably does not modify anything else than the respective elements. +/// We may modify both element 1 and 2 at the same time. /// /// As a counter example, we may have some kind of cached storage; it caches /// elements when they're retrieved, so pushes a new element to some /// cache-vector. This storage is not allowed to implement `DistinctStorage`. /// /// Implementing this trait marks the storage safe for concurrent mutation (of -/// distinct elements), thus allows `join_par()`. +/// distinct elements), thus allows `par_join()`. pub unsafe trait DistinctStorage {} /// The status of an `insert()`ion into a storage. @@ -173,11 +197,15 @@ impl MaskedStorage { /// Clear the contents of this storage. pub fn clear(&mut self) { - // SAFETY: `self.mask` is the correct mask as specified. - unsafe { - self.inner.clean(&self.mask); - } - self.mask.clear(); + // NOTE: We replace with default empty mask temporarily to protect against + // unwinding from `Drop` of components. + let mut mask_temp = core::mem::take(&mut self.mask); + // SAFETY: `self.mask` is the correct mask as specified. We swap in a + // temporary empty mask to ensure if this unwinds that the mask will be + // cleared. + unsafe { self.inner.clean(&mask_temp) }; + mask_temp.clear(); + self.mask = mask_temp; } /// Remove an element by a given index. @@ -193,7 +221,8 @@ impl MaskedStorage { /// Drop an element by a given index. pub fn drop(&mut self, id: Index) { if self.mask.remove(id) { - // SAFETY: We checked the mask (`remove` returned `true`) + // SAFETY: We checked the mask and removed the id before calling + // drop (`remove` returned `true`). unsafe { self.inner.drop(id); } @@ -330,7 +359,9 @@ where /// Tries to mutate the data associated with an `Entity`. pub fn get_mut(&mut self, e: Entity) -> Option> { if self.data.mask.contains(e.id()) && self.entities.is_alive(e) { - // SAFETY: We checked the mask, so all invariants are met. + // SAFETY: We have exclusive access (which ensures no aliasing or + // concurrent calls from other threads) and we checked the mask, + // thus it's safe to call. 
Some(unsafe { self.data.inner.get_mut(e.id()) }) } else { None @@ -347,13 +378,13 @@ where if self.entities.is_alive(e) { let id = e.id(); if self.data.mask.contains(id) { - // SAFETY: We checked the mask, so all invariants are met. - std::mem::swap(&mut v, unsafe { self.data.inner.get_mut(id).deref_mut() }); + // SAFETY: `id` is in the mask. + std::mem::swap(&mut v, unsafe { self.data.inner.get_mut(id) }.access_mut()); Ok(Some(v)) } else { - self.data.mask.add(id); - // SAFETY: The mask was previously empty, so it is safe to insert. - unsafe { self.data.inner.insert(id, v) }; + // SAFETY: The mask was previously empty, so this is safe to + // call. + unsafe { self.not_present_insert(id, v) } Ok(None) } } else { @@ -365,6 +396,40 @@ where } } + /// Insert the provided value at `id` and adds `id` to the mask. + /// + /// # Safety + /// + /// May only be called if `id` is not present in the mask. + #[inline(always)] + unsafe fn not_present_insert(&mut self, id: Index, value: T) { + // SAFETY: The mask was previously empty, so it is safe to + // insert. We immediately add the value to the mask below and + // unwinding from the `insert` call means that we don't need to + // include the value in the mask. If adding to the mask unwinds we + // remove the value via a drop guard. + // NOTE: We rely on any panics in `Bitset::add` leaving the bitset in + // the same state as before `add` was called! + unsafe { self.data.inner.insert(id, value) }; + if cfg!(panic = "abort") { + self.data.mask.add(id); + } else { + struct RemoveOnDrop<'a, T: Component>(&'a mut MaskedStorage, Index); + impl<'a, T: Component> Drop for RemoveOnDrop<'a, T> { + fn drop(&mut self) { + // SAFETY: We just inserted a value here above and failed to + // add it to the bitset. + unsafe { + self.0.inner.remove(self.1); + } + } + } + let guard = RemoveOnDrop(&mut self.data, id); + guard.0.mask.add(id); + core::mem::forget(guard); + } + } + /// Removes the data associated with an `Entity`. pub fn remove(&mut self, e: Entity) -> Option { if self.entities.is_alive(e) { @@ -394,48 +459,80 @@ impl<'a, T, D: Clone> Clone for Storage<'a, T, D> { } } -// SAFETY: This is safe, since `T::Storage` is `DistinctStorage` and `Join::get` -// only accesses the storage and nothing else. -unsafe impl<'a, T: Component, D> DistinctStorage for Storage<'a, T, D> where - T::Storage: DistinctStorage +impl<'a, 'e, T, D> Not for &'a Storage<'e, T, D> +where + T: Component, + D: Deref>, { + type Output = AntiStorage<'a>; + + fn not(self) -> Self::Output { + AntiStorage(&self.data.mask) + } } -impl<'a, 'e, T, D> Join for &'a Storage<'e, T, D> +// SAFETY: The mask and unprotected storage contained in `MaskedStorage` +// correspond and `open` returns references to them from the same +// `MaskedStorage` instance. Iterating the mask does not repeat indices. +#[nougat::gat] +unsafe impl<'a, 'e, T, D> LendJoin for &'a Storage<'e, T, D> where T: Component, D: Deref>, { type Mask = &'a BitSet; - type Type = &'a T; + type Type<'next> = &'a T; type Value = &'a T::Storage; - // SAFETY: No unsafe code and no invariants. unsafe fn open(self) -> (Self::Mask, Self::Value) { (&self.data.mask, &self.data.inner) } - // SAFETY: Since we require that the mask was checked, an element for `i` must - // have been inserted without being removed. 
- unsafe fn get(v: &mut Self::Value, i: Index) -> &'a T { - v.get(i) + unsafe fn get<'next>(v: &'next mut Self::Value, i: Index) -> &'a T { + // SAFETY: Since we require that the mask was checked, an element for + // `i` must have been inserted without being removed. + unsafe { v.get(i) } } } -impl<'a, 'e, T, D> Not for &'a Storage<'e, T, D> +// SAFETY: LendJoin::get impl for this type is safe to call multiple times with +// the same ID. +unsafe impl<'a, 'e, T, D> RepeatableLendGet for &'a Storage<'e, T, D> where T: Component, D: Deref>, { - type Output = AntiStorage<'a>; +} - fn not(self) -> Self::Output { - AntiStorage(&self.data.mask) +// SAFETY: The mask and unprotected storage contained in `MaskedStorage` +// correspond and `open` returns references to them from the same +// `MaskedStorage` instance. Iterating the mask does not repeat indices. +unsafe impl<'a, 'e, T, D> Join for &'a Storage<'e, T, D> +where + T: Component, + D: Deref>, +{ + type Mask = &'a BitSet; + type Type = &'a T; + type Value = &'a T::Storage; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + (&self.data.mask, &self.data.inner) + } + + unsafe fn get(v: &mut Self::Value, i: Index) -> &'a T { + // SAFETY: Since we require that the mask was checked, an element for + // `i` must have been inserted without being removed. + unsafe { v.get(i) } } } -// SAFETY: This is always safe because immutable access can in no case cause -// memory issues, even if access to common memory occurs. +// SAFETY: It is safe to call `::get` from +// multiple threads at once since `T::Storage: Sync`. +// +// The mask and unprotected storage contained in `MaskedStorage` correspond and +// `open` returns references to them from the same `MaskedStorage` instance. +// Iterating the mask does not repeat indices. #[cfg(feature = "parallel")] unsafe impl<'a, 'e, T, D> ParJoin for &'a Storage<'e, T, D> where @@ -443,40 +540,178 @@ where D: Deref>, T::Storage: Sync, { + type Mask = &'a BitSet; + type Type = &'a T; + type Value = &'a T::Storage; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + (&self.data.mask, &self.data.inner) + } + + unsafe fn get(v: &Self::Value, i: Index) -> &'a T { + // SAFETY: Since we require that the mask was checked, an element for + // `i` must have been inserted without being removed. + unsafe { v.get(i) } + } } -impl<'a, 'e, T, D> Join for &'a mut Storage<'e, T, D> +// SAFETY: The mask and unprotected storage contained in `MaskedStorage` +// correspond and `open` returns references to them from the same +// `MaskedStorage` instance. Iterating the mask does not repeat indices. +#[nougat::gat] +unsafe impl<'a, 'e, T, D> LendJoin for &'a mut Storage<'e, T, D> where T: Component, D: DerefMut>, { type Mask = &'a BitSet; - type Type = AccessMutReturn<'a, T>; + type Type<'next> = AccessMutReturn<'next, T>; type Value = &'a mut T::Storage; - // SAFETY: No unsafe code and no invariants to fulfill. unsafe fn open(self) -> (Self::Mask, Self::Value) { self.data.open_mut() } - // TODO: audit unsafe - unsafe fn get(v: &mut Self::Value, i: Index) -> Self::Type { - // This is horribly unsafe. Unfortunately, Rust doesn't provide a way - // to abstract mutable/immutable state at the moment, so we have to hack - // our way through it. - let value: *mut Self::Value = v as *mut Self::Value; - (*value).get_mut(i) + unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> Self::Type<'next> { + // SAFETY: Since we require that the mask was checked, an element for + // `id` must have been inserted without being removed. 
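+        // The lending signature (`&'next mut Self::Value`) additionally
+        // guarantees the previously returned item is dropped before `get`
+        // can be called again.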
+ unsafe { value.get_mut(id) } } } -// SAFETY: This is safe because of the `DistinctStorage` guarantees. +// SAFETY: LendJoin::get impl for this type is safe to call multiple times with +// the same ID. +unsafe impl<'a, 'e, T, D> RepeatableLendGet for &'a mut Storage<'e, T, D> +where + T: Component, + D: DerefMut>, +{ +} + +mod shared_get_mut_only { + use super::{Index, SharedGetMutStorage, UnprotectedStorage}; + use core::marker::PhantomData; + + /// This type provides a way to ensure only `shared_get_mut` can be called + /// for the lifetime `'a` and that no references previously obtained from + /// the storage exist when it is created. While internally this is a shared + /// reference, constructing it requires an exclusive borrow for the lifetime + /// `'a`. + /// + /// This is useful for implementations of [`Join`](super::Join) and + /// [`ParJoin`](super::ParJoin). + pub struct SharedGetMutOnly<'a, T, S>(&'a S, PhantomData); + + impl<'a, T, S> SharedGetMutOnly<'a, T, S> { + pub(crate) fn new(storage: &'a mut S) -> Self { + Self(storage, PhantomData) + } + + /// # Safety + /// + /// May only be called after a call to `insert` with `id` and no + /// following call to `remove` with `id` or to `clean`. + /// + /// A mask should keep track of those states, and an `id` being + /// contained in the tracking mask is sufficient to call this method. + /// + /// There must be no extant aliasing references to this component (i.e. + /// obtained with the same `id`). + /// + /// Unless `S: DistinctStorage`, calling this from multiple threads at + /// once is unsound. + pub(crate) unsafe fn get_mut( + this: &Self, + id: Index, + ) -> >::AccessMut<'a> + where + S: SharedGetMutStorage, + { + // SAFETY: `Self::new` takes an exclusive reference to this storage, + // ensuring there are no extant references to its content at the + // time `self` is created and ensuring that only `self` has access + // to the storage for its lifetime and the lifetime of the produced + // `AccessMutReturn`s (the reference we hold to the storage is not + // exposed outside of this module). + // + // This means we only have to worry about aliasing references being + // produced by calling `SharedGetMutStorage::shared_get_mut`. + // Ensuring these don't alias and the remaining safety requirements + // are passed on to the caller. + unsafe { this.0.shared_get_mut(id) } + } + } +} +pub use shared_get_mut_only::SharedGetMutOnly; + +// SAFETY: The mask and unprotected storage contained in `MaskedStorage` +// correspond and `open` returns references to them from the same +// `MaskedStorage` instance (the storage is wrapped in `SharedGetMutOnly`). +// Iterating the mask does not repeat indices. +unsafe impl<'a, 'e, T, D> Join for &'a mut Storage<'e, T, D> +where + T: Component, + D: DerefMut>, + T::Storage: SharedGetMutStorage, +{ + type Mask = &'a BitSet; + type Type = AccessMutReturn<'a, T>; + type Value = SharedGetMutOnly<'a, T, T::Storage>; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + let (mask, value) = self.data.open_mut(); + let value = SharedGetMutOnly::new(value); + (mask, value) + } + + unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type { + // SAFETY: + // * Since we require that the mask was checked, an element for `id` must have + // been inserted without being removed. + // * We also require that there are no subsequent calls with the same `id` for + // this instance of the values from `open`, so there are no extant references + // for the element corresponding to this `id`. 
+ // * Since we have an exclusive reference to `Self::Value`, we know this isn't + // being called from multiple threads at once. + unsafe { SharedGetMutOnly::get_mut(value, id) } + } +} + +// SAFETY: It is safe to call `SharedGetMutOnly<'a, T>::get_mut` from multiple +// threads at once since `T::Storage: DistinctStorage`. +// +// The mask and unprotected storage contained in `MaskedStorage` correspond and +// `open` returns references to them from the same `MaskedStorage` instance (the +// storage is wrapped in `SharedGetMutOnly`). Iterating the mask does not repeat +// indices. #[cfg(feature = "parallel")] unsafe impl<'a, 'e, T, D> ParJoin for &'a mut Storage<'e, T, D> where T: Component, D: DerefMut>, - T::Storage: Sync + DistinctStorage, + T::Storage: Sync + SharedGetMutStorage + DistinctStorage, { + type Mask = &'a BitSet; + type Type = AccessMutReturn<'a, T>; + type Value = SharedGetMutOnly<'a, T, T::Storage>; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + let (mask, value) = self.data.open_mut(); + let value = SharedGetMutOnly::new(value); + (mask, value) + } + + unsafe fn get(value: &Self::Value, id: Index) -> Self::Type { + // SAFETY: + // * Since we require that the mask was checked, an element for `id` must have + // been inserted without being removed. + // * We also require that the returned value is no longer alive before + // subsequent calls with the same `id`, so there are no extant references that + // were obtained with the same `id`. + // * `T::Storage` implements the unsafe trait `DistinctStorage` so it is safe to + // call this from multiple threads at once. + unsafe { SharedGetMutOnly::get_mut(value, id) } + } } /// Tries to create a default value, returns an `Err` with the name of the @@ -503,75 +738,96 @@ where } } +/// DerefMut without autoderefing. +/// +/// Allows forcing mutable access to be explicit. Useful to implement a flagged +/// storage where it is easier to discover sites where components are marked as +/// mutated. Of course, individual storages can use an associated `AccessMut` +/// type that also implements `DerefMut`, but this provides the common +/// denominator. +pub trait AccessMut: core::ops::Deref { + /// This may generate a mutation event for certain flagged storages. + fn access_mut(&mut self) -> &mut Self::Target; +} + +impl AccessMut for T +where + T: core::ops::DerefMut, +{ + fn access_mut(&mut self) -> &mut Self::Target { + &mut *self + } +} + /// Used by the framework to quickly join components. pub trait UnprotectedStorage: TryDefault { /// The wrapper through with mutable access of a component is performed. - #[cfg(feature = "nightly")] - type AccessMut<'a>: DerefMut + type AccessMut<'a>: AccessMut where Self: 'a; - /// Clean the storage given a bitset with bits set for valid indices. - /// Allows us to safely drop the storage. + /// Clean the storage given a bitset with bits set for valid indices + /// dropping all existing components. + /// + /// Allows us to drop the storage without leaking components. /// /// # Safety /// /// May only be called with the mask which keeps track of the elements /// existing in this storage. + /// + /// If this unwinds (e.g. due to a drop impl panicing), the mask should + /// still be cleared. unsafe fn clean(&mut self, has: B) where B: BitSetLike; - /// Tries reading the data associated with an `Index`. - /// This is unsafe because the external set used - /// to protect this storage is absent. + /// Gets a shared reference to the data associated with an `Index`. 
+ /// + /// This is unsafe because the external set used to protect this storage is + /// absent. /// /// # Safety /// /// May only be called after a call to `insert` with `id` and - /// no following call to `remove` with `id`. + /// no following call to `remove` with `id` or to `clean`. /// /// A mask should keep track of those states, and an `id` being contained /// in the tracking mask is sufficient to call this method. unsafe fn get(&self, id: Index) -> &T; - /// Tries mutating the data associated with an `Index`. - /// This is unsafe because the external set used - /// to protect this storage is absent. + /// Gets mutable access to the the data associated with an `Index`. /// - /// # Safety + /// This doesn't necessarily directly return a `&mut` reference. This + /// allows storages more flexibility. For example, some flagged storages + /// utilize this to defer generation of mutation events until the user + /// obtains an `&mut` reference out of the returned wrapper type. /// - /// May only be called after a call to `insert` with `id` and - /// no following call to `remove` with `id`. - /// - /// A mask should keep track of those states, and an `id` being contained - /// in the tracking mask is sufficient to call this method. - #[cfg(feature = "nightly")] - unsafe fn get_mut(&mut self, id: Index) -> Self::AccessMut<'_>; - - /// Tries mutating the data associated with an `Index`. - /// This is unsafe because the external set used - /// to protect this storage is absent. + /// This is unsafe because the external set used to protect this storage is + /// absent. /// /// # Safety /// - /// May only be called after a call to `insert` with `id` and - /// no following call to `remove` with `id`. + /// May only be called after a call to `insert` with `id` and no following + /// call to `remove` with `id` or to `clean`. /// - /// A mask should keep track of those states, and an `id` being contained - /// in the tracking mask is sufficient to call this method. - #[cfg(not(feature = "nightly"))] - unsafe fn get_mut(&mut self, id: Index) -> &mut T; + /// A mask should keep track of those states, and an `id` being contained in + /// the tracking mask is sufficient to call this method. + unsafe fn get_mut(&mut self, id: Index) -> Self::AccessMut<'_>; /// Inserts new data for a given `Index`. /// /// # Safety /// /// May only be called if `insert` was not called with `id` before, or - /// was reverted by a call to `remove` with `id. + /// was reverted by a call to `remove` with `id` or a call to `clean`. /// /// A mask should keep track of those states, and an `id` missing from the /// mask is sufficient to call `insert`. + /// + /// If this call unwinds the insertion should be considered to have failed + /// and not be included in the mask or count as having called `insert` for + /// the safety requirements of other methods here. unsafe fn insert(&mut self, id: Index, value: T); /// Removes the data associated with an `Index`. @@ -591,11 +847,52 @@ pub trait UnprotectedStorage: TryDefault { /// /// May only be called if an element with `id` was `insert`ed and not yet /// removed / dropped. + /// + /// Caller must ensure this is cleared from the mask even if the drop impl + /// of the component panics and this unwinds. Usually, this can be + /// accomplished by removing the id from the mask just before calling this. unsafe fn drop(&mut self, id: Index) { - self.remove(id); + // SAFETY: Requirements passed to the caller. 
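+        // The component returned by `remove` is discarded, and therefore
+        // dropped, at the end of this statement.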
+        unsafe { self.remove(id) };
     }
 }

+/// Used by the framework to mutably access components in contexts where
+/// exclusive access to the storage is not possible.
+pub trait SharedGetMutStorage<T>: UnprotectedStorage<T> {
+    /// Gets mutable access to the data associated with an `Index`.
+    ///
+    /// This is unsafe because the external set used to protect this storage is
+    /// absent and because it doesn't protect against concurrent calls from
+    /// multiple threads, so aliasing must manually be managed.
+    ///
+    /// # Safety
+    ///
+    /// May only be called after a call to `insert` with `id` and no following
+    /// call to `remove` with `id` or to `clean`.
+    ///
+    /// A mask should keep track of those states, and an `id` being contained in
+    /// the tracking mask is sufficient to call this method.
+    ///
+    /// There must be no extant aliasing references to this component (i.e.
+    /// obtained with the same `id`). Additionally, references obtained from
+    /// methods on this type that take `&self` (e.g. [`SliceAccess::as_slice`],
+    /// [`Tracked::channel`]) must no longer be alive when
+    /// `shared_get_mut` is called and these methods must not be
+    /// called while the references returned here are alive. An exception is
+    /// made for [`UnprotectedStorage::get`] as long as the live references it
+    /// has returned do not alias with live references returned here.
+    ///
+    /// Essentially, the `unsafe` code calling this must hold exclusive access
+    /// of the storage at some level to ensure only known code is calling
+    /// `&self` methods during the usage of this method and the references it
+    /// produces.
+    ///
+    /// Unless this type implements `DistinctStorage`, calling this from
+    /// multiple threads at once is unsound.
+    unsafe fn shared_get_mut(&self, id: Index) -> <Self as UnprotectedStorage<T>>::AccessMut<'_>;
+}
+
 #[cfg(test)]
 #[cfg(feature = "parallel")]
 mod tests_inline {
diff --git a/src/storage/restrict.rs b/src/storage/restrict.rs
index 6ec1c5f79..a15cc8f96 100644
--- a/src/storage/restrict.rs
+++ b/src/storage/restrict.rs
@@ -7,36 +7,20 @@ use std::{
 use hibitset::BitSet;
 use shred::Fetch;

-use crate::join::Join;
+#[nougat::gat(Type)]
+use crate::join::LendJoin;
+use crate::join::{Join, RepeatableLendGet};
 #[cfg(feature = "parallel")]
 use crate::join::ParJoin;
 use crate::{
-    storage::{AccessMutReturn, MaskedStorage, Storage, UnprotectedStorage},
+    storage::{
+        AccessMutReturn, DistinctStorage, MaskedStorage, SharedGetMutStorage, Storage,
+        UnprotectedStorage,
+    },
     world::{Component, EntitiesRes, Entity, Index},
 };

-/// Specifies that the `RestrictedStorage` cannot run in parallel.
-///
-/// A mutable `RestrictedStorage` can call `get`, `get_mut`, `get_unchecked` and
-/// `get_mut_unchecked` for deferred/restricted access while an immutable
-/// version can only call the immutable accessors.
-pub enum SequentialRestriction {}
-/// Specifies that the `RestrictedStorage` can run in parallel mutably.
-///
-/// This means the storage can only call `get_mut_unchecked` and
-/// `get_unchecked`.
-pub enum MutableParallelRestriction {}
-/// Specifies that the `RestrictedStorage` can run in parallel immutably.
-///
-/// This means that the storage can call `get`, `get_unchecked`.
-pub enum ImmutableParallelRestriction {}
-
-/// Restrictions that are allowed to access `RestrictedStorage::get`.
-pub trait ImmutableAliasing: Sized {} -impl ImmutableAliasing for SequentialRestriction {} -impl ImmutableAliasing for ImmutableParallelRestriction {} - /// Similar to a `MaskedStorage` and a `Storage` combined, but restricts usage /// to only getting and modifying the components. That means it's not possible /// to modify the inner bitset so the iteration cannot be invalidated. In other @@ -58,240 +42,594 @@ impl ImmutableAliasing for ImmutableParallelRestriction {} /// fn run(&mut self, (entities, mut some_comps): Self::SystemData) { /// for (entity, mut comps) in (&entities, &mut some_comps.restrict_mut()).join() { /// // Check if the reference is fine to mutate. -/// if comps.get_unchecked().0 < 5 { +/// if comps.get().0 < 5 { /// // Get a mutable reference now. -/// let mut mutable = comps.get_mut_unchecked(); +/// let mut mutable = comps.get_mut(); /// mutable.0 += 1; /// } /// } /// } /// } /// ``` -pub struct RestrictedStorage<'rf, 'st: 'rf, C, S, B, Restrict> -where - C: Component, - S: Borrow + 'rf, - B: Borrow + 'rf, -{ - bitset: B, +pub struct RestrictedStorage<'rf, C, S> { + bitset: &'rf BitSet, data: S, - entities: &'rf Fetch<'st, EntitiesRes>, - phantom: PhantomData<(C, Restrict)>, + entities: &'rf Fetch<'rf, EntitiesRes>, + phantom: PhantomData, } -#[cfg(feature = "parallel")] -unsafe impl<'rf, 'st: 'rf, C, S, B> ParJoin - for &'rf mut RestrictedStorage<'rf, 'st, C, S, B, MutableParallelRestriction> +impl Storage<'_, T, D> where - C: Component, - S: BorrowMut + 'rf, - B: Borrow + 'rf, + T: Component, + D: Deref>, { + /// Builds an immutable `RestrictedStorage` out of a `Storage`. Allows + /// deferred unchecked access to the entity's component. + /// + /// This is returned as a `ParallelRestriction` version since you can only + /// get immutable components with this which is safe for parallel by + /// default. + pub fn restrict<'rf>(&'rf self) -> RestrictedStorage<'rf, T, &T::Storage> { + RestrictedStorage { + bitset: &self.data.mask, + data: &self.data.inner, + entities: &self.entities, + phantom: PhantomData, + } + } } -#[cfg(feature = "parallel")] -unsafe impl<'rf, 'st: 'rf, C, S, B, Restrict> ParJoin - for &'rf RestrictedStorage<'rf, 'st, C, S, B, Restrict> +impl Storage<'_, T, D> where - C: Component, - S: Borrow + 'rf, - B: Borrow + 'rf, - Restrict: ImmutableAliasing, + T: Component, + D: DerefMut>, { + /// Builds a mutable `RestrictedStorage` out of a `Storage`. Allows + /// restricted access to the inner components without allowing + /// invalidating the bitset for iteration in `Join`. + pub fn restrict_mut<'rf>(&'rf mut self) -> RestrictedStorage<'rf, T, &mut T::Storage> { + let (mask, data) = self.data.open_mut(); + RestrictedStorage { + bitset: mask, + data, + entities: &self.entities, + phantom: PhantomData, + } + } } -impl<'rf, 'st: 'rf, C, S, B, Restrict> Join for &'rf RestrictedStorage<'rf, 'st, C, S, B, Restrict> +// SAFETY: `open` returns references to corresponding mask and storage values +// contained in the wrapped `Storage`. Iterating the mask does not repeat +// indices. 
+#[nougat::gat] +unsafe impl<'rf, C, S> LendJoin for &'rf RestrictedStorage<'rf, C, S> where C: Component, S: Borrow, - B: Borrow, { type Mask = &'rf BitSet; - type Type = PairedStorage<'rf, 'st, C, &'rf C::Storage, &'rf BitSet, Restrict>; - type Value = (&'rf C::Storage, &'rf Fetch<'st, EntitiesRes>, &'rf BitSet); + type Type<'next> = PairedStorageRead<'rf, C>; + type Value = (&'rf C::Storage, &'rf Fetch<'rf, EntitiesRes>, &'rf BitSet); unsafe fn open(self) -> (Self::Mask, Self::Value) { - let bitset = self.bitset.borrow(); - (bitset, (self.data.borrow(), self.entities, bitset)) + ( + self.bitset, + (self.data.borrow(), self.entities, self.bitset), + ) } - unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type { - PairedStorage { + unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> Self::Type<'next> { + // NOTE: Methods on this type rely on safety requiments of this method. + PairedStorageRead { index: id, storage: value.0, entities: value.1, bitset: value.2, - phantom: PhantomData, } } } -impl<'rf, 'st: 'rf, C, S, B, Restrict> Join - for &'rf mut RestrictedStorage<'rf, 'st, C, S, B, Restrict> +// SAFETY: LendJoin::get impl for this type can safely be called multiple times +// with the same ID. +unsafe impl<'rf, C, S> RepeatableLendGet for &'rf RestrictedStorage<'rf, C, S> +where + C: Component, + S: Borrow, +{ +} + +// SAFETY: `open` returns references to corresponding mask and storage values +// contained in the wrapped `Storage`. Iterating the mask does not repeat +// indices. +#[nougat::gat] +unsafe impl<'rf, C, S> LendJoin for &'rf mut RestrictedStorage<'rf, C, S> where C: Component, S: BorrowMut, - B: Borrow, { type Mask = &'rf BitSet; - type Type = PairedStorage<'rf, 'st, C, &'rf mut C::Storage, &'rf BitSet, Restrict>; + type Type<'next> = PairedStorageWriteExclusive<'next, C>; type Value = ( &'rf mut C::Storage, - &'rf Fetch<'st, EntitiesRes>, + &'rf Fetch<'rf, EntitiesRes>, &'rf BitSet, ); unsafe fn open(self) -> (Self::Mask, Self::Value) { - let bitset = self.bitset.borrow(); - (bitset, (self.data.borrow_mut(), self.entities, bitset)) + ( + self.bitset, + (self.data.borrow_mut(), self.entities, self.bitset), + ) } - unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type { - let value: &'rf mut Self::Value = &mut *(value as *mut Self::Value); - PairedStorage { + unsafe fn get<'next>(value: &'next mut Self::Value, id: Index) -> Self::Type<'next> { + // NOTE: Methods on this type rely on safety requiments of this method. + PairedStorageWriteExclusive { index: id, storage: value.0, entities: value.1, bitset: value.2, - phantom: PhantomData, } } } -impl<'st, T, D> Storage<'st, T, D> +// SAFETY: LendJoin::get impl for this type can safely be called multiple times +// with the same ID. +unsafe impl<'rf, C, S> RepeatableLendGet for &'rf mut RestrictedStorage<'rf, C, S> where - T: Component, - D: Deref>, + C: Component, + S: BorrowMut, { - /// Builds an immutable `RestrictedStorage` out of a `Storage`. Allows - /// deferred unchecked access to the entity's component. - /// - /// This is returned as a `ParallelRestriction` version since you can only - /// get immutable components with this which is safe for parallel by - /// default. 
-    pub fn restrict<'rf>(
-        &'rf self,
-    ) -> RestrictedStorage<'rf, 'st, T, &T::Storage, &BitSet, ImmutableParallelRestriction> {
-        RestrictedStorage {
-            bitset: &self.data.mask,
-            data: &self.data.inner,
-            entities: &self.entities,
-            phantom: PhantomData,
-        }
-    }
 }

-impl<'st, T, D> Storage<'st, T, D>
+// SAFETY: `open` returns references to corresponding mask and storage values
+// contained in the wrapped `Storage`. Iterating the mask does not repeat
+// indices.
+unsafe impl<'rf, C, S> Join for &'rf RestrictedStorage<'rf, C, S>
 where
-    T: Component,
-    D: DerefMut<Target = MaskedStorage<T>>,
+    C: Component,
+    S: Borrow<C::Storage>,
 {
-    /// Builds a mutable `RestrictedStorage` out of a `Storage`. Allows
-    /// restricted access to the inner components without allowing
-    /// invalidating the bitset for iteration in `Join`.
-    pub fn restrict_mut<'rf>(
-        &'rf mut self,
-    ) -> RestrictedStorage<'rf, 'st, T, &mut T::Storage, &BitSet, SequentialRestriction> {
-        let (mask, data) = self.data.open_mut();
-        RestrictedStorage {
-            bitset: mask,
-            data,
-            entities: &self.entities,
-            phantom: PhantomData,
+    type Mask = &'rf BitSet;
+    type Type = PairedStorageRead<'rf, C>;
+    type Value = (&'rf C::Storage, &'rf Fetch<'rf, EntitiesRes>, &'rf BitSet);
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        (
+            self.bitset,
+            (self.data.borrow(), self.entities, self.bitset),
+        )
+    }
+
+    unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type {
+        // NOTE: Methods on this type rely on safety requirements of this method.
+        PairedStorageRead {
+            index: id,
+            storage: value.0,
+            entities: value.1,
+            bitset: value.2,
         }
     }
+}

-    /// Builds a mutable, parallel `RestrictedStorage`,
-    /// does not allow mutably getting other components
-    /// aside from the current iteration.
-    pub fn par_restrict_mut<'rf>(
-        &'rf mut self,
-    ) -> RestrictedStorage<'rf, 'st, T, &mut T::Storage, &BitSet, MutableParallelRestriction> {
-        let (mask, data) = self.data.open_mut();
-        RestrictedStorage {
-            bitset: mask,
-            data,
-            entities: &self.entities,
-            phantom: PhantomData,
+mod shared_get_only {
+    use super::{DistinctStorage, Index, SharedGetMutStorage, UnprotectedStorage};
+    use core::marker::PhantomData;
+
+    /// This type provides a way to ensure only `shared_get_mut` and `get` can
+    /// be called for the lifetime `'a` and that no references previously
+    /// obtained from the storage exist when it is created. While internally
+    /// this is a shared reference, constructing it requires an exclusive borrow
+    /// for the lifetime `'a`.
+    ///
+    /// This is useful for implementation of [`Join`](super::Join) and
+    /// [`ParJoin`](super::ParJoin) for `&mut RestrictedStorage`.
+    pub struct SharedGetOnly<'a, T, S>(&'a S, PhantomData<T>);
+
+    // SAFETY: All fields are required to be `Send` in the where clause. This
+    // also requires `S: DistinctStorage` so that we can freely duplicate
+    // `SharedGetOnly` while preventing `get_mut` from being called from multiple
+    // threads at once.
+    unsafe impl<'a, T, S> Send for SharedGetOnly<'a, T, S>
+    where
+        for<'b> &'b S: Send,
+        PhantomData<T>: Send,
+        S: DistinctStorage,
+    {
+    }
+    // SAFETY: See above.
+    // NOTE: A limitation of this is that `PairedStorageWriteShared` is not `Sync` in
+    // some cases where it would be fine (we can address this if it is an issue).
+ unsafe impl<'a, T, S> Sync for SharedGetOnly<'a, T, S> + where + for<'b> &'b S: Sync, + PhantomData: Sync, + S: DistinctStorage, + { + } + + impl<'a, T, S> SharedGetOnly<'a, T, S> { + pub(super) fn new(storage: &'a mut S) -> Self { + Self(storage, PhantomData) + } + + pub(crate) fn duplicate(this: &Self) -> Self { + Self(this.0, this.1) + } + + /// # Safety + /// + /// May only be called after a call to `insert` with `id` and no + /// following call to `remove` with `id` or to `clean`. + /// + /// A mask should keep track of those states, and an `id` being + /// contained in the tracking mask is sufficient to call this method. + /// + /// There must be no extant aliasing references to this component (i.e. + /// obtained with the same `id` via this method or [`Self::get`]). + pub(super) unsafe fn get_mut( + this: &Self, + id: Index, + ) -> >::AccessMut<'a> + where + S: SharedGetMutStorage, + { + // SAFETY: `Self::new` takes an exclusive reference to this storage, + // ensuring there are no extant references to its content at the + // time `self` is created and ensuring that only `self` has access + // to the storage for its lifetime and the lifetime of the produced + // `AccessMutReturn`s (the reference we hold to the storage is not + // exposed outside of this module). + // + // This means we only have to worry about aliasing references being + // produced by calling `SharedGetMutStorage::shared_get_mut` (via + // this method) or `UnprotectedStorage::get` (via `Self::get`). + // Ensuring these don't alias is enforced by the requirements on + // this method and `Self::get`. + // + // `Self` is only `Send`/`Sync` when `S: DistinctStorage`. Note, + // that multiple instances of `Self` can be created via `duplicate` + // but they can't be sent between threads (nor can shared references + // be sent) unless `S: DistinctStorage`. These factors, along with + // `Self::new` taking an exclusive reference to the storage, prevent + // calling `shared_get_mut` from multiple threads at once unless `S: + // DistinctStorage`. + // + // The remaining safety requirements are passed on to the caller. + unsafe { this.0.shared_get_mut(id) } + } + + /// # Safety + /// + /// May only be called after a call to `insert` with `id` and no + /// following call to `remove` with `id` or to `clean`. + /// + /// A mask should keep track of those states, and an `id` being + /// contained in the tracking mask is sufficient to call this method. + /// + /// There must be no extant references obtained from [`Self::get_mut`] + /// using the same `id`. + pub(super) unsafe fn get(this: &Self, id: Index) -> &'a T + where + S: UnprotectedStorage, + { + // SAFETY: Safety requirements passed to the caller. + unsafe { this.0.get(id) } } } } +pub use shared_get_only::SharedGetOnly; -/// Pairs a storage with an index, meaning that the index is guaranteed to exist -/// as long as the `PairedStorage` exists. -pub struct PairedStorage<'rf, 'st: 'rf, C, S, B, Restrict> { - index: Index, - storage: S, - bitset: B, - entities: &'rf Fetch<'st, EntitiesRes>, - phantom: PhantomData<(C, Restrict)>, +// SAFETY: `open` returns references to corresponding mask and storage values +// contained in the wrapped `Storage`. Iterating the mask does not repeat +// indices. 
+unsafe impl<'rf, C, S> Join for &'rf mut RestrictedStorage<'rf, C, S>
+where
+    C: Component,
+    S: BorrowMut<C::Storage>,
+    C::Storage: SharedGetMutStorage,
+{
+    type Mask = &'rf BitSet;
+    type Type = PairedStorageWriteShared<'rf, C>;
+    type Value = SharedGetOnly<'rf, C, C::Storage>;
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        let bitset = &self.bitset;
+        let storage = SharedGetOnly::new(self.data.borrow_mut());
+        (bitset, storage)
+    }
+
+    unsafe fn get(value: &mut Self::Value, id: Index) -> Self::Type {
+        // NOTE: Methods on this type rely on safety requirements of this method.
+        PairedStorageWriteShared {
+            index: id,
+            storage: SharedGetOnly::duplicate(value),
+        }
+    }
 }

-impl<'rf, 'st, C, S, B, Restrict> PairedStorage<'rf, 'st, C, S, B, Restrict>
+// SAFETY: It is safe to call `get` from multiple threads at once since
+// `C::Storage: Sync`. We construct a `PairedStorageRead` which can be used to
+// call `UnprotectedStorage::get` which is safe to call concurrently.
+//
+// `open` returns references to corresponding mask and storage values contained
+// in the wrapped `Storage`.
+//
+// Iterating the mask does not repeat indices.
+#[cfg(feature = "parallel")]
+unsafe impl<'rf, C, S> ParJoin for &'rf RestrictedStorage<'rf, C, S>
 where
     C: Component,
     S: Borrow<C::Storage>,
-    B: Borrow<BitSet>,
+    C::Storage: Sync,
 {
-    /// Gets the component related to the current entry without checking whether
-    /// the storage has it or not.
-    pub fn get_unchecked(&self) -> &C {
-        unsafe { self.storage.borrow().get(self.index) }
+    type Mask = &'rf BitSet;
+    type Type = PairedStorageRead<'rf, C>;
+    type Value = (&'rf C::Storage, &'rf Fetch<'rf, EntitiesRes>, &'rf BitSet);
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        (
+            self.bitset,
+            (self.data.borrow(), self.entities, self.bitset),
+        )
+    }
+
+    unsafe fn get(value: &Self::Value, id: Index) -> Self::Type {
+        // NOTE: Methods on this type rely on safety requirements of this method.
+        PairedStorageRead {
+            index: id,
+            storage: value.0,
+            entities: value.1,
+            bitset: value.2,
+        }
     }
 }

-impl<'rf, 'st, C, S, B, Restrict> PairedStorage<'rf, 'st, C, S, B, Restrict>
+// SAFETY: It is safe to call `get` from multiple threads at once since
+// `C::Storage: Sync`. We construct a `PairedStorageWriteShared` which can be
+// used to call `UnprotectedStorage::get` which is safe to call concurrently and
+// `SharedGetOnly::get_mut` which is safe to call concurrently since we require
+// `C::Storage: DistinctStorage` here.
+//
+// `open` returns references to corresponding mask and storage values contained
+// in the wrapped `Storage`.
+//
+// Iterating the mask does not repeat indices.
+#[cfg(feature = "parallel")]
+unsafe impl<'rf, C, S> ParJoin for &'rf mut RestrictedStorage<'rf, C, S>
 where
     C: Component,
     S: BorrowMut<C::Storage>,
-    B: Borrow<BitSet>,
+    C::Storage: Sync + SharedGetMutStorage + DistinctStorage,
 {
-    /// Gets the component related to the current entry without checking whether
-    /// the storage has it or not.
-    pub fn get_mut_unchecked(&mut self) -> AccessMutReturn<'_, C> {
-        unsafe { self.storage.borrow_mut().get_mut(self.index) }
+    type Mask = &'rf BitSet;
+    type Type = PairedStorageWriteShared<'rf, C>;
+    type Value = SharedGetOnly<'rf, C, C::Storage>;
+
+    unsafe fn open(self) -> (Self::Mask, Self::Value) {
+        let bitset = &self.bitset;
+        let storage = SharedGetOnly::new(self.data.borrow_mut());
+        (bitset, storage)
+    }
+
+    unsafe fn get(value: &Self::Value, id: Index) -> Self::Type {
+        // NOTE: Methods on this type rely on safety requirements of this method.
+ PairedStorageWriteShared { + index: id, + storage: SharedGetOnly::duplicate(value), + } } } -impl<'rf, 'st, C, S, B, Restrict> PairedStorage<'rf, 'st, C, S, B, Restrict> +/// Pairs a storage with an index, meaning that the index is guaranteed to exist +/// as long as the `PairedStorage` exists. +/// +/// Yielded by `lend_join`/`join`/`par_join` on `&storage.restrict()`. +pub struct PairedStorageRead<'rf, C: Component> { + index: Index, + storage: &'rf C::Storage, + bitset: &'rf BitSet, + entities: &'rf Fetch<'rf, EntitiesRes>, +} + +/// Pairs a storage with an index, meaning that the index is guaranteed to +/// exist. +/// +/// Yielded by `join`/`par_join` on `&mut storage.restrict_mut()`. +pub struct PairedStorageWriteShared<'rf, C: Component> { + index: Index, + storage: SharedGetOnly<'rf, C, C::Storage>, +} + +// SAFETY: All fields are required to implement `Send` in the where clauses. We +// also require `C::Storage: DistinctStorage` so that this cannot be sent +// between threads and then used to call `get_mut` from multiple threads at +// once. +unsafe impl Send for PairedStorageWriteShared<'_, C> +where + C: Component, + Index: Send, + for<'a> SharedGetOnly<'a, C, C::Storage>: Send, + C::Storage: DistinctStorage, +{ +} + +/// Compile test for when `Send` is implemented. +/// ```rust,compile_fail +/// use specs::prelude::*; +/// +/// struct Pos(f32); +/// impl Component for Pos { +/// type Storage = FlaggedStorage; +/// } +/// +/// let mut world = World::new(); +/// world.register::(); +/// world.create_entity().with(Pos(0.0)).build(); +/// world.create_entity().with(Pos(1.6)).build(); +/// world.create_entity().with(Pos(5.4)).build(); +/// let mut pos = world.write_storage::(); +/// +/// let mut restricted_pos = pos.restrict_mut(); +/// let mut joined = (&mut restricted_pos).join(); +/// let mut a = joined.next().unwrap(); +/// let mut b = joined.next().unwrap(); +/// // unsound since Pos::Storage isn't a DistinctStorage +/// std::thread::scope(|s| { +/// s.spawn(move || { +/// a.get_mut(); +/// }); +/// }); +/// b.get_mut(); +/// ``` +/// Should compile since `VecStorage` is a `DistinctStorage`. +/// ```rust +/// use specs::prelude::*; +/// +/// struct Pos(f32); +/// impl Component for Pos { +/// type Storage = VecStorage; +/// } +/// +/// let mut world = World::new(); +/// world.register::(); +/// world.create_entity().with(Pos(0.0)).build(); +/// world.create_entity().with(Pos(1.6)).build(); +/// world.create_entity().with(Pos(5.4)).build(); +/// let mut pos = world.write_storage::(); +/// +/// let mut restricted_pos = pos.restrict_mut(); +/// let mut joined = (&mut restricted_pos).join(); +/// let mut a = joined.next().unwrap(); +/// let mut b = joined.next().unwrap(); +/// // sound since Pos::Storage is a DistinctStorage +/// std::thread::scope(|s| { +/// s.spawn(move || { +/// a.get_mut(); +/// }); +/// }); +/// b.get_mut(); +/// ``` +fn _dummy() {} + +/// Pairs a storage with an index, meaning that the index is guaranteed to +/// exist. +/// +/// Yielded by `lend_join` on `&mut storage.restrict_mut()`. +pub struct PairedStorageWriteExclusive<'rf, C: Component> { + index: Index, + storage: &'rf mut C::Storage, + bitset: &'rf BitSet, + entities: &'rf Fetch<'rf, EntitiesRes>, +} + +impl<'rf, C> PairedStorageRead<'rf, C> where C: Component, - S: Borrow, - B: Borrow, - // Only non parallel and immutable parallel storages can access this. - Restrict: ImmutableAliasing, { - /// Attempts to get the component related to the entity. 
+    /// Gets the component related to the current entity.
+    ///
+    /// Note, unlike `get_other` this doesn't need to check whether the
+    /// component is present.
+    pub fn get(&self) -> &C {
+        // SAFETY: This is constructed in the `get` methods of
+        // `LendJoin`/`Join`/`ParJoin` above. These all require that the mask
+        // has been checked.
+        unsafe { self.storage.get(self.index) }
+    }
+
+    /// Attempts to get the component related to an arbitrary entity.
     ///
     /// Functions similar to the normal `Storage::get` implementation.
     ///
     /// This only works for non-parallel or immutably parallel
     /// `RestrictedStorage`.
-    pub fn get(&self, entity: Entity) -> Option<&C> {
-        if self.bitset.borrow().contains(entity.id()) && self.entities.is_alive(entity) {
-            Some(unsafe { self.storage.borrow().get(entity.id()) })
+    pub fn get_other(&self, entity: Entity) -> Option<&C> {
+        if self.bitset.contains(entity.id()) && self.entities.is_alive(entity) {
+            // SAFETY: We just checked the mask.
+            Some(unsafe { self.storage.get(entity.id()) })
         } else {
             None
         }
     }
 }

-impl<'rf, 'st, C, S, B> PairedStorage<'rf, 'st, C, S, B, SequentialRestriction>
+impl<'rf, C> PairedStorageWriteShared<'rf, C>
 where
     C: Component,
-    S: BorrowMut<C::Storage>,
-    B: Borrow<BitSet>,
+    C::Storage: SharedGetMutStorage,
 {
-    /// Attempts to get the component related to the entity mutably.
+    /// Gets the component related to the current entity.
+    pub fn get(&self) -> &C {
+        // SAFETY: See note in `Self::get_mut` below. The only difference is
+        // that here we take a shared reference which prevents `get_mut` from
+        // being called while the return value is alive, but also allows this
+        // method to still be called again (which is fine).
+        unsafe { SharedGetOnly::get(&self.storage, self.index) }
+    }
+
+    /// Gets the component related to the current entity.
+    pub fn get_mut(&mut self) -> AccessMutReturn<'_, C> {
+        // SAFETY:
+        // * This is constructed in the `get` methods of `Join`/`ParJoin` above. These
+        //   all require that the mask has been checked.
+        // * We also require that either there are no subsequent calls with the same
+        //   `id` (`Join`) or that there are not extant references from a call with the
+        //   same `id` (`ParJoin`). Thus, `id` is unique among the instances of `Self`
+        //   created by the join `get` methods. We then tie the lifetime of the returned
+        //   value to the exclusive borrow of self which prevents this or `Self::get`
+        //   from being called while the returned reference is still alive.
+        unsafe { SharedGetOnly::get_mut(&self.storage, self.index) }
+    }
+}
+
+impl<'rf, C> PairedStorageWriteExclusive<'rf, C>
+where
+    C: Component,
+{
+    /// Gets the component related to the current entity.
+    ///
+    /// Note, unlike `get_other` this doesn't need to check whether the
+    /// component is present.
+    pub fn get(&self) -> &C {
+        // SAFETY: This is constructed in `LendJoin::get` which requires that
+        // the mask has been checked.
+        unsafe { self.storage.get(self.index) }
+    }
+
+    /// Gets the component related to the current entity.
+    ///
+    /// Note, unlike `get_other_mut` this doesn't need to check whether the
+    /// component is present.
+    pub fn get_mut(&mut self) -> AccessMutReturn<'_, C> {
+        // SAFETY: This is constructed in `LendJoin::get` which requires that
+        // the mask has been checked.
+        unsafe { self.storage.get_mut(self.index) }
+    }
+
+    /// Attempts to get the component related to an arbitrary entity.
+    ///
+    /// Functions similar to the normal `Storage::get` implementation.
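+    ///
+    /// The mask and the entity's liveness are checked here, so a missing
+    /// component yields `None` instead of being undefined behavior.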
+    pub fn get_other(&self, entity: Entity) -> Option<&C> {
+        if self.bitset.contains(entity.id()) && self.entities.is_alive(entity) {
+            // SAFETY: We just checked the mask.
+            Some(unsafe { self.storage.get(entity.id()) })
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to mutably get the component related to an arbitrary entity.
     ///
     /// Functions similar to the normal `Storage::get_mut` implementation.
     ///
-    /// This only works if this is a non-parallel `RestrictedStorage`,
-    /// otherwise you could access the same component mutably in two different
-    /// threads.
-    pub fn get_mut(&mut self, entity: Entity) -> Option<AccessMutReturn<'_, C>> {
-        if self.bitset.borrow().contains(entity.id()) && self.entities.is_alive(entity) {
-            Some(unsafe { self.storage.borrow_mut().get_mut(entity.id()) })
+    /// This only works if this is a lending `RestrictedStorage`, otherwise you
+    /// could access the same component mutably via two different
+    /// `PairedStorage`s at the same time.
+    pub fn get_other_mut(&mut self, entity: Entity) -> Option<AccessMutReturn<'_, C>> {
+        if self.bitset.contains(entity.id()) && self.entities.is_alive(entity) {
+            // SAFETY: We just checked the mask.
+            Some(unsafe { self.storage.get_mut(entity.id()) })
         } else {
             None
         }
diff --git a/src/storage/storages.rs b/src/storage/storages.rs
index 073a98387..d293d6f8a 100644
--- a/src/storage/storages.rs
+++ b/src/storage/storages.rs
@@ -1,12 +1,13 @@
 //! Different types of storages you can use for your components.

-use std::{collections::BTreeMap, mem::MaybeUninit};
+use core::{marker::PhantomData, mem::MaybeUninit, ptr, ptr::NonNull};
+use std::collections::BTreeMap;

 use ahash::AHashMap as HashMap;
 use hibitset::BitSetLike;

 use crate::{
-    storage::{DistinctStorage, UnprotectedStorage},
+    storage::{DistinctStorage, SharedGetMutStorage, SyncUnsafeCell, UnprotectedStorage},
     world::Index,
 };

@@ -16,14 +17,17 @@ use crate::{
 /// which wraps `T`. The associated type `Element` identifies what
 /// the slices will contain.
 pub trait SliceAccess<T> {
+    /// The type of the underlying data elements.
     type Element;

+    /// Returns a slice of the underlying storage.
     fn as_slice(&self) -> &[Self::Element];
+    /// Returns a mutable slice of the underlying storage.
     fn as_mut_slice(&mut self) -> &mut [Self::Element];
 }

 /// BTreeMap-based storage.
-pub struct BTreeStorage<T>(BTreeMap<Index, T>);
+pub struct BTreeStorage<T>(BTreeMap<Index, SyncUnsafeCell<T>>);

 impl<T> Default for BTreeStorage<T> {
     fn default() -> Self {
@@ -32,42 +36,51 @@ impl<T> Default for BTreeStorage<T> {
 }

 impl<T> UnprotectedStorage<T> for BTreeStorage<T> {
-    #[cfg(feature = "nightly")]
-    type AccessMut<'a>
-    where
-        T: 'a,
-    = &'a mut T;
+    type AccessMut<'a> = &'a mut T where T: 'a;

     unsafe fn clean<B>(&mut self, _has: B)
     where
         B: BitSetLike,
     {
-        // nothing to do
+        self.0.clear();
     }

     unsafe fn get(&self, id: Index) -> &T {
-        &self.0[&id]
+        let ptr = self.0[&id].get();
+        // SAFETY: See `VecStorage` impl.
+        unsafe { &*ptr }
     }

     unsafe fn get_mut(&mut self, id: Index) -> &mut T {
-        self.0.get_mut(&id).unwrap()
+        self.0.get_mut(&id).unwrap().get_mut()
     }

     unsafe fn insert(&mut self, id: Index, v: T) {
-        self.0.insert(id, v);
+        self.0.insert(id, SyncUnsafeCell::new(v));
     }

     unsafe fn remove(&mut self, id: Index) -> T {
-        self.0.remove(&id).unwrap()
+        self.0.remove(&id).unwrap().0.into_inner()
     }
 }

+impl<T> SharedGetMutStorage<T> for BTreeStorage<T> {
+    unsafe fn shared_get_mut(&self, id: Index) -> &mut T {
+        let ptr = self.0[&id].get();
+        // SAFETY: See `VecStorage` impl.
+        unsafe { &mut *ptr }
+    }
+}
+
+// SAFETY: `shared_get_mut` doesn't perform any overlapping mutable
+// accesses when provided distinct indices and is safe to call from multiple
+// threads at once.
 unsafe impl<T> DistinctStorage for BTreeStorage<T> {}

 /// `HashMap`-based storage. Best suited for rare components.
 ///
 /// This uses the [std::collections::HashMap] internally.
-pub struct HashMapStorage<T>(HashMap<Index, T>);
+pub struct HashMapStorage<T>(HashMap<Index, SyncUnsafeCell<T>>);

 impl<T> Default for HashMapStorage<T> {
     fn default() -> Self {
@@ -76,36 +89,45 @@ impl<T> Default for HashMapStorage<T> {
 }

 impl<T> UnprotectedStorage<T> for HashMapStorage<T> {
-    #[cfg(feature = "nightly")]
-    type AccessMut<'a>
-    where
-        T: 'a,
-    = &'a mut T;
+    type AccessMut<'a> = &'a mut T where T: 'a;

     unsafe fn clean<B>(&mut self, _has: B)
     where
         B: BitSetLike,
     {
-        //nothing to do
+        self.0.clear();
     }

     unsafe fn get(&self, id: Index) -> &T {
-        &self.0[&id]
+        let ptr = self.0[&id].get();
+        // SAFETY: See `VecStorage` impl.
+        unsafe { &*ptr }
     }

     unsafe fn get_mut(&mut self, id: Index) -> &mut T {
-        self.0.get_mut(&id).unwrap()
+        self.0.get_mut(&id).unwrap().get_mut()
     }

     unsafe fn insert(&mut self, id: Index, v: T) {
-        self.0.insert(id, v);
+        self.0.insert(id, SyncUnsafeCell::new(v));
     }

     unsafe fn remove(&mut self, id: Index) -> T {
-        self.0.remove(&id).unwrap()
+        self.0.remove(&id).unwrap().0.into_inner()
     }
 }

+impl<T> SharedGetMutStorage<T> for HashMapStorage<T> {
+    unsafe fn shared_get_mut(&self, id: Index) -> &mut T {
+        let ptr = self.0[&id].get();
+        // SAFETY: See `VecStorage` impl.
+        unsafe { &mut *ptr }
+    }
+}
+
+// SAFETY: `shared_get_mut` doesn't perform any overlapping mutable
+// accesses when provided distinct indices and is safe to call from multiple
+// threads at once.
 unsafe impl<T> DistinctStorage for HashMapStorage<T> {}

 /// Dense vector storage. Has a redirection 2-way table
@@ -121,7 +143,7 @@ unsafe impl<T> DistinctStorage for HashMapStorage<T> {}
 /// a particular entity's position within this slice may change
 /// over time.
 pub struct DenseVecStorage<T> {
-    data: Vec<T>,
+    data: Vec<SyncUnsafeCell<T>>,
     entity_id: Vec<Index>,
     data_id: Vec<MaybeUninit<Index>>,
 }

@@ -145,7 +167,9 @@ impl<T> SliceAccess<T> for DenseVecStorage<T> {
     /// and especially do not correspond with entity IDs.
     #[inline]
     fn as_slice(&self) -> &[Self::Element] {
-        self.data.as_slice()
+        let unsafe_cell_slice_ptr = SyncUnsafeCell::as_cell_of_slice(self.data.as_slice()).get();
+        // SAFETY: See `VecStorage` impl.
+        unsafe { &*unsafe_cell_slice_ptr }
     }

     /// Returns a mutable slice of all the components in this storage.
@@ -154,112 +178,207 @@ impl<T> SliceAccess<T> for DenseVecStorage<T> {
     /// and especially do not correspond with entity IDs.
     #[inline]
     fn as_mut_slice(&mut self) -> &mut [Self::Element] {
-        self.data.as_mut_slice()
+        SyncUnsafeCell::as_slice_mut(self.data.as_mut_slice())
     }
 }

 impl<T> UnprotectedStorage<T> for DenseVecStorage<T> {
-    #[cfg(feature = "nightly")]
-    type AccessMut<'a>
-    where
-        T: 'a,
-    = &'a mut T;
+    type AccessMut<'a> = &'a mut T where T: 'a;

     unsafe fn clean<B>(&mut self, _has: B)
     where
         B: BitSetLike,
     {
-        // nothing to do
+        // NOTE: clearing `data` may panic due to drop impls. So to make sure
+        // everything is cleared and to ensure `remove` is sound we clear `data`
+        // last.
+ self.data_id.clear(); + self.entity_id.clear(); + self.data.clear(); } unsafe fn get(&self, id: Index) -> &T { - let did = self.data_id.get_unchecked(id as usize).assume_init(); - self.data.get_unchecked(did as usize) + // NOTE: `as` cast is not lossy since insert would have encountered an + // allocation failure if this would overflow `usize.` + // SAFETY (get_unchecked and assume_init): Caller required to call + // `insert` with this `id` (with no following call to `remove` with that + // id or to `clean`). + let did = unsafe { self.data_id.get_unchecked(id as usize).assume_init() }; + // SAFETY: Indices retrieved from `data_id` with a valid `id` will + // always correspond to an element in `data`. + let ptr = unsafe { self.data.get_unchecked(did as usize) }.get(); + // SAFETY: See `VecStorage` impl. + unsafe { &*ptr } } unsafe fn get_mut(&mut self, id: Index) -> &mut T { - let did = self.data_id.get_unchecked(id as usize).assume_init(); - self.data.get_unchecked_mut(did as usize) + // NOTE: `as` cast is not lossy since insert would have encountered an + // allocation failure if this would overflow `usize.` + // SAFETY (get_unchecked and assume_init): Caller required to call + // `insert` with this `id` (with no following call to `remove` with that + // id or to `clean`). + let did = unsafe { self.data_id.get_unchecked(id as usize).assume_init() }; + // SAFETY: Indices retrieved from `data_id` with a valid `id` will + // always correspond to an element in `data`. + unsafe { self.data.get_unchecked_mut(did as usize) }.get_mut() } unsafe fn insert(&mut self, id: Index, v: T) { - let id = id as usize; + let id = if Index::BITS > usize::BITS { + // Saturate the cast to usize::MAX so if this overflows usize the + // allocation below will fail. + core::cmp::min(id, usize::MAX as Index) as usize + } else { + id as usize + }; + if self.data_id.len() <= id { - let delta = id + 1 - self.data_id.len(); + // NOTE: saturating add ensures that if this computation would + // overflow it will instead fail the allocation when calling + // reserve. + let delta = if Index::BITS >= usize::BITS { + id.saturating_add(1) + } else { + id + 1 + } - self.data_id.len(); self.data_id.reserve(delta); - self.data_id.set_len(id + 1); + // NOTE: Allocation would have failed if this addition would overflow + // SAFETY: MaybeUninit elements don't require initialization and + // the reserve call ensures the capacity will be sufficient for this + // new length. + unsafe { self.data_id.set_len(id + 1) }; } - self.data_id - .get_unchecked_mut(id) - .as_mut_ptr() - .write(self.data.len() as Index); + // NOTE: `as` cast here is not lossy since the length will be at most + // `Index::MAX` if there is still an entity without this component. + unsafe { self.data_id.get_unchecked_mut(id) }.write(self.data.len() as Index); + // NOTE: `id` originally of the type `Index` so the cast back won't + // overflow. self.entity_id.push(id as Index); - self.data.push(v); + self.data.push(SyncUnsafeCell::new(v)); } unsafe fn remove(&mut self, id: Index) -> T { - let did = self.data_id.get_unchecked(id as usize).assume_init(); + // NOTE: cast to usize won't overflow since `insert` would have failed + // to allocate. + // SAFETY (get_unchecked and assume_init): Caller required to have + // called `insert` with this `id`. 
+        let did = unsafe { self.data_id.get_unchecked(id as usize).assume_init() };
         let last = *self.entity_id.last().unwrap();
-        self.data_id
-            .get_unchecked_mut(last as usize)
-            .as_mut_ptr()
-            .write(did);
+        // NOTE: cast to usize won't overflow since `insert` would have failed
+        // to allocate.
+        // SAFETY: indices in `self.entity_id` correspond to components present
+        // in this storage so this will be in-bounds.
+        unsafe { self.data_id.get_unchecked_mut(last as usize) }.write(did);
+        // NOTE: casting the index in the dense data array to usize won't
+        // overflow since the maximum number of components is limited to
+        // `Index::MAX + 1`.
         self.entity_id.swap_remove(did as usize);
-        self.data.swap_remove(did as usize)
+        self.data.swap_remove(did as usize).0.into_inner()
+    }
+}
+
+impl<T> SharedGetMutStorage<T> for DenseVecStorage<T> {
+    unsafe fn shared_get_mut(&self, id: Index) -> &mut T {
+        // NOTE: `as` cast is not lossy since insert would have encountered an
+        // allocation failure if this would overflow `usize`.
+        // SAFETY (get_unchecked and assume_init): Caller required to call
+        // `insert` with this `id` (with no following call to `remove` with that
+        // id or to `clean`).
+        let did = unsafe { self.data_id.get_unchecked(id as usize).assume_init() };
+        // SAFETY: Indices retrieved from `data_id` with a valid `id` will
+        // always correspond to an element in `data`.
+        let ptr = unsafe { self.data.get_unchecked(did as usize) }.get();
+        // SAFETY: See `VecStorage` impl.
+        unsafe { &mut *ptr }
+    }
+}

+// SAFETY: `shared_get_mut` doesn't perform any overlapping mutable
+// accesses when provided distinct indices and is safe to call from multiple
+// threads at once.
 unsafe impl<T> DistinctStorage for DenseVecStorage<T> {}

 /// A null storage type, used for cases where the component
 /// doesn't contain any data and instead works as a simple flag.
-pub struct NullStorage<T>(T);
+pub struct NullStorage<T>(PhantomData<T>);

-impl<T> UnprotectedStorage<T> for NullStorage<T>
-where
-    T: Default,
-{
-    #[cfg(feature = "nightly")]
-    type AccessMut<'a>
-    where
-        T: 'a,
-    = &'a mut T;
+impl<T> Default for NullStorage<T> {
+    fn default() -> Self {
+        use core::mem::size_of;

-    unsafe fn clean<B>(&mut self, _has: B)
+        assert_eq!(size_of::<T>(), 0, "NullStorage can only be used with ZST");
+
+        NullStorage(PhantomData)
+    }
+}
+
+impl<T> UnprotectedStorage<T> for NullStorage<T> {
+    type AccessMut<'a> = &'a mut T where T: 'a;
+
+    unsafe fn clean<B>(&mut self, has: B)
     where
         B: BitSetLike,
     {
+        for id in has.iter() {
+            // SAFETY: Caller required to provide mask that keeps track of the
+            // existing elements, so every `id` is valid to use with `remove`.
+            unsafe { self.remove(id) };
+        }
     }

     unsafe fn get(&self, _: Index) -> &T {
-        &self.0
+        // SAFETY: Because the caller is required by the safety docs to first
+        // insert a component with this index, this corresponds to an instance
+        // of the ZST we conceptually own. The caller also must manage the
+        // aliasing of accesses via get/get_mut.
+        //
+        // Self::default asserts that `T` is a ZST which makes generating a
+        // reference from a dangling pointer not UB.
+        unsafe { &*NonNull::dangling().as_ptr() }
     }

-    unsafe fn get_mut(&mut self, _: Index) -> &mut T {
-        &mut self.0
+    unsafe fn get_mut(&mut self, id: Index) -> &mut T {
+        // SAFETY: Exclusive reference to `self` guarantees that there are no
+        // extant references to components and that we aren't calling this from
+        // multiple threads at once. Remaining requirements passed to caller.
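+        // NOTE: For a ZST this ends up being a reference created from a
+        // dangling (but well-aligned) pointer, which `shared_get_mut` below
+        // documents as sound for zero-sized types.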
+ unsafe { self.shared_get_mut(id) } } - unsafe fn insert(&mut self, _: Index, _: T) {} + unsafe fn insert(&mut self, _: Index, v: T) { + // We rely on the caller tracking the presence of the ZST via the mask. + // + // We need to forget this to avoid the drop impl from running so the + // storage logically is taking ownership of this instance of the ZST. + core::mem::forget(v) + } unsafe fn remove(&mut self, _: Index) -> T { - Default::default() + // SAFETY: Because the caller is required by the safety docs to first + // insert a component with this index, this corresponds to an instance + // of the ZST we conceptually own. + // + // Self::default asserts that `T` is a ZST which makes reading from a + // dangling pointer not UB. + unsafe { ptr::read(NonNull::dangling().as_ptr()) } } } -impl Default for NullStorage -where - T: Default, -{ - fn default() -> Self { - use std::mem::size_of; - - assert_eq!(size_of::(), 0, "NullStorage can only be used with ZST"); - - NullStorage(Default::default()) +impl SharedGetMutStorage for NullStorage { + unsafe fn shared_get_mut(&self, _: Index) -> &mut T { + // SAFETY: Because the caller is required by the safety docs to first + // insert a component with this index, this corresponds to an instance + // of the ZST we conceptually own. The caller also must manage the + // aliasing of accesses via get/get_mut. + // + // Self::default asserts that `T` is a ZST which makes generating a + // reference from a dangling pointer not UB. + unsafe { &mut *NonNull::dangling().as_ptr() } } } -/// This is safe because you cannot mutate ZSTs. +// SAFETY: `shared_get_mut` doesn't perform any overlapping mutable +// accesses when provided distinct indices and is safe to call from multiple +// threads at once. unsafe impl DistinctStorage for NullStorage {} /// Vector storage. Uses a simple `Vec`. Supposed to have maximum @@ -269,7 +388,7 @@ unsafe impl DistinctStorage for NullStorage {} /// entity IDs. These can be compared to other `VecStorage`s, to /// other `DefaultVecStorage`s, and to `Entity::id()`s for live /// entities. -pub struct VecStorage(Vec>); +pub struct VecStorage(Vec>>); impl Default for VecStorage { fn default() -> Self { @@ -282,62 +401,138 @@ impl SliceAccess for VecStorage { #[inline] fn as_slice(&self) -> &[Self::Element] { - self.0.as_slice() + let unsafe_cell_slice_ptr = SyncUnsafeCell::as_cell_of_slice(self.0.as_slice()).get(); + // SAFETY: The only place that mutably accesses these elements via a + // shared reference is the impl of `SharedGetMut::shared_get_mut` which + // requires callers to avoid calling other methods with `&self` while + // references returned there are still in use (and to ensure references + // from methods like this no longer exist). + unsafe { &*unsafe_cell_slice_ptr } } #[inline] fn as_mut_slice(&mut self) -> &mut [Self::Element] { - self.0.as_mut_slice() + SyncUnsafeCell::as_slice_mut(self.0.as_mut_slice()) } } impl UnprotectedStorage for VecStorage { - #[cfg(feature = "nightly")] - type AccessMut<'a> - where - T: 'a, - = &'a mut T; + type AccessMut<'a> = &'a mut T where T: 'a; unsafe fn clean(&mut self, has: B) where B: BitSetLike, { - use std::ptr; for (i, v) in self.0.iter_mut().enumerate() { + // NOTE: `as` cast is safe since the index used for insertion is a + // `u32` so the indices will never be over `u32::MAX`. + const _: Index = 0u32; if has.contains(i as u32) { // drop in place - ptr::drop_in_place(&mut *v.as_mut_ptr()); + let v_inner = v.get_mut(); + // SAFETY: Present in the provided mask. 
+                // All components are considered removed after a call to
+                // `clean`.
+                unsafe { v_inner.assume_init_drop() };
             }
         }
-        self.0.set_len(0);
     }

     unsafe fn get(&self, id: Index) -> &T {
-        &*self.0.get_unchecked(id as usize).as_ptr()
+        // NOTE: `as` cast is not lossy since insert would have encountered an
+        // allocation failure if this would overflow `usize`.
+        // SAFETY: Caller required to call `insert` with this `id` (with no
+        // following call to `remove` with that id or to `clean`).
+        let ptr = unsafe { self.0.get_unchecked(id as usize) }.get();
+        // SAFETY: Only method that obtains exclusive references from this
+        // unsafe cell is `shared_get_mut` and callers of that method are
+        // required to manually ensure that those references don't alias
+        // references from this method.
+        let maybe_uninit = unsafe { &*ptr };
+        // SAFETY: Requirement to have `insert`ed this component ensures that it
+        // will be initialized.
+        unsafe { maybe_uninit.assume_init_ref() }
     }

     unsafe fn get_mut(&mut self, id: Index) -> &mut T {
-        &mut *self.0.get_unchecked_mut(id as usize).as_mut_ptr()
-    }
-
+        // NOTE: `as` cast is not lossy since `insert` would have encountered an
+        // allocation failure if this would overflow `usize`.
+        // SAFETY: Caller required to call `insert` with this `id` (with no
+        // following call to `remove` with that id or to `clean`).
+        let maybe_uninit = unsafe { self.0.get_unchecked_mut(id as usize) }.get_mut();
+        // SAFETY: Requirement to have `insert`ed this component ensures that it
+        // will be initialized.
+        unsafe { maybe_uninit.assume_init_mut() }
+    }
+
+    // false positive https://github.com/rust-lang/rust-clippy/issues/10407
+    #[allow(clippy::uninit_vec)]
     unsafe fn insert(&mut self, id: Index, v: T) {
-        let id = id as usize;
+        let id = if Index::BITS > usize::BITS {
+            // Saturate the cast to usize::MAX so if this overflows usize the
+            // allocation below will fail.
+            core::cmp::min(id, usize::MAX as Index) as usize
+        } else {
+            id as usize
+        };
+
         if self.0.len() <= id {
-            let delta = id + 1 - self.0.len();
+            // NOTE: saturating add ensures that if this computation would
+            // overflow it will instead fail the allocation when calling
+            // reserve.
+            let delta = if Index::BITS >= usize::BITS {
+                id.saturating_add(1)
+            } else {
+                id + 1
+            } - self.0.len();
             self.0.reserve(delta);
-            self.0.set_len(id + 1);
+            // NOTE: Allocation would have failed if this addition would overflow.
+            // SAFETY: MaybeUninit elements don't require initialization and
+            // the reserve call ensures the capacity will be sufficient for this
+            // new length.
+            unsafe { self.0.set_len(id + 1) };
         }
         // Write the value without reading or dropping
         // the (currently uninitialized) memory.
-        *self.0.get_unchecked_mut(id as usize) = MaybeUninit::new(v);
+        // SAFETY: The length of the vec was extended to contain this index
+        // above.
+        unsafe { self.0.get_unchecked_mut(id) }.get_mut().write(v);
     }

     unsafe fn remove(&mut self, id: Index) -> T {
-        use std::ptr;
-        ptr::read(self.get(id))
+        // SAFETY: Caller required to have called `insert` with this `id`.
+        // Exclusive `&mut self` ensures no aliasing is occurring.
+        let component_ref = unsafe { self.get(id) };
+        // SAFETY: Caller not allowed to call other methods that access this
+        // `id` as an initialized value after this call to `remove` so it is
+        // safe to move out of this.
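+        // NOTE: `ptr::read` performs a plain bitwise copy; the slot's bytes
+        // are treated as logically uninitialized from this point on.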
+ unsafe { ptr::read(component_ref) } + } +} + +impl SharedGetMutStorage for VecStorage { + unsafe fn shared_get_mut(&self, id: Index) -> &mut T { + // NOTE: `as` cast is not lossy since insert would have encountered an + // allocation failure if this would overflow `usize.` + // SAFETY: Caller required to call `insert` with this `id` (with no + // following call to `remove` with that id or to `clean`). + let ptr = unsafe { self.0.get_unchecked(id as usize) }.get(); + // SAFETY: Caller required to manage aliasing (ensuring there are no + // extant shared references into the storage, this is called with + // distinct ids, and that other methods that take `&self` aren't called + // while the exclusive references returned here are alive (except for + // `UnprotectedStorage::get` which may be used with this provided the + // caller avoids creating aliasing references from both that live at the + // same time)). + let maybe_uninit = unsafe { &mut *ptr }; + // SAFETY: Requirement to have `insert`ed this component ensures that it + // will be initialized. + unsafe { maybe_uninit.assume_init_mut() } } } +// SAFETY: `shared_get_mut` doesn't perform any overlapping mutable +// accesses when provided distinct indices and is safe to call from multiple +// threads at once. unsafe impl DistinctStorage for VecStorage {} /// Vector storage, like `VecStorage`, but allows safe access to the @@ -348,7 +543,7 @@ unsafe impl DistinctStorage for VecStorage {} /// `as_slice()` and `as_mut_slice()` indices correspond to entity IDs. /// These can be compared to other `DefaultVecStorage`s, to other /// `VecStorage`s, and to `Entity::id()`s for live entities. -pub struct DefaultVecStorage(Vec); +pub struct DefaultVecStorage(Vec>); impl Default for DefaultVecStorage { fn default() -> Self { @@ -356,15 +551,29 @@ impl Default for DefaultVecStorage { } } +impl SliceAccess for DefaultVecStorage { + type Element = T; + + /// Returns a slice of all the components in this storage. + #[inline] + fn as_slice(&self) -> &[Self::Element] { + let unsafe_cell_slice_ptr = SyncUnsafeCell::as_cell_of_slice(self.0.as_slice()).get(); + // SAFETY: See `VecStorage` impl. + unsafe { &*unsafe_cell_slice_ptr } + } + + /// Returns a mutable slice of all the components in this storage. + #[inline] + fn as_mut_slice(&mut self) -> &mut [Self::Element] { + SyncUnsafeCell::as_slice_mut(self.0.as_mut_slice()) + } +} + impl UnprotectedStorage for DefaultVecStorage where T: Default, { - #[cfg(feature = "nightly")] - type AccessMut<'a> - where - T: 'a, - = &'a mut T; + type AccessMut<'a> = &'a mut T where T: 'a; unsafe fn clean(&mut self, _has: B) where @@ -374,51 +583,63 @@ where } unsafe fn get(&self, id: Index) -> &T { - self.0.get_unchecked(id as usize) + // NOTE: `as` cast is not lossy since insert would have encountered an + // allocation failure if this would overflow `usize.` + // SAFETY: See `VecStorage` impl. + let ptr = unsafe { self.0.get_unchecked(id as usize) }.get(); + // SAFETY: See `VecStorage` impl. + unsafe { &*ptr } } unsafe fn get_mut(&mut self, id: Index) -> &mut T { - self.0.get_unchecked_mut(id as usize) + // NOTE: `as` cast is not lossy since insert would have encountered an + // allocation failure if this would overflow `usize.` + // SAFETY: See `VecStorage` impl. 
+ unsafe { self.0.get_unchecked_mut(id as usize) }.get_mut() } unsafe fn insert(&mut self, id: Index, v: T) { - let id = id as usize; + let id = if Index::BITS > usize::BITS { + // Saturate the cast to usize::MAX so if this overflows usize the + // allocation below will fail. + core::cmp::min(id, usize::MAX as Index) as usize + } else { + id as usize + }; if self.0.len() <= id { // fill all the empty slots with default values self.0.resize_with(id, Default::default); // store the desired value - self.0.push(v) + self.0.push(SyncUnsafeCell::new(v)) } else { // store the desired value directly - self.0[id] = v; + *self.0[id].get_mut() = v; } } unsafe fn remove(&mut self, id: Index) -> T { - // make a new default value - let mut v = T::default(); - // swap it into the vec - std::ptr::swap(self.0.get_unchecked_mut(id as usize), &mut v); - // return the old value - v + // Take value leaving a default instance behind + // SAFETY: Caller required to have called `insert` with this `id`. + core::mem::take(unsafe { self.0.get_unchecked_mut(id as usize) }.get_mut()) } } -unsafe impl DistinctStorage for DefaultVecStorage {} - -impl SliceAccess for DefaultVecStorage { - type Element = T; - - /// Returns a slice of all the components in this storage. - #[inline] - fn as_slice(&self) -> &[Self::Element] { - self.0.as_slice() - } - - /// Returns a mutable slice of all the components in this storage. - #[inline] - fn as_mut_slice(&mut self) -> &mut [Self::Element] { - self.0.as_mut_slice() +impl SharedGetMutStorage for DefaultVecStorage +where + T: Default, +{ + unsafe fn shared_get_mut(&self, id: Index) -> &mut T { + // NOTE: `as` cast is not lossy since insert would have encountered an + // allocation failure if this would overflow `usize.` + // SAFETY: See `VecStorage` impl. + let ptr = unsafe { self.0.get_unchecked(id as usize) }.get(); + // SAFETY: See `VecStorage` impl. + unsafe { &mut *ptr } } } + +// SAFETY: `shared_get_mut` doesn't perform any overlapping mutable +// accesses when provided distinct indices and is safe to call from multiple +// threads at once. +unsafe impl DistinctStorage for DefaultVecStorage {} diff --git a/src/storage/sync_unsafe_cell.rs b/src/storage/sync_unsafe_cell.rs new file mode 100644 index 000000000..3e0f6b31e --- /dev/null +++ b/src/storage/sync_unsafe_cell.rs @@ -0,0 +1,51 @@ +//! Stand in for core::cell::SyncUnsafeCell since that is still unstable. +//! +//! TODO: Remove when core::cell::SyncUnsafeCell is stabilized + +use core::cell::UnsafeCell; +use core::ops::{Deref, DerefMut}; + +#[repr(transparent)] +pub struct SyncUnsafeCell(pub UnsafeCell); + +// SAFETY: Proper synchronization is left to the user of the unsafe `get` call. +// `UnsafeCell` itself doesn't implement `Sync` to prevent accidental mis-use. +unsafe impl Sync for SyncUnsafeCell {} + +impl SyncUnsafeCell { + pub fn new(value: T) -> Self { + Self(UnsafeCell::new(value)) + } + + pub fn as_cell_of_slice(slice: &[Self]) -> &SyncUnsafeCell<[T]> { + // SAFETY: `T` has the same memory layout as `SyncUnsafeCell`. + unsafe { &*(slice as *const [Self] as *const SyncUnsafeCell<[T]>) } + } + + pub fn as_slice_mut(slice: &mut [Self]) -> &mut [T] { + // SAFETY: `T` has the same memory layout as `SyncUnsafeCell` and we + // have a mutable reference which means the `SyncUnsafeCell` can be + // safely removed since we have exclusive access here. 
+ unsafe { &mut *(slice as *mut [Self] as *mut [T]) } + } +} + +impl Deref for SyncUnsafeCell { + type Target = UnsafeCell; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for SyncUnsafeCell { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl Default for SyncUnsafeCell { + fn default() -> Self { + Self::new(Default::default()) + } +} diff --git a/src/storage/tests.rs b/src/storage/tests.rs index 6c5b76574..44bc43097 100644 --- a/src/storage/tests.rs +++ b/src/storage/tests.rs @@ -5,6 +5,9 @@ use crate::world::{Component, Entity, Generation, Index, WorldExt}; use shred::World; use std::mem::MaybeUninit; +// Make tests finish in reasonable time with miri +const ITERATIONS: u32 = if cfg!(miri) { 100 } else { 1000 }; + fn create(world: &mut World) -> WriteStorage where T::Storage: Default, @@ -32,13 +35,13 @@ mod map_test { let mut w = World::new(); let mut c = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if let Err(err) = c.insert(ent(i), Comp(i)) { panic!("Failed to insert component into entity! {:?}", err); } } - for i in 0..1_000 { + for i in 0..ITERATIONS { assert_eq!(c.get(ent(i)).unwrap().0, i); } } @@ -64,21 +67,21 @@ mod map_test { let mut w = World::new(); let mut c = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if let Err(err) = c.insert(ent(i), Comp(i)) { panic!("Failed to insert component into entity! {:?}", err); } } - for i in 0..1_000 { + for i in 0..ITERATIONS { assert_eq!(c.get(ent(i)).unwrap().0, i); } - for i in 0..1_000 { + for i in 0..ITERATIONS { c.remove(ent(i)); } - for i in 0..1_000 { + for i in 0..ITERATIONS { assert!(c.get(ent(i)).is_none()); } } @@ -88,7 +91,7 @@ mod map_test { let mut w = World::new(); let mut c = create(&mut w); - for i in 0..1_000i32 { + for i in 0..ITERATIONS as i32 { if let Err(err) = c.insert(ent(i as u32), Comp(i)) { panic!("Failed to insert component into entity! {:?}", err); } @@ -97,7 +100,7 @@ mod map_test { } } - for i in 0..1_000i32 { + for i in 0..ITERATIONS as i32 { assert_eq!(c.get(ent(i as u32)).unwrap().0, -i); } } @@ -249,13 +252,13 @@ mod test { let mut w = World::new(); let mut s: Storage = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if let Err(err) = s.insert(Entity::new(i, Generation::new(1)), (i + 2718).into()) { panic!("Failed to insert component into entity! {:?}", err); } } - for i in 0..1_000 { + for i in 0..ITERATIONS { assert_eq!( s.get(Entity::new(i, Generation::new(1))).unwrap(), &(i + 2718).into() @@ -270,13 +273,13 @@ mod test { let mut w = World::new(); let mut s: Storage = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if let Err(err) = s.insert(Entity::new(i, Generation::new(1)), (i + 2718).into()) { panic!("Failed to insert component into entity! {:?}", err); } } - for i in 0..1_000 { + for i in 0..ITERATIONS { assert_eq!( s.remove(Entity::new(i, Generation::new(1))).unwrap(), (i + 2718).into() @@ -292,19 +295,20 @@ mod test { let mut w = World::new(); let mut s: Storage = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if let Err(err) = s.insert(Entity::new(i, Generation::new(1)), (i + 2718).into()) { panic!("Failed to insert component into entity! 
{:?}", err); } } - for i in 0..1_000 { + for i in 0..ITERATIONS { *s.get_mut(Entity::new(i, Generation::new(1))) .unwrap() + .access_mut() .as_mut() -= 718; } - for i in 0..1_000 { + for i in 0..ITERATIONS { assert_eq!( s.get(Entity::new(i, Generation::new(1))).unwrap(), &(i + 2000).into() @@ -321,20 +325,21 @@ mod test { // Insert the first 500 components manually, leaving indices 500..1000 // unoccupied. - for i in 0..500 { + for i in 0..ITERATIONS / 2 { if let Err(err) = s.insert(Entity::new(i, Generation::new(1)), (i).into()) { panic!("Failed to insert component into entity! {:?}", err); } } - for i in 0..1_000 { + for i in 0..ITERATIONS { *s.get_mut_or_default(Entity::new(i, Generation::new(1))) .unwrap() + .access_mut() .as_mut() += i; } - // The first 500 were initialized, and should be i*2. - for i in 0..500 { + // The first ITERATIONS / 2 were initialized, and should be i*2. + for i in 0..ITERATIONS / 2 { assert_eq!( s.get(Entity::new(i, Generation::new(1))).unwrap(), &(i + i).into() @@ -342,7 +347,7 @@ mod test { } // The rest were Default-initialized, and should equal i. - for i in 500..1_000 { + for i in ITERATIONS / 2..ITERATIONS { assert_eq!( s.get(Entity::new(i, Generation::new(1))).unwrap(), &(i).into() @@ -357,7 +362,7 @@ mod test { let mut w = World::new(); let mut s: Storage = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if let Err(err) = s.insert(Entity::new(i, Generation::new(1)), (i + 2718).into()) { panic!("Failed to insert component into entity! {:?}", err); } @@ -368,7 +373,7 @@ mod test { } } - for i in 0..1_000 { + for i in 0..ITERATIONS { assert!(s.get(Entity::new(i, Generation::new(2))).is_none()); assert_eq!( s.get(Entity::new(i, Generation::new(1))).unwrap(), @@ -384,7 +389,7 @@ mod test { let mut w = World::new(); let mut s: Storage = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if s.insert(Entity::new(i, Generation::new(2)), (i + 2718).into()) .is_ok() { @@ -392,7 +397,7 @@ mod test { } } - for i in 0..1_000 { + for i in 0..ITERATIONS { assert!(s.remove(Entity::new(i, Generation::new(1))).is_none()); } } @@ -442,14 +447,14 @@ mod test { let mut w = World::new(); let mut s: Storage = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if let Err(err) = s.insert(Entity::new(i, Generation::new(1)), (i + 2718).into()) { panic!("Failed to insert component into entity! {:?}", err); } } let slice = s.as_slice(); - assert_eq!(slice.len(), 1_000); + assert_eq!(slice.len(), ITERATIONS as usize); for (i, v) in slice.iter().enumerate() { assert_eq!(v, &(i as u32 + 2718).into()); } @@ -462,14 +467,14 @@ mod test { let mut w = World::new(); let mut s: Storage = create(&mut w); - for i in 0..1_000 { + for i in 0..ITERATIONS { if let Err(err) = s.insert(Entity::new(i, Generation::new(1)), (i + 2718).into()) { panic!("Failed to insert component into entity! 
{:?}", err); } } let slice = s.as_slice(); - assert_eq!(slice.len(), 1_000); + assert_eq!(slice.len(), ITERATIONS as usize); for (i, v) in slice.iter().enumerate() { let v = unsafe { &*v.as_ptr() }; assert_eq!(v, &(i as u32 + 2718).into()); @@ -698,9 +703,9 @@ mod test { } for mut comps in (&mut s1.restrict_mut()).join() { - let c1 = { comps.get_unchecked().0 }; + let c1 = { comps.get().0 }; - let c2 = { comps.get_mut_unchecked().0 }; + let c2 = { comps.get_mut().0 }; assert_eq!( c1, c2, @@ -740,14 +745,12 @@ mod test { let components2 = Mutex::new(Vec::new()); let components2_mut = Mutex::new(Vec::new()); - (&mut s1.par_restrict_mut()) - .par_join() - .for_each(|mut comps| { - let (mut components2, mut components2_mut) = - (components2.lock().unwrap(), components2_mut.lock().unwrap()); - components2.push(comps.get_unchecked().0); - components2_mut.push(comps.get_mut_unchecked().0); - }); + (&mut s1.restrict_mut()).par_join().for_each(|mut comps| { + let (mut components2, mut components2_mut) = + (components2.lock().unwrap(), components2_mut.lock().unwrap()); + components2.push(comps.get().0); + components2_mut.push(comps.get_mut().0); + }); let components2 = components2.into_inner().unwrap(); assert_eq!( components2, @@ -941,7 +944,7 @@ mod test { assert!(!removed.contains(entity.id())); } - for (_, mut comp) in (&w.entities(), &mut s1).join() { + for (_, comp) in (&w.entities(), &mut s1).join() { comp.0 += 1; } @@ -994,7 +997,7 @@ mod test { #[test] fn entries() { - use crate::{join::Join, storage::WriteStorage, world::Entities}; + use crate::{join::LendJoin, storage::WriteStorage, world::Entities}; let mut w = World::new(); @@ -1018,10 +1021,12 @@ mod test { let mut sum = 0; w.exec(|(e, mut s): (Entities, WriteStorage)| { - sum = (&e, s.entries()).join().fold(0, |acc, (_, value)| { + let mut acc = 0; + (&e, s.entries()).lend_join().for_each(|(_, value)| { let v = value.or_insert(2.into()); - acc + v.0 + acc = acc + v.0; }); + sum = acc; }); assert_eq!(sum, 135); diff --git a/src/storage/track.rs b/src/storage/track.rs index 98c7e0185..58f8450f5 100644 --- a/src/storage/track.rs +++ b/src/storage/track.rs @@ -69,7 +69,7 @@ where /// Returns the event channel for insertions/removals/modifications of this /// storage's components. pub fn channel_mut(&mut self) -> &mut EventChannel { - unsafe { self.open() }.1.channel_mut() + self.data.inner.channel_mut() } /// Starts tracking component events. Note that this reader id should be @@ -89,6 +89,6 @@ where /// not emitted. #[cfg(feature = "storage-event-control")] pub fn set_event_emission(&mut self, emit: bool) { - unsafe { self.open() }.1.set_event_emission(emit); + self.data.inner.set_event_emission(emit); } } diff --git a/src/world/entity.rs b/src/world/entity.rs index 03ea8d388..3d1064b6f 100644 --- a/src/world/entity.rs +++ b/src/world/entity.rs @@ -7,9 +7,16 @@ use std::{ use hibitset::{AtomicBitSet, BitSet, BitSetOr}; use shred::Read; +#[nougat::gat(Type)] +use crate::join::LendJoin; #[cfg(feature = "parallel")] use crate::join::ParJoin; -use crate::{error::WrongGeneration, join::Join, storage::WriteStorage, world::Component}; +use crate::{ + error::WrongGeneration, + join::{Join, RepeatableLendGet}, + storage::WriteStorage, + world::Component, +}; /// An index is basically the id of an `Entity`. pub type Index = u32; @@ -89,7 +96,7 @@ impl Allocator { Ok(()) } - /// Kills and entity atomically (will be updated when the allocator is + /// Kills an entity atomically (will be updated when the allocator is /// maintained). 
pub fn kill_atomic(&self, e: Entity) -> Result<(), WrongGeneration> { if !self.is_alive(e) { @@ -200,9 +207,8 @@ impl Allocator { } fn update_generation_length(&mut self, i: usize) { - if self.generations.len() <= i as usize { - self.generations - .resize(i as usize + 1, ZeroableGeneration(None)); + if self.generations.len() <= i { + self.generations.resize(i + 1, ZeroableGeneration(None)); } } } @@ -317,27 +323,74 @@ impl EntitiesRes { } } -impl<'a> Join for &'a EntitiesRes { +// SAFETY: It is safe to retrieve elements with any `id` regardless of the mask. +#[nougat::gat] +unsafe impl<'a> LendJoin for &'a EntitiesRes { + type Mask = BitSetOr<&'a BitSet, &'a AtomicBitSet>; + type Type<'next> = Entity; + type Value = Self; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + (BitSetOr(&self.alloc.alive, &self.alloc.raised), self) + } + + unsafe fn get<'next>(v: &'next mut &'a EntitiesRes, id: Index) -> Entity { + let gen = v + .alloc + .generation(id) + .map(|gen| if gen.is_alive() { gen } else { gen.raised() }) + .unwrap_or_else(Generation::one); + Entity(id, gen) + } +} + +// SAFETY: ::get does not rely on only being called +// once with a particular ID. +unsafe impl<'a> RepeatableLendGet for &'a EntitiesRes {} + +// SAFETY: It is safe to retrieve elements with any `id` regardless of the mask. +unsafe impl<'a> Join for &'a EntitiesRes { type Mask = BitSetOr<&'a BitSet, &'a AtomicBitSet>; type Type = Entity; type Value = Self; - unsafe fn open(self) -> (Self::Mask, Self) { + unsafe fn open(self) -> (Self::Mask, Self::Value) { (BitSetOr(&self.alloc.alive, &self.alloc.raised), self) } - unsafe fn get(v: &mut &'a EntitiesRes, idx: Index) -> Entity { + unsafe fn get(v: &mut &'a EntitiesRes, id: Index) -> Entity { let gen = v .alloc - .generation(idx) + .generation(id) .map(|gen| if gen.is_alive() { gen } else { gen.raised() }) .unwrap_or_else(Generation::one); - Entity(idx, gen) + Entity(id, gen) } } +// SAFETY: No unsafe code is used and it is safe to call `get` from multiple +// threads at once. +// +// It is safe to retrieve elements with any `id` regardless of the mask. #[cfg(feature = "parallel")] -unsafe impl<'a> ParJoin for &'a EntitiesRes {} +unsafe impl<'a> ParJoin for &'a EntitiesRes { + type Mask = BitSetOr<&'a BitSet, &'a AtomicBitSet>; + type Type = Entity; + type Value = Self; + + unsafe fn open(self) -> (Self::Mask, Self::Value) { + (BitSetOr(&self.alloc.alive, &self.alloc.raised), self) + } + + unsafe fn get(v: &&'a EntitiesRes, id: Index) -> Entity { + let gen = v + .alloc + .generation(id) + .map(|gen| if gen.is_alive() { gen } else { gen.raised() }) + .unwrap_or_else(Generation::one); + Entity(id, gen) + } +} /// An entity builder from `EntitiesRes`. Allows building an entity with its /// components if you have mutable access to the component storages. @@ -388,6 +441,7 @@ impl fmt::Debug for Generation { impl Generation { pub(crate) fn one() -> Self { + // SAFETY: `1` is not zero. Generation(unsafe { NonZeroI32::new_unchecked(1) }) } @@ -415,6 +469,10 @@ impl Generation { /// Panics if it is alive. fn raised(self) -> Generation { assert!(!self.is_alive()); + // SAFETY: Since `self` is not alive, `self.id()` will be negative so + // subtracting it from `1` will give us a value `>= 2`. If this + // overflows it will at most wrap to `i32::MIN + 1` (so it will never be + // zero). 
unsafe { Generation(NonZeroI32::new_unchecked(1 - self.id())) } } } @@ -455,6 +513,8 @@ impl ZeroableGeneration { fn raised(self) -> Generation { assert!(!self.is_alive()); let gen = 1i32.checked_sub(self.id()).expect("generation overflow"); + // SAFETY: Since `self` is not alive, `self.id()` will be negative so + // subtracting it from `1` will give us a value `>= 2`. Generation(unsafe { NonZeroI32::new_unchecked(gen) }) } diff --git a/src/world/world_ext.rs b/src/world/world_ext.rs index 2d11ac73e..df44fa8be 100644 --- a/src/world/world_ext.rs +++ b/src/world/world_ext.rs @@ -320,7 +320,7 @@ impl WorldExt for World { self.entry() .or_insert_with(move || MaskedStorage::::new(storage())); self.fetch_mut::>() - .register(&*self.fetch::>()); + .register::>(); } fn add_resource(&mut self, res: T) { @@ -414,8 +414,8 @@ impl WorldExt for World { } fn delete_components(&mut self, delete: &[Entity]) { - for storage in self.fetch_mut::>().iter_mut(self) { - storage.drop(delete); + for mut storage in self.fetch_mut::>().iter_mut(self) { + (*storage).drop(delete); } } } diff --git a/tests/saveload.rs b/tests/saveload.rs index 26e717486..1623f64fc 100644 --- a/tests/saveload.rs +++ b/tests/saveload.rs @@ -56,6 +56,7 @@ mod tests { struct TupleSerdeType(u32); #[derive(Clone)] + #[allow(dead_code)] struct UnserializableType { inner: u32, } @@ -67,6 +68,7 @@ mod tests { } #[derive(Serialize, Deserialize, Clone)] + #[allow(dead_code)] struct ComplexSerdeType { #[serde(skip, default)] opaque: UnserializableType, diff --git a/tests/tests.rs b/tests/tests.rs index 4cbd17ca5..2c08f12dd 100644 --- a/tests/tests.rs +++ b/tests/tests.rs @@ -4,6 +4,9 @@ use specs::{ world::{Builder, WorldExt}, }; +// Make tests finish in reasonable time with miri +const ITERATIONS: u32 = if cfg!(miri) { 20 } else { 1000 }; + #[derive(Clone, Debug, PartialEq)] struct CompInt(i8); @@ -97,7 +100,7 @@ fn dynamic_create() { let mut world = create_world(); let mut dispatcher = DispatcherBuilder::new().with(Sys, "s", &[]).build(); - for _ in 0..1_000 { + for _ in 0..ITERATIONS { dispatcher.dispatch(&mut world); } } @@ -118,7 +121,7 @@ fn dynamic_deletion() { let mut world = create_world(); let mut dispatcher = DispatcherBuilder::new().with(Sys, "s", &[]).build(); - for _ in 0..1_000 { + for _ in 0..ITERATIONS { dispatcher.dispatch(&mut world); } } @@ -550,7 +553,7 @@ fn par_join_many_entities_and_systems() { } #[test] -fn getting_specific_entity_with_join() { +fn getting_specific_entity_with_lend_join() { let mut world = create_world(); world .create_entity() @@ -565,12 +568,16 @@ fn getting_specific_entity_with_join() { assert_eq!( Some((&CompInt(1), &mut CompBool(true))), - (&ints, &mut bools).join().get(entity, &world.entities()) + (&ints, &mut bools) + .lend_join() + .get(entity, &world.entities()) ); bools.remove(entity); assert_eq!( None, - (&ints, &mut bools).join().get(entity, &world.entities()) + (&ints, &mut bools) + .lend_join() + .get(entity, &world.entities()) ); entity }; @@ -584,7 +591,9 @@ fn getting_specific_entity_with_join() { let mut bools = world.write_storage::(); assert_eq!( None, - (&ints, &mut bools).join().get(entity, &world.entities()) + (&ints, &mut bools) + .lend_join() + .get(entity, &world.entities()) ); }
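Notes — illustrative sketches, not part of the patch:

The new `ITERATIONS` constants in `src/storage/tests.rs` and `tests/tests.rs`
work because `cfg!(miri)` expands to a plain compile-time `bool`, and `const`
initializers may branch on one. A minimal sketch of the pattern:

    // Resolved at compile time; the untaken arm is dead code for that build.
    const ITERATIONS: u32 = if cfg!(miri) { 100 } else { 1000 };

    #[test]
    fn storage_roundtrip() {
        for _ in 0..ITERATIONS {
            // exercise the storage; Miri runs 100 iterations, normal builds 1000
        }
    }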
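In the updated `src/storage/tests.rs`, `get_mut` no longer hands back a bare
`&mut T`: the tests reach the component through an extra `.access_mut()` call
on the returned guard (the trailing `.as_mut()` is the test component's own
`AsMut` impl, not part of the storage API). A sketch of the access pattern as
it appears in those tests; `storage` and `entity` are assumed bindings for a
write storage and a live entity, and the relevant access trait may need to be
in scope:

    if let Some(mut guard) = storage.get_mut(entity) {
        // `access_mut()` yields the `&mut` component behind the guard.
        *guard.access_mut().as_mut() -= 718;
    }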
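`par_restrict_mut` is removed; the one `restrict_mut()` storage now drives
both `join()` and, under the "parallel" feature, `par_join()`, and the
per-entity accessors lose their `_unchecked` suffixes. The shape of the
updated tests, with `storage` again an assumed write-storage binding:

    // Sequential: `get` and `get_mut` are scoped to the current entity.
    for mut comps in (&mut storage.restrict_mut()).join() {
        let c1 = comps.get().0;
        let c2 = comps.get_mut().0;
        assert_eq!(c1, c2);
    }

    // Parallel: the same restricted storage joins across threads.
    (&mut storage.restrict_mut()).par_join().for_each(|mut comps| {
        let _ = comps.get().0;
    });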
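The `entries` test switches from `join()` to `lend_join()`. A lending join
hands out items that borrow from the join itself, so it cannot implement
`Iterator`, and iterator adapters such as `fold` are unavailable; the
accumulation therefore moves into a `for_each` closure. Roughly, assuming
`entities` and `storage` bindings as in the test:

    let mut sum = 0;
    (&entities, storage.entries()).lend_join().for_each(|(_, entry)| {
        // `or_insert` fills a missing slot with a default, like the map entry API.
        let value = entry.or_insert(2.into());
        sum += value.0;
    });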
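The new SAFETY comments on `Generation::raised` and
`ZeroableGeneration::raised` rest on one arithmetic fact: a dead generation
stores a negative id, so `1 - id` is at least 2 and never zero. A worked
example in plain integers (illustration only, not the library's API):

    // An entity that died at generation 1 stores -1; raising yields the next
    // live generation, 2. For any negative `id`, `1 - id >= 2`, so wrapping
    // the result in `NonZeroI32` is sound.
    let dead_id: i32 = -1;
    let raised = 1 - dead_id;
    assert_eq!(raised, 2);
    assert!(std::num::NonZeroI32::new(raised).is_some());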
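Finally, single-entity lookup moves from `join().get(...)` to
`lend_join().get(...)` (see `getting_specific_entity_with_lend_join` in
`tests/tests.rs`). Tuples containing `&EntitiesRes` participate because of the
`LendJoin` impl above, and the `RepeatableLendGet` marker records (per its
safety comment) that `get` tolerates repeated calls with the same id. Usage
mirroring the test, where `ints`, `bools`, `entity`, and `world` are the
test's bindings:

    let found = (&ints, &mut bools)
        .lend_join()
        .get(entity, &world.entities());
    assert_eq!(found, Some((&CompInt(1), &mut CompBool(true))));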