Skip to content

Commit

Permalink
unordered_map(set): correcting bug when erasing in a set
Browse files Browse the repository at this point in the history
- correct bug with unordered set erasure
- add unordered set test
- add several missing execution space arguments in UnorderedMap
- fix missing Kokkos::view_alloc in Kokkos::unique
  • Loading branch information
romintomasetti committed Jan 30, 2024
1 parent d2913cb commit 03c4417
Show file tree
Hide file tree
Showing 6 changed files with 206 additions and 42 deletions.
58 changes: 34 additions & 24 deletions containers/src/Kokkos_UnorderedMap.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,9 @@ class UnorderedMap {
static constexpr bool is_modifiable_map = has_const_key && !has_const_value;
static constexpr bool is_const_map = has_const_key && has_const_value;

static constexpr size_type invalid_index =
KOKKOS_INVALID_INDEX_TYPE(size_type);

using insert_result = UnorderedMapInsertResult;

using HostMirror =
Expand All @@ -263,8 +266,6 @@ class UnorderedMap {
//@}

private:
enum : size_type { invalid_index = ~static_cast<size_type>(0) };

using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;

using key_type_view = std::conditional_t<
Expand Down Expand Up @@ -413,23 +414,33 @@ class UnorderedMap {
/// This is <i>not</i> a device function; it may <i>not</i> be
/// called in a parallel kernel.
bool rehash(size_type requested_capacity = 0) {
  // Convenience overload: run the rehash on a default-constructed
  // execution space instance. Host-only; must not be called in a kernel.
  execution_space exec{};
  return rehash(exec, requested_capacity);
}

bool rehash(const execution_space &space, size_type requested_capacity = 0) {
const bool bounded_insert = (capacity() == 0) || (size() == 0u);
return rehash(requested_capacity, bounded_insert);
return rehash(space, requested_capacity, bounded_insert);
}

bool rehash(size_type requested_capacity, bool bounded_insert) {
  // Convenience overload: forward to the execution-space-aware version,
  // using a default-constructed execution space instance.
  execution_space exec{};
  return rehash(exec, requested_capacity, bounded_insert);
}

bool rehash(const execution_space &space, size_type requested_capacity,
bool bounded_insert) {
if (!is_insertable_map) return false;

const size_type curr_size = size();
requested_capacity =
(requested_capacity < curr_size) ? curr_size : requested_capacity;

insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);
insertable_map_type tmp(Kokkos::view_alloc(space), requested_capacity,
m_hasher, m_equal_to);

if (curr_size) {
tmp.m_bounded_insert = false;
Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *this);
f.apply();
Impl::UnorderedMapRehash<insertable_map_type> f{tmp, *this};
f.apply(space);
}
tmp.m_bounded_insert = bounded_insert;

Expand Down Expand Up @@ -465,26 +476,24 @@ class UnorderedMap {
return is_insertable_map ? get_flag(erasable_idx) : false;
}

bool begin_erase() {
bool begin_erase(const execution_space &space = execution_space{}) {
bool result = !erasable();
if (is_insertable_map && result) {
execution_space().fence(
space.fence(
"Kokkos::UnorderedMap::begin_erase: fence before setting erasable "
"flag");
set_flag(erasable_idx);
}
return result;
}

bool end_erase() {
bool end_erase(const execution_space &space = execution_space{}) {
bool result = erasable();
if (is_insertable_map && result) {
execution_space().fence(
"Kokkos::UnorderedMap::end_erase: fence before erasing");
Impl::UnorderedMapErase<declared_map_type> f(*this);
f.apply();
execution_space().fence(
"Kokkos::UnorderedMap::end_erase: fence after erasing");
space.fence("Kokkos::UnorderedMap::end_erase: fence before erasing");
Impl::UnorderedMapErase<declared_map_type> f{*this};
f.apply(space);
space.fence("Kokkos::UnorderedMap::end_erase: fence after erasing");
reset_flag(erasable_idx);
}
return result;
Expand Down Expand Up @@ -526,8 +535,9 @@ class UnorderedMap {
/// Kokkos::UnorderedMapInsertOpTypes for more ops.
template <typename InsertOpType = default_op_type>
KOKKOS_INLINE_FUNCTION insert_result
insert(key_type const &k, impl_value_type const &v = impl_value_type(),
[[maybe_unused]] InsertOpType arg_insert_op = InsertOpType()) const {
insert(key_type const &key,
[[maybe_unused]] impl_value_type const &value = impl_value_type(),
[[maybe_unused]] InsertOpType arg_insert_op = InsertOpType()) const {
if constexpr (is_set) {
static_assert(std::is_same_v<InsertOpType, default_op_type>,
"Insert Operations are not supported on sets.");
Expand All @@ -546,7 +556,7 @@ class UnorderedMap {

int volatile &failed_insert_ref = m_scalars((int)failed_insert_idx);

const size_type hash_value = m_hasher(k);
const size_type hash_value = m_hasher(key);
const size_type hash_list = hash_value % m_hash_lists.extent(0);

size_type *curr_ptr = &m_hash_lists[hash_list];
Expand Down Expand Up @@ -594,7 +604,7 @@ class UnorderedMap {
volatile_load(&m_keys[curr])
#endif
,
k)) {
key)) {
result.increment_list_position();
index_hint = curr;
curr_ptr = &m_next_index[curr];
Expand All @@ -621,7 +631,7 @@ class UnorderedMap {

result.set_existing(curr, free_existing);
if constexpr (!is_set) {
arg_insert_op.op(m_values, curr, v);
arg_insert_op.op(m_values, curr, value);
}
not_done = false;
}
Expand Down Expand Up @@ -649,15 +659,15 @@ class UnorderedMap {
#ifdef KOKKOS_ENABLE_SYCL
Kokkos::atomic_store(&m_keys[new_index], k);
#else
m_keys[new_index] = k;
m_keys[new_index] = key;
#endif

if (!is_set) {
if constexpr (!is_set) {
KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
#ifdef KOKKOS_ENABLE_SYCL
Kokkos::atomic_store(&m_values[new_index], v);
#else
m_values[new_index] = v;
m_values[new_index] = value;
#endif
}

Expand Down Expand Up @@ -844,7 +854,7 @@ class UnorderedMap {
sizeof(size_type) * src.m_next_index.extent(0));
raw_deep_copy(tmp.m_keys.data(), src.m_keys.data(),
sizeof(key_type) * src.m_keys.extent(0));
if (!is_set) {
if constexpr (!is_set) {
raw_deep_copy(tmp.m_values.data(), src.m_values.data(),
sizeof(impl_value_type) * src.m_values.extent(0));
}
Expand Down
31 changes: 14 additions & 17 deletions containers/src/impl/Kokkos_UnorderedMap_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,16 +47,15 @@ struct UnorderedMapRehash {
using const_map_type = typename map_type::const_map_type;
using execution_space = typename map_type::execution_space;
using size_type = typename map_type::size_type;
using policy_type =
Kokkos::RangePolicy<execution_space, IndexType<size_type>>;

map_type m_dst;
const_map_type m_src;

UnorderedMapRehash(map_type const& dst, const_map_type const& src)
: m_dst(dst), m_src(src) {}

void apply() const {
parallel_for("Kokkos::Impl::UnorderedMapRehash::apply", m_src.capacity(),
*this);
void apply(const execution_space& space = execution_space{}) const {
  // Launch one work item per slot of the source map's capacity; each
  // item re-inserts its entry into the destination map (see operator()).
  const policy_type range(space, 0, m_src.capacity());
  parallel_for("Kokkos::Impl::UnorderedMapRehash::apply", range, *this);
}

KOKKOS_INLINE_FUNCTION
Expand All @@ -76,19 +75,19 @@ struct UnorderedMapErase {
using size_type = typename map_type::size_type;
using key_type = typename map_type::key_type;
using value_type = typename map_type::impl_value_type;
using policy_type =
Kokkos::RangePolicy<execution_space, IndexType<size_type>>;

map_type m_map;

UnorderedMapErase(map_type const& map) : m_map(map) {}

void apply() const {
void apply(const execution_space& space = execution_space{}) const {
parallel_for("Kokkos::Impl::UnorderedMapErase::apply",
m_map.m_hash_lists.extent(0), *this);
policy_type(space, 0, m_map.m_hash_lists.extent(0)), *this);
}

KOKKOS_INLINE_FUNCTION
void operator()(size_type i) const {
const size_type invalid_index = map_type::invalid_index;
constexpr size_type invalid_index = map_type::invalid_index;

size_type curr = m_map.m_hash_lists(i);
size_type next = invalid_index;
Expand All @@ -98,7 +97,7 @@ struct UnorderedMapErase {
next = m_map.m_next_index[curr];
m_map.m_next_index[curr] = invalid_index;
m_map.m_keys[curr] = key_type();
if (m_map.is_set) m_map.m_values[curr] = value_type();
if constexpr (!map_type::is_set) m_map.m_values[curr] = value_type();
curr = next;
m_map.m_hash_lists(i) = next;
}
Expand All @@ -117,7 +116,7 @@ struct UnorderedMapErase {
m_map.m_next_index[prev] = next;
m_map.m_next_index[curr] = invalid_index;
m_map.m_keys[curr] = key_type();
if (map_type::is_set) m_map.m_values[curr] = value_type();
if constexpr (!map_type::is_set) m_map.m_values[curr] = value_type();
}
curr = next;
}
Expand Down Expand Up @@ -188,7 +187,7 @@ struct UnorderedMapHistogram {

KOKKOS_INLINE_FUNCTION
void operator()(size_type i) const {
const size_type invalid_index = map_type::invalid_index;
constexpr size_type invalid_index = map_type::invalid_index;

uint32_t length = 0;
size_type min_index = ~0u, max_index = 0;
Expand Down Expand Up @@ -223,16 +222,14 @@ struct UnorderedMapPrint {

map_type m_map;

UnorderedMapPrint(map_type const& map) : m_map(map) {}

void apply() {
parallel_for("Kokkos::Impl::UnorderedMapPrint::apply",
m_map.m_hash_lists.extent(0), *this);
}

KOKKOS_INLINE_FUNCTION
void operator()(size_type i) const {
const size_type invalid_index = map_type::invalid_index;
constexpr size_type invalid_index = map_type::invalid_index;

uint32_t list = m_map.m_hash_lists(i);
for (size_type curr = list, ii = 0; curr != invalid_index;
Expand Down
1 change: 1 addition & 0 deletions containers/unit_tests/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ foreach(Tag Threads;Serial;OpenMP;HPX;Cuda;HIP;SYCL)
StaticCrsGraph
WithoutInitializing
UnorderedMap
UnorderedSet
Vector
ViewCtorPropEmbeddedDim
)
Expand Down
2 changes: 1 addition & 1 deletion containers/unit_tests/TestUnorderedMap.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ void test_insert(uint32_t num_nodes, uint32_t num_inserts,

const bool print_list = false;
if (print_list) {
Kokkos::Impl::UnorderedMapPrint<map_type> f(map);
Kokkos::Impl::UnorderedMapPrint<map_type> f{map};
f.apply();
}

Expand Down
Loading

0 comments on commit 03c4417

Please sign in to comment.