Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

UCT/CUDA_COPY: add multi-device support in cuda_copy #9645

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 56 additions & 21 deletions src/uct/cuda/cuda_copy/cuda_copy_ep.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,22 +55,28 @@ ucs_status_t uct_cuda_copy_init_stream(CUstream *stream)
}

/**
 * Return the lazily-initialized CUDA stream used for copies between the
 * given (src, dst) memory-type pair on this per-context resource set.
 *
 * No locking is needed here: UCT is not thread-safe by contract, and UCP
 * serializes operations with a global lock, so concurrent initialization
 * of the same stream cannot occur.
 *
 * @param ctx_rsc   Per-CUDA-context resources holding the stream table.
 * @param src_type  Source memory type (must be < UCS_MEMORY_TYPE_LAST).
 * @param dst_type  Destination memory type (must be < UCS_MEMORY_TYPE_LAST).
 *
 * @return Pointer to the initialized stream, or NULL if stream creation
 *         failed.
 */
static UCS_F_ALWAYS_INLINE CUstream *
uct_cuda_copy_get_stream(uct_cuda_copy_per_ctx_rsc_t *ctx_rsc,
                         ucs_memory_type_t src_type, ucs_memory_type_t dst_type)
{
    CUstream *stream;
    ucs_status_t status;

    ucs_assert((src_type < UCS_MEMORY_TYPE_LAST) &&
               (dst_type < UCS_MEMORY_TYPE_LAST));

    stream = &ctx_rsc->queue_desc[src_type][dst_type].stream;

    /* Lazily create the stream on first use for this memory-type pair */
    status = uct_cuda_copy_init_stream(stream);
    if (status != UCS_OK) {
        return NULL;
    }

    return stream;
}

Expand All @@ -97,6 +103,38 @@ uct_cuda_copy_get_mem_type(uct_md_h md, void *address, size_t length)
return mem_info.type;
}

static inline
ucs_status_t uct_cuda_copy_get_ctx_rsc(uct_cuda_copy_iface_t *iface,
uct_cuda_copy_per_ctx_rsc_t **ctx_rsc)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
static inline
ucs_status_t uct_cuda_copy_get_ctx_rsc(uct_cuda_copy_iface_t *iface,
uct_cuda_copy_per_ctx_rsc_t **ctx_rsc)
static UCS_F_ALWAYS_INLINE ucs_status_t
uct_cuda_copy_get_ctx_rsc(uct_cuda_copy_iface_t *iface,
uct_cuda_copy_per_ctx_rsc_t **ctx_rsc)

{
CUcontext current_ctx;
ucs_status_t status;

status = UCT_CUDADRV_FUNC_LOG_ERR(cuCtxGetCurrent(&current_ctx));
if (status != UCS_OK) {
return status;
} else if (current_ctx == NULL) {
ucs_error("attempt to perform cuda memcpy without active context");
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this log message can be confusing since the function can be called by different callers. It is better to print the following message: there is no cuda context bound to the calling cpu thread.

return UCS_ERR_IO_ERROR;
}

return uct_cuda_copy_get_per_ctx_rscs(iface, current_ctx, ctx_rsc);
}

static inline
ucs_status_t uct_cuda_copy_get_short_stream(uct_cuda_copy_iface_t *iface,
uct_cuda_copy_per_ctx_rsc_t **ctx_rsc)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

uct_cuda_copy_get_short_stream should have CUstream *stream parameter, and uct_cuda_copy_per_ctx_rsc_t *ctx_rsc local variable.

Suggested change
static inline
ucs_status_t uct_cuda_copy_get_short_stream(uct_cuda_copy_iface_t *iface,
uct_cuda_copy_per_ctx_rsc_t **ctx_rsc)
static UCS_F_ALWAYS_INLINE ucs_status_t
uct_cuda_copy_get_short_stream(uct_cuda_copy_iface_t *iface, CUstream *stream)

{
ucs_status_t status;

status = uct_cuda_copy_get_ctx_rsc(iface, ctx_rsc);
if (status != UCS_OK) {
return status;
}

return uct_cuda_copy_init_stream(&(*ctx_rsc)->short_stream);
}

static UCS_F_ALWAYS_INLINE ucs_status_t
uct_cuda_copy_post_cuda_async_copy(uct_ep_h tl_ep, void *dst, void *src,
size_t length, uct_completion_t *comp)
Expand All @@ -110,33 +148,30 @@ uct_cuda_copy_post_cuda_async_copy(uct_ep_h tl_ep, void *dst, void *src,
ucs_memory_type_t dst_type;
CUstream *stream;
ucs_queue_head_t *event_q;
uct_cuda_copy_per_ctx_rsc_t *ctx_rsc;

if (!length) {
return UCS_OK;
}

/* ensure context is set before creating events/streams */
if (iface->cuda_context == NULL) {
UCT_CUDADRV_FUNC_LOG_ERR(cuCtxGetCurrent(&iface->cuda_context));
if (iface->cuda_context == NULL) {
ucs_error("attempt to perform cuda memcpy without active context");
return UCS_ERR_IO_ERROR;
}
status = uct_cuda_copy_get_ctx_rsc(iface, &ctx_rsc);
if (status != UCS_OK) {
return status;
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This block of code (lines 128-137) is repeated in put_short and get_short as well. Maybe put it in a function or macro?


src_type = uct_cuda_copy_get_mem_type(base_iface->md, src, length);
dst_type = uct_cuda_copy_get_mem_type(base_iface->md, dst, length);
q_desc = &iface->queue_desc[src_type][dst_type];
q_desc = &ctx_rsc->queue_desc[src_type][dst_type];
event_q = &q_desc->event_queue;
stream = uct_cuda_copy_get_stream(iface, src_type, dst_type);
stream = uct_cuda_copy_get_stream(ctx_rsc, src_type, dst_type);
if (stream == NULL) {
ucs_error("stream for src %s dst %s not available",
ucs_memory_type_names[src_type],
ucs_memory_type_names[dst_type]);
return UCS_ERR_IO_ERROR;
}

cuda_event = ucs_mpool_get(&iface->cuda_event_desc);
cuda_event = ucs_mpool_get(&ctx_rsc->cuda_event_desc);
if (ucs_unlikely(cuda_event == NULL)) {
ucs_error("Failed to allocate cuda event object");
return UCS_ERR_NO_MEMORY;
Expand Down Expand Up @@ -215,18 +250,18 @@ UCS_PROFILE_FUNC(ucs_status_t, uct_cuda_copy_ep_put_short,
uint64_t remote_addr, uct_rkey_t rkey)
{
uct_cuda_copy_iface_t *iface = ucs_derived_of(tl_ep->iface, uct_cuda_copy_iface_t);
CUstream *stream = &iface->short_stream;
ucs_status_t status;
uct_cuda_copy_per_ctx_rsc_t *ctx_rsc;

status = uct_cuda_copy_init_stream(stream);
status = uct_cuda_copy_get_short_stream(iface, &ctx_rsc);
if (status != UCS_OK) {
return status;
}

UCT_CUDADRV_FUNC_LOG_ERR(cuMemcpyAsync((CUdeviceptr)remote_addr,
(CUdeviceptr)buffer, length,
*stream));
status = UCT_CUDADRV_FUNC_LOG_ERR(cuStreamSynchronize(*stream));
ctx_rsc->short_stream));
status = UCT_CUDADRV_FUNC_LOG_ERR(cuStreamSynchronize(ctx_rsc->short_stream));

UCT_TL_EP_STAT_OP(ucs_derived_of(tl_ep, uct_base_ep_t), PUT, SHORT, length);
ucs_trace_data("PUT_SHORT size %d from %p to %p",
Expand All @@ -240,18 +275,18 @@ UCS_PROFILE_FUNC(ucs_status_t, uct_cuda_copy_ep_get_short,
uint64_t remote_addr, uct_rkey_t rkey)
{
uct_cuda_copy_iface_t *iface = ucs_derived_of(tl_ep->iface, uct_cuda_copy_iface_t);
CUstream *stream = &iface->short_stream;
ucs_status_t status;
uct_cuda_copy_per_ctx_rsc_t *ctx_rsc;

status = uct_cuda_copy_init_stream(stream);
status = uct_cuda_copy_get_short_stream(iface, &ctx_rsc);
if (status != UCS_OK) {
return status;
}

UCT_CUDADRV_FUNC_LOG_ERR(cuMemcpyAsync((CUdeviceptr)buffer,
(CUdeviceptr)remote_addr, length,
*stream));
status = UCT_CUDADRV_FUNC_LOG_ERR(cuStreamSynchronize(*stream));
ctx_rsc->short_stream));
status = UCT_CUDADRV_FUNC_LOG_ERR(cuStreamSynchronize(ctx_rsc->short_stream));

UCT_TL_EP_STAT_OP(ucs_derived_of(tl_ep, uct_base_ep_t), GET, SHORT, length);
ucs_trace_data("GET_SHORT size %d from %p to %p",
Expand Down
Loading
Loading