kernel: Remove abort and join implementation (UNBISECTABLE)
THIS COMMIT DELIBERATELY BREAKS BISECTABILITY FOR EASE OF REVIEW.
SKIP IF YOU LAND HERE.

Remove the existing implementation of k_thread_abort(),
k_thread_join(), and the attendant facilities in the thread subsystem
and idle thread that support them.

Signed-off-by: Andy Ross <[email protected]>
Andy Ross authored and nashif committed Feb 24, 2021
1 parent bf99f31 commit c0c8cb0
Showing 9 changed files with 0 additions and 412 deletions.
31 changes: 0 additions & 31 deletions include/kernel/thread.h
@@ -112,14 +112,6 @@ struct _thread_base {
/* this thread's entry in a timeout queue */
struct _timeout timeout;
#endif

_wait_q_t join_waiters;
#if __ASSERT_ON
/* For detecting calls to k_thread_create() on threads that are
* already active
*/
atomic_t cookie;
#endif
};

typedef struct _thread_base _thread_base_t;
@@ -212,29 +204,6 @@ struct k_thread {
/** static thread init data */
void *init_data;

/**
* abort function
*
* This function pointer, if non-NULL, will be run once after the
* thread has completely exited. It may run in the context of:
* - the idle thread if the thread self-exited
* - another thread calling k_thread_abort()
* - a fatal exception handler on a special stack
*
* It will never run in the context of the thread itself.
*
* A pointer to the thread object that was aborted is provided. At the
* time this runs, this thread object has completely exited. It may
* be re-used with k_thread_create() or return it to a heap or slab
* pool.
*
* This function does not run with any kind of lock active and
* there is the possibility of races leading to undefined behavior
* if other threads are attempting to free or recycle this object
* concurrently.
*/
void (*fn_abort)(struct k_thread *aborted);

#if defined(CONFIG_POLL)
struct z_poller poller;
#endif
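Illustrative sketch (not part of the commit): based on the fn_abort documentation removed above, an application could register a cleanup hook roughly as follows. The names worker_stack, worker_thread, worker_entry, recycle_thread_object, and spawn_worker are hypothetical and used only for illustration.

#include <kernel.h>

K_THREAD_STACK_DEFINE(worker_stack, 1024);
static struct k_thread worker_thread;

/* Runs once after the thread has completely exited, never in the
 * aborted thread's own context; the object may then be reused.
 */
static void recycle_thread_object(struct k_thread *aborted)
{
	ARG_UNUSED(aborted);
	/* e.g. return the k_thread object to a pool or free list */
}

static void worker_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
}

void spawn_worker(void)
{
	/* Create with K_FOREVER so fn_abort is set before the thread runs */
	k_thread_create(&worker_thread, worker_stack,
			K_THREAD_STACK_SIZEOF(worker_stack),
			worker_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(7), 0, K_FOREVER);
	worker_thread.fn_abort = recycle_thread_object;
	k_thread_start(&worker_thread);
}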
3 changes: 0 additions & 3 deletions include/kernel_structs.h
@@ -109,9 +109,6 @@ struct _cpu {
/* one assigned idle thread per CPU */
struct k_thread *idle_thread;

/* If non-null, self-aborted thread that needs cleanup */
struct k_thread *pending_abort;

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
/* Coop thread preempted by current metairq, or NULL */
struct k_thread *metairq_preempted;
1 change: 0 additions & 1 deletion kernel/CMakeLists.txt
@@ -21,7 +21,6 @@ list(APPEND kernel_files
stack.c
system_work_q.c
thread.c
thread_abort.c
version.c
work_q.c
condvar.c
36 changes: 0 additions & 36 deletions kernel/idle.c
@@ -118,8 +118,6 @@ void z_pm_save_idle_exit(int32_t ticks)

void idle(void *p1, void *unused2, void *unused3)
{
struct _cpu *cpu = p1;

ARG_UNUSED(unused2);
ARG_UNUSED(unused3);

@@ -132,41 +130,7 @@ void idle(void *p1, void *unused2, void *unused3)
#endif /* CONFIG_BOOT_TIME_MEASUREMENT */

while (true) {
/* Lock interrupts to atomically check if to_abort is non-NULL,
* and if so clear it
*/
int key = arch_irq_lock();
struct k_thread *to_abort = cpu->pending_abort;

if (to_abort) {
cpu->pending_abort = NULL;
arch_irq_unlock(key);

/* Safe to unlock interrupts here. We've atomically
* checked and stashed cpu->pending_abort into a stack
* variable. If we get preempted here and another
thread aborts, cpu->pending_abort will get set
* again and we'll handle it when the loop iteration
* is continued below.
*/
LOG_DBG("idle %p aborting thread %p",
_current, to_abort);

z_thread_single_abort(to_abort);

/* We have to invoke this scheduler now. If we got
* here, the idle thread preempted everything else
* in order to abort the thread, and we now need to
* figure out what to do next, it's not necessarily
* the case that there are no other runnable threads.
*/
z_reschedule_unlocked();
continue;
}

#if SMP_FALLBACK
arch_irq_unlock(key);

k_busy_wait(100);
k_yield();
#else
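Illustrative sketch (not part of the commit): the removed idle-loop logic follows a common pattern of atomically consuming a shared pending pointer under an interrupt lock, then handling it with interrupts unlocked. A standalone, hypothetical helper showing that pattern:

#include <kernel.h>

/* Atomically take ownership of a pending thread pointer shared with
 * other contexts. Once it is stashed locally and the field is cleared,
 * interrupts may be unlocked: a later writer simply sets the field
 * again and it is picked up on the next iteration.
 */
static struct k_thread *take_pending(struct k_thread **pending)
{
	unsigned int key = irq_lock();
	struct k_thread *thread = *pending;

	if (thread != NULL) {
		*pending = NULL;
	}
	irq_unlock(key);

	return thread; /* NULL when nothing was pending */
}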
214 changes: 0 additions & 214 deletions kernel/sched.c
@@ -18,13 +18,6 @@
#include <sys/atomic.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* Maximum time between the time a self-aborting thread flags itself
* DEAD and the last read or write to its stack memory (i.e. the time
* of its next swap()). In theory this might be tuned per platform,
* but in practice this conservative value should be safe.
*/
#define THREAD_ABORT_DELAY_US 500

#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add z_priq_dumb_add
#define _priq_run_remove z_priq_dumb_remove
@@ -227,13 +220,6 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
{
struct k_thread *thread;

/* If a thread self-aborted we need the idle thread to clean it up
* before any other thread can run on this CPU
*/
if (_current_cpu->pending_abort != NULL) {
return _current_cpu->idle_thread;
}

thread = _priq_run_best(&_kernel.ready_q.runq);

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
@@ -253,16 +239,6 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
}
#endif

/* If the current thread is marked aborting, mark it
* dead so it will not be scheduled again.
*/
if (_current->base.thread_state & _THREAD_ABORTING) {
_current->base.thread_state |= _THREAD_DEAD;
#ifdef CONFIG_SMP
_current_cpu->swap_ok = true;
#endif
}

#ifndef CONFIG_SMP
/* In uniprocessor mode, we can leave the current thread in
* the queue (actually we have to, otherwise the assembly
@@ -587,121 +563,6 @@ static _wait_q_t *pended_on(struct k_thread *thread)
return thread->base.pended_on;
}

void z_thread_single_abort(struct k_thread *thread)
{
void (*fn_abort)(struct k_thread *aborted) = NULL;

__ASSERT(!(thread->base.user_options & K_ESSENTIAL),
"essential thread aborted");
__ASSERT(thread != _current || arch_is_in_isr(),
"self-abort detected");

/* Prevent any of the further logic in this function from running more
* than once
*/
k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
if ((thread->base.thread_state &
(_THREAD_ABORTING | _THREAD_DEAD)) != 0) {
LOG_DBG("Thread %p already dead or on the way out", thread);
k_spin_unlock(&sched_spinlock, key);
return;
}
thread->base.thread_state |= _THREAD_ABORTING;
k_spin_unlock(&sched_spinlock, key);

(void)z_abort_thread_timeout(thread);

if (IS_ENABLED(CONFIG_SMP)) {
z_sched_abort(thread);
}

LOCKED(&sched_spinlock) {
LOG_DBG("Cleanup aborting thread %p", thread);
struct k_thread *waiter;

if (z_is_thread_ready(thread)) {
if (z_is_thread_queued(thread)) {
dequeue_thread(&_kernel.ready_q.runq,
thread);
}
update_cache(thread == _current);
} else {
if (z_is_thread_pending(thread)) {
_priq_wait_remove(&pended_on(thread)->waitq,
thread);
z_mark_thread_as_not_pending(thread);
thread->base.pended_on = NULL;
}
}

/* Wake everybody up who was trying to join with this thread.
* A reschedule is invoked later by k_thread_abort().
*/
while ((waiter = z_waitq_head(&thread->base.join_waiters)) !=
NULL) {
(void)z_abort_thread_timeout(waiter);
_priq_wait_remove(&pended_on(waiter)->waitq, waiter);
z_mark_thread_as_not_pending(waiter);
waiter->base.pended_on = NULL;
arch_thread_return_value_set(waiter, 0);
ready_thread(waiter);
}

if (z_is_idle_thread_object(_current)) {
update_cache(1);
}

thread->base.thread_state |= _THREAD_DEAD;

/* Read this here from the thread struct now instead of
* after we unlock
*/
fn_abort = thread->fn_abort;

/* Keep inside the spinlock as these may use the contents
* of the thread object. As soon as we release this spinlock,
* the thread object could be destroyed at any time.
*/
sys_trace_thread_abort(thread);
z_thread_monitor_exit(thread);

#ifdef CONFIG_USERSPACE
/* Remove this thread from its memory domain, which takes
* it off the domain's thread list and possibly also arch-
* specific tasks.
*/
z_mem_domain_exit_thread(thread);

/* Revoke permissions on thread's ID so that it may be
* recycled
*/
z_thread_perms_all_clear(thread);

/* Clear initialized state so that this thread object may be
* re-used and triggers errors if API calls are made on it from
* user threads
*/
z_object_uninit(thread->stack_obj);
z_object_uninit(thread);
#endif
/* Kernel should never look at the thread object again past
* this point unless another thread API is called. If the
* object doesn't get corrupted, we'll catch other
* k_thread_abort()s on this object, although this is
somewhat undefined behavior. It must be safe to call
* k_thread_create() or free the object at this point.
*/
#if __ASSERT_ON
atomic_clear(&thread->base.cookie);
#endif
}

if (fn_abort != NULL) {
/* Thread object provided to be freed or recycled */
fn_abort(thread);
}
}

static void unready_thread(struct k_thread *thread)
{
if (z_is_thread_queued(thread)) {
@@ -1472,43 +1333,6 @@ void z_sched_ipi(void)
z_trace_sched_ipi();
#endif
}

void z_sched_abort(struct k_thread *thread)
{
k_spinlock_key_t key;

if (thread == _current) {
z_remove_thread_from_ready_q(thread);
return;
}

/* First broadcast an IPI to the other CPUs so they can stop
* it locally. Not all architectures support that, alas. If
* we don't have it, we need to wait for some other interrupt.
*/
#ifdef CONFIG_SCHED_IPI_SUPPORTED
arch_sched_ipi();
#endif

/* Wait for it to be flagged dead either by the CPU it was
* running on or because we caught it idle in the queue
*/
while ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
key = k_spin_lock(&sched_spinlock);
if (z_is_thread_prevented_from_running(thread)) {
__ASSERT(!z_is_thread_queued(thread), "");
thread->base.thread_state |= _THREAD_DEAD;
k_spin_unlock(&sched_spinlock, key);
} else if (z_is_thread_queued(thread)) {
dequeue_thread(&_kernel.ready_q.runq, thread);
thread->base.thread_state |= _THREAD_DEAD;
k_spin_unlock(&sched_spinlock, key);
} else {
k_spin_unlock(&sched_spinlock, key);
k_busy_wait(100);
}
}
}
#endif

#ifdef CONFIG_USERSPACE
@@ -1603,44 +1427,6 @@ int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)

#endif /* CONFIG_SCHED_CPU_MASK */

int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
k_spinlock_key_t key;
int ret;

__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

key = k_spin_lock(&sched_spinlock);

if ((thread->base.pended_on == &_current->base.join_waiters) ||
(thread == _current)) {
ret = -EDEADLK;
goto out;
}

if ((thread->base.thread_state & _THREAD_DEAD) != 0) {
ret = 0;
goto out;
}

if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
ret = -EBUSY;
goto out;
}

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = _current;
#endif
add_to_waitq_locked(_current, &thread->base.join_waiters);
add_thread_timeout(_current, timeout);

return z_swap(&sched_spinlock, key);
out:
k_spin_unlock(&sched_spinlock, key);
return ret;
}

#ifdef CONFIG_USERSPACE
/* Special case: don't oops if the thread is uninitialized. This is because
* the initialization bit does double-duty for thread objects; if false, means
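Illustrative sketch (not part of the commit): caller-side use of k_thread_join(), whose kernel-side implementation is deleted above. Per the removed code, a self-join or mutual join returns -EDEADLK, a thread that has already exited returns 0 immediately, and K_NO_WAIT returns -EBUSY rather than pending. wait_for_worker is a hypothetical helper.

#include <kernel.h>
#include <errno.h>

int wait_for_worker(struct k_thread *worker)
{
	int ret = k_thread_join(worker, K_SECONDS(1));

	switch (ret) {
	case 0:
		/* worker exited, or had already terminated */
		break;
	case -EDEADLK:
		/* joined ourselves, or two threads joined each other */
		break;
	default:
		/* did not complete within the timeout; with K_NO_WAIT the
		 * removed implementation returns -EBUSY instead of waiting
		 */
		break;
	}

	return ret;
}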
