Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions posix/include/rtos/kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@

#include <rtos/wait.h>

#include <stdbool.h>
#include <stdint.h>

#ifdef __ZEPHYR__
Expand All @@ -28,6 +29,13 @@ typedef struct {

#define Z_TIMEOUT_MS(t) ((k_timeout_t) { .ticks = clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, t) })

/* Forward declaration only: the POSIX build never dereferences the thread. */
struct k_thread;

/**
 * @brief Report whether @p thread runs in userspace.
 *
 * POSIX (host/testbench) builds have no userspace/kernel split, so this
 * stub unconditionally returns false. It mirrors the Zephyr variant in
 * zephyr/include/rtos/kernel.h so common code can call it portably.
 *
 * @param thread Thread to query (ignored in this build).
 * @return Always false on POSIX.
 */
static inline bool thread_is_userspace(struct k_thread *thread)
{
	(void)thread; /* silence -Wunused-parameter; stub ignores its argument */
	return false;
}
Comment on lines +32 to +37
Copy link

Copilot AI Apr 17, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

thread_is_userspace() introduces the bool type in this header, but the file does not include <stdbool.h>, which will break compilation for any TU that includes this header. Also, the thread parameter is unused and may trigger -Wunused-parameter when the function is referenced; add (void)thread; (or an existing unused-arg macro) to silence warnings.

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fixed


static inline void k_sleep(k_timeout_t timeout)
{
wait_delay(timeout.ticks);
Expand Down
7 changes: 7 additions & 0 deletions zephyr/include/rtos/kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,11 @@

#include <zephyr/kernel.h>

#include <stdbool.h>

/**
 * @brief Report whether @p thread runs in userspace.
 *
 * A Zephyr thread is a userspace thread when the K_USER option bit is set
 * in its base thread options.
 *
 * @param thread Thread to query.
 * @return true if the thread was created with K_USER, false otherwise.
 */
static inline bool thread_is_userspace(struct k_thread *thread)
{
	return (thread->base.user_options & K_USER) != 0;
}

#endif /* __ZEPHYR_RTOS_KERNEL_H__ */
44 changes: 18 additions & 26 deletions zephyr/lib/fast-get.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,6 @@
struct sof_fast_get_entry {
const void *dram_ptr;
void *sram_ptr;
#if CONFIG_USERSPACE
struct k_mem_domain *mdom;
#endif
size_t size;
unsigned int refcount;
};
Expand Down Expand Up @@ -103,10 +100,8 @@ static struct sof_fast_get_entry *fast_get_find_entry(struct sof_fast_get_data *
#endif

#if CONFIG_USERSPACE
static bool fast_get_partition_exists(struct k_thread *thread, void *start, size_t size)
static bool fast_get_partition_exists(struct k_mem_domain *domain, void *start, size_t size)
{
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

for (unsigned int i = 0; i < domain->num_partitions; i++) {
struct k_mem_partition *dpart = &domain->partitions[i];

Expand All @@ -132,6 +127,9 @@ static int fast_get_access_grant(struct k_mem_domain *mdom, void *addr, size_t s

const void *fast_get(struct k_heap *heap, const void *dram_ptr, size_t size)
{
#if CONFIG_USERSPACE
bool current_is_userspace = thread_is_userspace(k_current_get());
#endif
struct sof_fast_get_data *data = &fast_get_data;
uint32_t alloc_flags = SOF_MEM_FLAG_USER;
struct sof_fast_get_entry *entry;
Expand Down Expand Up @@ -167,8 +165,8 @@ const void *fast_get(struct k_heap *heap, const void *dram_ptr, size_t size)
} while (!entry);

#if CONFIG_USERSPACE
LOG_DBG("userspace %u part %#zx bytes alloc %p entry %p DRAM %p",
k_current_get()->mem_domain_info.mem_domain->num_partitions, size,
LOG_DBG("%s: %#zx bytes alloc %p entry %p DRAM %p",
current_is_userspace ? "userspace" : "kernel", size,
alloc_ptr, entry->sram_ptr, dram_ptr);
#endif

Expand All @@ -189,13 +187,10 @@ const void *fast_get(struct k_heap *heap, const void *dram_ptr, size_t size)
* We only get there for large buffers, since small buffers with
* enabled userspace don't create fast-get entries
*/
if (mdom->num_partitions > 1) {
/* A userspace thread makes the request */
if (mdom != entry->mdom &&
!fast_get_partition_exists(k_current_get(), ret,
if (current_is_userspace) {
if (!fast_get_partition_exists(mdom, ret,
ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE))) {
LOG_DBG("grant access to domain %p first was %p", mdom,
entry->mdom);
LOG_DBG("grant access to domain %p", mdom);

int err = fast_get_access_grant(mdom, ret, size);

Expand Down Expand Up @@ -228,27 +223,27 @@ const void *fast_get(struct k_heap *heap, const void *dram_ptr, size_t size)
ret = sof_heap_alloc(heap, alloc_flags, alloc_size, alloc_align);
if (!ret)
goto out;
entry->size = size;
entry->sram_ptr = ret;
memcpy_s(entry->sram_ptr, entry->size, dram_ptr, size);
dcache_writeback_region((__sparse_force void __sparse_cache *)entry->sram_ptr, size);

#if CONFIG_USERSPACE
entry->mdom = k_current_get()->mem_domain_info.mem_domain;
if (size > FAST_GET_MAX_COPY_SIZE) {
if (size > FAST_GET_MAX_COPY_SIZE && current_is_userspace) {
/* Otherwise we've allocated on thread's heap, so it already has access */
int err = fast_get_access_grant(entry->mdom, ret, size);
int err = fast_get_access_grant(k_current_get()->mem_domain_info.mem_domain,
ret, size);

if (err < 0) {
LOG_ERR("failed to grant access err=%d", err);
sof_heap_free(NULL, ret);
sof_heap_free(heap, ret);
ret = NULL;
goto out;
Comment on lines +228 to 237
Copy link

Copilot AI Apr 17, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

On the fast_get_access_grant() failure path, entry->size and entry->sram_ptr have already been set, but the function returns with ret = NULL and leaves the entry partially initialized. This can cause subsequent fast_get() calls to hit entry->sram_ptr and then fail the mismatch check (or repeatedly reuse the same slot), effectively breaking fast-get until reboot. Delay assigning entry->{size,sram_ptr} until after all fallible steps succeed, or explicitly clear those fields before goto out on error.

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not introduced by this PR, but added a commit to fix that too

}
}
#endif /* CONFIG_USERSPACE */

entry->dram_ptr = dram_ptr;
entry->size = size;
entry->sram_ptr = ret;
memcpy_s(ret, alloc_size, dram_ptr, size);
dcache_writeback_region((__sparse_force void __sparse_cache *)ret, size);
entry->refcount = 1;
out:
k_spin_unlock(&data->lock, key);
Expand Down Expand Up @@ -296,11 +291,8 @@ void fast_put(struct k_heap *heap, struct k_mem_domain *mdom, const void *sram_p
* For large buffers, each thread that called fast_get() has a partition
* in its memory domain. Each thread must remove its own partition here
* to prevent partition leaks.
*
* Order matters: free buffer first (needs partition for cache access),
* then remove partition.
*/
if (entry->size > FAST_GET_MAX_COPY_SIZE && entry->mdom && mdom) {
if (entry->size > FAST_GET_MAX_COPY_SIZE && mdom) {
struct k_mem_partition part = {
.start = (uintptr_t)sram_ptr,
.size = ALIGN_UP(entry->size, CONFIG_MM_DRV_PAGE_SIZE),
Expand Down
Loading