#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#if defined(CONFIG_USERSPACE)
#define ARCH_STACK_PTR_ALIGN 16

#define Z_RISCV_STACK_PMP_ALIGN \
	MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)

#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \
	Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
		    Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
#else
#define Z_RISCV_STACK_GUARD_SIZE \
	ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
#endif

#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE

#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif /* CONFIG_PMP_STACK_GUARD */
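/*
 * Illustration only, not part of the original header: a host-side sketch of
 * how the two guard-sizing strategies above behave. round_up() and
 * pow2_ceil() mirror Zephyr's ROUND_UP()/Z_POW2_CEIL(); the numeric inputs
 * standing in for sizeof(struct arch_esf), CONFIG_PMP_STACK_GUARD_MIN_SIZE
 * and CONFIG_PMP_GRANULARITY are hypothetical build-time values.
 */
#include <stdio.h>

static unsigned long round_up(unsigned long x, unsigned long align)
{
	return ((x + align - 1) / align) * align;   /* next multiple of align >= x */
}

static unsigned long pow2_ceil(unsigned long x)
{
	unsigned long p = 1;

	while (p < x) {
		p <<= 1;                            /* smallest power of two >= x */
	}
	return p;
}

int main(void)
{
	unsigned long esf_size = 144;   /* hypothetical sizeof(struct arch_esf) */
	unsigned long min_guard = 64;   /* hypothetical CONFIG_PMP_STACK_GUARD_MIN_SIZE */
	unsigned long pmp_align = 16;   /* MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN) */

	/* power-of-two sizing lets a single NAPOT-style PMP entry cover the guard */
	printf("POW2 guard size:     %lu\n", pow2_ceil(esf_size + min_guard));
	/* otherwise the guard only needs rounding up to the PMP granularity */
	printf("ROUND_UP guard size: %lu\n", round_up(esf_size + min_guard, pmp_align));
	return 0;
}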
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
		    Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)
#else
#define ARCH_THREAD_STACK_RESERVED \
	ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_STACK_PMP_ALIGN
#endif
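/*
 * Illustration only, not part of the original header: how the adjust/reserved
 * macros above surface to application code. K_THREAD_STACK_DEFINE() folds
 * ARCH_THREAD_STACK_SIZE_ADJUST() and ARCH_THREAD_STACK_RESERVED into the
 * declared object, so the PMP guard and (with userspace) the privileged-mode
 * stack get their own room. The stack size, priority and thread body here are
 * hypothetical.
 */
#include <zephyr/kernel.h>

#define WORKER_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(worker_stack, WORKER_STACK_SIZE);
static struct k_thread worker_thread;

static void worker_entry(void *p1, void *p2, void *p3)
{
	/* hypothetical thread body */
}

void start_worker(void)
{
	/* K_THREAD_STACK_SIZEOF() is the conventional way to pass the size here */
	k_thread_create(&worker_thread, worker_stack,
			K_THREAD_STACK_SIZEOF(worker_stack),
			worker_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
}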
#define MSTATUS_IEN     (1UL << 3)
#define MSTATUS_MPP_M   (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)

#define MSTATUS_FS_OFF   (0UL << 13)
#define MSTATUS_FS_INIT  (1UL << 13)
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)

#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
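/*
 * Illustration only, not part of the original header: how the mstatus bits
 * above are typically combined and tested. The helper names and the
 * MSTATUS_FS_MASK macro are hypothetical (FS is the 2-bit field at bits
 * 14:13); <stdbool.h> is assumed available from the header's include set.
 */
#define MSTATUS_FS_MASK (3UL << 13)

static inline bool mstatus_irqs_were_enabled(unsigned long mstatus)
{
	/* MIE (bit 3): machine-mode global interrupt enable */
	return (mstatus & MSTATUS_IEN) != 0;
}

static inline bool mstatus_fpu_needs_save(unsigned long mstatus)
{
	/* FS == DIRTY: FPU registers changed since they were last saved */
	return (mstatus & MSTATUS_FS_MASK) == MSTATUS_FS_DIRTY;
}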
#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif
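/*
 * Illustration only: with jump-by-code vectoring each vector-table slot is a
 * literal jump instruction rather than a function pointer. For a hypothetical
 * handler symbol the macro expands via STRINGIFY() as
 *
 *   ARCH_IRQ_VECTOR_JUMP_CODE(my_isr_wrapper)
 *       -> "j " "my_isr_wrapper"      (adjacent string literals)
 *       -> "j my_isr_wrapper"
 *
 * i.e. text that generated vector-table assembly can emit verbatim.
 */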
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
	{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
	{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
	{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
	{PMP_R | PMP_X})
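/*
 * Illustration only, not part of the original header: a typical consumer of
 * the attribute macros above. The buffer, its 16-byte alignment, the domain
 * and the function are hypothetical; real partitions must also satisfy the
 * PMP granularity and alignment rules checked by the kernel.
 */
#include <zephyr/kernel.h>

static uint8_t __aligned(16) shared_buf[256];
static struct k_mem_partition shared_part;
static struct k_mem_domain app_domain;

void grant_user_access(struct k_thread *user_thread)
{
	struct k_mem_partition *parts[] = { &shared_part };

	shared_part.start = (uintptr_t)shared_buf;
	shared_part.size = sizeof(shared_buf);
	/* user threads in the domain get read/write access to the buffer */
	shared_part.attr = K_MEM_PARTITION_P_RW_U_RW;

	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
	k_mem_domain_add_thread(&app_domain, user_thread);
}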
extern void z_irq_spurious(const void *unused);

/* arch_irq_lock() fragment: mask interrupts, return the previous mstatus as the key */
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_lock();
#else
	unsigned int key;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (key)
			  : "rK" (MSTATUS_IEN)
			  : "memory");
	return key;
#endif
/* arch_irq_unlock() fragment: restore the interrupt state saved in key */
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	z_soc_irq_unlock(key);
#else
	__asm__ volatile ("csrs mstatus, %0"
			  :
			  : "r" (key & MSTATUS_IEN)
			  : "memory");
#endif
/* arch_irq_unlocked() fragment: true if the saved key had interrupts enabled */
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_unlocked(key);
#else
	return (key & MSTATUS_IEN) != 0;
#endif

/* arch_nop() fragment */
	__asm__ volatile ("nop");
#if defined(CONFIG_RISCV_PRIVILEGED)