Compare commits


No commits in common. "master" and "v1.6" have entirely different histories.
master ... v1.6

171 changed files with 1920 additions and 4384 deletions

View File

@ -1,4 +1,4 @@
# SPDX-License-Identifier: BSD-2-Clause
# SPDX-License-Identifier: GPL-2.0-only
# See here for more information about the format and editor support:
# https://editorconfig.org/

View File

@ -94,26 +94,20 @@ OPENSBI_VERSION_MINOR=`grep "define OPENSBI_VERSION_MINOR" $(include_dir)/sbi/sb
OPENSBI_VERSION_GIT=
# Detect 'git' presence before issuing 'git' commands
GIT_AVAIL := $(shell command -v git 2> /dev/null)
GIT_AVAIL=$(shell command -v git 2> /dev/null)
ifneq ($(GIT_AVAIL),)
GIT_DIR := $(shell git rev-parse --git-dir 2> /dev/null)
GIT_DIR=$(shell git rev-parse --git-dir 2> /dev/null)
ifneq ($(GIT_DIR),)
OPENSBI_VERSION_GIT := $(shell if [ -d $(GIT_DIR) ]; then git describe 2> /dev/null; fi)
OPENSBI_VERSION_GIT=$(shell if [ -d $(GIT_DIR) ]; then git describe 2> /dev/null; fi)
endif
endif
# Setup compilation commands
ifneq ($(LLVM),)
ifneq ($(filter %/,$(LLVM)),)
LLVM_PREFIX := $(LLVM)
else ifneq ($(filter -%,$(LLVM)),)
LLVM_SUFFIX := $(LLVM)
endif
CC = $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
AR = $(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX)
LD = $(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX)
OBJCOPY = $(LLVM_PREFIX)llvm-objcopy$(LLVM_SUFFIX)
CC = clang
AR = llvm-ar
LD = ld.lld
OBJCOPY = llvm-objcopy
else
ifdef CROSS_COMPILE
CC = $(CROSS_COMPILE)gcc
@ -180,11 +174,6 @@ else
USE_LD_FLAG = -fuse-ld=bfd
endif
REPRODUCIBLE ?= n
ifeq ($(REPRODUCIBLE),y)
REPRODUCIBLE_FLAGS += -ffile-prefix-map=$(src_dir)=
endif
# Check whether the linker supports creating PIEs
OPENSBI_LD_PIE := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) $(USE_LD_FLAG) -fPIE -nostdlib -Wl,-pie -x c /dev/null -o /dev/null >/dev/null 2>&1 && echo y || echo n)
@ -213,18 +202,16 @@ endif
BUILD_INFO ?= n
ifeq ($(BUILD_INFO),y)
OPENSBI_BUILD_DATE_FMT = +%Y-%m-%d %H:%M:%S %z
ifndef OPENSBI_BUILD_TIME_STAMP
ifdef SOURCE_DATE_EPOCH
OPENSBI_BUILD_TIME_STAMP := $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" \
OPENSBI_BUILD_TIME_STAMP ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" \
"$(OPENSBI_BUILD_DATE_FMT)" 2>/dev/null || \
date -u -r "$(SOURCE_DATE_EPOCH)" \
"$(OPENSBI_BUILD_DATE_FMT)" 2>/dev/null || \
date -u "$(OPENSBI_BUILD_DATE_FMT)")
else
OPENSBI_BUILD_TIME_STAMP := $(shell date "$(OPENSBI_BUILD_DATE_FMT)")
OPENSBI_BUILD_TIME_STAMP ?= $(shell date "$(OPENSBI_BUILD_DATE_FMT)")
endif
endif
OPENSBI_BUILD_COMPILER_VERSION := $(shell $(CC) -v 2>&1 | grep ' version ' | \
OPENSBI_BUILD_COMPILER_VERSION=$(shell $(CC) -v 2>&1 | grep ' version ' | \
sed 's/[[:space:]]*$$//')
endif
@ -363,7 +350,6 @@ ifeq ($(BUILD_INFO),y)
GENFLAGS += -DOPENSBI_BUILD_TIME_STAMP="\"$(OPENSBI_BUILD_TIME_STAMP)\""
GENFLAGS += -DOPENSBI_BUILD_COMPILER_VERSION="\"$(OPENSBI_BUILD_COMPILER_VERSION)\""
endif
GENFLAGS += -include $(include_dir)/sbi/sbi_visibility.h
ifdef PLATFORM
GENFLAGS += -include $(KCONFIG_AUTOHEADER)
endif
@ -373,8 +359,6 @@ GENFLAGS += $(firmware-genflags-y)
CFLAGS = -g -Wall -Werror -ffreestanding -nostdlib -fno-stack-protector -fno-strict-aliasing -ffunction-sections -fdata-sections
CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
CFLAGS += $(REPRODUCIBLE_FLAGS)
# Optionally supported flags
ifeq ($(CC_SUPPORT_VECTOR),y)
CFLAGS += -DOPENSBI_CC_SUPPORT_VECTOR
@ -400,7 +384,6 @@ CPPFLAGS += $(firmware-cppflags-y)
ASFLAGS = -g -Wall -nostdlib
ASFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
ASFLAGS += -fPIE
ASFLAGS += $(REPRODUCIBLE_FLAGS)
# Optionally supported flags
ifeq ($(CC_SUPPORT_SAVE_RESTORE),y)
ASFLAGS += -mno-save-restore

View File

@ -99,7 +99,7 @@ capable enough to bring up all other non-booting harts using HSM extension.
Required Toolchain and Packages
-------------------------------
OpenSBI can be compiled natively or cross-compiled on a host machine. For
OpenSBI can be compiled natively or cross-compiled on an x86 host. For
cross-compilation, you can build your own toolchain, download a prebuilt one
from the [Bootlin toolchain repository] or install a distribution-provided
toolchain; if you opt to use LLVM/Clang, most distribution toolchains will
@ -108,12 +108,16 @@ LLVM/Clang toolchain due to LLVM's ability to support multiple backends in the
same binary, so is often an easy way to obtain a working cross-compilation
toolchain.
Toolchains with Position Independent Executable (PIE) support like
*riscv64-linux-gnu-gcc*, *riscv64-unknown-freebsd-gcc*, or *Clang/LLVM* are
required in order to generate PIE firmware images that can run at arbitrary
address with appropriate alignment. Bare-metal GNU toolchains (e.g.
*riscv64-unknown-elf-gcc*) cannot be used. *Clang/LLVM* can still generate PIE
images if a bare-metal triple is used (e.g. *-target riscv64-unknown-elf*).
Basically, we prefer toolchains with Position Independent Executable (PIE)
support like *riscv64-linux-gnu-gcc*, *riscv64-unknown-freebsd-gcc*, or
*Clang/LLVM* as they generate PIE firmware images that can run at arbitrary
address with appropriate alignment. If a bare-metal GNU toolchain (e.g.
*riscv64-unknown-elf-gcc*) is used, static linked firmware images are
generated instead. *Clang/LLVM* can still generate PIE images if a bare-metal
triple is used (e.g. *-target riscv64-unknown-elf*).
Please note that only a 64-bit version of the toolchain is available in
the Bootlin toolchain repository for now.
In addition to a toolchain, OpenSBI also requires the following packages on
the host:
@ -252,18 +256,6 @@ option with:
make LLVM=1
```
To build with a specific version of LLVM, a path to a directory containing the
LLVM tools can be provided:
```
make LLVM=/path/to/llvm/
```
If you have versioned llvm tools you would like to use, such as `clang-17`, the LLVM variable can
be set as:
```
make LLVM=-17
```
When using Clang, *CROSS_COMPILE* often does not need to be defined unless
using GNU binutils with prefixed binary names. *PLATFORM_RISCV_XLEN* will be
used to infer a default triple to pass to Clang, so if *PLATFORM_RISCV_XLEN*

View File

@ -7,8 +7,8 @@ processor from ETH Zurich. To this end, Ariane has been equipped with a
different L1 cache subsystem that follows a write-through protocol and that has
support for cache invalidations and atomics.
To build platform specific library and firmwares, provide the *PLATFORM=generic*
parameter to the top level `make` command.
To build platform specific library and firmwares, provide the
*PLATFORM=fpga/openpiton* parameter to the top level `make` command.
Platform Options
----------------
@ -21,7 +21,7 @@ Building Ariane FPGA Platform
**Linux Kernel Payload**
```
make PLATFORM=generic FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
make PLATFORM=fpga/openpiton FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
```
Booting Ariane FPGA Platform

View File

@ -47,7 +47,6 @@ RISC-V Platforms Using Generic Platform
* **SiFive HiFive Unleashed** (*[sifive_fu540.md]*)
* **Spike** (*[spike.md]*)
* **T-HEAD C9xx series Processors** (*[thead-c9xx.md]*)
* **OpenPiton FPGA SoC** (*[fpga-openpiton.md]*)
[andes-ae350.md]: andes-ae350.md
[qemu_virt.md]: qemu_virt.md
@ -56,4 +55,3 @@ RISC-V Platforms Using Generic Platform
[sifive_fu540.md]: sifive_fu540.md
[spike.md]: spike.md
[thead-c9xx.md]: thead-c9xx.md
[fpga-openpiton.md]: fpga-openpiton.md

View File

@ -31,6 +31,10 @@ OpenSBI currently supports the following virtual and hardware platforms:
* **Spike**: Platform support for the Spike emulator. More
details on this platform can be found in the file *[spike.md]*.
* **OpenPiton FPGA SoC**: Platform support for the OpenPiton research platform
based on the Ariane core. More details on this platform can be found in the
file *[fpga-openpiton.md]*.
* **Shakti C-class SoC Platform**: Platform support for Shakti C-class
processor based SOCs. More details on this platform can be found in the
file *[shakti_cclass.md]*.
@ -52,5 +56,6 @@ comments to facilitate the implementation.
[andes-ae350.md]: andes-ae350.md
[thead-c910.md]: thead-c910.md
[spike.md]: spike.md
[fpga-openpiton.md]: fpga-openpiton.md
[shakti_cclass.md]: shakti_cclass.md
[renesas-rzfive.md]: renesas-rzfive.md

View File

@ -19,10 +19,6 @@ Base Platform Requirements
The base RISC-V platform requirements for OpenSBI are as follows:
1. At least rv32ima_zicsr or rv64ima_zicsr is required on all HARTs
* Users may restrict the usage of atomic instructions to lr/sc
via rv32im_zalrsc_zicsr or rv64im_zalrsc_zicsr if preferred
2. At least one HART should have S-mode support because:
* SBI calls are meant for RISC-V S-mode (Supervisor mode)

View File

@ -74,10 +74,10 @@ pmu {
<0x10000 0x10033 0x000ff000>;
/* For event ID 0x0002 */
riscv,raw-event-to-mhpmcounters = <0x0000 0x0002 0xffffffff 0xffffffff 0x00000f8>,
/* For event ID 0-15 */
/* For event ID 0-4 */
<0x0 0x0 0xffffffff 0xfffffff0 0x00000ff0>,
/* For event ID 0xffffffff0000000f - 0xffffffff000000ff */
<0xffffffff 0xf 0xffffffff 0xffffff0f 0x00000ff0>;
<0xffffffff 0x0 0xffffffff 0xffffff0f 0x00000ff0>;
};
```
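For reference, each riscv,raw-event-to-mhpmcounters tuple above packs five cells: &lt;select_hi select_lo mask_hi mask_lo counter_bitmap&gt;. A hedged C sketch of the assumed matching rule, which is also what makes a low mask of 0xfffffff0 cover event IDs 0 through 15:

```
#include <stdbool.h>
#include <stdint.h>

/*
 * Assumed matching rule for one riscv,raw-event-to-mhpmcounters tuple:
 * bits covered by the mask must equal the select value; bits outside
 * the mask are don't-care.
 */
static bool raw_event_matches(uint64_t event, uint64_t select, uint64_t mask)
{
	return (event & mask) == select;
}

/*
 * With select = 0x0 and mask = 0xfffffffffffffff0, the low four bits of
 * the event ID are ignored, so IDs 0 through 15 all match.
 */
```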

View File

@ -1,28 +1 @@
# SPDX-License-Identifier: BSD-2-Clause
menu "Stack Protector Support"
config STACK_PROTECTOR
bool "Stack Protector buffer overflow detection"
default n
help
This option turns on the "stack-protector" compiler feature.
config STACK_PROTECTOR_STRONG
bool "Strong Stack Protector"
depends on STACK_PROTECTOR
default n
help
Turn on the "stack-protector" with "-fstack-protector-strong" option.
Like -fstack-protector but includes additional functions to be
protected.
config STACK_PROTECTOR_ALL
bool "Almighty Stack Protector"
depends on STACK_PROTECTOR
default n
help
Turn on the "stack-protector" with "-fstack-protector-all" option.
Like -fstack-protector except that all functions are protected.
endmenu
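For context, here is a minimal hand-written sketch of the instrumentation these options enable; the real checks are emitted by the compiler, and the function below is purely hypothetical. -fstack-protector-strong and -fstack-protector-all differ only in how many functions receive the check.

```
#include <string.h>

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

/* Hypothetical function showing, by hand, what -fstack-protector inserts. */
void copy_name(const char *src)
{
	unsigned long canary = __stack_chk_guard; /* placed near the return address */
	char buf[16];

	strcpy(buf, src); /* an overflow runs into the canary first */

	if (canary != __stack_chk_guard)
		__stack_chk_fail(); /* mismatch: stack smashing detected */
}
```

The firmware-side halves of this contract, __stack_chk_fail and __stack_chk_guard, are visible in the assembly hunks further down.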

View File

@ -59,38 +59,28 @@ _try_lottery:
/* Jump to relocation wait loop if we don't get relocation lottery */
lla a6, _boot_lottery
li a7, BOOT_LOTTERY_ACQUIRED
#ifdef __riscv_atomic
amoswap.w a6, a7, (a6)
bnez a6, _wait_for_boot_hart
#elif __riscv_zalrsc
_sc_fail:
lr.w t0, (a6)
sc.w t1, a7, (a6)
bnez t1, _sc_fail
bnez t0, _wait_for_boot_hart
#else
#error "need a or zalrsc"
#endif
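An illustrative C model of the lottery above; the firmware does this in assembly with amoswap.w, or with an lr/sc retry loop when only Zalrsc is available.

```
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int boot_lottery; /* BOOT_LOTTERY_ACQUIRED modelled as 1 */

/* Every hart swaps 1 in; exactly one observes the old value 0 and wins. */
static bool try_win_boot_lottery(void)
{
	return atomic_exchange(&boot_lottery, 1) == 0;
}
```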
/* relocate the global table content */
li t0, FW_TEXT_START /* link start */
lla t1, _fw_start /* load start */
sub t2, t1, t0 /* load offset */
lla t0, __rela_dyn_start
lla t1, __rela_dyn_end
lla t0, __rel_dyn_start
lla t1, __rel_dyn_end
beq t0, t1, _relocate_done
2:
REG_L t5, __SIZEOF_LONG__(t0) /* t5 <-- relocation info:type */
REG_L t5, REGBYTES(t0) /* t5 <-- relocation info:type */
li t3, R_RISCV_RELATIVE /* reloc type R_RISCV_RELATIVE */
bne t5, t3, 3f
REG_L t3, 0(t0)
REG_L t5, (__SIZEOF_LONG__ * 2)(t0) /* t5 <-- addend */
REG_L t5, (REGBYTES * 2)(t0) /* t5 <-- addend */
add t5, t5, t2
add t3, t3, t2
REG_S t5, 0(t3) /* store runtime address to the GOT entry */
3:
addi t0, t0, (__SIZEOF_LONG__ * 3)
addi t0, t0, (REGBYTES * 3)
blt t0, t1, 2b
_relocate_done:
/* At this point we are running from link address */
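For readers untangling the assembly, here is a C sketch of the R_RISCV_RELATIVE fixup loop above, assuming the standard ElfN_Rela entry layout; the names below are illustrative, not taken from the firmware source.

```
#define R_RISCV_RELATIVE 3 /* relocation type from the RISC-V ELF psABI */

struct elf_rela {
	unsigned long r_offset; /* link-time address to patch */
	unsigned long r_info;   /* relocation type */
	unsigned long r_addend; /* link-time value to relocate */
};

/* load_offset = load address - link address (t2 in the assembly) */
static void apply_relative_relocs(struct elf_rela *start,
				  struct elf_rela *end,
				  unsigned long load_offset)
{
	for (struct elf_rela *r = start; r < end; r++) {
		if (r->r_info != R_RISCV_RELATIVE)
			continue;
		*(unsigned long *)(r->r_offset + load_offset) =
			r->r_addend + load_offset;
	}
}
```

Each entry is three machine words, matching the `(__SIZEOF_LONG__ * 3)` stride in the loop.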
@ -302,7 +292,7 @@ _fdt_reloc_done:
REG_S t0, 0(t1)
j _start_warm
/* waiting for boot hart to be done (_boot_status == BOOT_STATUS_BOOT_HART_DONE) */
/* waiting for boot hart to be done (_boot_status == 2) */
_wait_for_boot_hart:
li t0, BOOT_STATUS_BOOT_HART_DONE
lla t1, _boot_status
@ -736,27 +726,6 @@ _reset_regs:
ret
.section .rodata
.Lstack_corrupt_msg:
.string "stack smashing detected\n"
/* This will be called when the stack corruption is detected */
.section .text
.align 3
.globl __stack_chk_fail
.type __stack_chk_fail, %function
__stack_chk_fail:
la a0, .Lstack_corrupt_msg
call sbi_panic
/* Initial value of the stack guard variable */
.section .data
.align 3
.globl __stack_chk_guard
.type __stack_chk_guard, %object
__stack_chk_guard:
RISCV_PTR 0x95B5FF5A
#ifdef FW_FDT_PATH
.section .rodata
.align 4

View File

@ -47,9 +47,9 @@
. = ALIGN(0x1000); /* Ensure next section is page aligned */
.rela.dyn : {
PROVIDE(__rela_dyn_start = .);
PROVIDE(__rel_dyn_start = .);
*(.rela*)
PROVIDE(__rela_dyn_end = .);
PROVIDE(__rel_dyn_end = .);
}
PROVIDE(_rodata_end = .);

View File

@ -66,12 +66,3 @@ endif
ifdef FW_OPTIONS
firmware-genflags-y += -DFW_OPTIONS=$(FW_OPTIONS)
endif
ifeq ($(CONFIG_STACK_PROTECTOR),y)
stack-protector-cflags-$(CONFIG_STACK_PROTECTOR) := -fstack-protector
stack-protector-cflags-$(CONFIG_STACK_PROTECTOR_STRONG) := -fstack-protector-strong
stack-protector-cflags-$(CONFIG_STACK_PROTECTOR_ALL) := -fstack-protector-all
else
stack-protector-cflags-y := -fno-stack-protector
endif
firmware-cflags-y += $(stack-protector-cflags-y)

View File

@ -30,18 +30,7 @@ _start:
/* Pick one hart to run the main boot sequence */
lla a3, _hart_lottery
li a2, 1
#ifdef __riscv_atomic
amoadd.w a3, a2, (a3)
#elif __riscv_zalrsc
_sc_fail:
lr.w t0, (a3)
addw t1, t0, a2
sc.w t1, t1, (a3)
bnez t1, _sc_fail
move a3, t0
#else
#error "need a or zalrsc"
#endif
bnez a3, _start_hang
/* Save a0 and a1 */
@ -97,18 +86,3 @@ _boot_a0:
RISCV_PTR 0
_boot_a1:
RISCV_PTR 0
/* This will be called when the stack corruption is detected */
.section .text
.align 3
.globl __stack_chk_fail
.type __stack_chk_fail, %function
.equ __stack_chk_fail, _start_hang
/* Initial value of the stack guard variable */
.section .data
.align 3
.globl __stack_chk_guard
.type __stack_chk_guard, %object
__stack_chk_guard:
RISCV_PTR 0x95B5FF5A

View File

@ -46,13 +46,6 @@ static inline void sbi_ecall_console_puts(const char *str)
sbi_strlen(str), (unsigned long)str, 0, 0, 0, 0);
}
static inline void sbi_ecall_shutdown(void)
{
sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET,
SBI_SRST_RESET_TYPE_SHUTDOWN, SBI_SRST_RESET_REASON_NONE,
0, 0, 0, 0);
}
#define wfi() \
do { \
__asm__ __volatile__("wfi" ::: "memory"); \
@ -61,6 +54,7 @@ static inline void sbi_ecall_shutdown(void)
void test_main(unsigned long a0, unsigned long a1)
{
sbi_ecall_console_puts("\nTest payload running\n");
sbi_ecall_shutdown();
sbi_ecall_console_puts("sbi_ecall_shutdown failed to execute.\n");
while (1)
wfi();
}

View File

@ -79,12 +79,36 @@ struct fw_dynamic_info {
* Prevent modification of struct fw_dynamic_info from affecting
* FW_DYNAMIC_INFO_xxx_OFFSET
*/
assert_member_offset(struct fw_dynamic_info, magic, FW_DYNAMIC_INFO_MAGIC_OFFSET);
assert_member_offset(struct fw_dynamic_info, version, FW_DYNAMIC_INFO_VERSION_OFFSET);
assert_member_offset(struct fw_dynamic_info, next_addr, FW_DYNAMIC_INFO_NEXT_ADDR_OFFSET);
assert_member_offset(struct fw_dynamic_info, next_mode, FW_DYNAMIC_INFO_NEXT_MODE_OFFSET);
assert_member_offset(struct fw_dynamic_info, options, FW_DYNAMIC_INFO_OPTIONS_OFFSET);
assert_member_offset(struct fw_dynamic_info, boot_hart, FW_DYNAMIC_INFO_BOOT_HART_OFFSET);
_Static_assert(
offsetof(struct fw_dynamic_info, magic)
== FW_DYNAMIC_INFO_MAGIC_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_MAGIC_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, version)
== FW_DYNAMIC_INFO_VERSION_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_VERSION_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, next_addr)
== FW_DYNAMIC_INFO_NEXT_ADDR_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_NEXT_ADDR_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, next_mode)
== FW_DYNAMIC_INFO_NEXT_MODE_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_NEXT_MODE_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, options)
== FW_DYNAMIC_INFO_OPTIONS_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_OPTIONS_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, boot_hart)
== FW_DYNAMIC_INFO_BOOT_HART_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_BOOT_HART_OFFSET");
#endif

View File

@ -156,26 +156,6 @@
: "memory"); \
})
#if __riscv_xlen == 64
#define __csrrw64(op, csr, csrh, val) (true ? op(csr, val) : (uint64_t)csrh)
#define __csrr64( op, csr, csrh) (true ? op(csr) : (uint64_t)csrh)
#define __csrw64( op, csr, csrh, val) (true ? op(csr, val) : (uint64_t)csrh)
#elif __riscv_xlen == 32
#define __csrrw64(op, csr, csrh, val) ( op(csr, val) | (uint64_t)op(csrh, val >> 32) << 32)
#define __csrr64( op, csr, csrh) ( op(csr) | (uint64_t)op(csrh) << 32)
#define __csrw64( op, csr, csrh, val) ({ op(csr, val); op(csrh, val >> 32); })
#endif
#define csr_swap64( csr, val) __csrrw64(csr_swap, csr, csr ## H, val)
#define csr_read64( csr) __csrr64 (csr_read, csr, csr ## H)
#define csr_read_relaxed64(csr) __csrr64 (csr_read_relaxed, csr, csr ## H)
#define csr_write64( csr, val) __csrw64 (csr_write, csr, csr ## H, val)
#define csr_read_set64( csr, val) __csrrw64(csr_read_set, csr, csr ## H, val)
#define csr_set64( csr, val) __csrw64 (csr_set, csr, csr ## H, val)
#define csr_clear64( csr, val) __csrw64 (csr_clear, csr, csr ## H, val)
#define csr_read_clear64( csr, val) __csrrw64(csr_read_clear, csr, csr ## H, val)
unsigned long csr_read_num(int csr_num);
void csr_write_num(int csr_num, unsigned long val);
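These helpers hide the RV32/RV64 split: on RV64 the 64-bit value lives in a single CSR, while on RV32 it is assembled from a CSR and its high-half `H` twin via the `csr ## H` token pasting. A hedged usage sketch, assuming the CSR_MCYCLE/CSR_MCYCLEH pair:

```
#include <stdint.h>

static uint64_t read_cycle_counter(void)
{
	/* RV64: a single csr_read(CSR_MCYCLE).
	 * RV32: csr_read(CSR_MCYCLE) |
	 *       (uint64_t)csr_read(CSR_MCYCLEH) << 32 */
	return csr_read64(CSR_MCYCLE);
}
```

On RV64 the `true ? ... : (uint64_t)csrh` form appears to serve only to widen the expression's type to uint64_t; the high-half CSR is never evaluated.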

View File

@ -122,50 +122,6 @@ enum {
RV_DBTR_DECLARE_BIT_MASK(MC, TYPE, 4),
};
/* ICOUNT - Match Control Type Register */
enum {
RV_DBTR_DECLARE_BIT(ICOUNT, ACTION, 0),
RV_DBTR_DECLARE_BIT(ICOUNT, U, 6),
RV_DBTR_DECLARE_BIT(ICOUNT, S, 7),
RV_DBTR_DECLARE_BIT(ICOUNT, PENDING, 8),
RV_DBTR_DECLARE_BIT(ICOUNT, M, 9),
RV_DBTR_DECLARE_BIT(ICOUNT, COUNT, 10),
RV_DBTR_DECLARE_BIT(ICOUNT, HIT, 24),
RV_DBTR_DECLARE_BIT(ICOUNT, VU, 25),
RV_DBTR_DECLARE_BIT(ICOUNT, VS, 26),
#if __riscv_xlen == 64
RV_DBTR_DECLARE_BIT(ICOUNT, DMODE, 59),
RV_DBTR_DECLARE_BIT(ICOUNT, TYPE, 60),
#elif __riscv_xlen == 32
RV_DBTR_DECLARE_BIT(ICOUNT, DMODE, 27),
RV_DBTR_DECLARE_BIT(ICOUNT, TYPE, 28),
#else
#error "Unknown __riscv_xlen"
#endif
};
enum {
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, ACTION, 6),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, U, 1),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, S, 1),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, PENDING, 1),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, M, 1),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, COUNT, 14),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, HIT, 1),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, VU, 1),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, VS, 1),
#if __riscv_xlen == 64
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, DMODE, 1),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, TYPE, 4),
#elif __riscv_xlen == 32
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, DMODE, 1),
RV_DBTR_DECLARE_BIT_MASK(ICOUNT, TYPE, 4),
#else
#error "Unknown __riscv_xlen"
#endif
};
/* MC6 - Match Control 6 Type Register */
enum {
RV_DBTR_DECLARE_BIT(MC6, LOAD, 0),

View File

@ -33,7 +33,6 @@
#define MSTATUS_TW _UL(0x00200000)
#define MSTATUS_TSR _UL(0x00400000)
#define MSTATUS_SPELP _UL(0x00800000)
#define MSTATUS_SDT _UL(0x01000000)
#define MSTATUS32_SD _UL(0x80000000)
#if __riscv_xlen == 64
#define MSTATUS_UXL _ULL(0x0000000300000000)
@ -86,8 +85,6 @@
#define HSTATUS_GVA _UL(0x00000040)
#define HSTATUS_VSBE _UL(0x00000020)
#define MTVEC_MODE _UL(0x00000003)
#define MCAUSE_IRQ_MASK (_UL(1) << (__riscv_xlen - 1))
#define IRQ_S_SOFT 1
@ -378,17 +375,6 @@
#define CSR_SSTATEEN2 0x10E
#define CSR_SSTATEEN3 0x10F
/* Machine-Level Control transfer records CSRs */
#define CSR_MCTRCTL 0x34e
/* Supervisor-Level Control transfer records CSRs */
#define CSR_SCTRCTL 0x14e
#define CSR_SCTRSTATUS 0x14f
#define CSR_SCTRDEPTH 0x15f
/* VS-Level Control transfer records CSRs */
#define CSR_VSCTRCTL 0x24e
/* ===== Hypervisor-level CSRs ===== */
/* Hypervisor Trap Setup (H-extension) */
@ -813,8 +799,6 @@
#define SMSTATEEN0_CS (_ULL(1) << SMSTATEEN0_CS_SHIFT)
#define SMSTATEEN0_FCSR_SHIFT 1
#define SMSTATEEN0_FCSR (_ULL(1) << SMSTATEEN0_FCSR_SHIFT)
#define SMSTATEEN0_CTR_SHIFT 54
#define SMSTATEEN0_CTR (_ULL(1) << SMSTATEEN0_CTR_SHIFT)
#define SMSTATEEN0_CONTEXT_SHIFT 57
#define SMSTATEEN0_CONTEXT (_ULL(1) << SMSTATEEN0_CONTEXT_SHIFT)
#define SMSTATEEN0_IMSIC_SHIFT 58
@ -986,14 +970,13 @@
#define INSN_MATCH_VS4RV 0x62800027
#define INSN_MATCH_VS8RV 0xe2800027
#define INSN_OPCODE_MASK 0x7f
#define INSN_OPCODE_VECTOR_LOAD 0x07
#define INSN_OPCODE_VECTOR_STORE 0x27
#define INSN_OPCODE_AMO 0x2f
#define INSN_MASK_VECTOR_LOAD_STORE 0x7f
#define INSN_MATCH_VECTOR_LOAD 0x07
#define INSN_MATCH_VECTOR_STORE 0x27
#define IS_VECTOR_LOAD_STORE(insn) \
((((insn) & INSN_OPCODE_MASK) == INSN_OPCODE_VECTOR_LOAD) || \
(((insn) & INSN_OPCODE_MASK) == INSN_OPCODE_VECTOR_STORE))
((((insn) & INSN_MASK_VECTOR_LOAD_STORE) == INSN_MATCH_VECTOR_LOAD) || \
(((insn) & INSN_MASK_VECTOR_LOAD_STORE) == INSN_MATCH_VECTOR_STORE))
#define IS_VECTOR_INSN_MATCH(insn, match, mask) \
(((insn) & (mask)) == ((match) & (mask)))
@ -1287,19 +1270,6 @@
#error "Unexpected __riscv_xlen"
#endif
#define MASK_FUNCT3 0x7000
#define SHIFT_FUNCT3 12
#define MASK_RS1 0xf8000
#define MASK_RS2 0x1f00000
#define MASK_RD 0xf80
#define MASK_CSR 0xfff00000
#define SHIFT_CSR 20
#define MASK_AQRL 0x06000000
#define SHIFT_AQRL 25
#define VM_MASK 0x1
#define VIEW_MASK 0x3
#define VSEW_MASK 0x3
@ -1317,6 +1287,13 @@
#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
#if __riscv_xlen == 64
#define LOG_REGBYTES 3
#else
#define LOG_REGBYTES 2
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#define SH_VSEW 3
#define SH_VIEW 12
#define SH_VD 7
@ -1351,17 +1328,27 @@
#define SHIFT_RIGHT(x, y) \
((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define GET_FUNC3(insn) ((insn & MASK_FUNCT3) >> SHIFT_FUNCT3)
#define GET_RM(insn) GET_FUNC3(insn)
#define GET_RS1_NUM(insn) ((insn & MASK_RS1) >> SH_RS1)
#define GET_RS2_NUM(insn) ((insn & MASK_RS2) >> SH_RS2)
#define GET_RS1S_NUM(insn) RVC_RS1S(insn)
#define GET_RS2S_NUM(insn) RVC_RS2S(insn)
#define GET_RS2C_NUM(insn) RVC_RS2(insn)
#define GET_RD_NUM(insn) ((insn & MASK_RD) >> SH_RD)
#define GET_CSR_NUM(insn) ((insn & MASK_CSR) >> SHIFT_CSR)
#define GET_AQRL(insn) ((insn & MASK_AQRL) >> SHIFT_AQRL)
#define REG_MASK \
((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define REG_OFFSET(insn, pos) \
(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
#define REG_PTR(insn, pos, regs) \
(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))
#define GET_RM(insn) ((insn & MASK_FUNCT3) >> SHIFT_FUNCT3)
#define GET_RS1_NUM(insn) ((insn & MASK_RS1) >> 15)
#define GET_CSR_NUM(insn) ((insn & MASK_CSR) >> SHIFT_CSR)
#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs) (*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn) ((s32)(insn) >> 20)
#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
(s32)(((insn) >> 7) & 0x1f))
@ -1378,6 +1365,13 @@
#define GET_VEMUL(vlmul, view, vsew) ((vlmul + view - vsew) & 7)
#define GET_EMUL(vemul) (1UL << ((vemul) >= 4 ? 0 : (vemul)))
#define MASK_FUNCT3 0x7000
#define MASK_RS1 0xf8000
#define MASK_CSR 0xfff00000
#define SHIFT_FUNCT3 12
#define SHIFT_CSR 20
#define CSRRW 1
#define CSRRS 2
#define CSRRC 3

View File

@ -130,17 +130,4 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
int i, res = 0;
for (i = 0; i < nbits / BITS_PER_LONG; i++)
res += sbi_popcount(src[i]);
if (nbits % BITS_PER_LONG)
res += sbi_popcount(src[i] & BITMAP_LAST_WORD_MASK(nbits));
return res;
}
#endif

View File

@ -125,22 +125,14 @@ static inline unsigned long sbi_fls(unsigned long word)
*/
static inline unsigned long sbi_popcount(unsigned long word)
{
unsigned long count;
unsigned long count = 0;
#if BITS_PER_LONG == 64
count = word - ((word >> 1) & 0x5555555555555555ul);
count = (count & 0x3333333333333333ul) + ((count >> 2) & 0x3333333333333333ul);
count = (count + (count >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
count = count + (count >> 8);
count = count + (count >> 16);
return (count + (count >> 32)) & 0x00000000000000FFul;
#else
count = word - ((word >> 1) & 0x55555555);
count = (count & 0x33333333) + ((count >> 2) & 0x33333333);
count = (count + (count >> 4)) & 0x0F0F0F0F;
count = count + (count >> 8);
return (count + (count >> 16)) & 0x000000FF;
#endif
while (word) {
word &= word - 1;
count++;
}
return count;
}
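Both variants compute the Hamming weight: the bit-twiddling version is a branch-free SWAR reduction, while the while-loop form is Kernighan's method, whose body runs once per set bit because `w &= w - 1` clears the lowest set bit. A small self-contained check of the loop form, for illustration:

```
#include <assert.h>

static unsigned int popcount_kernighan(unsigned long w)
{
	unsigned int n = 0;

	while (w) {
		w &= w - 1; /* clear the lowest set bit */
		n++;
	}
	return n;
}

int main(void)
{
	assert(popcount_kernighan(0) == 0);
	assert(popcount_kernighan(0xf0ul) == 4);
	assert(popcount_kernighan(~0ul) == 8 * sizeof(unsigned long));
	return 0;
}
```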
#define for_each_set_bit(bit, addr, size) \

View File

@ -14,13 +14,13 @@
# define _conv_cast(type, val) ((type)(val))
#endif
#define __BSWAP16(x) ((((x) & 0x00ff) << 8) | \
#define BSWAP16(x) ((((x) & 0x00ff) << 8) | \
(((x) & 0xff00) >> 8))
#define __BSWAP32(x) ((((x) & 0x000000ff) << 24) | \
#define BSWAP32(x) ((((x) & 0x000000ff) << 24) | \
(((x) & 0x0000ff00) << 8) | \
(((x) & 0x00ff0000) >> 8) | \
(((x) & 0xff000000) >> 24))
#define __BSWAP64(x) ((((x) & 0x00000000000000ffULL) << 56) | \
#define BSWAP64(x) ((((x) & 0x00000000000000ffULL) << 56) | \
(((x) & 0x000000000000ff00ULL) << 40) | \
(((x) & 0x0000000000ff0000ULL) << 24) | \
(((x) & 0x00000000ff000000ULL) << 8) | \
@ -29,10 +29,6 @@
(((x) & 0x00ff000000000000ULL) >> 40) | \
(((x) & 0xff00000000000000ULL) >> 56))
#define BSWAP64(x) ({ uint64_t _sv = (x); __BSWAP64(_sv); })
#define BSWAP32(x) ({ uint32_t _sv = (x); __BSWAP32(_sv); })
#define BSWAP16(x) ({ uint16_t _sv = (x); __BSWAP16(_sv); })
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ /* CPU(little-endian) */
#define cpu_to_be16(x) _conv_cast(uint16_t, BSWAP16(x))
#define cpu_to_be32(x) _conv_cast(uint32_t, BSWAP32(x))
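The `({ ... })` statement-expression wrappers (a GNU C extension) exist because the plain macros expand their argument several times. A hedged sketch of the hazard, using hypothetical macro names that mirror the ones above:

```
#include <stdint.h>

#define BSWAP16_PLAIN(x) ((((x) & 0x00ff) << 8) | (((x) & 0xff00) >> 8))
#define BSWAP16_SAFE(x)  ({ uint16_t _sv = (x); BSWAP16_PLAIN(_sv); })

static uint16_t demo(void)
{
	uint16_t buf[2] = { 0x1234, 0x5678 }, *p = buf;

	/* BSWAP16_PLAIN(*p++) would expand `*p++` twice: undefined
	 * behaviour and a wrong result. The wrapped form reads the
	 * argument exactly once into _sv, then swaps that copy. */
	return BSWAP16_SAFE(*p++);
}
```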

View File

@ -18,7 +18,7 @@
({ \
register ulong tinfo asm("a3") = (ulong)trap; \
register ulong ttmp asm("a4"); \
register ulong mtvec = (ulong)sbi_hart_expected_trap; \
register ulong mtvec = sbi_hart_expected_trap_addr(); \
register ulong ret = 0; \
((struct sbi_trap_info *)(trap))->cause = 0; \
asm volatile( \
@ -37,7 +37,7 @@
({ \
register ulong tinfo asm("a3") = (ulong)trap; \
register ulong ttmp asm("a4"); \
register ulong mtvec = (ulong)sbi_hart_expected_trap; \
register ulong mtvec = sbi_hart_expected_trap_addr(); \
((struct sbi_trap_info *)(trap))->cause = 0; \
asm volatile( \
"add %[ttmp], %[tinfo], zero\n" \

View File

@ -90,7 +90,7 @@ struct sbi_dbtr_hart_triggers_state {
}while (0);
/** SBI shared mem messages layout */
union sbi_dbtr_shmem_entry {
struct sbi_dbtr_shmem_entry {
struct sbi_dbtr_data_msg data;
struct sbi_dbtr_id_msg id;
};
@ -115,7 +115,8 @@ int sbi_dbtr_uninstall_trig(unsigned long trig_idx_base,
int sbi_dbtr_enable_trig(unsigned long trig_idx_base,
unsigned long trig_idx_mask);
int sbi_dbtr_update_trig(unsigned long smode,
unsigned long trig_count);
unsigned long trig_idx_base,
unsigned long trig_idx_mask);
int sbi_dbtr_disable_trig(unsigned long trig_idx_base,
unsigned long trig_idx_mask);

View File

@ -307,11 +307,8 @@ int sbi_domain_register(struct sbi_domain *dom,
int sbi_domain_root_add_memrange(unsigned long addr, unsigned long size,
unsigned long align, unsigned long region_flags);
/** Startup non-root domains */
int sbi_domain_startup(struct sbi_scratch *scratch, u32 cold_hartid);
/** Finalize domain tables */
int sbi_domain_finalize(struct sbi_scratch *scratch);
/** Finalize domain tables and startup non-root domains */
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid);
/** Initialize domains */
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid);

View File

@ -1,20 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 Rivos Inc.
*
* Authors:
* Clément Léger <cleger@rivosinc.com>
*/
#ifndef __SBI_DOUBLE_TRAP_H__
#define __SBI_DOUBLE_TRAP_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_trap.h>
int sbi_double_trap_handler(struct sbi_trap_context *tcntx);
void sbi_double_trap_init(struct sbi_scratch *scratch);
#endif

View File

@ -13,7 +13,7 @@
#include <sbi/sbi_types.h>
#include <sbi/sbi_list.h>
#define SBI_ECALL_VERSION_MAJOR 3
#define SBI_ECALL_VERSION_MAJOR 2
#define SBI_ECALL_VERSION_MINOR 0
#define SBI_OPENSBI_IMPID 1

View File

@ -380,12 +380,10 @@ enum sbi_sse_attr_id {
#define SBI_SSE_ATTR_CONFIG_ONESHOT (1 << 0)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP BIT(0)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE BIT(1)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_STATUS_SPP BIT(0)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_STATUS_SPIE BIT(1)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV BIT(2)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP BIT(3)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP BIT(4)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT BIT(5)
enum sbi_sse_state {
SBI_SSE_STATE_UNUSED = 0,
@ -395,77 +393,48 @@ enum sbi_sse_state {
};
/* SBI SSE Event IDs. */
/* Range 0x00000000 - 0x0000ffff */
#define SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS 0x00000000
#define SBI_SSE_EVENT_LOCAL_RAS 0x00000000
#define SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP 0x00000001
#define SBI_SSE_EVENT_LOCAL_RESERVED_0_START 0x00000002
#define SBI_SSE_EVENT_LOCAL_RESERVED_0_END 0x00003fff
#define SBI_SSE_EVENT_LOCAL_PLAT_0_START 0x00004000
#define SBI_SSE_EVENT_LOCAL_PLAT_0_END 0x00007fff
#define SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS 0x00008000
#define SBI_SSE_EVENT_GLOBAL_RESERVED_0_START 0x00008001
#define SBI_SSE_EVENT_GLOBAL_RESERVED_0_END 0x0000bfff
#define SBI_SSE_EVENT_GLOBAL_RAS 0x00008000
#define SBI_SSE_EVENT_GLOBAL_PLAT_0_START 0x0000c000
#define SBI_SSE_EVENT_GLOBAL_PLAT_0_END 0x0000ffff
/* Range 0x00010000 - 0x0001ffff */
#define SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW 0x00010000
#define SBI_SSE_EVENT_LOCAL_RESERVED_1_START 0x00010001
#define SBI_SSE_EVENT_LOCAL_RESERVED_1_END 0x00013fff
#define SBI_SSE_EVENT_LOCAL_PMU 0x00010000
#define SBI_SSE_EVENT_LOCAL_PLAT_1_START 0x00014000
#define SBI_SSE_EVENT_LOCAL_PLAT_1_END 0x00017fff
#define SBI_SSE_EVENT_GLOBAL_RESERVED_1_START 0x00018000
#define SBI_SSE_EVENT_GLOBAL_RESERVED_1_END 0x0001bfff
#define SBI_SSE_EVENT_GLOBAL_PLAT_1_START 0x0001c000
#define SBI_SSE_EVENT_GLOBAL_PLAT_1_END 0x0001ffff
/* Range 0x00100000 - 0x0010ffff */
#define SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS 0x00100000
#define SBI_SSE_EVENT_LOCAL_RESERVED_2_START 0x00100001
#define SBI_SSE_EVENT_LOCAL_RESERVED_2_END 0x00103fff
#define SBI_SSE_EVENT_LOCAL_PLAT_2_START 0x00104000
#define SBI_SSE_EVENT_LOCAL_PLAT_2_END 0x00107fff
#define SBI_SSE_EVENT_LOCAL_PLAT_2_START 0x00024000
#define SBI_SSE_EVENT_LOCAL_PLAT_2_END 0x00027fff
#define SBI_SSE_EVENT_GLOBAL_PLAT_2_START 0x0002c000
#define SBI_SSE_EVENT_GLOBAL_PLAT_2_END 0x0002ffff
#define SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS 0x00108000
#define SBI_SSE_EVENT_GLOBAL_RESERVED_2_START 0x00108001
#define SBI_SSE_EVENT_GLOBAL_RESERVED_2_END 0x0010bfff
#define SBI_SSE_EVENT_GLOBAL_PLAT_2_START 0x0010c000
#define SBI_SSE_EVENT_GLOBAL_PLAT_2_END 0x0010ffff
/* Range 0xffff0000 - 0xffffffff */
#define SBI_SSE_EVENT_LOCAL_SOFTWARE 0xffff0000
#define SBI_SSE_EVENT_LOCAL_RESERVED_3_START 0xffff0001
#define SBI_SSE_EVENT_LOCAL_RESERVED_3_END 0xffff3fff
#define SBI_SSE_EVENT_LOCAL_PLAT_3_START 0xffff4000
#define SBI_SSE_EVENT_LOCAL_PLAT_3_END 0xffff7fff
#define SBI_SSE_EVENT_GLOBAL_SOFTWARE 0xffff8000
#define SBI_SSE_EVENT_GLOBAL_RESERVED_3_START 0xffff8001
#define SBI_SSE_EVENT_GLOBAL_RESERVED_3_END 0xffffbfff
#define SBI_SSE_EVENT_GLOBAL_PLAT_3_START 0xffffc000
#define SBI_SSE_EVENT_GLOBAL_PLAT_3_END 0xffffffff
#define SBI_SSE_EVENT_GLOBAL_BIT BIT(15)
#define SBI_SSE_EVENT_PLATFORM_BIT BIT(14)
#define SBI_SSE_EVENT_GLOBAL_BIT (1 << 15)
#define SBI_SSE_EVENT_PLATFORM_BIT (1 << 14)
/* SBI function IDs for MPXY extension */
#define SBI_EXT_MPXY_GET_SHMEM_SIZE 0x0
#define SBI_EXT_MPXY_SET_SHMEM 0x1
#define SBI_EXT_MPXY_GET_CHANNEL_IDS 0x2
#define SBI_EXT_MPXY_READ_ATTRS 0x3
#define SBI_EXT_MPXY_WRITE_ATTRS 0x4
#define SBI_EXT_MPXY_SEND_MSG_WITH_RESP 0x5
#define SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP 0x6
#define SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS 0x7
#define SBI_EXT_MPXY_SET_SHMEM 0x0
#define SBI_EXT_MPXY_GET_CHANNEL_IDS 0x1
#define SBI_EXT_MPXY_READ_ATTRS 0x2
#define SBI_EXT_MPXY_WRITE_ATTRS 0x3
#define SBI_EXT_MPXY_SEND_MSG_WITH_RESP 0x4
#define SBI_EXT_MPXY_SEND_MSG_NO_RESP 0x5
#define SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS 0x6
/* SBI base specification related macros */
#define SBI_SPEC_VERSION_MAJOR_OFFSET 24
#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff
#define SBI_EXT_EXPERIMENTAL_START 0x08000000
#define SBI_EXT_EXPERIMENTAL_END 0x08FFFFFF
#define SBI_EXT_VENDOR_START 0x09000000
#define SBI_EXT_VENDOR_END 0x09FFFFFF
#define SBI_EXT_FIRMWARE_START 0x0A000000
@ -486,9 +455,8 @@ enum sbi_sse_state {
#define SBI_ERR_BAD_RANGE -11
#define SBI_ERR_TIMEOUT -12
#define SBI_ERR_IO -13
#define SBI_ERR_DENIED_LOCKED -14
#define SBI_LAST_ERR SBI_ERR_DENIED_LOCKED
#define SBI_LAST_ERR SBI_ERR_BAD_RANGE
/* clang-format on */

View File

@ -29,7 +29,6 @@
#define SBI_ETIMEOUT SBI_ERR_TIMEOUT
#define SBI_ETIMEDOUT SBI_ERR_TIMEOUT
#define SBI_EIO SBI_ERR_IO
#define SBI_EDENIED_LOCKED SBI_ERR_DENIED_LOCKED
#define SBI_ENODEV -1000
#define SBI_ENOSYS -1001

View File

@ -31,7 +31,7 @@ enum sbi_hart_extensions {
SBI_HART_EXT_SMAIA = 0,
/** HART has Smepmp */
SBI_HART_EXT_SMEPMP,
/** HART has Smstateen extension **/
/** HART has Smstateen CSR **/
SBI_HART_EXT_SMSTATEEN,
/** Hart has Sscofpmf extension */
SBI_HART_EXT_SSCOFPMF,
@ -75,12 +75,6 @@ enum sbi_hart_extensions {
SBI_HART_EXT_ZICFISS,
/** Hart has Ssdbltrp extension */
SBI_HART_EXT_SSDBLTRP,
/** HART has CTR M-mode CSRs */
SBI_HART_EXT_SMCTR,
/** HART has CTR S-mode CSRs */
SBI_HART_EXT_SSCTR,
/** HART has Ssstateen extension **/
SBI_HART_EXT_SSSTATEEN,
/** Maximum index of Hart extension */
SBI_HART_EXT_MAX,
@ -93,14 +87,6 @@ struct sbi_hart_ext_data {
extern const struct sbi_hart_ext_data sbi_hart_ext[];
/** CSRs should be detected by access and trapping */
enum sbi_hart_csrs {
SBI_HART_CSR_CYCLE = 0,
SBI_HART_CSR_TIME,
SBI_HART_CSR_INSTRET,
SBI_HART_CSR_MAX,
};
/*
* Smepmp enforces access boundaries between M-mode and
* S/U-mode. When it is enabled, the PMPs are programmed
@ -120,7 +106,6 @@ struct sbi_hart_features {
bool detected;
int priv_version;
unsigned long extensions[BITS_TO_LONGS(SBI_HART_EXT_MAX)];
unsigned long csrs[BITS_TO_LONGS(SBI_HART_CSR_MAX)];
unsigned int pmp_count;
unsigned int pmp_addr_bits;
unsigned int pmp_log2gran;
@ -134,6 +119,10 @@ int sbi_hart_reinit(struct sbi_scratch *scratch);
int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot);
extern void (*sbi_hart_expected_trap)(void);
static inline ulong sbi_hart_expected_trap_addr(void)
{
return (ulong)sbi_hart_expected_trap;
}
unsigned int sbi_hart_mhpm_mask(struct sbi_scratch *scratch);
void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
@ -155,7 +144,6 @@ bool sbi_hart_has_extension(struct sbi_scratch *scratch,
enum sbi_hart_extensions ext);
void sbi_hart_get_extensions_str(struct sbi_scratch *scratch,
char *extension_str, int nestr);
bool sbi_hart_has_csr(struct sbi_scratch *scratch, enum sbi_hart_csrs csr);
void __attribute__((noreturn)) sbi_hart_hang(void);

View File

@ -181,17 +181,6 @@ static inline void sbi_hartmask_xor(struct sbi_hartmask *dstp,
sbi_hartmask_bits(src2p), SBI_HARTMASK_MAX_BITS);
}
/**
* Count of bits in *srcp
* @param srcp the hartmask to count bits in
*
* Return: count of bits set in *srcp
*/
static inline int sbi_hartmask_weight(const struct sbi_hartmask *srcp)
{
return bitmap_weight(sbi_hartmask_bits(srcp), SBI_HARTMASK_MAX_BITS);
}
/**
* Iterate over each HART index in hartmask
* __i hart index

View File

@ -1,17 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 MIPS
*
*/
#ifndef __SBI_ILLEGAL_ATOMIC_H__
#define __SBI_ILLEGAL_ATOMIC_H__
#include <sbi/sbi_types.h>
struct sbi_trap_regs;
int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs);
#endif

View File

@ -14,10 +14,6 @@
struct sbi_trap_context;
typedef int (*illegal_insn_func)(ulong insn, struct sbi_trap_regs *regs);
int truly_illegal_insn(ulong insn, struct sbi_trap_regs *regs);
int sbi_illegal_insn_handler(struct sbi_trap_context *tcntx);
#endif

View File

@ -16,8 +16,6 @@ struct sbi_scratch;
void __noreturn sbi_init(struct sbi_scratch *scratch);
void sbi_revert_entry_count(struct sbi_scratch *scratch);
unsigned long sbi_entry_count(u32 hartindex);
unsigned long sbi_init_count(u32 hartindex);

View File

@ -160,28 +160,4 @@ static inline void sbi_list_del_init(struct sbi_dlist *entry)
&pos->member != (head); \
pos = sbi_list_entry(pos->member.next, typeof(*pos), member))
/**
* Iterate over list of given type safe against removal of list entry
* @param pos the type * to use as a loop cursor.
* @param n another type * to use as temporary storage.
* @param head the head for your list.
* @param member the name of the list_struct within the struct.
*/
#define sbi_list_for_each_entry_safe(pos, n, head, member) \
for (pos = sbi_list_entry((head)->next, typeof(*pos), member), \
n = sbi_list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = sbi_list_entry(pos->member.next, typeof(*pos), member))
/**
* Iterate over list of given type in reverse order
* @param pos the type * to use as a loop cursor.
* @param head the head for your list.
* @param member the name of the list_struct within the struct.
*/
#define sbi_list_for_each_entry_reverse(pos, head, member) \
for (pos = sbi_list_entry((head)->prev, typeof(*pos), member); \
&pos->member != (head); \
pos = sbi_list_entry(pos->member.prev, typeof(*pos), member))
#endif
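A sketch of why the removed _safe variant exists: it caches the successor up front, so the current entry can be unlinked mid-iteration. The types below are hypothetical; sbi_list_del() is this header's usual unlink helper.

```
struct job {
	int done;
	struct sbi_dlist node;
};

static void reap_finished(struct sbi_dlist *job_list)
{
	struct job *pos, *n;

	sbi_list_for_each_entry_safe(pos, n, job_list, node) {
		if (pos->done)
			sbi_list_del(&pos->node); /* safe: n already holds next */
	}
}
```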

View File

@ -153,11 +153,9 @@ int sbi_mpxy_init(struct sbi_scratch *scratch);
/** Check if some Message proxy channel is available */
bool sbi_mpxy_channel_available(void);
/** Get message proxy shared memory size */
unsigned long sbi_mpxy_get_shmem_size(void);
/** Set message proxy shared memory on the calling HART */
int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo,
/** Set Message proxy shared memory on the calling HART */
int sbi_mpxy_set_shmem(unsigned long shmem_size,
unsigned long shmem_phys_lo,
unsigned long shmem_phys_hi,
unsigned long flags);

View File

@ -39,8 +39,6 @@
#define SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET (0x60 + __SIZEOF_POINTER__)
/** Offset of hart_index2id in struct sbi_platform */
#define SBI_PLATFORM_HART_INDEX2ID_OFFSET (0x60 + (__SIZEOF_POINTER__ * 2))
/** Offset of cbom_block_size in struct sbi_platform */
#define SBI_PLATFORM_CBOM_BLOCK_SIZE_OFFSET (0x60 + (__SIZEOF_POINTER__ * 3))
#define SBI_PLATFORM_TLB_RANGE_FLUSH_LIMIT_DEFAULT (1UL << 12)
@ -131,6 +129,8 @@ struct sbi_platform_operations {
/** Initialize the platform Message Proxy(MPXY) driver */
int (*mpxy_init)(void);
/** Check if SBI vendor extension is implemented or not */
bool (*vendor_ext_check)(void);
/** platform specific SBI extension implementation provider */
int (*vendor_ext_provider)(long funcid,
struct sbi_trap_regs *regs,
@ -142,13 +142,6 @@ struct sbi_platform_operations {
/** platform specific handler to fixup store fault */
int (*emulate_store)(int wlen, unsigned long addr,
union sbi_ldst_data in_val);
/** platform specific pmp setup on current HART */
void (*pmp_set)(unsigned int n, unsigned long flags,
unsigned long prot, unsigned long addr,
unsigned long log2len);
/** platform specific pmp disable on current HART */
void (*pmp_disable)(unsigned int n);
};
/** Platform default per-HART stack size for exception/interrupt handling */
@ -176,7 +169,7 @@ struct sbi_platform {
char name[64];
/** Supported features */
u64 features;
/** Total number of HARTs (at most SBI_HARTMASK_MAX_BITS) */
/** Total number of HARTs */
u32 hart_count;
/** Per-HART stack size for exception/interrupt handling */
u32 hart_stack_size;
@ -191,34 +184,70 @@ struct sbi_platform {
/**
* HART index to HART id table
*
* If hart_index2id != NULL then the table must contain a mapping
* for each HART index 0 <= <abc> < hart_count:
* For used HART index <abc>:
* hart_index2id[<abc>] = some HART id
* For unused HART index <abc>:
* hart_index2id[<abc>] = -1U
*
* If hart_index2id == NULL then we assume identity mapping
* hart_index2id[<abc>] = <abc>
*
* We have only two restrictions:
* 1. HART index < sbi_platform hart_count
* 2. HART id < SBI_HARTMASK_MAX_BITS
*/
const u32 *hart_index2id;
/** Allocation alignment for Scratch */
unsigned long cbom_block_size;
};
/**
* Prevent modification of struct sbi_platform from affecting
* SBI_PLATFORM_xxx_OFFSET
*/
assert_member_offset(struct sbi_platform, opensbi_version, SBI_PLATFORM_OPENSBI_VERSION_OFFSET);
assert_member_offset(struct sbi_platform, platform_version, SBI_PLATFORM_VERSION_OFFSET);
assert_member_offset(struct sbi_platform, name, SBI_PLATFORM_NAME_OFFSET);
assert_member_offset(struct sbi_platform, features, SBI_PLATFORM_FEATURES_OFFSET);
assert_member_offset(struct sbi_platform, hart_count, SBI_PLATFORM_HART_COUNT_OFFSET);
assert_member_offset(struct sbi_platform, hart_stack_size, SBI_PLATFORM_HART_STACK_SIZE_OFFSET);
assert_member_offset(struct sbi_platform, heap_size, SBI_PLATFORM_HEAP_SIZE_OFFSET);
assert_member_offset(struct sbi_platform, reserved, SBI_PLATFORM_RESERVED_OFFSET);
assert_member_offset(struct sbi_platform, platform_ops_addr, SBI_PLATFORM_OPS_OFFSET);
assert_member_offset(struct sbi_platform, firmware_context, SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET);
assert_member_offset(struct sbi_platform, hart_index2id, SBI_PLATFORM_HART_INDEX2ID_OFFSET);
assert_member_offset(struct sbi_platform, cbom_block_size, SBI_PLATFORM_CBOM_BLOCK_SIZE_OFFSET);
_Static_assert(
offsetof(struct sbi_platform, opensbi_version)
== SBI_PLATFORM_OPENSBI_VERSION_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_OPENSBI_VERSION_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, platform_version)
== SBI_PLATFORM_VERSION_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_VERSION_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, name)
== SBI_PLATFORM_NAME_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_NAME_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, features)
== SBI_PLATFORM_FEATURES_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_FEATURES_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, hart_count)
== SBI_PLATFORM_HART_COUNT_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_HART_COUNT_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, hart_stack_size)
== SBI_PLATFORM_HART_STACK_SIZE_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_HART_STACK_SIZE_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, platform_ops_addr)
== SBI_PLATFORM_OPS_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_OPS_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, firmware_context)
== SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, hart_index2id)
== SBI_PLATFORM_HART_INDEX2ID_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_HART_INDEX2ID_OFFSET");
/** Get pointer to sbi_platform for sbi_scratch pointer */
#define sbi_platform_ptr(__s) \
@ -302,7 +331,7 @@ static inline u32 sbi_platform_tlb_fifo_num_entries(const struct sbi_platform *p
{
if (plat && sbi_platform_ops(plat)->get_tlb_num_entries)
return sbi_platform_ops(plat)->get_tlb_num_entries();
return sbi_hart_count();
return sbi_scratch_last_hartindex() + 1;
}
/**
@ -580,7 +609,10 @@ static inline int sbi_platform_mpxy_init(const struct sbi_platform *plat)
static inline bool sbi_platform_vendor_ext_check(
const struct sbi_platform *plat)
{
return plat && sbi_platform_ops(plat)->vendor_ext_provider;
if (plat && sbi_platform_ops(plat)->vendor_ext_check)
return sbi_platform_ops(plat)->vendor_ext_check();
return false;
}
/**
@ -651,38 +683,6 @@ static inline int sbi_platform_emulate_store(const struct sbi_platform *plat,
return SBI_ENOTSUPP;
}
/**
* Platform specific PMP setup on current HART
*
* @param plat pointer to struct sbi_platform
* @param n index of the pmp entry
* @param flags domain memregion flags
* @param prot attribute of the pmp entry
* @param addr address of the pmp entry
* @param log2len size of the pmp entry as power-of-2
*/
static inline void sbi_platform_pmp_set(const struct sbi_platform *plat,
unsigned int n, unsigned long flags,
unsigned long prot, unsigned long addr,
unsigned long log2len)
{
if (plat && sbi_platform_ops(plat)->pmp_set)
sbi_platform_ops(plat)->pmp_set(n, flags, prot, addr, log2len);
}
/**
* Platform specific PMP disable on current HART
*
* @param plat pointer to struct sbi_platform
* @param n index of the pmp entry
*/
static inline void sbi_platform_pmp_disable(const struct sbi_platform *plat,
unsigned int n)
{
if (plat && sbi_platform_ops(plat)->pmp_disable)
sbi_platform_ops(plat)->pmp_disable(n);
}
#endif
#endif

View File

@ -114,9 +114,6 @@ void sbi_pmu_exit(struct sbi_scratch *scratch);
/** Return the pmu irq bit depending on extension existence */
int sbi_pmu_irq_bit(void);
/** Return the pmu irq mask or 0 if the pmu overflow irq is not supported */
unsigned long sbi_pmu_irq_mask(void);
/**
* Add the hardware event to counter mapping information. This should be called
* from the platform code to update the mapping table.

View File

@ -93,21 +93,61 @@ struct sbi_scratch {
* Prevent modification of struct sbi_scratch from affecting
* SBI_SCRATCH_xxx_OFFSET
*/
assert_member_offset(struct sbi_scratch, fw_start, SBI_SCRATCH_FW_START_OFFSET);
assert_member_offset(struct sbi_scratch, fw_size, SBI_SCRATCH_FW_SIZE_OFFSET);
assert_member_offset(struct sbi_scratch, fw_rw_offset, SBI_SCRATCH_FW_RW_OFFSET);
assert_member_offset(struct sbi_scratch, fw_heap_offset, SBI_SCRATCH_FW_HEAP_OFFSET);
assert_member_offset(struct sbi_scratch, fw_heap_size, SBI_SCRATCH_FW_HEAP_SIZE_OFFSET);
assert_member_offset(struct sbi_scratch, next_arg1, SBI_SCRATCH_NEXT_ARG1_OFFSET);
assert_member_offset(struct sbi_scratch, next_addr, SBI_SCRATCH_NEXT_ADDR_OFFSET);
assert_member_offset(struct sbi_scratch, next_mode, SBI_SCRATCH_NEXT_MODE_OFFSET);
assert_member_offset(struct sbi_scratch, warmboot_addr, SBI_SCRATCH_WARMBOOT_ADDR_OFFSET);
assert_member_offset(struct sbi_scratch, platform_addr, SBI_SCRATCH_PLATFORM_ADDR_OFFSET);
assert_member_offset(struct sbi_scratch, hartid_to_scratch, SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET);
assert_member_offset(struct sbi_scratch, trap_context, SBI_SCRATCH_TRAP_CONTEXT_OFFSET);
assert_member_offset(struct sbi_scratch, tmp0, SBI_SCRATCH_TMP0_OFFSET);
assert_member_offset(struct sbi_scratch, options, SBI_SCRATCH_OPTIONS_OFFSET);
assert_member_offset(struct sbi_scratch, hartindex, SBI_SCRATCH_HARTINDEX_OFFSET);
_Static_assert(
offsetof(struct sbi_scratch, fw_start)
== SBI_SCRATCH_FW_START_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_FW_START_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, fw_size)
== SBI_SCRATCH_FW_SIZE_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_FW_SIZE_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, next_arg1)
== SBI_SCRATCH_NEXT_ARG1_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_NEXT_ARG1_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, next_addr)
== SBI_SCRATCH_NEXT_ADDR_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_NEXT_ADDR_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, next_mode)
== SBI_SCRATCH_NEXT_MODE_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_NEXT_MODE_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, warmboot_addr)
== SBI_SCRATCH_WARMBOOT_ADDR_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_WARMBOOT_ADDR_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, platform_addr)
== SBI_SCRATCH_PLATFORM_ADDR_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_PLATFORM_ADDR_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, hartid_to_scratch)
== SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, trap_context)
== SBI_SCRATCH_TRAP_CONTEXT_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_TRAP_CONTEXT_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, tmp0)
== SBI_SCRATCH_TMP0_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_TMP0_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, options)
== SBI_SCRATCH_OPTIONS_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_OPTIONS_OFFSET");
/** Possible options for OpenSBI library */
enum sbi_scratch_options {
@ -170,18 +210,15 @@ do { \
#define current_hartindex() \
(sbi_scratch_thishart_ptr()->hartindex)
/** Number of harts managed by this OpenSBI instance */
extern u32 sbi_scratch_hart_count;
/** Last HART index having a sbi_scratch pointer */
extern u32 last_hartindex_having_scratch;
/** Get the number of harts managed by this OpenSBI instance */
#define sbi_hart_count() sbi_scratch_hart_count
/** Iterate over the harts managed by this OpenSBI instance */
#define sbi_for_each_hartindex(__var) \
for (u32 __var = 0; __var < sbi_hart_count(); ++__var)
/** Get last HART index having a sbi_scratch pointer */
#define sbi_scratch_last_hartindex() last_hartindex_having_scratch
/** Check whether a particular HART index is valid or not */
#define sbi_hartindex_valid(__hartindex) ((__hartindex) < sbi_hart_count())
#define sbi_hartindex_valid(__hartindex) \
(((__hartindex) <= sbi_scratch_last_hartindex()) ? true : false)
/** HART index to HART id table */
extern u32 hartindex_to_hartid_table[];
@ -189,7 +226,7 @@ extern u32 hartindex_to_hartid_table[];
/** Get sbi_scratch from HART index */
#define sbi_hartindex_to_hartid(__hartindex) \
({ \
((__hartindex) < SBI_HARTMASK_MAX_BITS) ? \
((__hartindex) <= sbi_scratch_last_hartindex()) ?\
hartindex_to_hartid_table[__hartindex] : -1U; \
})
@ -199,8 +236,8 @@ extern struct sbi_scratch *hartindex_to_scratch_table[];
/** Get sbi_scratch from HART index */
#define sbi_hartindex_to_scratch(__hartindex) \
({ \
((__hartindex) < SBI_HARTMASK_MAX_BITS) ? \
hartindex_to_scratch_table[__hartindex] : NULL; \
((__hartindex) <= sbi_scratch_last_hartindex()) ?\
hartindex_to_scratch_table[__hartindex] : NULL;\
})
/**

View File

@ -1,33 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Simple simply-linked list library.
*
* Copyright (c) 2025 Rivos Inc.
*
* Authors:
* Clément Léger <cleger@rivosinc.com>
*/
#ifndef __SBI_SLIST_H__
#define __SBI_SLIST_H__
#include <sbi/sbi_types.h>
#define SBI_SLIST_HEAD_INIT(_ptr) (_ptr)
#define SBI_SLIST_HEAD(_lname, _stype) struct _stype *_lname
#define SBI_SLIST_NODE(_stype) SBI_SLIST_HEAD(next, _stype)
#define SBI_SLIST_NODE_INIT(_ptr) .next = _ptr
#define SBI_INIT_SLIST_HEAD(_head) (_head) = NULL
#define SBI_SLIST_ADD(_ptr, _head) \
do { \
(_ptr)->next = _head; \
(_head) = _ptr; \
} while (0)
#define SBI_SLIST_FOR_EACH_ENTRY(_ptr, _head) \
for (_ptr = _head; _ptr; _ptr = _ptr->next)
#endif
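A hypothetical usage sketch of these singly-linked list helpers:

```
struct widget {
	int id;
	SBI_SLIST_NODE(widget); /* expands to: struct widget *next; */
};

static SBI_SLIST_HEAD(widget_list, widget); /* struct widget *widget_list; */

static void add_then_walk(struct widget *w)
{
	struct widget *pos;

	SBI_SLIST_ADD(w, widget_list); /* push w at the head */
	SBI_SLIST_FOR_EACH_ENTRY(pos, widget_list)
		(void)pos->id; /* visits every entry, newest first */
}
```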

View File

@ -54,12 +54,12 @@ struct sbi_sse_cb_ops {
void (*disable_cb)(uint32_t event_id);
};
/* Add a supported event with associated callback operations
* @param event_id Event identifier (SBI_SSE_EVENT_* or a custom platform one)
* @param cb_ops Callback operations (can be NULL if none are needed)
/* Set the callback operations for an event
* @param event_id Event identifier (SBI_SSE_EVENT_*)
* @param cb_ops Callback operations
* @return 0 on success, error otherwise
*/
int sbi_sse_add_event(uint32_t event_id, const struct sbi_sse_cb_ops *cb_ops);
int sbi_sse_set_cb_ops(uint32_t event_id, const struct sbi_sse_cb_ops *cb_ops);
/* Inject an event to the current hart
* @param event_id Event identifier (SBI_SSE_EVENT_*)

View File

@ -112,13 +112,10 @@
/** Size (in bytes) of sbi_trap_info */
#define SBI_TRAP_INFO_SIZE SBI_TRAP_INFO_OFFSET(last)
#define STACK_BOUNDARY 16
#define ALIGN_TO_BOUNDARY(x, a) (((x) + (a) - 1) & ~((a) - 1))
/** Size (in bytes) of sbi_trap_context */
#define SBI_TRAP_CONTEXT_SIZE ALIGN_TO_BOUNDARY((SBI_TRAP_REGS_SIZE + \
#define SBI_TRAP_CONTEXT_SIZE (SBI_TRAP_REGS_SIZE + \
SBI_TRAP_INFO_SIZE + \
__SIZEOF_POINTER__), STACK_BOUNDARY)
__SIZEOF_POINTER__)
#ifndef __ASSEMBLER__
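Two compile-time checks, for illustration, make the rounding arithmetic concrete: ALIGN_TO_BOUNDARY rounds up to the next multiple of a power-of-two boundary and leaves exact multiples unchanged.

```
/* Same definitions as above, repeated so this snippet stands alone. */
#define STACK_BOUNDARY 16
#define ALIGN_TO_BOUNDARY(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* (33 + 15) & ~15 = 48 */
_Static_assert(ALIGN_TO_BOUNDARY(33, STACK_BOUNDARY) == 48, "rounds 33 up to 48");
_Static_assert(ALIGN_TO_BOUNDARY(48, STACK_BOUNDARY) == 48, "multiples unchanged");
```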
@ -127,9 +124,6 @@
/** Representation of register state at time of trap/interrupt */
struct sbi_trap_regs {
union {
unsigned long gprs[32];
struct {
/** zero register state */
unsigned long zero;
/** ra register state */
@ -194,8 +188,6 @@ struct sbi_trap_regs {
unsigned long t5;
/** t6 register state */
unsigned long t6;
};
};
/** mepc register state */
unsigned long mepc;
/** mstatus register state */
@ -204,21 +196,6 @@ struct sbi_trap_regs {
unsigned long mstatusH;
};
_Static_assert(
sizeof(((struct sbi_trap_regs *)0)->gprs) ==
offsetof(struct sbi_trap_regs, t6) +
sizeof(((struct sbi_trap_regs *)0)->t6),
"struct sbi_trap_regs's layout differs between gprs and named members");
#define REG_VAL(idx, regs) ((regs)->gprs[(idx)])
#define GET_RS1(insn, regs) REG_VAL(GET_RS1_NUM(insn), regs)
#define GET_RS2(insn, regs) REG_VAL(GET_RS2_NUM(insn), regs)
#define GET_RS1S(insn, regs) REG_VAL(GET_RS1S_NUM(insn), regs)
#define GET_RS2S(insn, regs) REG_VAL(GET_RS2S_NUM(insn), regs)
#define GET_RS2C(insn, regs) REG_VAL(GET_RS2C_NUM(insn), regs)
#define SET_RD(insn, regs, val) (REG_VAL(GET_RD_NUM(insn), regs) = (val))
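A hedged sketch of how the gprs[] overlay gets used: instruction emulation decodes a register field from the opcode and indexes the trap frame directly. The wrapper below is hypothetical; the real accessors are the GET_RS1/SET_RD macros above.

```
static unsigned long emulate_read_rs1(unsigned long insn,
				      struct sbi_trap_regs *regs)
{
	unsigned long rs1 = (insn >> 15) & 0x1f; /* rs1 field, bits 19:15 */

	return regs->gprs[rs1]; /* what GET_RS1(insn, regs) boils down to */
}
```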
/** Representation of trap details */
struct sbi_trap_info {
/** cause Trap exception cause */

View File

@ -28,6 +28,8 @@ int sbi_load_access_handler(struct sbi_trap_context *tcntx);
int sbi_store_access_handler(struct sbi_trap_context *tcntx);
int sbi_double_trap_handler(struct sbi_trap_context *tcntx);
ulong sbi_misaligned_tinst_fixup(ulong orig_tinst, ulong new_tinst,
ulong addr_offset);

View File

@ -96,13 +96,6 @@ typedef uint64_t be64_t;
const typeof(((type *)0)->member) * __mptr = (ptr); \
(type *)((char *)__mptr - offsetof(type, member)); })
#define assert_member_offset(type, member, offset) \
_Static_assert( \
(offsetof(type, member)) == (offset ), \
"The offset " #offset " of " #member " in " #type \
"is not correct, please redefine it.")
#define array_size(x) (sizeof(x) / sizeof((x)[0]))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

View File

@ -11,7 +11,7 @@
#define __SBI_VERSION_H__
#define OPENSBI_VERSION_MAJOR 1
#define OPENSBI_VERSION_MINOR 7
#define OPENSBI_VERSION_MINOR 6
/**
* OpenSBI 32-bit version with:

View File

@ -1,18 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive
*/
#ifndef __SBI_VISIBILITY_H__
#define __SBI_VISIBILITY_H__
#ifndef __DTS__
/*
* Declare all global objects with hidden visibility so access is PC-relative
* instead of going through the GOT.
*/
#pragma GCC visibility push(hidden)
#endif
#endif

View File

@ -0,0 +1,26 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __FDT_CPPC_H__
#define __FDT_CPPC_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_CPPC
void fdt_cppc_init(const void *fdt);
#else
static inline void fdt_cppc_init(const void *fdt) { }
#endif
#endif

View File

@ -18,9 +18,6 @@ struct fdt_driver {
bool experimental;
};
/* List of early FDT drivers generated at compile time */
extern const struct fdt_driver *const fdt_early_drivers[];
/**
* Initialize a driver instance for a specific DT node
*

View File

@ -34,6 +34,13 @@ struct platform_uart_data {
unsigned long reg_offset;
};
const struct fdt_match *fdt_match_node(const void *fdt, int nodeoff,
const struct fdt_match *match_table);
int fdt_find_match(const void *fdt, int startoff,
const struct fdt_match *match_table,
const struct fdt_match **out_match);
int fdt_parse_phandle_with_args(const void *fdt, int nodeoff,
const char *prop, const char *cells_prop,
int index, struct fdt_phandle_args *out_args);
@ -50,11 +57,9 @@ int fdt_parse_hart_id(const void *fdt, int cpu_offset, u32 *hartid);
int fdt_parse_max_enabled_hart_id(const void *fdt, u32 *max_hartid);
int fdt_parse_cbom_block_size(const void *fdt, int cpu_offset, unsigned long *cbom_block_size);
int fdt_parse_timebase_frequency(const void *fdt, unsigned long *freq);
int fdt_parse_isa_extensions(const void *fdt, unsigned int hartid,
int fdt_parse_isa_extensions(const void *fdt, unsigned int hard_id,
unsigned long *extensions);
int fdt_parse_gaisler_uart_node(const void *fdt, int nodeoffset,

View File

@ -62,6 +62,11 @@ int fdt_pmu_setup(const void *fdt);
*/
uint64_t fdt_pmu_get_select_value(uint32_t event_idx);
/** The event index to selector value table instance */
extern struct fdt_pmu_hw_event_select_map fdt_pmu_evt_select[];
/** The number of valid entries in fdt_pmu_evt_select[] */
extern uint32_t hw_event_count;
#else
static inline void fdt_pmu_fixup(void *fdt) { }

View File

@ -0,0 +1,26 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __FDT_HSM_H__
#define __FDT_HSM_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_HSM
void fdt_hsm_init(const void *fdt);
#else
static inline void fdt_hsm_init(const void *fdt) { }
#endif
#endif

View File

@ -11,10 +11,14 @@
#define __FDT_IRQCHIP_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_IRQCHIP
struct fdt_irqchip {
const struct fdt_match *match_table;
int (*cold_init)(const void *fdt, int nodeoff, const struct fdt_match *match);
};
int fdt_irqchip_init(void);
#else

View File

@ -11,7 +11,6 @@
#define __RPMI_MAILBOX_H__
#include <sbi/sbi_error.h>
#include <sbi_utils/mailbox/mailbox.h>
#include <sbi_utils/mailbox/rpmi_msgprot.h>
#define rpmi_u32_count(__var) (sizeof(__var) / sizeof(u32))

View File

@ -175,7 +175,7 @@ enum rpmi_error {
RPMI_ERR_VENDOR_START = -128,
};
/** RPMI Mailbox Message Arguments */
/** RPMI Message Arguments */
struct rpmi_message_args {
u32 flags;
#define RPMI_MSG_FLAGS_NO_TX (1U << 0)
@ -189,20 +189,6 @@ struct rpmi_message_args {
u32 rx_data_len;
};
/** RPMI Mailbox Channel Attribute IDs */
enum rpmi_channel_attribute_id {
RPMI_CHANNEL_ATTR_PROTOCOL_VERSION = 0,
RPMI_CHANNEL_ATTR_MAX_DATA_LEN,
RPMI_CHANNEL_ATTR_P2A_DOORBELL_SYSMSI_INDEX,
RPMI_CHANNEL_ATTR_TX_TIMEOUT,
RPMI_CHANNEL_ATTR_RX_TIMEOUT,
RPMI_CHANNEL_ATTR_SERVICEGROUP_ID,
RPMI_CHANNEL_ATTR_SERVICEGROUP_VERSION,
RPMI_CHANNEL_ATTR_IMPL_ID,
RPMI_CHANNEL_ATTR_IMPL_VERSION,
RPMI_CHANNEL_ATTR_MAX,
};
/*
* RPMI SERVICEGROUPS AND SERVICES
*/
@ -211,12 +197,11 @@ enum rpmi_channel_attribute_id {
enum rpmi_servicegroup_id {
RPMI_SRVGRP_ID_MIN = 0,
RPMI_SRVGRP_BASE = 0x0001,
RPMI_SRVGRP_SYSTEM_MSI = 0x0002,
RPMI_SRVGRP_SYSTEM_RESET = 0x0003,
RPMI_SRVGRP_SYSTEM_SUSPEND = 0x0004,
RPMI_SRVGRP_HSM = 0x0005,
RPMI_SRVGRP_CPPC = 0x0006,
RPMI_SRVGRP_CLOCK = 0x0008,
RPMI_SRVGRP_SYSTEM_RESET = 0x0002,
RPMI_SRVGRP_SYSTEM_SUSPEND = 0x0003,
RPMI_SRVGRP_HSM = 0x0004,
RPMI_SRVGRP_CPPC = 0x0005,
RPMI_SRVGRP_CLOCK = 0x0007,
RPMI_SRVGRP_ID_MAX_COUNT,
/* Reserved range for service groups */
@ -247,10 +232,12 @@ enum rpmi_base_service_id {
RPMI_BASE_SRV_GET_PLATFORM_INFO = 0x05,
RPMI_BASE_SRV_PROBE_SERVICE_GROUP = 0x06,
RPMI_BASE_SRV_GET_ATTRIBUTES = 0x07,
RPMI_BASE_SRV_SET_MSI = 0x08,
};
#define RPMI_BASE_FLAGS_F0_PRIVILEGE (1U << 1)
#define RPMI_BASE_FLAGS_F0_EV_NOTIFY (1U << 0)
#define RPMI_BASE_FLAGS_F0_PRIVILEGE (1U << 2)
#define RPMI_BASE_FLAGS_F0_EV_NOTIFY (1U << 1)
#define RPMI_BASE_FLAGS_F0_MSI_EN (1U)
enum rpmi_base_context_priv_level {
RPMI_BASE_CONTEXT_PRIV_S_MODE,
@ -271,92 +258,6 @@ struct rpmi_base_get_platform_info_resp {
char plat_info[];
};
/** RPMI System MSI ServiceGroup Service IDs */
enum rpmi_sysmsi_service_id {
RPMI_SYSMSI_SRV_ENABLE_NOTIFICATION = 0x01,
RPMI_SYSMSI_SRV_GET_ATTRIBUTES = 0x2,
RPMI_SYSMSI_SRV_GET_MSI_ATTRIBUTES = 0x3,
RPMI_SYSMSI_SRV_SET_MSI_STATE = 0x4,
RPMI_SYSMSI_SRV_GET_MSI_STATE = 0x5,
RPMI_SYSMSI_SRV_SET_MSI_TARGET = 0x6,
RPMI_SYSMSI_SRV_GET_MSI_TARGET = 0x7,
RPMI_SYSMSI_SRV_ID_MAX_COUNT,
};
/** Response for system MSI service group attributes */
struct rpmi_sysmsi_get_attributes_resp {
s32 status;
u32 sys_num_msi;
u32 flag0;
u32 flag1;
};
/** Request for system MSI attributes */
struct rpmi_sysmsi_get_msi_attributes_req {
u32 sys_msi_index;
};
/** Response for system MSI attributes */
struct rpmi_sysmsi_get_msi_attributes_resp {
s32 status;
u32 flag0;
u32 flag1;
u8 name[16];
};
#define RPMI_SYSMSI_MSI_ATTRIBUTES_FLAG0_PREF_PRIV (1U << 0)
/** Request for system MSI set state */
struct rpmi_sysmsi_set_msi_state_req {
u32 sys_msi_index;
u32 sys_msi_state;
};
#define RPMI_SYSMSI_MSI_STATE_ENABLE (1U << 0)
#define RPMI_SYSMSI_MSI_STATE_PENDING (1U << 1)
/** Response for system MSI set state */
struct rpmi_sysmsi_set_msi_state_resp {
s32 status;
};
/** Request for system MSI get state */
struct rpmi_sysmsi_get_msi_state_req {
u32 sys_msi_index;
};
/** Response for system MSI get state */
struct rpmi_sysmsi_get_msi_state_resp {
s32 status;
u32 sys_msi_state;
};
/** Request for system MSI set target */
struct rpmi_sysmsi_set_msi_target_req {
u32 sys_msi_index;
u32 sys_msi_address_low;
u32 sys_msi_address_high;
u32 sys_msi_data;
};
/** Response for system MSI set target */
struct rpmi_sysmsi_set_msi_target_resp {
s32 status;
};
/** Request for system MSI get target */
struct rpmi_sysmsi_get_msi_target_req {
u32 sys_msi_index;
};
/** Response for system MSI get target */
struct rpmi_sysmsi_get_msi_target_resp {
s32 status;
u32 sys_msi_address_low;
u32 sys_msi_address_high;
u32 sys_msi_data;
};
/** RPMI System Reset ServiceGroup Service IDs */
enum rpmi_system_reset_service_id {
RPMI_SYSRST_SRV_ENABLE_NOTIFICATION = 0x01,

View File

@ -15,11 +15,11 @@
#ifdef CONFIG_FDT_MPXY
int fdt_mpxy_init(const void *fdt);
void fdt_mpxy_init(const void *fdt);
#else
static inline int fdt_mpxy_init(const void *fdt) { return 0; }
static inline void fdt_mpxy_init(const void *fdt) { }
#endif
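
Master widens fdt_mpxy_init() to return int so that a probe failure can propagate, while on v1.6 the same failure is silently dropped. A hedged caller sketch (the enclosing init path is hypothetical):

/* Hypothetical caller, master side: a failing MPXY probe aborts init. */
int rc = fdt_mpxy_init(fdt);
if (rc)
	return rc;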

View File

@ -1,85 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __FDT_MPXY_RPMI_MBOX_H__
#define __FDT_MPXY_RPMI_MBOX_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_mpxy.h>
#include <sbi_utils/mailbox/fdt_mailbox.h>
#include <sbi_utils/mailbox/rpmi_msgprot.h>
#include <sbi_utils/mpxy/fdt_mpxy.h>
/** Convert the mpxy attribute ID to attribute array index */
#define attr_id2index(attr_id) (attr_id - SBI_MPXY_ATTR_MSGPROTO_ATTR_START)
enum mpxy_msgprot_rpmi_attr_id {
MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_ID = SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_VERSION,
MPXY_MSGPROT_RPMI_ATTR_IMPL_ID,
MPXY_MSGPROT_RPMI_ATTR_IMPL_VERSION,
MPXY_MSGPROT_RPMI_ATTR_MAX_ID
};
/**
* MPXY message protocol attributes for RPMI
* Order of attribute fields must follow the
* attribute IDs in `enum mpxy_msgprot_rpmi_attr_id`
*/
struct mpxy_rpmi_channel_attrs {
u32 servicegrp_id;
u32 servicegrp_ver;
u32 impl_id;
u32 impl_ver;
};
/** Make sure all attributes are packed for direct memcpy */
#define assert_field_offset(field, attr_offset) \
_Static_assert( \
((offsetof(struct mpxy_rpmi_channel_attrs, field)) / \
sizeof(u32)) == (attr_offset - SBI_MPXY_ATTR_MSGPROTO_ATTR_START),\
"field " #field \
" from struct mpxy_rpmi_channel_attrs invalid offset, expected " #attr_offset)
assert_field_offset(servicegrp_id, MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_ID);
assert_field_offset(servicegrp_ver, MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_VERSION);
assert_field_offset(impl_id, MPXY_MSGPROT_RPMI_ATTR_IMPL_ID);
assert_field_offset(impl_ver, MPXY_MSGPROT_RPMI_ATTR_IMPL_VERSION);
/** MPXY RPMI service data for each service group */
struct mpxy_rpmi_service_data {
u8 id;
u32 min_tx_len;
u32 max_tx_len;
u32 min_rx_len;
u32 max_rx_len;
};
/** MPXY RPMI mbox data for each service group */
struct mpxy_rpmi_mbox_data {
u32 servicegrp_id;
u32 num_services;
struct mpxy_rpmi_service_data *service_data;
/** Transfer RPMI service group message */
int (*xfer_group)(void *context, struct mbox_chan *chan,
struct mbox_xfer *xfer);
/** Setup RPMI service group context for MPXY */
int (*setup_group)(void **context, struct mbox_chan *chan,
const struct mpxy_rpmi_mbox_data *data);
/** Cleanup RPMI service group context for MPXY */
void (*cleanup_group)(void *context);
};
/** Common probe function for MPXY RPMI drivers */
int mpxy_rpmi_mbox_init(const void *fdt, int nodeoff, const struct fdt_match *match);
#endif

View File

@ -0,0 +1,31 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <anup.patel@wdc.com>
*/
#ifndef __FDT_RESET_H__
#define __FDT_RESET_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_RESET
/**
* fdt_reset_init() - initialize reset drivers based on the device-tree
*
* This function shall be invoked in final init.
*/
void fdt_reset_init(const void *fdt);
#else
static inline void fdt_reset_init(const void *fdt) { }
#endif
#endif

View File

@ -12,9 +12,7 @@
#include <sbi/sbi_types.h>
#define UART_CAP_UUE BIT(0) /* Check UUE capability for XScale PXA UARTs */
int uart8250_init(unsigned long base, u32 in_freq, u32 baudrate, u32 reg_shift,
u32 reg_width, u32 reg_offset, u32 caps);
u32 reg_width, u32 reg_offset);
#endif
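
Master grows uart8250_init() by a trailing caps argument for quirk flags such as UART_CAP_UUE; v1.6 stops at reg_offset. A hedged call sketch with made-up probe values:

/* Illustrative values only; pass UART_CAP_UUE in caps for XScale PXA
 * style UARTs, or 0 for a plain 16550 (master signature shown). */
uart8250_init(0x10000000, 1843200, 115200,
	      0 /* reg_shift */, 1 /* reg_width */,
	      0 /* reg_offset */, 0 /* caps */);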

View File

@ -0,0 +1,26 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __FDT_SUSPEND_H__
#define __FDT_SUSPEND_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_SUSPEND
void fdt_suspend_init(const void *fdt);
#else
static inline void fdt_suspend_init(const void *fdt) { }
#endif
#endif

View File

@ -79,7 +79,6 @@ libsbi-objs-y += sbi_heap.o
libsbi-objs-y += sbi_math.o
libsbi-objs-y += sbi_hfence.o
libsbi-objs-y += sbi_hsm.o
libsbi-objs-y += sbi_illegal_atomic.o
libsbi-objs-y += sbi_illegal_insn.o
libsbi-objs-y += sbi_init.o
libsbi-objs-y += sbi_ipi.o

View File

@ -12,7 +12,7 @@
#include <sbi/riscv_atomic.h>
#include <sbi/riscv_barrier.h>
#if !defined(__riscv_atomic) && !defined(__riscv_zalrsc)
#ifndef __riscv_atomic
#error "opensbi strongly relies on the A extension of RISC-V"
#endif
@ -31,7 +31,6 @@ void atomic_write(atomic_t *atom, long value)
long atomic_add_return(atomic_t *atom, long value)
{
#ifdef __riscv_atomic
long ret;
#if __SIZEOF_LONG__ == 4
__asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0"
@ -44,29 +43,6 @@ long atomic_add_return(atomic_t *atom, long value)
: "r"(value)
: "memory");
#endif
#elif __riscv_zalrsc
long ret, temp;
#if __SIZEOF_LONG__ == 4
__asm__ __volatile__("1:lr.w.aqrl %1,%0\n"
" addw %2,%1,%3\n"
" sc.w.aqrl %2,%2,%0\n"
" bnez %2,1b"
: "+A"(atom->counter), "=&r"(ret), "=&r"(temp)
: "r"(value)
: "memory");
#elif __SIZEOF_LONG__ == 8
__asm__ __volatile__("1:lr.d.aqrl %1,%0\n"
" add %2,%1,%3\n"
" sc.d.aqrl %2,%2,%0\n"
" bnez %2,1b"
: "+A"(atom->counter), "=&r"(ret), "=&r"(temp)
: "r"(value)
: "memory");
#endif
#else
#error "need a or zalrsc"
#endif
return ret + value;
}
@ -75,7 +51,6 @@ long atomic_sub_return(atomic_t *atom, long value)
return atomic_add_return(atom, -value);
}
#ifdef __riscv_atomic
#define __axchg(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
@ -101,39 +76,6 @@ long atomic_sub_return(atomic_t *atom, long value)
} \
__ret; \
})
#elif __riscv_zalrsc
#define __axchg(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret, __temp; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
"1: lr.w.aqrl %0, %1\n" \
" sc.w.aqrl %2, %3, %1\n" \
" bnez %2, 1b\n" \
: "=&r" (__ret), "+A" (*__ptr), "=&r" (__temp) \
: "r" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
"1: lr.d.aqrl %0, %1\n" \
" sc.d.aqrl %2, %3, %1\n" \
" bnez %2, 1b\n" \
: "=&r" (__ret), "+A" (*__ptr), "=&r" (__temp) \
: "r" (__new) \
: "memory"); \
break; \
default: \
break; \
} \
__ret; \
})
#else
#error "need a or zalrsc"
#endif
#define axchg(ptr, x) \
({ \

View File

@ -53,16 +53,7 @@ void spin_lock(spinlock_t *lock)
__asm__ __volatile__(
/* Atomically increment the next ticket. */
#ifdef __riscv_atomic
" amoadd.w.aqrl %0, %4, %3\n"
#elif __riscv_zalrsc
"3: lr.w.aqrl %0, %3\n"
" addw %1, %0, %4\n"
" sc.w.aqrl %1, %1, %3\n"
" bnez %1, 3b\n"
#else
#error "need a or zalrsc"
#endif
/* Did we get the lock? */
" srli %1, %0, %6\n"

View File

@ -336,19 +336,6 @@ static void dbtr_trigger_setup(struct sbi_dbtr_trigger *trig,
if (__test_bit(RV_DBTR_BIT(MC6, VS), &tdata1))
__set_bit(RV_DBTR_BIT(TS, VS), &trig->state);
break;
case RISCV_DBTR_TRIG_ICOUNT:
if (__test_bit(RV_DBTR_BIT(ICOUNT, U), &tdata1))
__set_bit(RV_DBTR_BIT(TS, U), &trig->state);
if (__test_bit(RV_DBTR_BIT(ICOUNT, S), &tdata1))
__set_bit(RV_DBTR_BIT(TS, S), &trig->state);
if (__test_bit(RV_DBTR_BIT(ICOUNT, VU), &tdata1))
__set_bit(RV_DBTR_BIT(TS, VU), &trig->state);
if (__test_bit(RV_DBTR_BIT(ICOUNT, VS), &tdata1))
__set_bit(RV_DBTR_BIT(TS, VS), &trig->state);
break;
default:
sbi_dprintf("%s: Unknown type (tdata1: 0x%lx Type: %ld)\n",
__func__, tdata1, TDATA1_GET_TYPE(tdata1));
@ -392,16 +379,6 @@ static void dbtr_trigger_enable(struct sbi_dbtr_trigger *trig)
update_bit(state & RV_DBTR_BIT_MASK(TS, S),
RV_DBTR_BIT(MC6, S), &trig->tdata1);
break;
case RISCV_DBTR_TRIG_ICOUNT:
update_bit(state & RV_DBTR_BIT_MASK(TS, VU),
RV_DBTR_BIT(ICOUNT, VU), &trig->tdata1);
update_bit(state & RV_DBTR_BIT_MASK(TS, VS),
RV_DBTR_BIT(ICOUNT, VS), &trig->tdata1);
update_bit(state & RV_DBTR_BIT_MASK(TS, U),
RV_DBTR_BIT(ICOUNT, U), &trig->tdata1);
update_bit(state & RV_DBTR_BIT_MASK(TS, S),
RV_DBTR_BIT(ICOUNT, S), &trig->tdata1);
break;
default:
break;
}
@ -441,12 +418,6 @@ static void dbtr_trigger_disable(struct sbi_dbtr_trigger *trig)
__clear_bit(RV_DBTR_BIT(MC6, U), &trig->tdata1);
__clear_bit(RV_DBTR_BIT(MC6, S), &trig->tdata1);
break;
case RISCV_DBTR_TRIG_ICOUNT:
__clear_bit(RV_DBTR_BIT(ICOUNT, VU), &trig->tdata1);
__clear_bit(RV_DBTR_BIT(ICOUNT, VS), &trig->tdata1);
__clear_bit(RV_DBTR_BIT(ICOUNT, U), &trig->tdata1);
__clear_bit(RV_DBTR_BIT(ICOUNT, S), &trig->tdata1);
break;
default:
break;
}
@ -470,7 +441,6 @@ static int dbtr_trigger_supported(unsigned long type)
switch (type) {
case RISCV_DBTR_TRIG_MCONTROL:
case RISCV_DBTR_TRIG_MCONTROL6:
case RISCV_DBTR_TRIG_ICOUNT:
return 1;
default:
break;
@ -492,11 +462,6 @@ static int dbtr_trigger_valid(unsigned long type, unsigned long tdata)
!(tdata & RV_DBTR_BIT_MASK(MC6, M)))
return 1;
break;
case RISCV_DBTR_TRIG_ICOUNT:
if (!(tdata & RV_DBTR_BIT_MASK(ICOUNT, DMODE)) &&
!(tdata & RV_DBTR_BIT_MASK(ICOUNT, M)))
return 1;
break;
default:
break;
}
@ -541,7 +506,7 @@ int sbi_dbtr_read_trig(unsigned long smode,
{
struct sbi_dbtr_data_msg *xmit;
struct sbi_dbtr_trigger *trig;
union sbi_dbtr_shmem_entry *entry;
struct sbi_dbtr_shmem_entry *entry;
void *shmem_base = NULL;
struct sbi_dbtr_hart_triggers_state *hs = NULL;
@ -558,21 +523,16 @@ int sbi_dbtr_read_trig(unsigned long smode,
shmem_base = hart_shmem_base(hs);
sbi_hart_map_saddr((unsigned long)shmem_base,
trig_count * sizeof(*entry));
for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
xmit = &entry->data;
trig = INDEX_TO_TRIGGER((_idx + trig_idx_base));
csr_write(CSR_TSELECT, trig->index);
trig->tdata1 = csr_read(CSR_TDATA1);
trig->tdata2 = csr_read(CSR_TDATA2);
trig->tdata3 = csr_read(CSR_TDATA3);
xmit->tstate = cpu_to_lle(trig->state);
xmit->tdata1 = cpu_to_lle(trig->tdata1);
xmit->tdata2 = cpu_to_lle(trig->tdata2);
xmit->tdata3 = cpu_to_lle(trig->tdata3);
}
sbi_hart_unmap_saddr();
}
return SBI_SUCCESS;
}
@ -581,7 +541,7 @@ int sbi_dbtr_install_trig(unsigned long smode,
unsigned long trig_count, unsigned long *out)
{
void *shmem_base = NULL;
union sbi_dbtr_shmem_entry *entry;
struct sbi_dbtr_shmem_entry *entry;
struct sbi_dbtr_data_msg *recv;
struct sbi_dbtr_id_msg *xmit;
unsigned long ctrl;
@ -596,11 +556,10 @@ int sbi_dbtr_install_trig(unsigned long smode,
return SBI_ERR_NO_SHMEM;
shmem_base = hart_shmem_base(hs);
sbi_hart_map_saddr((unsigned long)shmem_base,
trig_count * sizeof(*entry));
/* Check requested triggers configuration */
for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
recv = (struct sbi_dbtr_data_msg *)(&entry->data);
ctrl = recv->tdata1;
@ -615,11 +574,11 @@ int sbi_dbtr_install_trig(unsigned long smode,
sbi_hart_unmap_saddr();
return SBI_ERR_FAILED;
}
sbi_hart_unmap_saddr();
}
if (hs->available_trigs < trig_count) {
*out = hs->available_trigs;
sbi_hart_unmap_saddr();
return SBI_ERR_FAILED;
}
@ -631,15 +590,16 @@ int sbi_dbtr_install_trig(unsigned long smode,
*/
trig = sbi_alloc_trigger();
sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
recv = (struct sbi_dbtr_data_msg *)(&entry->data);
xmit = (struct sbi_dbtr_id_msg *)(&entry->id);
dbtr_trigger_setup(trig, recv);
dbtr_trigger_enable(trig);
xmit->idx = cpu_to_lle(trig->index);
}
sbi_hart_unmap_saddr();
}
return SBI_SUCCESS;
}
@ -691,11 +651,15 @@ int sbi_dbtr_enable_trig(unsigned long trig_idx_base,
}
int sbi_dbtr_update_trig(unsigned long smode,
unsigned long trig_count)
unsigned long trig_idx_base,
unsigned long trig_idx_mask)
{
unsigned long trig_idx;
unsigned long trig_mask = trig_idx_mask << trig_idx_base;
unsigned long idx = trig_idx_base;
struct sbi_dbtr_data_msg *recv;
unsigned long uidx = 0;
struct sbi_dbtr_trigger *trig;
union sbi_dbtr_shmem_entry *entry;
struct sbi_dbtr_shmem_entry *entry;
void *shmem_base = NULL;
struct sbi_dbtr_hart_triggers_state *hs = NULL;
@ -708,28 +672,18 @@ int sbi_dbtr_update_trig(unsigned long smode,
shmem_base = hart_shmem_base(hs);
if (trig_count >= hs->total_trigs)
return SBI_ERR_BAD_RANGE;
for_each_set_bit_from(idx, &trig_mask, hs->total_trigs) {
trig = INDEX_TO_TRIGGER(idx);
for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
trig_idx = entry->id.idx;
if (trig_idx >= hs->total_trigs) {
sbi_hart_unmap_saddr();
if (!(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED)))
return SBI_ERR_INVALID_PARAM;
}
trig = INDEX_TO_TRIGGER(trig_idx);
entry = (shmem_base + uidx * sizeof(*entry));
recv = &entry->data;
if (!(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED))) {
sbi_hart_unmap_saddr();
return SBI_ERR_FAILED;
}
dbtr_trigger_setup(trig, &entry->data);
sbi_hart_unmap_saddr();
trig->tdata2 = lle_to_cpu(recv->tdata2);
dbtr_trigger_enable(trig);
uidx++;
}
return SBI_SUCCESS;

View File

@ -685,15 +685,20 @@ int sbi_domain_root_add_memrange(unsigned long addr, unsigned long size,
return 0;
}
int sbi_domain_startup(struct sbi_scratch *scratch, u32 cold_hartid)
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
{
int rc;
u32 dhart;
struct sbi_domain *dom;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
/* Sanity checks */
if (!domain_finalized)
return SBI_EINVAL;
/* Initialize and populate domains for the platform */
rc = sbi_platform_domains_init(plat);
if (rc) {
sbi_printf("%s: platform domains_init() failed (error %d)\n",
__func__, rc);
return rc;
}
/* Startup boot HART of domains */
sbi_domain_for_each(dom) {
@ -739,26 +744,6 @@ int sbi_domain_startup(struct sbi_scratch *scratch, u32 cold_hartid)
}
}
return 0;
}
int sbi_domain_finalize(struct sbi_scratch *scratch)
{
int rc;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
/* Sanity checks */
if (domain_finalized)
return SBI_EINVAL;
/* Initialize and populate domains for the platform */
rc = sbi_platform_domains_init(plat);
if (rc) {
sbi_printf("%s: platform domains_init() failed (error %d)\n",
__func__, rc);
return rc;
}
/*
* Set the finalized flag so that the root domain
* regions can't be changed.
@ -770,9 +755,11 @@ int sbi_domain_finalize(struct sbi_scratch *scratch)
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
{
u32 i;
int rc;
struct sbi_hartmask *root_hmask;
struct sbi_domain_memregion *root_memregs;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
SBI_INIT_LIST_HEAD(&domain_list);
@ -853,7 +840,7 @@ int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
root.next_mode = scratch->next_mode;
/* Root domain possible and assigned HARTs */
sbi_for_each_hartindex(i)
for (i = 0; i < plat->hart_count; i++)
sbi_hartmask_set_hartindex(i, root_hmask);
/* Finally register the root domain */

View File

@ -15,7 +15,6 @@
#include <sbi/sbi_string.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_domain_context.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_trap.h>
/** Context representation for a hart within a domain */
@ -54,30 +53,31 @@ struct hart_context {
bool initialized;
};
static struct sbi_domain_data dcpriv;
struct domain_context_priv {
/** Contexts for possible HARTs indexed by hartindex */
struct hart_context *hartindex_to_context_table[SBI_HARTMASK_MAX_BITS];
};
static struct sbi_domain_data dcpriv = {
.data_size = sizeof(struct domain_context_priv),
};
static inline struct hart_context *hart_context_get(struct sbi_domain *dom,
u32 hartindex)
{
struct hart_context **dom_hartindex_to_context_table;
struct domain_context_priv *dcp = sbi_domain_data_ptr(dom, &dcpriv);
dom_hartindex_to_context_table = sbi_domain_data_ptr(dom, &dcpriv);
if (!dom_hartindex_to_context_table || !sbi_hartindex_valid(hartindex))
return NULL;
return dom_hartindex_to_context_table[hartindex];
return (dcp && hartindex < SBI_HARTMASK_MAX_BITS) ?
dcp->hartindex_to_context_table[hartindex] : NULL;
}
static void hart_context_set(struct sbi_domain *dom, u32 hartindex,
struct hart_context *hc)
{
struct hart_context **dom_hartindex_to_context_table;
struct domain_context_priv *dcp = sbi_domain_data_ptr(dom, &dcpriv);
dom_hartindex_to_context_table = sbi_domain_data_ptr(dom, &dcpriv);
if (!dom_hartindex_to_context_table || !sbi_hartindex_valid(hartindex))
return;
dom_hartindex_to_context_table[hartindex] = hc;
if (dcp && hartindex < SBI_HARTMASK_MAX_BITS)
dcp->hartindex_to_context_table[hartindex] = hc;
}
/** Macro to obtain the current hart's context pointer */
@ -116,7 +116,6 @@ static void switch_to_next_domain_context(struct hart_context *ctx,
/* Reconfigure PMP settings for the new domain */
for (int i = 0; i < pmp_count; i++) {
sbi_platform_pmp_disable(sbi_platform_thishart_ptr(), i);
pmp_disable(i);
}
sbi_hart_pmp_configure(scratch);
@ -233,14 +232,6 @@ int sbi_domain_context_exit(void)
int sbi_domain_context_init(void)
{
/**
* Allocate per-domain and per-hart context data.
* The data type is "struct hart_context **" whose memory space will be
* dynamically allocated by domain_setup_data_one(). Calculate the
* needed size of that memory space here.
*/
dcpriv.data_size = sizeof(struct hart_context *) * sbi_hart_count();
return sbi_domain_register_data(&dcpriv);
}
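
A quick sizing comparison of the two allocation strategies, under assumed values (8-byte pointers, SBI_HARTMASK_MAX_BITS configured to 128):

/* Illustrative arithmetic, not measured:
 *   v1.6:   data_size = sizeof(struct domain_context_priv)
 *                     = 128 * 8 = 1024 bytes per domain, fixed
 *   master: data_size = sizeof(struct hart_context *) * sbi_hart_count()
 *                     = 8 * N bytes per domain on an N-hart platform
 */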

View File

@ -10,7 +10,6 @@
#include <sbi/sbi_console.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_sse.h>
#include <sbi/sbi_trap.h>
@ -29,9 +28,3 @@ int sbi_double_trap_handler(struct sbi_trap_context *tcntx)
return sbi_sse_inject_event(SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP);
}
void sbi_double_trap_init(struct sbi_scratch *scratch)
{
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSDBLTRP))
sbi_sse_add_event(SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP, NULL);
}

View File

@ -93,6 +93,7 @@ int sbi_ecall_register_extension(struct sbi_ecall_extension *ext)
return SBI_EINVAL;
}
SBI_INIT_LIST_HEAD(&ext->head);
sbi_list_add_tail(&ext->head, &ecall_exts_list);
return 0;

View File

@ -43,7 +43,7 @@ static int sbi_ecall_dbtr_handler(unsigned long extid, unsigned long funcid,
ret = sbi_dbtr_enable_trig(regs->a0, regs->a1);
break;
case SBI_EXT_DBTR_TRIGGER_UPDATE:
ret = sbi_dbtr_update_trig(smode, regs->a0);
ret = sbi_dbtr_update_trig(smode, regs->a0, regs->a1);
break;
case SBI_EXT_DBTR_TRIGGER_DISABLE:
ret = sbi_dbtr_disable_trig(regs->a0, regs->a1);
@ -69,6 +69,7 @@ struct sbi_ecall_extension ecall_dbtr = {
.name = "dbtr",
.extid_start = SBI_EXT_DBTR,
.extid_end = SBI_EXT_DBTR,
.experimental = true,
.handle = sbi_ecall_dbtr_handler,
.register_extensions = sbi_ecall_dbtr_register_extensions,
};
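
The a0/a1 plumbing above means the same function ID takes different arguments on each side. A hedged S-mode sketch of issuing the call under the v1.6 base/mask ABI; sbi_ecall2() is a local helper following the standard SBI calling convention, and the two macros are the ones from <sbi/sbi_ecall_interface.h> used above:

struct sbiret { long error; long value; };

static struct sbiret sbi_ecall2(unsigned long eid, unsigned long fid,
				unsigned long arg0, unsigned long arg1)
{
	register unsigned long a0 asm("a0") = arg0;
	register unsigned long a1 asm("a1") = arg1;
	register unsigned long a6 asm("a6") = fid;
	register unsigned long a7 asm("a7") = eid;

	/* EID in a7, FID in a6; error comes back in a0, value in a1. */
	asm volatile("ecall"
		     : "+r"(a0), "+r"(a1)
		     : "r"(a6), "r"(a7)
		     : "memory");
	return (struct sbiret){ .error = (long)a0, .value = (long)a1 };
}

/* v1.6 ABI: a0 = trig_idx_base, a1 = trig_idx_mask;
 * master ABI: a0 = trig_count and indices travel via shared memory. */
struct sbiret ret = sbi_ecall2(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_UPDATE,
			       0 /* base */, 0x3 /* mask: triggers 0 and 1 */);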

View File

@ -45,6 +45,7 @@ struct sbi_ecall_extension ecall_fwft = {
.name = "fwft",
.extid_start = SBI_EXT_FWFT,
.extid_end = SBI_EXT_FWFT,
.experimental = true,
.register_extensions = sbi_ecall_fwft_register_extensions,
.handle = sbi_ecall_fwft_handler,
};

View File

@ -20,11 +20,8 @@ static int sbi_ecall_mpxy_handler(unsigned long extid, unsigned long funcid,
int ret = 0;
switch (funcid) {
case SBI_EXT_MPXY_GET_SHMEM_SIZE:
out->value = sbi_mpxy_get_shmem_size();
break;
case SBI_EXT_MPXY_SET_SHMEM:
ret = sbi_mpxy_set_shmem(regs->a0, regs->a1, regs->a2);
ret = sbi_mpxy_set_shmem(regs->a0, regs->a1, regs->a2, regs->a3);
break;
case SBI_EXT_MPXY_GET_CHANNEL_IDS:
ret = sbi_mpxy_get_channel_ids(regs->a0);
@ -39,7 +36,7 @@ static int sbi_ecall_mpxy_handler(unsigned long extid, unsigned long funcid,
ret = sbi_mpxy_send_message(regs->a0, regs->a1,
regs->a2, &out->value);
break;
case SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP:
case SBI_EXT_MPXY_SEND_MSG_NO_RESP:
ret = sbi_mpxy_send_message(regs->a0, regs->a1, regs->a2,
NULL);
break;
@ -67,6 +64,7 @@ struct sbi_ecall_extension ecall_mpxy = {
.name = "mpxy",
.extid_start = SBI_EXT_MPXY,
.extid_end = SBI_EXT_MPXY,
.experimental = true,
.register_extensions = sbi_ecall_mpxy_register_extensions,
.handle = sbi_ecall_mpxy_handler,
};

View File

@ -59,6 +59,7 @@ struct sbi_ecall_extension ecall_sse = {
.name = "sse",
.extid_start = SBI_EXT_SSE,
.extid_end = SBI_EXT_SSE,
.experimental = true,
.register_extensions = sbi_ecall_sse_register_extensions,
.handle = sbi_ecall_sse_handler,
};

View File

@ -223,32 +223,32 @@ static int fwft_pmlen_supported(struct fwft_config *conf)
return SBI_OK;
}
static bool fwft_try_to_set_pmm(unsigned long pmm)
{
csr_set(CSR_MENVCFG, pmm);
return (csr_read(CSR_MENVCFG) & ENVCFG_PMM) == pmm;
}
static int fwft_set_pmlen(struct fwft_config *conf, unsigned long value)
{
unsigned long pmm, prev;
unsigned long prev;
switch (value) {
case 0:
pmm = ENVCFG_PMM_PMLEN_0;
break;
case 7:
pmm = ENVCFG_PMM_PMLEN_7;
break;
case 16:
pmm = ENVCFG_PMM_PMLEN_16;
break;
default:
if (value > 16)
return SBI_EINVAL;
}
prev = csr_read_clear(CSR_MENVCFG, ENVCFG_PMM);
csr_set(CSR_MENVCFG, pmm);
if ((csr_read(CSR_MENVCFG) & ENVCFG_PMM) != pmm) {
csr_write(CSR_MENVCFG, prev);
return SBI_EINVAL;
}
if (value == 0)
return SBI_OK;
if (value <= 7) {
if (fwft_try_to_set_pmm(ENVCFG_PMM_PMLEN_7))
return SBI_OK;
csr_clear(CSR_MENVCFG, ENVCFG_PMM);
}
if (fwft_try_to_set_pmm(ENVCFG_PMM_PMLEN_16))
return SBI_OK;
csr_write(CSR_MENVCFG, prev);
return SBI_EINVAL;
}
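
/* Hedged aside: both versions rely on menvcfg.PMM being WARL, so writing
 * an unsupported encoding reads back unchanged. Encodings assumed here,
 * per the RISC-V pointer-masking extension on RV64:
 *   PMM = 0b00 -> masking disabled (PMLEN = 0)
 *   PMM = 0b10 -> PMLEN = 7  (ENVCFG_PMM_PMLEN_7)
 *   PMM = 0b11 -> PMLEN = 16 (ENVCFG_PMM_PMLEN_16)
 * v1.6 accepts the first encoding that sticks (falling back from 7 to
 * 16), while master insists on the exact PMLEN that was requested.
 */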
static int fwft_get_pmlen(struct fwft_config *conf, unsigned long *value)
@ -337,7 +337,7 @@ int sbi_fwft_set(enum sbi_fwft_feature_t feature, unsigned long value,
return SBI_EINVAL;
if (conf->flags & SBI_FWFT_SET_FLAG_LOCK)
return SBI_EDENIED_LOCKED;
return SBI_EDENIED;
ret = conf->feature->set(conf, value);
if (ret)

View File

@ -49,10 +49,10 @@ static void mstatus_init(struct sbi_scratch *scratch)
csr_write(CSR_MSTATUS, mstatus_val);
/* Disable user mode usage of all perf counters except TM */
/* Disable user mode usage of all perf counters except default ones (CY, TM, IR) */
if (misa_extension('S') &&
sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10)
csr_write(CSR_SCOUNTEREN, 0x02);
csr_write(CSR_SCOUNTEREN, 7);
/**
* OpenSBI doesn't use any PMU counters in M-mode.
@ -85,11 +85,11 @@ static void mstatus_init(struct sbi_scratch *scratch)
#endif
}
if (misa_extension('H'))
csr_write(CSR_HSTATUS, 0);
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMSTATEEN)) {
mstateen_val = 0;
mstateen_val = csr_read(CSR_MSTATEEN0);
#if __riscv_xlen == 32
mstateen_val |= ((uint64_t)csr_read(CSR_MSTATEEN0H)) << 32;
#endif
mstateen_val |= SMSTATEEN_STATEN;
mstateen_val |= SMSTATEEN0_CONTEXT;
mstateen_val |= SMSTATEEN0_HSENVCFG;
@ -105,34 +105,17 @@ static void mstatus_init(struct sbi_scratch *scratch)
else
mstateen_val &= ~(SMSTATEEN0_SVSLCT);
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCTR))
mstateen_val |= SMSTATEEN0_CTR;
else
mstateen_val &= ~SMSTATEEN0_CTR;
csr_write64(CSR_MSTATEEN0, mstateen_val);
csr_write64(CSR_MSTATEEN1, SMSTATEEN_STATEN);
csr_write64(CSR_MSTATEEN2, SMSTATEEN_STATEN);
csr_write64(CSR_MSTATEEN3, SMSTATEEN_STATEN);
}
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSSTATEEN)) {
if (misa_extension('S')) {
csr_write(CSR_SSTATEEN0, 0);
csr_write(CSR_SSTATEEN1, 0);
csr_write(CSR_SSTATEEN2, 0);
csr_write(CSR_SSTATEEN3, 0);
}
if (misa_extension('H')) {
csr_write64(CSR_HSTATEEN0, (uint64_t)0);
csr_write64(CSR_HSTATEEN1, (uint64_t)0);
csr_write64(CSR_HSTATEEN2, (uint64_t)0);
csr_write64(CSR_HSTATEEN3, (uint64_t)0);
}
csr_write(CSR_MSTATEEN0, mstateen_val);
#if __riscv_xlen == 32
csr_write(CSR_MSTATEEN0H, mstateen_val >> 32);
#endif
}
if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) {
menvcfg_val = csr_read64(CSR_MENVCFG);
menvcfg_val = csr_read(CSR_MENVCFG);
#if __riscv_xlen == 32
menvcfg_val |= ((uint64_t)csr_read(CSR_MENVCFGH)) << 32;
#endif
/* Disable double trap by default */
menvcfg_val &= ~ENVCFG_DTE;
@ -168,7 +151,10 @@ static void mstatus_init(struct sbi_scratch *scratch)
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SVADE))
menvcfg_val &= ~ENVCFG_ADUE;
csr_write64(CSR_MENVCFG, menvcfg_val);
csr_write(CSR_MENVCFG, menvcfg_val);
#if __riscv_xlen == 32
csr_write(CSR_MENVCFGH, menvcfg_val >> 32);
#endif
/* Enable S-mode access to seed CSR */
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_ZKR)) {
@ -217,7 +203,7 @@ static int delegate_traps(struct sbi_scratch *scratch)
/* Send M-mode interrupts and most exceptions to S-mode */
interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP;
interrupts |= sbi_pmu_irq_mask();
interrupts |= sbi_pmu_irq_bit();
exceptions = (1U << CAUSE_MISALIGNED_FETCH) | (1U << CAUSE_BREAKPOINT) |
(1U << CAUSE_USER_ECALL);
@ -375,9 +361,6 @@ static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
unsigned long pmp_addr = reg->base >> PMP_SHIFT;
if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
sbi_platform_pmp_set(sbi_platform_ptr(scratch),
pmp_idx, reg->flags, pmp_flags,
reg->base, reg->order);
pmp_set(pmp_idx, pmp_flags, reg->base, reg->order);
} else {
sbi_printf("Can not configure pmp for domain %s because"
@ -495,9 +478,6 @@ static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
pmp_addr = reg->base >> PMP_SHIFT;
if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
sbi_platform_pmp_set(sbi_platform_ptr(scratch),
pmp_idx, reg->flags, pmp_flags,
reg->base, reg->order);
pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
} else {
sbi_printf("Can not configure pmp for domain %s because"
@ -538,9 +518,6 @@ int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
}
}
sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
pmp_flags, base, order);
pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
return SBI_OK;
@ -553,7 +530,6 @@ int sbi_hart_unmap_saddr(void)
if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
return SBI_OK;
sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
}
@ -712,9 +688,6 @@ const struct sbi_hart_ext_data sbi_hart_ext[] = {
__SBI_HART_EXT_DATA(zicfilp, SBI_HART_EXT_ZICFILP),
__SBI_HART_EXT_DATA(zicfiss, SBI_HART_EXT_ZICFISS),
__SBI_HART_EXT_DATA(ssdbltrp, SBI_HART_EXT_SSDBLTRP),
__SBI_HART_EXT_DATA(smctr, SBI_HART_EXT_SMCTR),
__SBI_HART_EXT_DATA(ssctr, SBI_HART_EXT_SSCTR),
__SBI_HART_EXT_DATA(ssstateen, SBI_HART_EXT_SSSTATEEN),
};
_Static_assert(SBI_HART_EXT_MAX == array_size(sbi_hart_ext),
@ -741,10 +714,6 @@ void sbi_hart_get_extensions_str(struct sbi_scratch *scratch,
sbi_memset(extensions_str, 0, nestr);
for_each_set_bit(ext, hfeatures->extensions, SBI_HART_EXT_MAX) {
if (offset + sbi_strlen(sbi_hart_ext[ext].name) + 1 > nestr) {
sbi_printf("%s:extension name is longer than buffer (error)\n", __func__);
break;
}
sbi_snprintf(extensions_str + offset,
nestr - offset,
"%s,", sbi_hart_ext[ext].name);
@ -757,20 +726,6 @@ void sbi_hart_get_extensions_str(struct sbi_scratch *scratch,
sbi_strncpy(extensions_str, "none", nestr);
}
/**
* Check whether a particular CSR is present on the HART
*
* @param scratch pointer to the HART scratch space
* @param csr the CSR number to check
*/
bool sbi_hart_has_csr(struct sbi_scratch *scratch, enum sbi_hart_csrs csr)
{
struct sbi_hart_features *hfeatures =
sbi_scratch_offset_ptr(scratch, hart_features_offset);
return __test_bit(csr, hfeatures->csrs);
}
static unsigned long hart_pmp_get_allowed_addr(void)
{
unsigned long val = 0;
@ -827,6 +782,7 @@ static int hart_detect_features(struct sbi_scratch *scratch)
struct sbi_hart_features *hfeatures =
sbi_scratch_offset_ptr(scratch, hart_features_offset);
unsigned long val, oldval;
bool has_zicntr = false;
int rc;
/* If hart features already detected then do nothing */
@ -835,7 +791,6 @@ static int hart_detect_features(struct sbi_scratch *scratch)
/* Clear hart features */
sbi_memset(hfeatures->extensions, 0, sizeof(hfeatures->extensions));
sbi_memset(hfeatures->csrs, 0, sizeof(hfeatures->csrs));
hfeatures->pmp_count = 0;
hfeatures->mhpm_mask = 0;
hfeatures->priv_version = SBI_HART_PRIV_VER_UNKNOWN;
@ -962,6 +917,9 @@ __pmp_skip:
/* Detect if hart supports sscofpmf */
__check_ext_csr(SBI_HART_PRIV_VER_1_11,
CSR_SCOUNTOVF, SBI_HART_EXT_SSCOFPMF);
/* Detect if hart supports time CSR */
__check_ext_csr(SBI_HART_PRIV_VER_UNKNOWN,
CSR_TIME, SBI_HART_EXT_ZICNTR);
/* Detect if hart has AIA local interrupt CSRs */
__check_ext_csr(SBI_HART_PRIV_VER_UNKNOWN,
CSR_MTOPI, SBI_HART_EXT_SMAIA);
@ -971,9 +929,6 @@ __pmp_skip:
/* Detect if hart supports mstateen CSRs */
__check_ext_csr(SBI_HART_PRIV_VER_1_12,
CSR_MSTATEEN0, SBI_HART_EXT_SMSTATEEN);
/* Detect if hart supports sstateen CSRs */
__check_ext_csr(SBI_HART_PRIV_VER_1_12,
CSR_SSTATEEN0, SBI_HART_EXT_SSSTATEEN);
/* Detect if hart supports smcntrpmf */
__check_ext_csr(SBI_HART_PRIV_VER_1_12,
CSR_MCYCLECFG, SBI_HART_EXT_SMCNTRPMF);
@ -983,16 +938,8 @@ __pmp_skip:
#undef __check_ext_csr
#define __check_csr_existence(__csr, __csr_id) \
csr_read_allowed(__csr, &trap); \
if (!trap.cause) \
__set_bit(__csr_id, hfeatures->csrs);
__check_csr_existence(CSR_CYCLE, SBI_HART_CSR_CYCLE);
__check_csr_existence(CSR_TIME, SBI_HART_CSR_TIME);
__check_csr_existence(CSR_INSTRET, SBI_HART_CSR_INSTRET);
#undef __check_csr_existence
/* Save trap based detection of Zicntr */
has_zicntr = sbi_hart_has_extension(scratch, SBI_HART_EXT_ZICNTR);
/* Let platform populate extensions */
rc = sbi_platform_extensions_init(sbi_platform_thishart_ptr(),
@ -1002,9 +949,7 @@ __pmp_skip:
/* Zicntr should only be detected using traps */
__sbi_hart_update_extension(hfeatures, SBI_HART_EXT_ZICNTR,
sbi_hart_has_csr(scratch, SBI_HART_CSR_CYCLE) &&
sbi_hart_has_csr(scratch, SBI_HART_CSR_TIME) &&
sbi_hart_has_csr(scratch, SBI_HART_CSR_INSTRET));
has_zicntr);
/* Extensions implied by other extensions and features */
if (hfeatures->mhpm_mask)

View File

@ -16,9 +16,7 @@
/* Minimum size and alignment of heap allocations */
#define HEAP_ALLOC_ALIGN 64
/* Number of heap nodes to allocate at once */
#define HEAP_NODE_BATCH_SIZE 8
#define HEAP_HOUSEKEEPING_FACTOR 16
struct heap_node {
struct sbi_dlist head;
@ -30,50 +28,20 @@ struct sbi_heap_control {
spinlock_t lock;
unsigned long base;
unsigned long size;
unsigned long resv;
unsigned long hkbase;
unsigned long hksize;
struct sbi_dlist free_node_list;
struct sbi_dlist free_space_list;
struct sbi_dlist used_space_list;
struct heap_node init_free_space_node;
};
struct sbi_heap_control global_hpctrl;
static bool alloc_nodes(struct sbi_heap_control *hpctrl)
{
size_t size = HEAP_NODE_BATCH_SIZE * sizeof(struct heap_node);
struct heap_node *n, *new = NULL;
/* alloc_with_align() requires at most two free nodes */
if (hpctrl->free_node_list.next != hpctrl->free_node_list.prev)
return true;
sbi_list_for_each_entry_reverse(n, &hpctrl->free_space_list, head) {
if (n->size >= size) {
n->size -= size;
if (!n->size) {
sbi_list_del(&n->head);
sbi_list_add_tail(&n->head, &hpctrl->free_node_list);
}
new = (void *)(n->addr + n->size);
break;
}
}
if (!new)
return false;
for (size_t i = 0; i < HEAP_NODE_BATCH_SIZE; i++)
sbi_list_add_tail(&new[i].head, &hpctrl->free_node_list);
hpctrl->resv += size;
return true;
}
static void *alloc_with_align(struct sbi_heap_control *hpctrl,
size_t align, size_t size)
{
void *ret = NULL;
struct heap_node *n, *np;
struct heap_node *n, *np, *rem;
unsigned long lowest_aligned;
size_t pad;
@ -85,10 +53,6 @@ static void *alloc_with_align(struct sbi_heap_control *hpctrl,
spin_lock(&hpctrl->lock);
/* Ensure at least two free nodes are available for use below */
if (!alloc_nodes(hpctrl))
goto out;
np = NULL;
sbi_list_for_each_entry(n, &hpctrl->free_space_list, head) {
lowest_aligned = ROUNDUP(n->addr, align);
@ -103,33 +67,54 @@ static void *alloc_with_align(struct sbi_heap_control *hpctrl,
goto out;
if (pad) {
if (sbi_list_empty(&hpctrl->free_node_list)) {
goto out;
}
n = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head);
sbi_list_del(&n->head);
if ((size + pad < np->size) &&
!sbi_list_empty(&hpctrl->free_node_list)) {
rem = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head);
sbi_list_del(&rem->head);
rem->addr = np->addr + (size + pad);
rem->size = np->size - (size + pad);
sbi_list_add_tail(&rem->head,
&hpctrl->free_space_list);
} else if (size + pad != np->size) {
/* Can't allocate, return n */
sbi_list_add(&n->head, &hpctrl->free_node_list);
ret = NULL;
goto out;
}
n->addr = lowest_aligned;
n->size = size;
sbi_list_add_tail(&n->head, &hpctrl->used_space_list);
np->size = pad;
ret = (void *)n->addr;
} else {
if ((size < np->size) &&
!sbi_list_empty(&hpctrl->free_node_list)) {
n = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head);
sbi_list_del(&n->head);
n->addr = np->addr;
n->size = pad;
sbi_list_add_tail(&n->head, &np->head);
np->addr += pad;
np->size -= pad;
}
if (size < np->size) {
n = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head);
sbi_list_del(&n->head);
n->addr = np->addr + size;
n->size = np->size - size;
sbi_list_add(&n->head, &np->head);
np->size = size;
}
n->size = size;
np->addr += size;
np->size -= size;
sbi_list_add_tail(&n->head, &hpctrl->used_space_list);
ret = (void *)n->addr;
} else if (size == np->size) {
sbi_list_del(&np->head);
sbi_list_add_tail(&np->head, &hpctrl->used_space_list);
ret = (void *)np->addr;
}
}
out:
spin_unlock(&hpctrl->lock);
@ -231,32 +216,45 @@ unsigned long sbi_heap_free_space_from(struct sbi_heap_control *hpctrl)
unsigned long sbi_heap_used_space_from(struct sbi_heap_control *hpctrl)
{
return hpctrl->size - hpctrl->resv - sbi_heap_free_space();
return hpctrl->size - hpctrl->hksize - sbi_heap_free_space();
}
unsigned long sbi_heap_reserved_space_from(struct sbi_heap_control *hpctrl)
{
return hpctrl->resv;
return hpctrl->hksize;
}
int sbi_heap_init_new(struct sbi_heap_control *hpctrl, unsigned long base,
unsigned long size)
{
unsigned long i;
struct heap_node *n;
/* Initialize heap control */
SPIN_LOCK_INIT(hpctrl->lock);
hpctrl->base = base;
hpctrl->size = size;
hpctrl->resv = 0;
hpctrl->hkbase = hpctrl->base;
hpctrl->hksize = hpctrl->size / HEAP_HOUSEKEEPING_FACTOR;
hpctrl->hksize &= ~((unsigned long)HEAP_BASE_ALIGN - 1);
SBI_INIT_LIST_HEAD(&hpctrl->free_node_list);
SBI_INIT_LIST_HEAD(&hpctrl->free_space_list);
SBI_INIT_LIST_HEAD(&hpctrl->used_space_list);
/* Prepare free node list */
for (i = 0; i < (hpctrl->hksize / sizeof(*n)); i++) {
n = (struct heap_node *)(hpctrl->hkbase + (sizeof(*n) * i));
SBI_INIT_LIST_HEAD(&n->head);
n->addr = n->size = 0;
sbi_list_add_tail(&n->head, &hpctrl->free_node_list);
}
/* Prepare free space list */
n = &hpctrl->init_free_space_node;
n->addr = base;
n->size = size;
n = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head);
sbi_list_del(&n->head);
n->addr = hpctrl->hkbase + hpctrl->hksize;
n->size = hpctrl->size - hpctrl->hksize;
sbi_list_add_tail(&n->head, &hpctrl->free_space_list);
return 0;
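
A worked example of the v1.6 housekeeping split (heap size and node layout are assumptions for illustration):

/* Assume a 32 KiB heap on RV64, where struct heap_node is 32 bytes
 * (two list pointers + addr + size):
 *   hksize    = 32768 / HEAP_HOUSEKEEPING_FACTOR = 32768 / 16 = 2048
 *   node pool = 2048 / 32 = 64 heap_node entries, reserved up front
 * Master instead seeds the free-space list with one static node and
 * carves HEAP_NODE_BATCH_SIZE batches (8 * 32 = 256 bytes) out of free
 * space only when fewer than two free nodes remain.
 */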

View File

@ -37,8 +37,6 @@
static const struct sbi_hsm_device *hsm_dev = NULL;
static unsigned long hart_data_offset;
static bool hsm_device_has_hart_hotplug(void);
static int hsm_device_hart_stop(void);
/** Per hart specific data to manage state transition **/
struct sbi_hsm_data {
@ -172,15 +170,6 @@ static void sbi_hsm_hart_wait(struct sbi_scratch *scratch)
/* Wait for state transition requested by sbi_hsm_hart_start() */
while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) {
/*
* If the hsm_dev is ready and it supports hotplug, we can
* use the hsm stop for more power saving.
*/
if (hsm_device_has_hart_hotplug()) {
sbi_revert_entry_count(scratch);
hsm_device_hart_stop();
}
wfi();
}
@ -249,6 +238,7 @@ static void hsm_device_hart_resume(void)
int sbi_hsm_init(struct sbi_scratch *scratch, bool cold_boot)
{
u32 i;
struct sbi_scratch *rscratch;
struct sbi_hsm_data *hdata;
@ -258,7 +248,7 @@ int sbi_hsm_init(struct sbi_scratch *scratch, bool cold_boot)
return SBI_ENOMEM;
/* Initialize hart state data for every hart */
sbi_for_each_hartindex(i) {
for (i = 0; i <= sbi_scratch_last_hartindex(); i++) {
rscratch = sbi_hartindex_to_scratch(i);
if (!rscratch)
continue;

View File

@ -1,664 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 MIPS
*
*/
#include <sbi/riscv_asm.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_trap.h>
#include <sbi/sbi_illegal_atomic.h>
#include <sbi/sbi_illegal_insn.h>
#if !defined(__riscv_atomic) && !defined(__riscv_zalrsc)
#error "opensbi strongly relies on the A extension of RISC-V"
#endif
#ifdef __riscv_atomic
int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs)
{
return truly_illegal_insn(insn, regs);
}
#elif __riscv_zalrsc
#define DEFINE_UNPRIVILEGED_LR_FUNCTION(type, aqrl, insn) \
static type lr_##type##aqrl(const type *addr, \
struct sbi_trap_info *trap) \
{ \
register ulong tinfo asm("a3"); \
register ulong mstatus = 0; \
register ulong mtvec = (ulong)sbi_hart_expected_trap; \
type ret = 0; \
trap->cause = 0; \
asm volatile( \
"add %[tinfo], %[taddr], zero\n" \
"csrrw %[mtvec], " STR(CSR_MTVEC) ", %[mtvec]\n" \
"csrrs %[mstatus], " STR(CSR_MSTATUS) ", %[mprv]\n" \
".option push\n" \
".option norvc\n" \
#insn " %[ret], %[addr]\n" \
".option pop\n" \
"csrw " STR(CSR_MSTATUS) ", %[mstatus]\n" \
"csrw " STR(CSR_MTVEC) ", %[mtvec]" \
: [mstatus] "+&r"(mstatus), [mtvec] "+&r"(mtvec), \
[tinfo] "+&r"(tinfo), [ret] "=&r"(ret) \
: [addr] "m"(*addr), [mprv] "r"(MSTATUS_MPRV), \
[taddr] "r"((ulong)trap) \
: "a4", "memory"); \
return ret; \
}
#define DEFINE_UNPRIVILEGED_SC_FUNCTION(type, aqrl, insn) \
static type sc_##type##aqrl(type *addr, type val, \
struct sbi_trap_info *trap) \
{ \
register ulong tinfo asm("a3"); \
register ulong mstatus = 0; \
register ulong mtvec = (ulong)sbi_hart_expected_trap; \
type ret = 0; \
trap->cause = 0; \
asm volatile( \
"add %[tinfo], %[taddr], zero\n" \
"csrrw %[mtvec], " STR(CSR_MTVEC) ", %[mtvec]\n" \
"csrrs %[mstatus], " STR(CSR_MSTATUS) ", %[mprv]\n" \
".option push\n" \
".option norvc\n" \
#insn " %[ret], %[val], %[addr]\n" \
".option pop\n" \
"csrw " STR(CSR_MSTATUS) ", %[mstatus]\n" \
"csrw " STR(CSR_MTVEC) ", %[mtvec]" \
: [mstatus] "+&r"(mstatus), [mtvec] "+&r"(mtvec), \
[tinfo] "+&r"(tinfo), [ret] "=&r"(ret) \
: [addr] "m"(*addr), [mprv] "r"(MSTATUS_MPRV), \
[val] "r"(val), [taddr] "r"((ulong)trap) \
: "a4", "memory"); \
return ret; \
}
DEFINE_UNPRIVILEGED_LR_FUNCTION(s32, , lr.w);
DEFINE_UNPRIVILEGED_LR_FUNCTION(s32, _aq, lr.w.aq);
DEFINE_UNPRIVILEGED_LR_FUNCTION(s32, _rl, lr.w.rl);
DEFINE_UNPRIVILEGED_LR_FUNCTION(s32, _aqrl, lr.w.aqrl);
DEFINE_UNPRIVILEGED_SC_FUNCTION(s32, , sc.w);
DEFINE_UNPRIVILEGED_SC_FUNCTION(s32, _aq, sc.w.aq);
DEFINE_UNPRIVILEGED_SC_FUNCTION(s32, _rl, sc.w.rl);
DEFINE_UNPRIVILEGED_SC_FUNCTION(s32, _aqrl, sc.w.aqrl);
#if __riscv_xlen == 64
DEFINE_UNPRIVILEGED_LR_FUNCTION(s64, , lr.d);
DEFINE_UNPRIVILEGED_LR_FUNCTION(s64, _aq, lr.d.aq);
DEFINE_UNPRIVILEGED_LR_FUNCTION(s64, _rl, lr.d.rl);
DEFINE_UNPRIVILEGED_LR_FUNCTION(s64, _aqrl, lr.d.aqrl);
DEFINE_UNPRIVILEGED_SC_FUNCTION(s64, , sc.d);
DEFINE_UNPRIVILEGED_SC_FUNCTION(s64, _aq, sc.d.aq);
DEFINE_UNPRIVILEGED_SC_FUNCTION(s64, _rl, sc.d.rl);
DEFINE_UNPRIVILEGED_SC_FUNCTION(s64, _aqrl, sc.d.aqrl);
#endif
#define DEFINE_ATOMIC_FUNCTION(name, type, func) \
static int atomic_##name(ulong insn, struct sbi_trap_regs *regs) \
{ \
struct sbi_trap_info uptrap; \
ulong addr = GET_RS1(insn, regs); \
ulong val = GET_RS2(insn, regs); \
ulong rd_val = 0; \
ulong fail = 1; \
while (fail) { \
rd_val = lr_##type((void *)addr, &uptrap); \
if (uptrap.cause) { \
return sbi_trap_redirect(regs, &uptrap); \
} \
fail = sc_##type((void *)addr, func, &uptrap); \
if (uptrap.cause) { \
return sbi_trap_redirect(regs, &uptrap); \
} \
} \
SET_RD(insn, regs, rd_val); \
regs->mepc += 4; \
return 0; \
}
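/* Hedged worked expansion of the macro above: a trapped
 * "amoadd.w a0, a1, (a2)" on a Zalrsc-only hart effectively runs
 *
 *   do {
 *       rd_val = lr_s32((void *)addr, &uptrap);             // lr.w
 *       fail = sc_s32((void *)addr, rd_val + val, &uptrap); // sc.w
 *   } while (fail);
 *   SET_RD(insn, regs, rd_val);
 *   regs->mepc += 4;
 *
 * with any faulting access redirected back to the supervisor via
 * sbi_trap_redirect() instead of spinning forever.
 */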
DEFINE_ATOMIC_FUNCTION(add_w, s32, rd_val + val);
DEFINE_ATOMIC_FUNCTION(add_w_aq, s32_aq, rd_val + val);
DEFINE_ATOMIC_FUNCTION(add_w_rl, s32_rl, rd_val + val);
DEFINE_ATOMIC_FUNCTION(add_w_aqrl, s32_aqrl, rd_val + val);
DEFINE_ATOMIC_FUNCTION(and_w, s32, rd_val & val);
DEFINE_ATOMIC_FUNCTION(and_w_aq, s32_aq, rd_val & val);
DEFINE_ATOMIC_FUNCTION(and_w_rl, s32_rl, rd_val & val);
DEFINE_ATOMIC_FUNCTION(and_w_aqrl, s32_aqrl, rd_val & val);
DEFINE_ATOMIC_FUNCTION(or_w, s32, rd_val | val);
DEFINE_ATOMIC_FUNCTION(or_w_aq, s32_aq, rd_val | val);
DEFINE_ATOMIC_FUNCTION(or_w_rl, s32_rl, rd_val | val);
DEFINE_ATOMIC_FUNCTION(or_w_aqrl, s32_aqrl, rd_val | val);
DEFINE_ATOMIC_FUNCTION(xor_w, s32, rd_val ^ val);
DEFINE_ATOMIC_FUNCTION(xor_w_aq, s32_aq, rd_val ^ val);
DEFINE_ATOMIC_FUNCTION(xor_w_rl, s32_rl, rd_val ^ val);
DEFINE_ATOMIC_FUNCTION(xor_w_aqrl, s32_aqrl, rd_val ^ val);
DEFINE_ATOMIC_FUNCTION(swap_w, s32, val);
DEFINE_ATOMIC_FUNCTION(swap_w_aq, s32_aq, val);
DEFINE_ATOMIC_FUNCTION(swap_w_rl, s32_rl, val);
DEFINE_ATOMIC_FUNCTION(swap_w_aqrl, s32_aqrl, val);
DEFINE_ATOMIC_FUNCTION(max_w, s32, (s32)rd_val > (s32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(max_w_aq, s32_aq, (s32)rd_val > (s32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(max_w_rl, s32_rl, (s32)rd_val > (s32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(max_w_aqrl, s32_aqrl, (s32)rd_val > (s32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(maxu_w, s32, (u32)rd_val > (u32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(maxu_w_aq, s32_aq, (u32)rd_val > (u32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(maxu_w_rl, s32_rl, (u32)rd_val > (u32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(maxu_w_aqrl, s32_aqrl, (u32)rd_val > (u32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(min_w, s32, (s32)rd_val < (s32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(min_w_aq, s32_aq, (s32)rd_val < (s32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(min_w_rl, s32_rl, (s32)rd_val < (s32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(min_w_aqrl, s32_aqrl, (s32)rd_val < (s32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(minu_w, s32, (u32)rd_val < (u32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(minu_w_aq, s32_aq, (u32)rd_val < (u32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(minu_w_rl, s32_rl, (u32)rd_val < (u32)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(minu_w_aqrl, s32_aqrl, (u32)rd_val < (u32)val ? rd_val : val);
#if __riscv_xlen == 64
DEFINE_ATOMIC_FUNCTION(add_d, s64, rd_val + val);
DEFINE_ATOMIC_FUNCTION(add_d_aq, s64_aq, rd_val + val);
DEFINE_ATOMIC_FUNCTION(add_d_rl, s64_rl, rd_val + val);
DEFINE_ATOMIC_FUNCTION(add_d_aqrl, s64_aqrl, rd_val + val);
DEFINE_ATOMIC_FUNCTION(and_d, s64, rd_val & val);
DEFINE_ATOMIC_FUNCTION(and_d_aq, s64_aq, rd_val & val);
DEFINE_ATOMIC_FUNCTION(and_d_rl, s64_rl, rd_val & val);
DEFINE_ATOMIC_FUNCTION(and_d_aqrl, s64_aqrl, rd_val & val);
DEFINE_ATOMIC_FUNCTION(or_d, s64, rd_val | val);
DEFINE_ATOMIC_FUNCTION(or_d_aq, s64_aq, rd_val | val);
DEFINE_ATOMIC_FUNCTION(or_d_rl, s64_rl, rd_val | val);
DEFINE_ATOMIC_FUNCTION(or_d_aqrl, s64_aqrl, rd_val | val);
DEFINE_ATOMIC_FUNCTION(xor_d, s64, rd_val ^ val);
DEFINE_ATOMIC_FUNCTION(xor_d_aq, s64_aq, rd_val ^ val);
DEFINE_ATOMIC_FUNCTION(xor_d_rl, s64_rl, rd_val ^ val);
DEFINE_ATOMIC_FUNCTION(xor_d_aqrl, s64_aqrl, rd_val ^ val);
DEFINE_ATOMIC_FUNCTION(swap_d, s64, val);
DEFINE_ATOMIC_FUNCTION(swap_d_aq, s64_aq, val);
DEFINE_ATOMIC_FUNCTION(swap_d_rl, s64_rl, val);
DEFINE_ATOMIC_FUNCTION(swap_d_aqrl, s64_aqrl, val);
DEFINE_ATOMIC_FUNCTION(max_d, s64, (s64)rd_val > (s64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(max_d_aq, s64_aq, (s64)rd_val > (s64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(max_d_rl, s64_rl, (s64)rd_val > (s64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(max_d_aqrl, s64_aqrl, (s64)rd_val > (s64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(maxu_d, s64, (u64)rd_val > (u64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(maxu_d_aq, s64_aq, (u64)rd_val > (u64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(maxu_d_rl, s64_rl, (u64)rd_val > (u64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(maxu_d_aqrl, s64_aqrl, (u64)rd_val > (u64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(min_d, s64, (s64)rd_val < (s64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(min_d_aq, s64_aq, (s64)rd_val < (s64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(min_d_rl, s64_rl, (s64)rd_val < (s64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(min_d_aqrl, s64_aqrl, (s64)rd_val < (s64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(minu_d, s64, (u64)rd_val < (u64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(minu_d_aq, s64_aq, (u64)rd_val < (u64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(minu_d_rl, s64_rl, (u64)rd_val < (u64)val ? rd_val : val);
DEFINE_ATOMIC_FUNCTION(minu_d_aqrl, s64_aqrl, (u64)rd_val < (u64)val ? rd_val : val);
#endif
static const illegal_insn_func amoadd_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_add_w, /* 8 */
atomic_add_w_rl, /* 9 */
atomic_add_w_aq, /* 10 */
atomic_add_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_add_d, /* 12 */
atomic_add_d_rl, /* 13 */
atomic_add_d_aq, /* 14 */
atomic_add_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static const illegal_insn_func amoswap_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_swap_w, /* 8 */
atomic_swap_w_rl, /* 9 */
atomic_swap_w_aq, /* 10 */
atomic_swap_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_swap_d, /* 12 */
atomic_swap_d_rl, /* 13 */
atomic_swap_d_aq, /* 14 */
atomic_swap_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static const illegal_insn_func amoxor_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_xor_w, /* 8 */
atomic_xor_w_rl, /* 9 */
atomic_xor_w_aq, /* 10 */
atomic_xor_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_xor_d, /* 12 */
atomic_xor_d_rl, /* 13 */
atomic_xor_d_aq, /* 14 */
atomic_xor_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static const illegal_insn_func amoor_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_or_w, /* 8 */
atomic_or_w_rl, /* 9 */
atomic_or_w_aq, /* 10 */
atomic_or_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_or_d, /* 12 */
atomic_or_d_rl, /* 13 */
atomic_or_d_aq, /* 14 */
atomic_or_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static const illegal_insn_func amoand_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_and_w, /* 8 */
atomic_and_w_rl, /* 9 */
atomic_and_w_aq, /* 10 */
atomic_and_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_and_d, /* 12 */
atomic_and_d_rl, /* 13 */
atomic_and_d_aq, /* 14 */
atomic_and_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static const illegal_insn_func amomin_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_min_w, /* 8 */
atomic_min_w_rl, /* 9 */
atomic_min_w_aq, /* 10 */
atomic_min_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_min_d, /* 12 */
atomic_min_d_rl, /* 13 */
atomic_min_d_aq, /* 14 */
atomic_min_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static const illegal_insn_func amomax_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_max_w, /* 8 */
atomic_max_w_rl, /* 9 */
atomic_max_w_aq, /* 10 */
atomic_max_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_max_d, /* 12 */
atomic_max_d_rl, /* 13 */
atomic_max_d_aq, /* 14 */
atomic_max_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static const illegal_insn_func amominu_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_minu_w, /* 8 */
atomic_minu_w_rl, /* 9 */
atomic_minu_w_aq, /* 10 */
atomic_minu_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_minu_d, /* 12 */
atomic_minu_d_rl, /* 13 */
atomic_minu_d_aq, /* 14 */
atomic_minu_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static const illegal_insn_func amomaxu_table[32] = {
truly_illegal_insn, /* 0 */
truly_illegal_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
truly_illegal_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
atomic_maxu_w, /* 8 */
atomic_maxu_w_rl, /* 9 */
atomic_maxu_w_aq, /* 10 */
atomic_maxu_w_aqrl, /* 11 */
#if __riscv_xlen == 64
atomic_maxu_d, /* 12 */
atomic_maxu_d_rl, /* 13 */
atomic_maxu_d_aq, /* 14 */
atomic_maxu_d_aqrl, /* 15 */
#else
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
#endif
truly_illegal_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
truly_illegal_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
truly_illegal_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
truly_illegal_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn, /* 31 */
};
static int amoadd_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amoadd_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static int amoswap_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amoswap_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static int amoxor_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amoxor_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static int amoor_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amoor_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static int amoand_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amoand_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static int amomin_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amomin_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static int amomax_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amomax_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static int amominu_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amominu_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static int amomaxu_insn(ulong insn, struct sbi_trap_regs *regs)
{
return amomaxu_table[(GET_FUNC3(insn) << 2) + GET_AQRL(insn)](insn, regs);
}
static const illegal_insn_func amo_insn_table[32] = {
amoadd_insn, /* 0 */
amoswap_insn, /* 1 */
truly_illegal_insn, /* 2 */
truly_illegal_insn, /* 3 */
amoxor_insn, /* 4 */
truly_illegal_insn, /* 5 */
truly_illegal_insn, /* 6 */
truly_illegal_insn, /* 7 */
amoor_insn, /* 8 */
truly_illegal_insn, /* 9 */
truly_illegal_insn, /* 10 */
truly_illegal_insn, /* 11 */
amoand_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */
truly_illegal_insn, /* 15 */
amomin_insn, /* 16 */
truly_illegal_insn, /* 17 */
truly_illegal_insn, /* 18 */
truly_illegal_insn, /* 19 */
amomax_insn, /* 20 */
truly_illegal_insn, /* 21 */
truly_illegal_insn, /* 22 */
truly_illegal_insn, /* 23 */
amominu_insn, /* 24 */
truly_illegal_insn, /* 25 */
truly_illegal_insn, /* 26 */
truly_illegal_insn, /* 27 */
amomaxu_insn, /* 28 */
truly_illegal_insn, /* 29 */
truly_illegal_insn, /* 30 */
truly_illegal_insn /* 31 */
};
int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs)
{
return amo_insn_table[(insn >> 27) & 0x1f](insn, regs);
}
#else
#error "need a or zalrsc"
#endif
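The dispatch above is two-level: sbi_illegal_atomic() indexes amo_insn_table with funct5 (instruction bits 31:27), and each per-operation handler then indexes its own 32-entry table with (funct3 << 2) + aq/rl. A minimal standalone sketch of the index arithmetic (not part of the patch; the GET_* macros are re-derived here from the RISC-V encoding for illustration):

#include <stdint.h>
#include <stdio.h>

/* Field extraction, mirroring the macros used by the tables above */
#define GET_FUNC5(insn)	(((insn) >> 27) & 0x1f)	/* AMO operation */
#define GET_FUNC3(insn)	(((insn) >> 12) & 0x7)	/* width: 2 = W, 3 = D */
#define GET_AQRL(insn)	(((insn) >> 25) & 0x3)	/* bit 26 = aq, bit 25 = rl */

int main(void)
{
	uint32_t insn = 0x0E75252F;	/* amoswap.w.aqrl a0, t2, (a0) */

	/* funct5 = 1, so amo_insn_table selects amoswap_insn */
	printf("outer index: %u\n", GET_FUNC5(insn));
	/* (2 << 2) + 3 = 11, the W handler's .aqrl variant */
	printf("inner index: %u\n", (GET_FUNC3(insn) << 2) + GET_AQRL(insn));
	return 0;
}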

View File

@ -13,14 +13,15 @@
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_emulate_csr.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_illegal_atomic.h>
#include <sbi/sbi_illegal_insn.h>
#include <sbi/sbi_pmu.h>
#include <sbi/sbi_trap.h>
#include <sbi/sbi_unpriv.h>
#include <sbi/sbi_console.h>
int truly_illegal_insn(ulong insn, struct sbi_trap_regs *regs)
typedef int (*illegal_insn_func)(ulong insn, struct sbi_trap_regs *regs);
static int truly_illegal_insn(ulong insn, struct sbi_trap_regs *regs)
{
struct sbi_trap_info trap;
@ -122,7 +123,7 @@ static const illegal_insn_func illegal_insn_table[32] = {
truly_illegal_insn, /* 8 */
truly_illegal_insn, /* 9 */
truly_illegal_insn, /* 10 */
sbi_illegal_atomic, /* 11 */
truly_illegal_insn, /* 11 */
truly_illegal_insn, /* 12 */
truly_illegal_insn, /* 13 */
truly_illegal_insn, /* 14 */

View File

@ -13,7 +13,6 @@
#include <sbi/sbi_console.h>
#include <sbi/sbi_cppc.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_double_trap.h>
#include <sbi/sbi_ecall.h>
#include <sbi/sbi_fwft.h>
#include <sbi/sbi_hart.h>
@ -161,7 +160,7 @@ static void sbi_boot_print_domains(struct sbi_scratch *scratch)
static void sbi_boot_print_hart(struct sbi_scratch *scratch, u32 hartid)
{
int xlen;
char str[256];
char str[128];
const struct sbi_domain *dom = sbi_domain_thishart_ptr();
if (scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS)
@ -267,6 +266,12 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
if (rc)
sbi_hart_hang();
rc = sbi_sse_init(scratch, true);
if (rc) {
sbi_printf("%s: sse init failed (error %d)\n", __func__, rc);
sbi_hart_hang();
}
rc = sbi_pmu_init(scratch, true);
if (rc) {
sbi_printf("%s: pmu init failed (error %d)\n",
@ -280,8 +285,6 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
sbi_boot_print_banner(scratch);
sbi_double_trap_init(scratch);
rc = sbi_irqchip_init(scratch, true);
if (rc) {
sbi_printf("%s: irqchip init failed (error %d)\n",
@ -318,13 +321,13 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
sbi_printf("%s: mpxy init failed (error %d)\n", __func__, rc);
sbi_hart_hang();
}
/*
* Note: Finalize domains after HSM initialization
* Note: Finalize domains after HSM initialization so that we
* can startup non-root domains.
* Note: Finalize domains before HART PMP configuration so
* that we use correct domain for configuring PMP.
*/
rc = sbi_domain_finalize(scratch);
rc = sbi_domain_finalize(scratch, hartid);
if (rc) {
sbi_printf("%s: domain finalize failed (error %d)\n",
__func__, rc);
@ -343,16 +346,6 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
sbi_hart_hang();
}
/*
* Note: SSE events callbacks can be registered by other drivers so
* sbi_sse_init() needs to be called after all drivers have been probed.
*/
rc = sbi_sse_init(scratch, true);
if (rc) {
sbi_printf("%s: sse init failed (error %d)\n", __func__, rc);
sbi_hart_hang();
}
/*
* Note: Ecall initialization should be after platform final
* initialization so that all available platform devices are
@ -372,17 +365,6 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
run_all_tests();
/*
* Note: Startup domains after all initialization are done
* otherwise boot HART of non-root domain can crash.
*/
rc = sbi_domain_startup(scratch, hartid);
if (rc) {
sbi_printf("%s: domain startup failed (error %d)\n",
__func__, rc);
sbi_hart_hang();
}
/*
* Configure PMP at last because if SMEPMP is detected,
* M-mode access to the S/U space will be rescinded.
@ -426,6 +408,10 @@ static void __noreturn init_warm_startup(struct sbi_scratch *scratch,
if (rc)
sbi_hart_hang();
rc = sbi_sse_init(scratch, false);
if (rc)
sbi_hart_hang();
rc = sbi_pmu_init(scratch, false);
if (rc)
sbi_hart_hang();
@ -458,10 +444,6 @@ static void __noreturn init_warm_startup(struct sbi_scratch *scratch,
if (rc)
sbi_hart_hang();
rc = sbi_sse_init(scratch, false);
if (rc)
sbi_hart_hang();
/*
* Configure PMP at last because if SMEPMP is detected,
* M-mode access to the S/U space will be rescinded.
@ -579,19 +561,6 @@ void __noreturn sbi_init(struct sbi_scratch *scratch)
init_warmboot(scratch, hartid);
}
void sbi_revert_entry_count(struct sbi_scratch *scratch)
{
unsigned long *entry_count, *init_count;
if (!entry_count_offset || !init_count_offset)
sbi_hart_hang();
entry_count = sbi_scratch_offset_ptr(scratch, entry_count_offset);
init_count = sbi_scratch_offset_ptr(scratch, init_count_offset);
*entry_count = *init_count;
}
unsigned long sbi_entry_count(u32 hartindex)
{
struct sbi_scratch *scratch;

View File

@ -116,11 +116,6 @@ int sbi_ipi_send_many(ulong hmask, ulong hbase, u32 event, void *data)
struct sbi_domain *dom = sbi_domain_thishart_ptr();
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
if (hmask == 0 && hbase != -1UL) {
/* Nothing to do, but it's not an error either. */
return 0;
}
/* Find the target harts */
rc = sbi_hsm_hart_interruptible_mask(dom, &target_mask);
if (rc)
@ -128,7 +123,6 @@ int sbi_ipi_send_many(ulong hmask, ulong hbase, u32 event, void *data)
if (hbase != -1UL) {
struct sbi_hartmask tmp_mask = { 0 };
int count = sbi_popcount(hmask);
for (i = hbase; hmask; i++, hmask >>= 1) {
if (hmask & 1UL)
@ -136,9 +130,6 @@ int sbi_ipi_send_many(ulong hmask, ulong hbase, u32 event, void *data)
}
sbi_hartmask_and(&target_mask, &target_mask, &tmp_mask);
if (sbi_hartmask_weight(&target_mask) != count)
return SBI_EINVAL;
}
/* Send IPIs */
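For context, the hmask/hbase pair used above encodes the target set as a bitmask relative to a base hartid, which the loop expands one bit at a time; per the SBI spec, hbase == -1UL means "all interruptible harts" and hmask is then ignored. A short sketch of the convention (independent of the patch):

/* hbase = 4, hmask = 0b1011 selects harts 4, 5 and 7 */
unsigned long hbase = 4, hmask = 0xb, i;
for (i = hbase; hmask; i++, hmask >>= 1)
	if (hmask & 1UL)
		/* hart i joins tmp_mask */;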

View File

@ -11,7 +11,6 @@
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_mpxy.h>
#include <sbi/sbi_scratch.h>
@ -20,8 +19,8 @@
#include <sbi/sbi_console.h>
#include <sbi/sbi_byteorder.h>
/** Shared memory size across all harts */
static unsigned long mpxy_shmem_size = PAGE_SIZE;
/** Offset of pointer to MPXY state in scratch space */
static unsigned long mpxy_state_offset;
/** List of MPXY proxy channels */
static SBI_LIST_HEAD(mpxy_channel_list);
@ -44,17 +43,17 @@ static SBI_LIST_HEAD(mpxy_channel_list);
#define CAP_EVENTSSTATE_POS 2
#define CAP_EVENTSSTATE_MASK (1U << CAP_EVENTSSTATE_POS)
/** Channel Capability - Send Message With Response function support */
#define CAP_SEND_MSG_WITH_RESP_POS 3
#define CAP_SEND_MSG_WITH_RESP_MASK (1U << CAP_SEND_MSG_WITH_RESP_POS)
/** Channel Capability - Get Notification function support */
#define CAP_GET_NOTIFICATIONS_POS 3
#define CAP_GET_NOTIFICATIONS_MASK (1U << CAP_GET_NOTIFICATIONS_POS)
/** Channel Capability - Send Message Without Response function support */
#define CAP_SEND_MSG_WITHOUT_RESP_POS 4
#define CAP_SEND_MSG_WITHOUT_RESP_MASK (1U << CAP_SEND_MSG_WITHOUT_RESP_POS)
/** Channel Capability - Get Notification function support */
#define CAP_GET_NOTIFICATIONS_POS 5
#define CAP_GET_NOTIFICATIONS_MASK (1U << CAP_GET_NOTIFICATIONS_POS)
/** Channel Capability - Send Message With Response function support */
#define CAP_SEND_MSG_WITH_RESP_POS 5
#define CAP_SEND_MSG_WITH_RESP_MASK (1U << CAP_SEND_MSG_WITH_RESP_POS)
/** Helpers to enable/disable channel capability bits
* _c: capability variable
@ -64,10 +63,17 @@ static SBI_LIST_HEAD(mpxy_channel_list);
#define CAP_DISABLE(_c, _m) INSERT_FIELD(_c, _m, 0)
#define CAP_GET(_c, _m) EXTRACT_FIELD(_c, _m)
#if __riscv_xlen == 64
#define SHMEM_PHYS_ADDR(_hi, _lo) (_lo)
#elif __riscv_xlen == 32
#define SHMEM_PHYS_ADDR(_hi, _lo) (((u64)(_hi) << 32) | (_lo))
#else
#error "Undefined XLEN"
#endif
/** Per hart shared memory */
struct mpxy_shmem {
unsigned long shmem_size;
unsigned long shmem_addr_lo;
unsigned long shmem_addr_hi;
};
@ -81,17 +87,10 @@ struct mpxy_state {
struct mpxy_shmem shmem;
};
static struct mpxy_state *sbi_domain_get_mpxy_state(struct sbi_domain *dom,
u32 hartindex);
/** Macro to obtain the current hart's MPXY state pointer in current domain */
#define sbi_domain_mpxy_state_thishart_ptr() \
sbi_domain_get_mpxy_state(sbi_domain_thishart_ptr(), \
current_hartindex())
/** Disable hart shared memory */
static inline void sbi_mpxy_shmem_disable(struct mpxy_state *ms)
{
ms->shmem.shmem_size = 0;
ms->shmem.shmem_addr_lo = INVALID_ADDR;
ms->shmem.shmem_addr_hi = INVALID_ADDR;
}
@ -171,8 +170,9 @@ bool sbi_mpxy_channel_available(void)
static void mpxy_std_attrs_init(struct sbi_mpxy_channel *channel)
{
struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr();
u32 capability = 0;
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
/* Reset values */
channel->attrs.msi_control = 0;
@ -228,113 +228,37 @@ int sbi_mpxy_register_channel(struct sbi_mpxy_channel *channel)
/* Initialize channel specific attributes */
mpxy_std_attrs_init(channel);
/* Update shared memory size if required */
if (mpxy_shmem_size < channel->attrs.msg_data_maxlen) {
mpxy_shmem_size = channel->attrs.msg_data_maxlen;
mpxy_shmem_size = (mpxy_shmem_size + (PAGE_SIZE - 1)) / PAGE_SIZE;
mpxy_shmem_size *= PAGE_SIZE; /* keep the size in bytes, rounded to whole pages */
}
SBI_INIT_LIST_HEAD(&channel->head);
sbi_list_add_tail(&channel->head, &mpxy_channel_list);
return SBI_OK;
}
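With the rounding above, a channel advertising a msg_data_maxlen larger than the current shared-memory size grows mpxy_shmem_size to the next whole page. A worked example, assuming PAGE_SIZE = 4096:

/* msg_data_maxlen = 5000:
 *   pages = (5000 + 4095) / 4096 = 2
 *   mpxy_shmem_size = 2 * 4096 = 8192
 */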
/** Setup per domain MPXY state data */
static int domain_mpxy_state_data_setup(struct sbi_domain *dom,
struct sbi_domain_data *data,
void *data_ptr)
int sbi_mpxy_init(struct sbi_scratch *scratch)
{
struct mpxy_state **dom_hartindex_to_mpxy_state_table = data_ptr;
struct mpxy_state *ms;
u32 i;
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
ms = sbi_zalloc(sizeof(*ms));
if (!ms)
mpxy_state_offset = sbi_scratch_alloc_type_offset(struct mpxy_state);
if (!mpxy_state_offset)
return SBI_ENOMEM;
/*
* TODO: Proper support for checking msi support from
* platform. Currently disable msi and sse and use
* polling
/**
* TODO: Proper support for checking msi support from platform.
* Currently disable msi and sse and use polling
*/
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
ms->msi_avail = false;
ms->sse_avail = false;
sbi_mpxy_shmem_disable(ms);
dom_hartindex_to_mpxy_state_table[i] = ms;
}
return 0;
}
/** Cleanup per domain MPXY state data */
static void domain_mpxy_state_data_cleanup(struct sbi_domain *dom,
struct sbi_domain_data *data,
void *data_ptr)
{
struct mpxy_state **dom_hartindex_to_mpxy_state_table = data_ptr;
u32 i;
sbi_hartmask_for_each_hartindex(i, dom->possible_harts)
sbi_free(dom_hartindex_to_mpxy_state_table[i]);
}
static struct sbi_domain_data dmspriv = {
.data_setup = domain_mpxy_state_data_setup,
.data_cleanup = domain_mpxy_state_data_cleanup,
};
/**
* Get per-domain MPXY state pointer for a given domain and HART index
* @param dom pointer to domain
* @param hartindex the HART index
*
* @return per-domain MPXY state pointer for given HART index
*/
static struct mpxy_state *sbi_domain_get_mpxy_state(struct sbi_domain *dom,
u32 hartindex)
{
struct mpxy_state **dom_hartindex_to_mpxy_state_table;
dom_hartindex_to_mpxy_state_table = sbi_domain_data_ptr(dom, &dmspriv);
if (!dom_hartindex_to_mpxy_state_table ||
!sbi_hartindex_valid(hartindex))
return NULL;
return dom_hartindex_to_mpxy_state_table[hartindex];
}
int sbi_mpxy_init(struct sbi_scratch *scratch)
{
int ret;
/**
* Allocate per-domain and per-hart MPXY state data.
* The data type is "struct mpxy_state **" whose memory space will be
* dynamically allocated by domain_setup_data_one() and
* domain_mpxy_state_data_setup(). Calculate needed size of memory space
* here.
*/
dmspriv.data_size = sizeof(struct mpxy_state *) * sbi_hart_count();
ret = sbi_domain_register_data(&dmspriv);
if (ret)
return ret;
return sbi_platform_mpxy_init(sbi_platform_ptr(scratch));
}
unsigned long sbi_mpxy_get_shmem_size(void)
int sbi_mpxy_set_shmem(unsigned long shmem_size, unsigned long shmem_phys_lo,
unsigned long shmem_phys_hi, unsigned long flags)
{
return mpxy_shmem_size;
}
int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo,
unsigned long shmem_phys_hi,
unsigned long flags)
{
struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr();
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
unsigned long *ret_buf;
/** Disable shared memory if both hi and lo have all bit 1s */
@ -348,26 +272,13 @@ int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo,
return SBI_ERR_INVALID_PARAM;
/** Check that the shared memory size and address are aligned to a 4K page */
if (shmem_phys_lo & ~PAGE_MASK)
if (!shmem_size || (shmem_size & ~PAGE_MASK) ||
(shmem_phys_lo & ~PAGE_MASK))
return SBI_ERR_INVALID_PARAM;
/*
* On RV32, the M-mode can only access the first 4GB of
* the physical address space because M-mode does not have
* MMU to access full 34-bit physical address space.
* So fail if the upper 32 bits of the physical address
* is non-zero on RV32.
*
* On RV64, kernel sets upper 64bit address part to zero.
* So fail if the upper 64bit of the physical address
* is non-zero on RV64.
*/
if (shmem_phys_hi)
return SBI_ERR_INVALID_ADDRESS;
if (!sbi_domain_check_addr_range(sbi_domain_thishart_ptr(),
SHMEM_PHYS_ADDR(shmem_phys_hi, shmem_phys_lo),
mpxy_shmem_size, PRV_S,
shmem_size, PRV_S,
SBI_DOMAIN_READ | SBI_DOMAIN_WRITE))
return SBI_ERR_INVALID_ADDRESS;
@ -375,13 +286,15 @@ int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo,
if (flags == SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE_RETURN) {
ret_buf = (unsigned long *)(ulong)SHMEM_PHYS_ADDR(shmem_phys_hi,
shmem_phys_lo);
sbi_hart_map_saddr((unsigned long)ret_buf, mpxy_shmem_size);
ret_buf[0] = cpu_to_lle(ms->shmem.shmem_addr_lo);
ret_buf[1] = cpu_to_lle(ms->shmem.shmem_addr_hi);
sbi_hart_map_saddr((unsigned long)ret_buf, shmem_size);
ret_buf[0] = cpu_to_lle(ms->shmem.shmem_size);
ret_buf[1] = cpu_to_lle(ms->shmem.shmem_addr_lo);
ret_buf[2] = cpu_to_lle(ms->shmem.shmem_addr_hi);
sbi_hart_unmap_saddr();
}
/** Setup the new shared memory */
ms->shmem.shmem_size = shmem_size;
ms->shmem.shmem_addr_lo = shmem_phys_lo;
ms->shmem.shmem_addr_hi = shmem_phys_hi;
@ -390,12 +303,15 @@ int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo,
int sbi_mpxy_get_channel_ids(u32 start_index)
{
struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr();
u32 remaining, returned, max_channelids;
u32 node_index = 0, node_ret = 0;
struct sbi_mpxy_channel *channel;
u32 remaining, returned, max_channelids;
u32 channels_count = 0;
u32 *shmem_base;
struct sbi_mpxy_channel *channel;
/* Check whether the shared memory has been set up. */
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
if (!mpxy_shmem_enabled(ms))
return SBI_ERR_NO_SHMEM;
@ -407,11 +323,12 @@ int sbi_mpxy_get_channel_ids(u32 start_index)
return SBI_ERR_INVALID_PARAM;
shmem_base = hart_shmem_base(ms);
sbi_hart_map_saddr((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);
sbi_hart_map_saddr((unsigned long)hart_shmem_base(ms),
ms->shmem.shmem_size);
/** Number of channel IDs which can be stored in shmem,
 * adjusted for the remaining and returned fields */
max_channelids = (mpxy_shmem_size / sizeof(u32)) - 2;
max_channelids = (ms->shmem.shmem_size / sizeof(u32)) - 2;
/* total remaining from the start index */
remaining = channels_count - start_index;
/* how many can be returned */
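The "- 2" above reflects the layout this call writes into shared memory: two u32 header words followed by the channel IDs themselves. A worked example, assuming a 4096-byte shared memory:

/* layout: shmem[0] = remaining, shmem[1] = returned, shmem[2..] = ids
 * max_channelids = 4096 / sizeof(u32) - 2 = 1024 - 2 = 1022
 */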
@ -441,11 +358,13 @@ int sbi_mpxy_get_channel_ids(u32 start_index)
int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
{
struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr();
int ret = SBI_SUCCESS;
u32 *attr_ptr, end_id;
void *shmem_base;
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
if (!mpxy_shmem_enabled(ms))
return SBI_ERR_NO_SHMEM;
@ -459,13 +378,14 @@ int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
return SBI_ERR_INVALID_PARAM;
/* Sanity check for base_attr_id and attr_count */
if (!attr_count || (attr_count > (mpxy_shmem_size / ATTR_SIZE)))
if (!attr_count || (attr_count > (ms->shmem.shmem_size / ATTR_SIZE)))
return SBI_ERR_INVALID_PARAM;
shmem_base = hart_shmem_base(ms);
end_id = base_attr_id + attr_count - 1;
sbi_hart_map_saddr((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);
sbi_hart_map_saddr((unsigned long)hart_shmem_base(ms),
ms->shmem.shmem_size);
/* Standard attributes range check */
if (mpxy_is_std_attr(base_attr_id)) {
@ -522,8 +442,8 @@ out:
static int mpxy_check_write_std_attr(struct sbi_mpxy_channel *channel,
u32 attr_id, u32 attr_val)
{
struct sbi_mpxy_channel_attrs *attrs = &channel->attrs;
int ret = SBI_SUCCESS;
struct sbi_mpxy_channel_attrs *attrs = &channel->attrs;
switch(attr_id) {
case SBI_MPXY_ATTR_MSI_CONTROL:
@ -557,7 +477,9 @@ static int mpxy_check_write_std_attr(struct sbi_mpxy_channel *channel,
static void mpxy_write_std_attr(struct sbi_mpxy_channel *channel, u32 attr_id,
u32 attr_val)
{
struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr();
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
struct sbi_mpxy_channel_attrs *attrs = &channel->attrs;
switch(attr_id) {
@ -591,16 +513,17 @@ static void mpxy_write_std_attr(struct sbi_mpxy_channel *channel, u32 attr_id,
int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
{
struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr();
u32 *mem_ptr, attr_id, end_id, attr_val;
struct sbi_mpxy_channel *channel;
int ret, mem_idx;
void *shmem_base;
u32 *mem_ptr, attr_id, end_id, attr_val;
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
if (!mpxy_shmem_enabled(ms))
return SBI_ERR_NO_SHMEM;
channel = mpxy_find_channel(channel_id);
struct sbi_mpxy_channel *channel = mpxy_find_channel(channel_id);
if (!channel)
return SBI_ERR_NOT_SUPPORTED;
@ -610,13 +533,13 @@ int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
return SBI_ERR_INVALID_PARAM;
/* Sanity check for base_attr_id and attr_count */
if (!attr_count || (attr_count > (mpxy_shmem_size / ATTR_SIZE)))
if (!attr_count || (attr_count > (ms->shmem.shmem_size / ATTR_SIZE)))
return SBI_ERR_INVALID_PARAM;
shmem_base = hart_shmem_base(ms);
end_id = base_attr_id + attr_count - 1;
sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
sbi_hart_map_saddr((unsigned long)shmem_base, ms->shmem.shmem_size);
mem_ptr = (u32 *)shmem_base;
@ -681,16 +604,17 @@ int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
unsigned long msg_data_len,
unsigned long *resp_data_len)
{
struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr();
struct sbi_mpxy_channel *channel;
int ret;
void *shmem_base, *resp_buf;
u32 resp_bufsize;
int ret;
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
if (!mpxy_shmem_enabled(ms))
return SBI_ERR_NO_SHMEM;
channel = mpxy_find_channel(channel_id);
struct sbi_mpxy_channel *channel = mpxy_find_channel(channel_id);
if (!channel)
return SBI_ERR_NOT_SUPPORTED;
@ -700,23 +624,24 @@ int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
if (!resp_data_len && !channel->send_message_without_response)
return SBI_ERR_NOT_SUPPORTED;
if (msg_data_len > mpxy_shmem_size ||
if (msg_data_len > ms->shmem.shmem_size ||
msg_data_len > channel->attrs.msg_data_maxlen)
return SBI_ERR_INVALID_PARAM;
shmem_base = hart_shmem_base(ms);
sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
sbi_hart_map_saddr((unsigned long)shmem_base, ms->shmem.shmem_size);
if (resp_data_len) {
resp_buf = shmem_base;
resp_bufsize = mpxy_shmem_size;
resp_bufsize = ms->shmem.shmem_size;
ret = channel->send_message_with_response(channel, msg_id,
shmem_base,
msg_data_len,
resp_buf,
resp_bufsize,
resp_data_len);
} else {
}
else {
ret = channel->send_message_without_response(channel, msg_id,
shmem_base,
msg_data_len);
@ -730,7 +655,7 @@ int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
return SBI_ERR_FAILED;
if (resp_data_len &&
(*resp_data_len > mpxy_shmem_size ||
(*resp_data_len > ms->shmem.shmem_size ||
*resp_data_len > channel->attrs.msg_data_maxlen))
return SBI_ERR_FAILED;
@ -739,30 +664,34 @@ int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
int sbi_mpxy_get_notification_events(u32 channel_id, unsigned long *events_len)
{
struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr();
struct sbi_mpxy_channel *channel;
void *eventsbuf, *shmem_base;
int ret;
void *eventsbuf, *shmem_base;
struct mpxy_state *ms =
sbi_scratch_thishart_offset_ptr(mpxy_state_offset);
if (!mpxy_shmem_enabled(ms))
return SBI_ERR_NO_SHMEM;
channel = mpxy_find_channel(channel_id);
if (!channel || !channel->get_notification_events)
struct sbi_mpxy_channel *channel = mpxy_find_channel(channel_id);
if (!channel)
return SBI_ERR_NOT_SUPPORTED;
if (!channel->get_notification_events)
return SBI_ERR_NOT_SUPPORTED;
shmem_base = hart_shmem_base(ms);
sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
sbi_hart_map_saddr((unsigned long)shmem_base, ms->shmem.shmem_size);
eventsbuf = shmem_base;
ret = channel->get_notification_events(channel, eventsbuf,
mpxy_shmem_size,
ms->shmem.shmem_size,
events_len);
sbi_hart_unmap_saddr();
if (ret)
return ret;
if (*events_len > (mpxy_shmem_size - 16))
if (*events_len > ms->shmem.shmem_size)
return SBI_ERR_FAILED;
return SBI_SUCCESS;

View File

@ -206,12 +206,6 @@ static int pmu_ctr_validate(struct sbi_pmu_hart_state *phs,
return event_idx_type;
}
static bool pmu_ctr_idx_validate(unsigned long cbase, unsigned long cmask)
{
/* Do a basic sanity check of counter base & mask */
return cmask && cbase + sbi_fls(cmask) < total_ctrs;
}
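The helper rejects counter windows reaching past total_ctrs: the highest counter selected is cbase plus the most-significant set bit of cmask (assuming sbi_fls() returns the 0-based MSB index, which matches its use here). A worked example:

/* cbase = 3, cmask = 0b10010:
 *   sbi_fls(0b10010) = 4, highest counter index = 3 + 4 = 7
 *   valid iff cmask != 0 and 7 < total_ctrs
 */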
int sbi_pmu_ctr_fw_read(uint32_t cidx, uint64_t *cval)
{
int event_idx_type;
@ -315,11 +309,11 @@ int sbi_pmu_add_raw_event_counter_map(uint64_t select, uint64_t select_mask, u32
void sbi_pmu_ovf_irq()
{
/*
* We need to disable the overflow irq before returning to S-mode or we will loop
* on an irq being triggered
* We need to disable LCOFIP before returning to S-mode or we will loop
* on LCOFIP being triggered
*/
csr_clear(CSR_MIE, sbi_pmu_irq_mask());
sbi_sse_inject_event(SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW);
csr_clear(CSR_MIE, MIP_LCOFIP);
sbi_sse_inject_event(SBI_SSE_EVENT_LOCAL_PMU);
}
static int pmu_ctr_enable_irq_hw(int ctr_idx)
@ -350,7 +344,7 @@ static int pmu_ctr_enable_irq_hw(int ctr_idx)
* Otherwise, there will be race conditions where we may clear the bit
* the software is yet to handle the interrupt.
*/
if (!(mip_val & sbi_pmu_irq_mask())) {
if (!(mip_val & MIP_LCOFIP)) {
mhpmevent_curr &= of_mask;
csr_write_num(mhpmevent_csr, mhpmevent_curr);
}
@ -411,21 +405,11 @@ int sbi_pmu_irq_bit(void)
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
return IRQ_PMU_OVF;
return MIP_LCOFIP;
if (pmu_dev && pmu_dev->hw_counter_irq_bit)
return pmu_dev->hw_counter_irq_bit();
return -1;
}
unsigned long sbi_pmu_irq_mask(void)
{
int irq_bit = sbi_pmu_irq_bit();
if (irq_bit < 0)
return 0;
return BIT(irq_bit);
}
static int pmu_ctr_start_fw(struct sbi_pmu_hart_state *phs,
@ -478,7 +462,7 @@ int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
int i, cidx;
uint64_t edata;
if (!pmu_ctr_idx_validate(cbase, cmask))
if ((cbase + sbi_fls(cmask)) >= total_ctrs)
return ret;
if (flags & SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT)
@ -583,8 +567,8 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
uint32_t event_code;
int i, cidx;
if (!pmu_ctr_idx_validate(cbase, cmask))
return ret;
if ((cbase + sbi_fls(cmask)) >= total_ctrs)
return SBI_EINVAL;
if (flag & SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT)
return SBI_ENO_SHMEM;
@ -607,9 +591,9 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
}
}
/* Clear PMU overflow interrupt to avoid spurious ones */
/* Clear MIP_LCOFIP to avoid spurious interrupts */
if (phs->sse_enabled)
csr_clear(CSR_MIP, sbi_pmu_irq_mask());
csr_clear(CSR_MIP, MIP_LCOFIP);
return ret;
}
@ -738,13 +722,12 @@ static int pmu_ctr_find_hw(struct sbi_pmu_hart_state *phs,
return SBI_EINVAL;
/**
* If Sscofpmf or Andes PMU is present, try to find
* the programmable counter for cycle/instret as well.
* If Sscof is present try to find the programmable counter for
* cycle/instret as well.
*/
fixed_ctr = pmu_ctr_find_fixed_hw(event_idx);
if (fixed_ctr >= 0 &&
!sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF) &&
!sbi_hart_has_extension(scratch, SBI_HART_EXT_XANDESPMU))
!sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
return pmu_fixed_ctr_update_inhibit_bits(fixed_ctr, flags);
if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_11)
@ -780,7 +763,6 @@ static int pmu_ctr_find_hw(struct sbi_pmu_hart_state *phs,
continue;
/* We found a valid counter that is not started yet */
ctr_idx = cbase;
break;
}
}
@ -846,7 +828,8 @@ int sbi_pmu_ctr_cfg_match(unsigned long cidx_base, unsigned long cidx_mask,
int ret, event_type, ctr_idx = SBI_ENOTSUPP;
u32 event_code;
if (!pmu_ctr_idx_validate(cidx_base, cidx_mask))
/* Do a basic sanity check of counter base & mask */
if ((cidx_base + sbi_fls(cidx_mask)) >= total_ctrs)
return SBI_EINVAL;
event_type = pmu_event_validate(phs, event_idx, event_data);
@ -1103,43 +1086,30 @@ void sbi_pmu_exit(struct sbi_scratch *scratch)
static void pmu_sse_enable(uint32_t event_id)
{
unsigned long irq_mask = sbi_pmu_irq_mask();
struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
csr_set(CSR_MIE, irq_mask);
phs->sse_enabled = true;
csr_clear(CSR_MIDELEG, sbi_pmu_irq_bit());
csr_clear(CSR_MIP, MIP_LCOFIP);
csr_set(CSR_MIE, MIP_LCOFIP);
}
static void pmu_sse_disable(uint32_t event_id)
{
unsigned long irq_mask = sbi_pmu_irq_mask();
struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
csr_clear(CSR_MIE, irq_mask);
csr_clear(CSR_MIP, irq_mask);
csr_clear(CSR_MIE, MIP_LCOFIP);
csr_clear(CSR_MIP, MIP_LCOFIP);
csr_set(CSR_MIDELEG, sbi_pmu_irq_bit());
phs->sse_enabled = false;
}
static void pmu_sse_complete(uint32_t event_id)
{
csr_set(CSR_MIE, sbi_pmu_irq_mask());
}
static void pmu_sse_register(uint32_t event_id)
{
struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
phs->sse_enabled = true;
csr_clear(CSR_MIDELEG, sbi_pmu_irq_mask());
}
static void pmu_sse_unregister(uint32_t event_id)
{
struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
phs->sse_enabled = false;
csr_set(CSR_MIDELEG, sbi_pmu_irq_mask());
csr_set(CSR_MIE, MIP_LCOFIP);
}
static const struct sbi_sse_cb_ops pmu_sse_cb_ops = {
.register_cb = pmu_sse_register,
.unregister_cb = pmu_sse_unregister,
.enable_cb = pmu_sse_enable,
.disable_cb = pmu_sse_disable,
.complete_cb = pmu_sse_complete,
@ -1182,11 +1152,10 @@ int sbi_pmu_init(struct sbi_scratch *scratch, bool cold_boot)
return SBI_EINVAL;
total_ctrs = num_hw_ctrs + SBI_PMU_FW_CTR_MAX;
if (sbi_pmu_irq_bit() >= 0)
sbi_sse_add_event(SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, &pmu_sse_cb_ops);
}
sbi_sse_set_cb_ops(SBI_SSE_EVENT_LOCAL_PMU, &pmu_sse_cb_ops);
phs = pmu_get_hart_state_ptr(scratch);
if (!phs) {
phs = sbi_zalloc(sizeof(*phs));

View File

@ -14,31 +14,18 @@
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>
#define DEFAULT_SCRATCH_ALLOC_ALIGN __SIZEOF_POINTER__
u32 sbi_scratch_hart_count;
u32 hartindex_to_hartid_table[SBI_HARTMASK_MAX_BITS] = { [0 ... SBI_HARTMASK_MAX_BITS-1] = -1U };
struct sbi_scratch *hartindex_to_scratch_table[SBI_HARTMASK_MAX_BITS];
u32 last_hartindex_having_scratch = 0;
u32 hartindex_to_hartid_table[SBI_HARTMASK_MAX_BITS + 1] = { -1U };
struct sbi_scratch *hartindex_to_scratch_table[SBI_HARTMASK_MAX_BITS + 1] = { 0 };
static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER;
static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET;
/*
* Get the alignment size.
* Return DEFAULT_SCRATCH_ALLOC_ALIGN or riscv,cbom_block_size
*/
static unsigned long sbi_get_scratch_alloc_align(void)
{
const struct sbi_platform *plat = sbi_platform_thishart_ptr();
if (!plat || !plat->cbom_block_size)
return DEFAULT_SCRATCH_ALLOC_ALIGN;
return plat->cbom_block_size;
}
u32 sbi_hartid_to_hartindex(u32 hartid)
{
sbi_for_each_hartindex(i)
u32 i;
for (i = 0; i <= last_hartindex_having_scratch; i++)
if (hartindex_to_hartid_table[i] == hartid)
return i;
@ -49,30 +36,27 @@ typedef struct sbi_scratch *(*hartid2scratch)(ulong hartid, ulong hartindex);
int sbi_scratch_init(struct sbi_scratch *scratch)
{
u32 h, hart_count;
u32 i, h;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
hart_count = plat->hart_count;
if (hart_count > SBI_HARTMASK_MAX_BITS)
hart_count = SBI_HARTMASK_MAX_BITS;
sbi_scratch_hart_count = hart_count;
sbi_for_each_hartindex(i) {
for (i = 0; i < plat->hart_count; i++) {
h = (plat->hart_index2id) ? plat->hart_index2id[i] : i;
hartindex_to_hartid_table[i] = h;
hartindex_to_scratch_table[i] =
((hartid2scratch)scratch->hartid_to_scratch)(h, i);
}
last_hartindex_having_scratch = plat->hart_count - 1;
return 0;
}
unsigned long sbi_scratch_alloc_offset(unsigned long size)
{
u32 i;
void *ptr;
unsigned long ret = 0;
struct sbi_scratch *rscratch;
unsigned long scratch_alloc_align = 0;
/*
* We have a simple brain-dead allocator which never expects
@ -86,14 +70,8 @@ unsigned long sbi_scratch_alloc_offset(unsigned long size)
if (!size)
return 0;
scratch_alloc_align = sbi_get_scratch_alloc_align();
/*
* We align allocations to the cache-line size to avoid livelocks on
* certain platforms caused by atomic variables sharing a cache line.
*/
size += scratch_alloc_align - 1;
size &= ~(scratch_alloc_align - 1);
size += __SIZEOF_POINTER__ - 1;
size &= ~((unsigned long)__SIZEOF_POINTER__ - 1);
spin_lock(&extra_lock);
@ -107,7 +85,7 @@ done:
spin_unlock(&extra_lock);
if (ret) {
sbi_for_each_hartindex(i) {
for (i = 0; i <= sbi_scratch_last_hartindex(); i++) {
rscratch = sbi_hartindex_to_scratch(i);
if (!rscratch)
continue;
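Both variants of the size adjustment use the standard add-and-mask round-up; only the alignment differs (cache-line size on master, pointer size on v1.6). A quick arithmetic sketch, assuming a 64-byte cache line:

/* size = 20, align = 64:
 *   size += 64 - 1;      -> 83
 *   size &= ~(64 - 1);   -> 64, the next multiple of the alignment
 */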

View File

@ -23,7 +23,6 @@
#include <sbi/sbi_pmu.h>
#include <sbi/sbi_sse.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_slist.h>
#include <sbi/sbi_string.h>
#include <sbi/sbi_trap.h>
@ -40,11 +39,21 @@
#define EVENT_IS_GLOBAL(__event_id) ((__event_id) & SBI_SSE_EVENT_GLOBAL_BIT)
static const uint32_t supported_events[] = {
SBI_SSE_EVENT_LOCAL_RAS,
SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP,
SBI_SSE_EVENT_GLOBAL_RAS,
SBI_SSE_EVENT_LOCAL_PMU,
SBI_SSE_EVENT_LOCAL_SOFTWARE,
SBI_SSE_EVENT_GLOBAL_SOFTWARE,
};
#define EVENT_COUNT array_size(supported_events)
#define sse_event_invoke_cb(_event, _cb, ...) \
{ \
const struct sbi_sse_cb_ops *__ops = _event->info->cb_ops; \
if (__ops && __ops->_cb) \
__ops->_cb(_event->event_id, ##__VA_ARGS__); \
if (_event->cb_ops && _event->cb_ops->_cb) \
_event->cb_ops->_cb(_event->event_id, ##__VA_ARGS__); \
}
struct sse_entry_state {
@ -101,7 +110,7 @@ struct sbi_sse_event {
struct sbi_sse_event_attrs attrs;
uint32_t event_id;
u32 hartindex;
struct sse_event_info *info;
const struct sbi_sse_cb_ops *cb_ops;
struct sbi_dlist node;
};
@ -158,12 +167,6 @@ struct sse_global_event {
spinlock_t lock;
};
struct sse_event_info {
uint32_t event_id;
const struct sbi_sse_cb_ops *cb_ops;
SBI_SLIST_NODE(sse_event_info);
};
static unsigned int local_event_count;
static unsigned int global_event_count;
static struct sse_global_event *global_events;
@ -177,58 +180,6 @@ static u32 sse_ipi_inject_event = SBI_IPI_EVENT_MAX;
static int sse_ipi_inject_send(unsigned long hartid, uint32_t event_id);
struct sse_event_info global_software_event = {
.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE,
SBI_SLIST_NODE_INIT(NULL),
};
struct sse_event_info local_software_event = {
.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE,
SBI_SLIST_NODE_INIT(&global_software_event),
};
static SBI_SLIST_HEAD(supported_events, sse_event_info) =
SBI_SLIST_HEAD_INIT(&local_software_event);
/*
* This array is used to distinguish between standard event and platform
* events in order to return SBI_ERR_NOT_SUPPORTED for them.
*/
static const uint32_t standard_events[] = {
SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS,
SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP,
SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS,
SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW,
SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS,
SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS,
SBI_SSE_EVENT_LOCAL_SOFTWARE,
SBI_SSE_EVENT_GLOBAL_SOFTWARE,
};
static bool sse_is_standard_event(uint32_t event_id)
{
int i;
for (i = 0; i < array_size(standard_events); i++) {
if (event_id == standard_events[i])
return true;
}
return false;
}
static struct sse_event_info *sse_event_info_get(uint32_t event_id)
{
struct sse_event_info *info;
SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) {
if (info->event_id == event_id)
return info;
}
return NULL;
}
static unsigned long sse_event_state(struct sbi_sse_event *e)
{
return e->attrs.status & SBI_SSE_ATTR_STATUS_STATE_MASK;
@ -293,41 +244,30 @@ static void sse_event_set_state(struct sbi_sse_event *e,
e->attrs.status |= new_state;
}
static int sse_event_get(uint32_t event_id, struct sbi_sse_event **eret)
static struct sbi_sse_event *sse_event_get(uint32_t event_id)
{
unsigned int i;
struct sbi_sse_event *e;
struct sse_hart_state *shs;
if (!eret)
return SBI_EINVAL;
if (EVENT_IS_GLOBAL(event_id)) {
for (i = 0; i < global_event_count; i++) {
e = &global_events[i].event;
if (e->event_id == event_id) {
spin_lock(&global_events[i].lock);
*eret = e;
return SBI_SUCCESS;
return e;
}
}
} else {
shs = sse_thishart_state_ptr();
for (i = 0; i < local_event_count; i++) {
e = &shs->local_events[i];
if (e->event_id == event_id) {
*eret = e;
return SBI_SUCCESS;
}
if (e->event_id == event_id)
return e;
}
}
/* Check if the event is a standard one but not supported */
if (sse_is_standard_event(event_id))
return SBI_ENOTSUPP;
/* Neither a supported nor a standard event, so it is invalid */
return SBI_EINVAL;
return NULL;
}
static void sse_event_put(struct sbi_sse_event *e)
@ -388,7 +328,7 @@ static int sse_event_set_hart_id_check(struct sbi_sse_event *e,
struct sbi_domain *hd = sbi_domain_thishart_ptr();
if (!sse_event_is_global(e))
return SBI_EDENIED;
return SBI_EBAD_RANGE;
if (!sbi_domain_is_assigned_hart(hd, sbi_hartid_to_hartindex(hartid)))
return SBI_EINVAL;
@ -427,12 +367,10 @@ static int sse_event_set_attr_check(struct sbi_sse_event *e, uint32_t attr_id,
return sse_event_set_hart_id_check(e, val);
case SBI_SSE_ATTR_INTERRUPTED_FLAGS:
if (val & ~(SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP |
SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE |
if (val & ~(SBI_SSE_ATTR_INTERRUPTED_FLAGS_STATUS_SPP |
SBI_SSE_ATTR_INTERRUPTED_FLAGS_STATUS_SPIE |
SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV |
SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP |
SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP |
SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT))
SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP))
return SBI_EINVAL;
__attribute__((__fallthrough__));
case SBI_SSE_ATTR_INTERRUPTED_SEPC:
@ -446,13 +384,7 @@ static int sse_event_set_attr_check(struct sbi_sse_event *e, uint32_t attr_id,
return SBI_OK;
default:
/*
* Attribute range validity was already checked by
* sbi_sse_attr_check(). If we end up here, attribute was not
* handled by the above 'case' statements and thus it is
* read-only.
*/
return SBI_EDENIED;
return SBI_EBAD_RANGE;
}
}
@ -520,14 +452,10 @@ static unsigned long sse_interrupted_flags(unsigned long mstatus)
{
unsigned long hstatus, flags = 0;
if (mstatus & MSTATUS_SPIE)
flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE;
if (mstatus & MSTATUS_SPP)
flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP;
if (mstatus & MSTATUS_SPELP)
flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP;
if (mstatus & MSTATUS_SDT)
flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT;
if (mstatus & (MSTATUS_SPIE))
flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_STATUS_SPIE;
if (mstatus & (MSTATUS_SPP))
flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_STATUS_SPP;
if (misa_extension('H')) {
hstatus = csr_read(CSR_HSTATUS);
@ -585,13 +513,9 @@ static void sse_event_inject(struct sbi_sse_event *e,
regs->a7 = e->attrs.entry.arg;
regs->mepc = e->attrs.entry.pc;
/*
* Return to S-mode with virtualization disabled, not expected landing
* pad, supervisor trap disabled.
*/
regs->mstatus &= ~(MSTATUS_MPP | MSTATUS_SIE | MSTATUS_SPELP);
/* Return to S-mode with virtualization disabled */
regs->mstatus &= ~(MSTATUS_MPP | MSTATUS_SIE);
regs->mstatus |= (PRV_S << MSTATUS_MPP_SHIFT);
regs->mstatus |= MSTATUS_SDT;
#if __riscv_xlen == 64
regs->mstatus &= ~MSTATUS_MPV;
@ -642,21 +566,13 @@ static void sse_event_resume(struct sbi_sse_event *e,
regs->mstatus |= MSTATUS_SIE;
regs->mstatus &= ~MSTATUS_SPIE;
if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE)
if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_STATUS_SPIE)
regs->mstatus |= MSTATUS_SPIE;
regs->mstatus &= ~MSTATUS_SPP;
if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP)
if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_STATUS_SPP)
regs->mstatus |= MSTATUS_SPP;
regs->mstatus &= ~MSTATUS_SPELP;
if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP)
regs->mstatus |= MSTATUS_SPELP;
regs->mstatus &= ~MSTATUS_SDT;
if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT)
regs->mstatus |= MSTATUS_SDT;
regs->a7 = i_ctx->a7;
regs->a6 = i_ctx->a6;
csr_write(CSR_SEPC, i_ctx->sepc);
@ -737,7 +653,8 @@ static void sse_ipi_inject_process(struct sbi_scratch *scratch)
/* Mark all queued events as pending */
while (!sbi_fifo_dequeue(sse_inject_fifo_r, &evt)) {
if (sse_event_get(evt.event_id, &e))
e = sse_event_get(evt.event_id);
if (!e)
continue;
sse_event_set_pending(e);
@ -779,9 +696,10 @@ static int sse_inject_event(uint32_t event_id, unsigned long hartid)
int ret;
struct sbi_sse_event *e;
ret = sse_event_get(event_id, &e);
if (ret)
return ret;
e = sse_event_get(event_id);
if (!e)
return SBI_EINVAL;
/* In case of global event, provided hart_id is ignored */
if (sse_event_is_global(e))
@ -870,9 +788,9 @@ int sbi_sse_enable(uint32_t event_id)
int ret;
struct sbi_sse_event *e;
ret = sse_event_get(event_id, &e);
if (ret)
return ret;
e = sse_event_get(event_id);
if (!e)
return SBI_EINVAL;
sse_enabled_event_lock(e);
ret = sse_event_enable(e);
@ -887,9 +805,9 @@ int sbi_sse_disable(uint32_t event_id)
int ret;
struct sbi_sse_event *e;
ret = sse_event_get(event_id, &e);
if (ret)
return ret;
e = sse_event_get(event_id);
if (!e)
return SBI_EINVAL;
sse_enabled_event_lock(e);
ret = sse_event_disable(e);
@ -908,7 +826,7 @@ int sbi_sse_hart_mask(void)
return SBI_EFAIL;
if (state->masked)
return SBI_EALREADY_STOPPED;
return SBI_EALREADY_STARTED;
state->masked = true;
@ -923,7 +841,7 @@ int sbi_sse_hart_unmask(void)
return SBI_EFAIL;
if (!state->masked)
return SBI_EALREADY_STARTED;
return SBI_EALREADY_STOPPED;
state->masked = false;
@ -945,26 +863,19 @@ int sbi_sse_inject_event(uint32_t event_id)
return sse_inject_event(event_id, current_hartid());
}
int sbi_sse_add_event(uint32_t event_id, const struct sbi_sse_cb_ops *cb_ops)
int sbi_sse_set_cb_ops(uint32_t event_id, const struct sbi_sse_cb_ops *cb_ops)
{
struct sse_event_info *info;
struct sbi_sse_event *e;
/* Do not allow adding an event twice */
info = sse_event_info_get(event_id);
if (info)
return SBI_EALREADY;
if (cb_ops && cb_ops->set_hartid_cb && !EVENT_IS_GLOBAL(event_id))
if (cb_ops->set_hartid_cb && !EVENT_IS_GLOBAL(event_id))
return SBI_EINVAL;
info = sbi_zalloc(sizeof(*info));
if (!info)
return SBI_ENOMEM;
e = sse_event_get(event_id);
if (!e)
return SBI_EINVAL;
info->cb_ops = cb_ops;
info->event_id = event_id;
SBI_SLIST_ADD(info, supported_events);
e->cb_ops = cb_ops;
sse_event_put(e);
return SBI_OK;
}
@ -1032,9 +943,9 @@ int sbi_sse_read_attrs(uint32_t event_id, uint32_t base_attr_id,
if (ret)
return ret;
ret = sse_event_get(event_id, &e);
if (ret)
return ret;
e = sse_event_get(event_id);
if (!e)
return SBI_EINVAL;
sbi_hart_map_saddr(output_phys_lo, sizeof(unsigned long) * attr_count);
@ -1097,9 +1008,9 @@ int sbi_sse_write_attrs(uint32_t event_id, uint32_t base_attr_id,
if (ret)
return ret;
ret = sse_event_get(event_id, &e);
if (ret)
return ret;
e = sse_event_get(event_id);
if (!e)
return SBI_EINVAL;
ret = sse_write_attrs(e, base_attr_id, attr_count, input_phys_lo);
sse_event_put(e);
@ -1122,9 +1033,9 @@ int sbi_sse_register(uint32_t event_id, unsigned long handler_entry_pc,
SBI_DOMAIN_EXECUTE))
return SBI_EINVALID_ADDR;
ret = sse_event_get(event_id, &e);
if (ret)
return ret;
e = sse_event_get(event_id);
if (!e)
return SBI_EINVAL;
ret = sse_event_register(e, handler_entry_pc, handler_entry_arg);
sse_event_put(e);
@ -1137,9 +1048,9 @@ int sbi_sse_unregister(uint32_t event_id)
int ret;
struct sbi_sse_event *e;
ret = sse_event_get(event_id, &e);
if (ret)
return ret;
e = sse_event_get(event_id);
if (!e)
return SBI_EINVAL;
ret = sse_event_unregister(e);
sse_event_put(e);
@ -1147,10 +1058,9 @@ int sbi_sse_unregister(uint32_t event_id)
return ret;
}
static void sse_event_init(struct sbi_sse_event *e, struct sse_event_info *info)
static void sse_event_init(struct sbi_sse_event *e, uint32_t event_id)
{
e->event_id = info->event_id;
e->info = info;
e->event_id = event_id;
e->hartindex = current_hartindex();
e->attrs.hartid = current_hartid();
/* Declare all events as injectable */
@ -1159,10 +1069,10 @@ static void sse_event_init(struct sbi_sse_event *e, struct sse_event_info *info)
static void sse_event_count_init()
{
struct sse_event_info *info;
unsigned int i;
SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) {
if (EVENT_IS_GLOBAL(info->event_id))
for (i = 0; i < EVENT_COUNT; i++) {
if (EVENT_IS_GLOBAL(supported_events[i]))
global_event_count++;
else
local_event_count++;
@ -1172,19 +1082,18 @@ static void sse_event_count_init()
static int sse_global_init()
{
struct sbi_sse_event *e;
unsigned int ev = 0;
struct sse_event_info *info;
unsigned int i, ev = 0;
global_events = sbi_zalloc(sizeof(*global_events) * global_event_count);
if (!global_events)
return SBI_ENOMEM;
SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) {
if (!EVENT_IS_GLOBAL(info->event_id))
for (i = 0; i < EVENT_COUNT; i++) {
if (!EVENT_IS_GLOBAL(supported_events[i]))
continue;
e = &global_events[ev].event;
sse_event_init(e, info);
sse_event_init(e, supported_events[i]);
SPIN_LOCK_INIT(global_events[ev].lock);
ev++;
@ -1195,16 +1104,16 @@ static int sse_global_init()
static void sse_local_init(struct sse_hart_state *shs)
{
unsigned int ev = 0;
struct sse_event_info *info;
unsigned int i, ev = 0;
SBI_INIT_LIST_HEAD(&shs->enabled_event_list);
SPIN_LOCK_INIT(shs->enabled_event_lock);
SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) {
if (EVENT_IS_GLOBAL(info->event_id))
for (i = 0; i < EVENT_COUNT; i++) {
if (EVENT_IS_GLOBAL(supported_events[i]))
continue;
sse_event_init(&shs->local_events[ev++], info);
sse_event_init(&shs->local_events[ev++], supported_events[i]);
}
}
@ -1234,8 +1143,7 @@ int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot)
}
sse_inject_fifo_mem_off = sbi_scratch_alloc_offset(
(global_event_count + local_event_count) *
sizeof(struct sse_ipi_inject_data));
EVENT_COUNT * sizeof(struct sse_ipi_inject_data));
if (!sse_inject_fifo_mem_off) {
sbi_scratch_free_offset(sse_inject_fifo_off);
sbi_scratch_free_offset(shs_ptr_off);
@ -1272,8 +1180,7 @@ int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot)
sse_inject_mem =
sbi_scratch_offset_ptr(scratch, sse_inject_fifo_mem_off);
sbi_fifo_init(sse_inject_q, sse_inject_mem,
(global_event_count + local_event_count),
sbi_fifo_init(sse_inject_q, sse_inject_mem, EVENT_COUNT,
sizeof(struct sse_ipi_inject_data));
return 0;
@ -1281,18 +1188,21 @@ int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot)
void sbi_sse_exit(struct sbi_scratch *scratch)
{
int i;
struct sbi_sse_event *e;
struct sse_event_info *info;
SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) {
if (sse_event_get(info->event_id, &e))
for (i = 0; i < EVENT_COUNT; i++) {
e = sse_event_get(supported_events[i]);
if (!e)
continue;
if (e->attrs.hartid != current_hartid())
goto skip;
if (sse_event_state(e) > SBI_SSE_STATE_REGISTERED)
if (sse_event_state(e) > SBI_SSE_STATE_REGISTERED) {
sbi_printf("Event %d in invalid state at exit", i);
sse_event_set_state(e, SBI_SSE_STATE_UNUSED);
}
skip:
sse_event_put(e);

View File

@ -68,22 +68,22 @@ char *sbi_strcpy(char *dest, const char *src)
{
char *ret = dest;
while ((*dest++ = *src++) != '\0') {
while (*src != '\0') {
*dest++ = *src++;
}
return ret;
}
char *sbi_strncpy(char *dest, const char *src, size_t count)
{
char *tmp = dest;
char *ret = dest;
while (count) {
if ((*tmp = *src) != 0)
src++;
tmp++;
count--;
while (count-- && *src != '\0') {
*dest++ = *src++;
}
return dest;
return ret;
}
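The two sbi_strncpy() bodies differ in behavior, not just style: the master version (first) keeps ISO C strncpy semantics, zero-padding the destination out to count, while the v1.6 version (second) stops at the source terminator and writes no terminator at all; the sbi_strcpy() change above likewise makes the terminating NUL actually get copied. A quick illustration:

char buf[8] = "XXXXXXX";
sbi_strncpy(buf, "ab", 6);
/* master: buf = { 'a', 'b', 0, 0, 0, 0, 'X', 0 } - zero-padded to count */
/* v1.6:   buf = { 'a', 'b', 'X', 'X', 'X', 'X', 'X', 0 } - no NUL copied */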
char *sbi_strchr(const char *s, int c)

View File

@ -139,7 +139,12 @@ void sbi_timer_event_start(u64 next_event)
* the older software to leverage sstc extension on newer hardware.
*/
if (sbi_hart_has_extension(sbi_scratch_thishart_ptr(), SBI_HART_EXT_SSTC)) {
csr_write64(CSR_STIMECMP, next_event);
#if __riscv_xlen == 32
csr_write(CSR_STIMECMP, next_event & 0xFFFFFFFF);
csr_write(CSR_STIMECMPH, next_event >> 32);
#else
csr_write(CSR_STIMECMP, next_event);
#endif
} else if (timer_dev && timer_dev->timer_event_start) {
timer_dev->timer_event_start(next_event);
csr_clear(CSR_MIP, MIP_STIP);
@ -185,7 +190,7 @@ int sbi_timer_init(struct sbi_scratch *scratch, bool cold_boot)
if (!time_delta_off)
return SBI_ENOMEM;
if (sbi_hart_has_csr(scratch, SBI_HART_CSR_TIME))
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_ZICNTR))
get_time_val = get_ticks;
ret = sbi_platform_timer_init(plat);

View File

@ -11,7 +11,6 @@
#include <sbi/riscv_encoding.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_double_trap.h>
#include <sbi/sbi_ecall.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
@ -169,7 +168,7 @@ int sbi_trap_redirect(struct sbi_trap_regs *regs,
csr_write(CSR_VSCAUSE, trap->cause);
/* Set MEPC to VS-mode exception vector base */
regs->mepc = csr_read(CSR_VSTVEC) & ~MTVEC_MODE;
regs->mepc = csr_read(CSR_VSTVEC);
/* Set MPP to VS-mode */
regs->mstatus &= ~MSTATUS_MPP;
@ -204,7 +203,7 @@ int sbi_trap_redirect(struct sbi_trap_regs *regs,
csr_write(CSR_SCAUSE, trap->cause);
/* Set MEPC to S-mode exception vector base */
regs->mepc = csr_read(CSR_STVEC) & ~MTVEC_MODE;
regs->mepc = csr_read(CSR_STVEC);
/* Set MPP to S-mode */
regs->mstatus &= ~MSTATUS_MPP;
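stvec and vstvec hold the handler base address with the trap mode in their low two bits, so the added "& ~MTVEC_MODE" strips the mode before the value is used as a redirect target:

/* stvec = 0x80200001 (MODE = 1, vectored):
 *   0x80200001 & ~MTVEC_MODE = 0x80200000, the actual vector base
 * Without the mask, mepc would inherit a bogus low bit.
 */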
@ -240,13 +239,12 @@ static int sbi_trap_nonaia_irq(unsigned long irq)
case IRQ_M_SOFT:
sbi_ipi_process();
break;
case IRQ_PMU_OVF:
sbi_pmu_ovf_irq();
break;
case IRQ_M_EXT:
return sbi_irqchip_process();
default:
if (irq == sbi_pmu_irq_bit()) {
sbi_pmu_ovf_irq();
return 0;
}
return SBI_ENOENT;
}
@ -267,17 +265,15 @@ static int sbi_trap_aia_irq(void)
case IRQ_M_SOFT:
sbi_ipi_process();
break;
case IRQ_PMU_OVF:
sbi_pmu_ovf_irq();
break;
case IRQ_M_EXT:
rc = sbi_irqchip_process();
if (rc)
return rc;
break;
default:
if (mtopi == sbi_pmu_irq_bit()) {
sbi_pmu_ovf_irq();
break;
}
return SBI_ENOENT;
}
}

View File

@ -24,7 +24,7 @@
{ \
register ulong tinfo asm("a3"); \
register ulong mstatus = 0; \
register ulong mtvec = (ulong)sbi_hart_expected_trap; \
register ulong mtvec = sbi_hart_expected_trap_addr(); \
type ret = 0; \
trap->cause = 0; \
asm volatile( \
@ -51,7 +51,7 @@
{ \
register ulong tinfo asm("a3") = (ulong)trap; \
register ulong mstatus = 0; \
register ulong mtvec = (ulong)sbi_hart_expected_trap; \
register ulong mtvec = sbi_hart_expected_trap_addr(); \
trap->cause = 0; \
asm volatile( \
"add %[tinfo], %[taddr], zero\n" \
@ -121,7 +121,7 @@ ulong sbi_get_insn(ulong mepc, struct sbi_trap_info *trap)
register ulong tinfo asm("a3");
register ulong ttmp asm("a4");
register ulong mstatus = 0;
register ulong mtvec = (ulong)sbi_hart_expected_trap;
register ulong mtvec = sbi_hart_expected_trap_addr();
ulong insn = 0;
trap->cause = 0;

View File

@ -15,9 +15,3 @@ libsbi-objs-$(CONFIG_SBIUNIT) += tests/riscv_locks_test.o
carray-sbi_unit_tests-$(CONFIG_SBIUNIT) += math_test_suite
libsbi-objs-$(CONFIG_SBIUNIT) += tests/sbi_math_test.o
carray-sbi_unit_tests-$(CONFIG_SBIUNIT) += ecall_test_suite
libsbi-objs-$(CONFIG_SBIUNIT) += tests/sbi_ecall_test.o
carray-sbi_unit_tests-$(CONFIG_SBIUNIT) += bitops_test_suite
libsbi-objs-$(CONFIG_SBIUNIT) += tests/sbi_bitops_test.o

View File

@ -1,135 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright 2025 Beijing ESWIN Computing Technology Co., Ltd.
*
* Author: Dongdong Zhang <zhangdongdong@eswincomputing.com>
*/
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_unit_test.h>
#define BPL BITS_PER_LONG
unsigned long bits_to_search = 64;
static unsigned long ffb1[] = {};
static unsigned long ffb2[] = { 0 };
static unsigned long ffb3[] = { 1 };
static unsigned long ffb4[] = { 1UL << (BPL - 1) };
static unsigned long ffb5[] = { 0, 0x10 };
static unsigned long ffb6[] = { 0, 0, 1UL << (BPL - 1) };
static unsigned long ffb7[] = { 0, 0, 0, 0x01 };
static void find_first_bit_test(struct sbiunit_test_case *test)
{
SBIUNIT_EXPECT_EQ(test, find_first_bit(ffb1, 0), 0);
SBIUNIT_EXPECT_EQ(test, find_first_bit(ffb2, BPL), BPL);
SBIUNIT_EXPECT_EQ(test, find_first_bit(ffb3, BPL), 0);
SBIUNIT_EXPECT_EQ(test, find_first_bit(ffb4, BPL), BPL - 1);
SBIUNIT_EXPECT_EQ(test, find_first_bit(ffb5, 2 * BPL), BPL + 4);
SBIUNIT_EXPECT_EQ(test, find_first_bit(ffb6, 3 * BPL),
2 * BPL + BPL - 1);
SBIUNIT_EXPECT_EQ(test, find_first_bit(ffb7, 4 * BPL), 3 * BPL);
}
static unsigned long ffzb1[] = {};
static unsigned long ffzb2[] = { ~0UL };
static unsigned long ffzb3[] = { ~1UL };
static unsigned long ffzb4[] = { ~(1UL << (BPL - 1)) };
static unsigned long ffzb5[] = { ~0UL, ~0x10UL };
static unsigned long ffzb6[] = { ~0UL, ~0UL, ~(1UL << (BPL - 1)) };
static unsigned long ffzb7[] = { ~0UL, ~0UL, ~0UL, ~0x01UL };
static void find_first_zero_bit_test(struct sbiunit_test_case *test)
{
SBIUNIT_ASSERT_EQ(test, find_first_zero_bit(ffzb1, 0), 0);
SBIUNIT_ASSERT_EQ(test, find_first_zero_bit(ffzb2, BPL), BPL);
SBIUNIT_ASSERT_EQ(test, find_first_zero_bit(ffzb3, BPL), 0);
SBIUNIT_ASSERT_EQ(test, find_first_zero_bit(ffzb4, BPL), BPL - 1);
SBIUNIT_ASSERT_EQ(test, find_first_zero_bit(ffzb5, 2 * BPL), BPL + 4);
SBIUNIT_ASSERT_EQ(test, find_first_zero_bit(ffzb6, 3 * BPL),
2 * BPL + BPL - 1);
SBIUNIT_ASSERT_EQ(test, find_first_zero_bit(ffzb7, 4 * BPL), 3 * BPL);
}
static unsigned long flb1[] = {};
static unsigned long flb2[] = { 0 };
static unsigned long flb3[] = { 1 };
static unsigned long flb4[] = { 1UL << (BPL - 1) };
static unsigned long flb5[] = { 0, 0x10 };
static unsigned long flb6[] = { 0, 0, 1UL << (BPL - 1) };
static unsigned long flb7[] = { 0, 0, 0, 0x01 };
static void find_last_bit_test(struct sbiunit_test_case *test)
{
SBIUNIT_EXPECT_EQ(test, find_last_bit(flb1, 0), 0);
SBIUNIT_EXPECT_EQ(test, find_last_bit(flb2, BPL), BPL);
SBIUNIT_EXPECT_EQ(test, find_last_bit(flb3, BPL), 0);
SBIUNIT_EXPECT_EQ(test, find_last_bit(flb4, BPL), BPL - 1);
SBIUNIT_EXPECT_EQ(test, find_last_bit(flb5, 2 * BPL), BPL + 4);
SBIUNIT_EXPECT_EQ(test, find_last_bit(flb6, 3 * BPL),
2 * BPL + BPL - 1);
SBIUNIT_EXPECT_EQ(test, find_last_bit(flb7, 4 * BPL), 3 * BPL);
}
static unsigned long fnb1[] = {};
static unsigned long fnb2[] = { 0 };
static unsigned long fnb3[] = { 1 };
static unsigned long fnb4[] = { 1UL << (BPL - 1) };
static unsigned long fnb5[] = { 0, 0x10 };
static unsigned long fnb6[] = { 0, 0, 1UL << (BPL - 1) };
static unsigned long fnb7[] = { 0, 0, 0, 0x01 };
static void find_next_bit_test(struct sbiunit_test_case *test)
{
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb1, 0, 0), 0);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb2, BPL, 0), BPL);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb3, BPL, 0), 0);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb4, BPL, 0), BPL - 1);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb5, 2 * BPL, 0), BPL + 4);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb6, 3 * BPL, 0),
2 * BPL + BPL - 1);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb7, 4 * BPL, 0), 3 * BPL);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb5, 2 * BPL, BPL), BPL + 4);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb7, 4 * BPL, 3 * BPL), 3 * BPL);
SBIUNIT_EXPECT_EQ(test, find_next_bit(fnb6, 3 * BPL, BPL),
2 * BPL + BPL - 1);
}
static unsigned long fnzb1[] = {};
static unsigned long fnzb2[] = { ~0UL };
static unsigned long fnzb3[] = { ~1UL };
static unsigned long fnzb4[] = { ~(1UL << (BPL - 1)) };
static unsigned long fnzb5[] = { ~0UL, ~0x10UL };
static unsigned long fnzb6[] = { ~0UL, ~0UL, ~(1UL << (BPL - 1)) };
static unsigned long fnzb7[] = { ~0UL, ~0UL, ~0UL, ~0x01UL };
static void find_next_zero_bit_test(struct sbiunit_test_case *test)
{
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb1, 0, 0), 0);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb2, BPL, 0), BPL);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb3, BPL, 0), 0);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb4, BPL, 0), BPL - 1);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb5, 2 * BPL, 0), BPL + 4);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb6, 3 * BPL, 0),
2 * BPL + BPL - 1);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb7, 4 * BPL, 0), 3 * BPL);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb5, 2 * BPL, BPL),
BPL + 4);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb7, 4 * BPL, 3 * BPL),
3 * BPL);
SBIUNIT_EXPECT_EQ(test, find_next_zero_bit(fnzb6, 3 * BPL, BPL),
2 * BPL + BPL - 1);
}
static struct sbiunit_test_case bitops_test_cases[] = {
SBIUNIT_TEST_CASE(find_first_bit_test),
SBIUNIT_TEST_CASE(find_first_zero_bit_test),
SBIUNIT_TEST_CASE(find_last_bit_test),
SBIUNIT_TEST_CASE(find_next_bit_test),
SBIUNIT_TEST_CASE(find_next_zero_bit_test),
SBIUNIT_END_CASE,
};
SBIUNIT_TEST_SUITE(bitops_test_suite, bitops_test_cases);
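The cases above rely on two conventions: BPL is the number of bits in an unsigned long (i.e. BITS_PER_LONG), and every scan helper returns the bitmap size when no qualifying bit exists. A minimal sketch of that contract, assuming plain word-at-a-time semantics rather than OpenSBI's actual implementation (find_first_bit is the start = 0 case, and the *_zero_* variants scan inverted words):
/*
 * Minimal sketch of the scan contract exercised above; not the
 * actual OpenSBI implementation. Returns the index of the first
 * set bit at or after "start", or "size" when none exists.
 */
#define BPL (8 * sizeof(unsigned long))

static unsigned long sketch_find_next_bit(const unsigned long *addr,
					   unsigned long size,
					   unsigned long start)
{
	unsigned long i;

	for (i = start; i < size; i++)
		if (addr[i / BPL] & (1UL << (i % BPL)))
			return i;

	return size;
}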

View File

@ -1,50 +0,0 @@
#include <sbi/sbi_unit_test.h>
#include <sbi/sbi_ecall.h>
#include <sbi/sbi_ecall_interface.h>
static void test_sbi_ecall_version(struct sbiunit_test_case *test)
{
SBIUNIT_EXPECT_EQ(test, sbi_ecall_version_major(), SBI_ECALL_VERSION_MAJOR);
SBIUNIT_EXPECT_EQ(test, sbi_ecall_version_minor(), SBI_ECALL_VERSION_MINOR);
}
static void test_sbi_ecall_impid(struct sbiunit_test_case *test)
{
unsigned long old_impid = sbi_ecall_get_impid();
sbi_ecall_set_impid(42);
SBIUNIT_EXPECT_EQ(test, sbi_ecall_get_impid(), 42);
sbi_ecall_set_impid(old_impid);
}
static int dummy_handler(unsigned long extid, unsigned long funcid,
struct sbi_trap_regs *regs,
struct sbi_ecall_return *out)
{
return 0;
}
static void test_sbi_ecall_register_find_extension(struct sbiunit_test_case *test)
{
struct sbi_ecall_extension test_ext = {
/* Use the experimental extension space to avoid overlapping a real extension */
.extid_start = SBI_EXT_EXPERIMENTAL_START,
.extid_end = SBI_EXT_EXPERIMENTAL_START,
.name = "TestExt",
.handle = dummy_handler,
};
SBIUNIT_EXPECT_EQ(test, sbi_ecall_register_extension(&test_ext), 0);
SBIUNIT_EXPECT_EQ(test, sbi_ecall_find_extension(SBI_EXT_EXPERIMENTAL_START), &test_ext);
sbi_ecall_unregister_extension(&test_ext);
SBIUNIT_EXPECT_EQ(test, sbi_ecall_find_extension(SBI_EXT_EXPERIMENTAL_START), NULL);
}
static struct sbiunit_test_case ecall_tests[] = {
SBIUNIT_TEST_CASE(test_sbi_ecall_version),
SBIUNIT_TEST_CASE(test_sbi_ecall_impid),
SBIUNIT_TEST_CASE(test_sbi_ecall_register_find_extension),
SBIUNIT_END_CASE,
};
SBIUNIT_TEST_SUITE(ecall_test_suite, ecall_tests);

lib/utils/cppc/fdt_cppc.c Normal file
View File

@ -0,0 +1,22 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#include <sbi_utils/cppc/fdt_cppc.h>
/* List of FDT CPPC drivers generated at compile time */
extern const struct fdt_driver *const fdt_cppc_drivers[];
void fdt_cppc_init(const void *fdt)
{
/*
* Platforms may have multiple CPPC devices, or none at all,
* so probe them all and don't fail.
*/
fdt_driver_init_all(fdt, fdt_cppc_drivers);
}

View File

@ -0,0 +1,3 @@
HEADER: sbi_utils/cppc/fdt_cppc.h
TYPE: const struct fdt_driver
NAME: fdt_cppc_drivers
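The three keys above feed OpenSBI's carray generator, which emits a C source file containing the fdt_cppc_drivers array consumed by fdt_cppc_init() above. A plausible sketch of the generated file, assuming the usual NULL-terminated pointer-array layout (the exact generated formatting is an assumption; only fdt_cppc_rpmi is registered in this diff):
/* Hypothetical shape of the generated fdt_cppc_drivers carray source. */
#include <sbi_utils/cppc/fdt_cppc.h>

extern const struct fdt_driver fdt_cppc_rpmi;

const struct fdt_driver *const fdt_cppc_drivers[] = {
	&fdt_cppc_rpmi,
	NULL,
};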

View File

@ -12,7 +12,7 @@
#include <sbi/sbi_cppc.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/cppc/fdt_cppc.h>
#include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/mailbox/fdt_mailbox.h>
#include <sbi_utils/mailbox/rpmi_mailbox.h>
@ -367,7 +367,8 @@ static const struct fdt_match rpmi_cppc_match[] = {
{},
};
const struct fdt_driver fdt_cppc_rpmi = {
struct fdt_driver fdt_cppc_rpmi = {
.match_table = rpmi_cppc_match,
.init = rpmi_cppc_cold_init,
.experimental = true,
};

View File

@ -7,5 +7,8 @@
# Anup Patel <apatel@ventanamicro.com>
#
carray-fdt_early_drivers-$(CONFIG_FDT_CPPC_RPMI) += fdt_cppc_rpmi
libsbiutils-objs-$(CONFIG_FDT_CPPC) += cppc/fdt_cppc.o
libsbiutils-objs-$(CONFIG_FDT_CPPC) += cppc/fdt_cppc_drivers.carray.o
carray-fdt_cppc_drivers-$(CONFIG_FDT_CPPC_RPMI) += fdt_cppc_rpmi
libsbiutils-objs-$(CONFIG_FDT_CPPC_RPMI) += cppc/fdt_cppc_rpmi.o

View File

@ -289,10 +289,8 @@ static int __fdt_parse_region(const void *fdt, int domain_offset,
return SBI_EINVAL;
order = val32;
flags = region_access & (SBI_DOMAIN_MEMREGION_ACCESS_MASK
| SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS);
/* Read "mmio" DT property */
flags = region_access & SBI_DOMAIN_MEMREGION_ACCESS_MASK;
if (fdt_get_property(fdt, region_offset, "mmio", NULL))
flags |= SBI_DOMAIN_MEMREGION_MMIO;
@ -473,7 +471,7 @@ static int __fdt_parse_domain(const void *fdt, int domain_offset, void *opaque)
if (err)
continue;
if (SBI_HARTMASK_MAX_BITS <= sbi_hartid_to_hartindex(val32))
if (SBI_HARTMASK_MAX_BITS <= val32)
continue;
if (!fdt_node_is_enabled(fdt, cpu_offset))

View File

@ -15,29 +15,21 @@ int fdt_driver_init_by_offset(const void *fdt, int nodeoff,
{
const struct fdt_driver *driver;
const struct fdt_match *match;
int compat_len, prop_len, rc;
const char *compat_str;
const void *prop;
int len, rc;
if (!fdt_node_is_enabled(fdt, nodeoff))
return SBI_ENODEV;
compat_str = fdt_getprop(fdt, nodeoff, "compatible", &prop_len);
if (!compat_str)
prop = fdt_getprop(fdt, nodeoff, "compatible", &len);
if (!prop)
return SBI_ENODEV;
while ((compat_len = strnlen(compat_str, prop_len) + 1) <= prop_len) {
for (int i = 0; (driver = drivers[i]); i++)
for (match = driver->match_table; match->compatible; match++)
if (!memcmp(match->compatible, compat_str, compat_len))
goto found;
while ((driver = *drivers++)) {
for (match = driver->match_table; match->compatible; match++) {
if (!fdt_stringlist_contains(prop, len, match->compatible))
continue;
compat_str += compat_len;
prop_len -= compat_len;
}
return SBI_ENODEV;
found:
if (driver->experimental)
sbi_printf("WARNING: %s driver is experimental and may change\n",
match->compatible);
@ -52,6 +44,10 @@ found:
}
return rc;
}
}
return SBI_ENODEV;
}
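Both variants of fdt_driver_init_by_offset() walk a node's "compatible" property, which is a list of NUL-terminated strings: the master side matches each entry with memcmp() against every driver's match table, while the v1.6 side delegates the membership test to libfdt's fdt_stringlist_contains(). A small standalone sketch of the latter, with illustrative values not taken from this diff:
/*
 * Sketch of the fdt_stringlist_contains() check used by the v1.6
 * variant. A "compatible" property is a sequence of NUL-terminated
 * strings, e.g. "vendor,chip-uart\0ns16550a\0", and the helper
 * reports whether one whole entry equals the candidate string.
 * Illustrative values; not taken from this diff.
 */
#include <libfdt.h>

static int example_is_ns16550a(const void *fdt, int nodeoff)
{
	int len;
	const char *prop = fdt_getprop(fdt, nodeoff, "compatible", &len);

	return prop && fdt_stringlist_contains(prop, len, "ns16550a");
}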
static int fdt_driver_init_scan(const void *fdt,

View File

@ -1,3 +0,0 @@
HEADER: sbi_utils/fdt/fdt_driver.h
TYPE: const struct fdt_driver
NAME: fdt_early_drivers

View File

@ -16,7 +16,6 @@
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_timer.h>
#include <sbi_utils/fdt/fdt_fixup.h>
#include <sbi_utils/fdt/fdt_pmu.h>
#include <sbi_utils/fdt/fdt_helper.h>
@ -108,21 +107,10 @@ int fdt_add_cpu_idle_states(void *fdt, const struct sbi_cpu_idle_state *state)
void fdt_cpu_fixup(void *fdt)
{
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
struct sbi_domain *dom = sbi_domain_thishart_ptr();
int err, cpu_offset, cpus_offset, len;
const char *mmu_type, *extensions;
const char *mmu_type;
u32 hartid, hartindex;
bool emulated_zicntr;
/*
* Claim the Zicntr extension in riscv,isa-extensions if
* 1. OpenSBI can emulate the time CSR with a timer, and
* 2. the other two Zicntr CSRs (cycle, instret) are available
*/
emulated_zicntr = sbi_timer_get_device() != NULL &&
sbi_hart_has_csr(scratch, SBI_HART_CSR_CYCLE) &&
sbi_hart_has_csr(scratch, SBI_HART_CSR_INSTRET);
err = fdt_open_into(fdt, fdt, fdt_totalsize(fdt) + 32);
if (err < 0)
@ -152,25 +140,6 @@ void fdt_cpu_fixup(void *fdt)
!mmu_type || !len)
fdt_setprop_string(fdt, cpu_offset, "status",
"disabled");
if (!emulated_zicntr)
continue;
extensions = fdt_getprop(fdt, cpu_offset,
"riscv,isa-extensions", &len);
/*
* For legacy devicetrees, don't create a riscv,isa-extensions
* property if one wasn't already present.
*/
if (extensions &&
!fdt_stringlist_contains(extensions, len, "zicntr")) {
err = fdt_open_into(fdt, fdt, fdt_totalsize(fdt) + 16);
if (err)
continue;
fdt_appendprop_string(fdt, cpu_offset,
"riscv,isa-extensions", "zicntr");
}
}
}
@ -446,6 +415,7 @@ int fdt_register_general_fixup(struct fdt_general_fixup *fixup)
return SBI_EALREADY;
}
SBI_INIT_LIST_HEAD(&fixup->head);
sbi_list_add_tail(&fixup->head, &fixup_list);
return 0;

View File

@ -33,6 +33,48 @@
#define DEFAULT_SHAKTI_UART_FREQ 50000000
#define DEFAULT_SHAKTI_UART_BAUD 115200
const struct fdt_match *fdt_match_node(const void *fdt, int nodeoff,
const struct fdt_match *match_table)
{
int ret;
if (!fdt || nodeoff < 0 || !match_table)
return NULL;
while (match_table->compatible) {
ret = fdt_node_check_compatible(fdt, nodeoff,
match_table->compatible);
if (!ret)
return match_table;
match_table++;
}
return NULL;
}
int fdt_find_match(const void *fdt, int startoff,
const struct fdt_match *match_table,
const struct fdt_match **out_match)
{
int nodeoff;
if (!fdt || !match_table)
return SBI_ENODEV;
while (match_table->compatible) {
nodeoff = fdt_node_offset_by_compatible(fdt, startoff,
match_table->compatible);
if (nodeoff >= 0) {
if (out_match)
*out_match = match_table;
return nodeoff;
}
match_table++;
}
return SBI_ENODEV;
}
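A minimal usage sketch for fdt_find_match() as defined above; the match table contents and function name are illustrative, not taken from this diff:
/* Illustrative caller for fdt_find_match(); values are hypothetical. */
static const struct fdt_match example_match[] = {
	{ .compatible = "riscv,aplic" },
	{ },
};

static int example_find_aplic(const void *fdt)
{
	const struct fdt_match *match;
	int noff = fdt_find_match(fdt, -1, example_match, &match);

	if (noff < 0)
		return noff; /* SBI_ENODEV: no compatible node found */

	/* noff is the offset of the first matching node */
	return noff;
}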
int fdt_parse_phandle_with_args(const void *fdt, int nodeoff,
const char *prop, const char *cells_prop,
int index, struct fdt_phandle_args *out_args)
@ -84,27 +126,23 @@ static int fdt_translate_address(const void *fdt, uint64_t reg, int parent,
uint64_t *addr)
{
int i, rlen;
int cell_parent_addr, cell_child_addr, cell_size;
int cell_addr, cell_size;
const fdt32_t *ranges;
uint64_t offset, caddr = 0, paddr = 0, rsize = 0;
ranges = fdt_getprop(fdt, parent, "ranges", &rlen);
if (ranges && rlen > 0) {
cell_child_addr = fdt_address_cells(fdt, parent);
if (cell_child_addr < 1)
return SBI_ENODEV;
cell_parent_addr = fdt_address_cells(fdt, fdt_parent_offset(fdt, parent));
if (cell_parent_addr < 1)
cell_addr = fdt_address_cells(fdt, parent);
if (cell_addr < 1)
return SBI_ENODEV;
cell_size = fdt_size_cells(fdt, parent);
if (cell_size < 0)
return SBI_ENODEV;
for (i = 0; i < cell_child_addr; i++)
ranges = fdt_getprop(fdt, parent, "ranges", &rlen);
if (ranges && rlen > 0) {
for (i = 0; i < cell_addr; i++)
caddr = (caddr << 32) | fdt32_to_cpu(*ranges++);
for (i = 0; i < cell_parent_addr; i++)
for (i = 0; i < cell_addr; i++)
paddr = (paddr << 32) | fdt32_to_cpu(*ranges++);
for (i = 0; i < cell_size; i++)
rsize = (rsize << 32) | fdt32_to_cpu(*ranges++);
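As a worked example of why the master side tracks the two address-cell counts separately: with #address-cells = 1 on the child bus, #address-cells = 2 on the parent, and #size-cells = 1, each ranges entry is four cells, <caddr paddr_hi paddr_lo size>. Reading both addresses with one shared cell_addr width, as the v1.6 variant does, would misparse such an entry.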
@ -250,30 +288,6 @@ int fdt_parse_hart_id(const void *fdt, int cpu_offset, u32 *hartid)
return 0;
}
int fdt_parse_cbom_block_size(const void *fdt, int cpu_offset, unsigned long *cbom_block_size)
{
int len;
const void *prop;
const fdt32_t *val;
if (!fdt || cpu_offset < 0)
return SBI_EINVAL;
prop = fdt_getprop(fdt, cpu_offset, "device_type", &len);
if (!prop || !len)
return SBI_EINVAL;
if (strncmp(prop, "cpu", strlen("cpu")))
return SBI_EINVAL;
val = fdt_getprop(fdt, cpu_offset, "riscv,cbom-block-size", &len);
if (!val || len < sizeof(fdt32_t))
return SBI_EINVAL;
if (cbom_block_size)
*cbom_block_size = fdt32_to_cpu(*val);
return 0;
}
int fdt_parse_max_enabled_hart_id(const void *fdt, u32 *max_hartid)
{
u32 hartid;
@ -611,64 +625,19 @@ int fdt_parse_xlnx_uartlite_node(const void *fdt, int nodeoffset,
return fdt_parse_uart_node_common(fdt, nodeoffset, uart, 0, 0);
}
static int fdt_aplic_find_imsic_node(const void *fdt, int nodeoff,
struct imsic_data *imsic, bool mmode)
{
const fdt32_t *val;
int i, len, noff, rc;
val = fdt_getprop(fdt, nodeoff, "msi-parent", &len);
if (val && len >= sizeof(fdt32_t)) {
noff = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*val));
if (noff < 0)
return noff;
rc = fdt_parse_imsic_node(fdt, noff, imsic);
if (rc)
return rc;
rc = imsic_data_check(imsic);
if (rc)
return rc;
if (imsic->targets_mmode == mmode) {
return 0;
}
} else {
return SBI_ENODEV;
}
val = fdt_getprop(fdt, nodeoff, "riscv,children", &len);
if (!val || len < sizeof(fdt32_t))
return SBI_ENODEV;
len /= sizeof(fdt32_t);
for (i = 0; i < len; i++) {
noff = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(val[i]));
if (noff < 0)
return noff;
rc = fdt_aplic_find_imsic_node(fdt, noff, imsic, mmode);
if (!rc)
break;
}
return rc;
}
int fdt_parse_aplic_node(const void *fdt, int nodeoff, struct aplic_data *aplic)
{
bool child_found;
const fdt32_t *val;
const fdt32_t *del;
struct imsic_data imsic = { 0 };
int i, j, d, dcnt, len, rc;
struct imsic_data imsic;
int i, j, d, dcnt, len, noff, rc;
uint64_t reg_addr, reg_size;
struct aplic_delegate_data *deleg;
if (nodeoff < 0 || !aplic || !fdt)
return SBI_ENODEV;
memset(aplic, 0, sizeof(*aplic));
rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &reg_addr, &reg_size);
if (rc < 0 || !reg_addr || !reg_size)
@ -690,34 +659,78 @@ int fdt_parse_aplic_node(const void *fdt, int nodeoff, struct aplic_data *aplic)
}
}
aplic->num_idc = len / 2;
goto aplic_msi_parent_done;
}
rc = fdt_aplic_find_imsic_node(fdt, nodeoff, &imsic, true);
if (!rc) {
aplic->targets_mmode = true;
val = fdt_getprop(fdt, nodeoff, "msi-parent", &len);
if (val && len >= sizeof(fdt32_t)) {
noff = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*val));
if (noff < 0)
return noff;
rc = fdt_parse_imsic_node(fdt, noff, &imsic);
if (rc)
return rc;
rc = imsic_data_check(&imsic);
if (rc)
return rc;
aplic->targets_mmode = imsic.targets_mmode;
if (imsic.targets_mmode) {
aplic->has_msicfg_mmode = true;
aplic->msicfg_mmode.lhxs = imsic.guest_index_bits;
aplic->msicfg_mmode.lhxw = imsic.hart_index_bits;
aplic->msicfg_mmode.hhxw = imsic.group_index_bits;
aplic->msicfg_mmode.hhxs = imsic.group_index_shift;
if (aplic->msicfg_mmode.hhxs < (2 * IMSIC_MMIO_PAGE_SHIFT))
if (aplic->msicfg_mmode.hhxs <
(2 * IMSIC_MMIO_PAGE_SHIFT))
return SBI_EINVAL;
aplic->msicfg_mmode.hhxs -= 24;
aplic->msicfg_mmode.base_addr = imsic.regs[0].addr;
} else {
goto aplic_msi_parent_done;
}
rc = fdt_aplic_find_imsic_node(fdt, nodeoff, &imsic, false);
if (!rc) {
val = fdt_getprop(fdt, nodeoff, "riscv,children", &len);
if (!val || len < sizeof(fdt32_t))
goto aplic_msi_parent_done;
noff = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*val));
if (noff < 0)
return noff;
val = fdt_getprop(fdt, noff, "msi-parent", &len);
if (!val || len < sizeof(fdt32_t))
goto aplic_msi_parent_done;
noff = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*val));
if (noff < 0)
return noff;
rc = fdt_parse_imsic_node(fdt, noff, &imsic);
if (rc)
return rc;
rc = imsic_data_check(&imsic);
if (rc)
return rc;
if (!imsic.targets_mmode) {
aplic->has_msicfg_smode = true;
aplic->msicfg_smode.lhxs = imsic.guest_index_bits;
aplic->msicfg_smode.lhxw = imsic.hart_index_bits;
aplic->msicfg_smode.hhxw = imsic.group_index_bits;
aplic->msicfg_smode.hhxs = imsic.group_index_shift;
if (aplic->msicfg_smode.hhxs < (2 * IMSIC_MMIO_PAGE_SHIFT))
if (aplic->msicfg_smode.hhxs <
(2 * IMSIC_MMIO_PAGE_SHIFT))
return SBI_EINVAL;
aplic->msicfg_smode.hhxs -= 24;
aplic->msicfg_smode.base_addr = imsic.regs[0].addr;
}
}
aplic_msi_parent_done:
for (d = 0; d < APLIC_MAX_DELEGATE; d++) {
deleg = &aplic->delegate[d];
@ -973,7 +986,7 @@ int fdt_parse_aclint_node(const void *fdt, int nodeoffset,
{
const fdt32_t *val;
int i, rc, count, cpu_offset, cpu_intc_offset;
u32 phandle, hwirq, hartid, first_hartid, last_hartid;
u32 phandle, hwirq, hartid, first_hartid, last_hartid, hart_count;
u32 match_hwirq = (for_timer) ? IRQ_M_TIMER : IRQ_M_SOFT;
if (nodeoffset < 0 || !fdt ||
@ -1002,7 +1015,7 @@ int fdt_parse_aclint_node(const void *fdt, int nodeoffset,
count = count / sizeof(fdt32_t);
first_hartid = -1U;
last_hartid = 0;
hart_count = last_hartid = 0;
for (i = 0; i < (count / 2); i++) {
phandle = fdt32_to_cpu(val[2 * i]);
hwirq = fdt32_to_cpu(val[(2 * i) + 1]);
@ -1019,18 +1032,24 @@ int fdt_parse_aclint_node(const void *fdt, int nodeoffset,
if (rc)
continue;
if (SBI_HARTMASK_MAX_BITS <= hartid)
continue;
if (match_hwirq == hwirq) {
if (hartid < first_hartid)
first_hartid = hartid;
if (hartid > last_hartid)
last_hartid = hartid;
hart_count++;
}
}
if ((last_hartid >= first_hartid) && first_hartid != -1U) {
*out_first_hartid = first_hartid;
*out_hart_count = last_hartid - first_hartid + 1;
count = last_hartid - first_hartid + 1;
*out_hart_count = (hart_count < count) ? hart_count : count;
}
return 0;
}
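A worked example of the hart_count capping on the v1.6 side: if the interrupts-extended list matches hartids 0, 2, and 3, then first_hartid = 0 and last_hartid = 3, so the hartid span is 4 while only 3 harts actually matched; the capped variant reports an out_hart_count of 3, whereas the span-only computation reports 4.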
@ -1078,7 +1097,7 @@ int fdt_parse_plmt_node(const void *fdt, int nodeoffset, unsigned long *plmt_bas
if (rc)
continue;
if (SBI_HARTMASK_MAX_BITS <= sbi_hartid_to_hartindex(hartid))
if (SBI_HARTMASK_MAX_BITS <= hartid)
continue;
if (hwirq == IRQ_M_TIMER)
@ -1134,7 +1153,7 @@ int fdt_parse_plicsw_node(const void *fdt, int nodeoffset, unsigned long *plicsw
if (rc)
continue;
if (SBI_HARTMASK_MAX_BITS <= sbi_hartid_to_hartindex(hartid))
if (SBI_HARTMASK_MAX_BITS <= hartid)
continue;
if (hwirq == IRQ_M_SOFT)

View File

@ -11,14 +11,15 @@
#include <libfdt.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_pmu.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/fdt/fdt_pmu.h>
static struct fdt_pmu_hw_event_select_map *fdt_pmu_evt_select;
static uint32_t hw_event_count;
#define FDT_PMU_HW_EVENT_MAX (SBI_PMU_HW_EVENT_MAX * 2)
struct fdt_pmu_hw_event_select_map fdt_pmu_evt_select[FDT_PMU_HW_EVENT_MAX] = {0};
uint32_t hw_event_count;
uint64_t fdt_pmu_get_select_value(uint32_t event_idx)
{
@ -73,7 +74,7 @@ int fdt_pmu_setup(const void *fdt)
event_ctr_map = fdt_getprop(fdt, pmu_offset,
"riscv,event-to-mhpmcounters", &len);
if (event_ctr_map) {
if (event_ctr_map && len >= 8) {
len = len / (sizeof(u32) * 3);
for (i = 0; i < len; i++) {
event_idx_start = fdt32_to_cpu(event_ctr_map[3 * i]);
@ -88,27 +89,21 @@ int fdt_pmu_setup(const void *fdt)
event_val = fdt_getprop(fdt, pmu_offset,
"riscv,event-to-mhpmevent", &len);
if (event_val) {
if (event_val && len >= 8) {
len = len / (sizeof(u32) * 3);
hw_event_count = len;
fdt_pmu_evt_select = sbi_calloc(hw_event_count,
sizeof(*fdt_pmu_evt_select));
if (!fdt_pmu_evt_select)
return SBI_ENOMEM;
for (i = 0; i < len; i++) {
event = &fdt_pmu_evt_select[i];
event = &fdt_pmu_evt_select[hw_event_count];
event->eidx = fdt32_to_cpu(event_val[3 * i]);
event->select = fdt32_to_cpu(event_val[3 * i + 1]);
event->select = (event->select << 32) |
fdt32_to_cpu(event_val[3 * i + 2]);
hw_event_count++;
}
}
event_val = fdt_getprop(fdt, pmu_offset,
"riscv,raw-event-to-mhpmcounters", &len);
if (event_val) {
if (event_val && len >= 20) {
len = len / (sizeof(u32) * 5);
for (i = 0; i < len; i++) {
raw_selector = fdt32_to_cpu(event_val[5 * i]);
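The riscv,event-to-mhpmevent property parsed above is a list of 32-bit cell triplets: an event index followed by the high and low halves of a 64-bit select value. A standalone sketch of that decode, illustrative only:
/*
 * Sketch of the (eidx, select_hi, select_lo) triplet decode used
 * above for "riscv,event-to-mhpmevent". Illustrative only.
 */
#include <stdint.h>
#include <libfdt.h>

static uint64_t example_decode_select(const fdt32_t *event_val, int i)
{
	uint64_t select = fdt32_to_cpu(event_val[3 * i + 1]);

	return (select << 32) | fdt32_to_cpu(event_val[3 * i + 2]);
}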

View File

@ -4,8 +4,6 @@
# Copyright (C) 2020 Bin Meng <bmeng.cn@gmail.com>
#
libsbiutils-objs-$(CONFIG_FDT) += fdt/fdt_early_drivers.carray.o
libsbiutils-objs-$(CONFIG_FDT_DOMAIN) += fdt/fdt_domain.o
libsbiutils-objs-$(CONFIG_FDT_PMU) += fdt/fdt_pmu.o
libsbiutils-objs-$(CONFIG_FDT) += fdt/fdt_helper.o

Some files were not shown because too many files have changed in this diff.