mirror of https://github.com/riscv-software-src/opensbi (synced 2025-11-04 05:50:22 +00:00)

When -march=rv64im_zalrsc_zicsr is used, provide atomic operations and
locks using lr and sc instructions only.

Signed-off-by: Chao-ying Fu <cfu@mips.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20250226014727.19710-1-cfu@mips.com
Signed-off-by: Anup Patel <anup@brainfault.org>
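As a hedged illustration of the fallback described above (not part of the file
below; the helper name and types are made up for the example), an atomic
fetch-and-add can be built from lr/sc alone when the A extension's amoadd.w is
unavailable:

/* Sketch only: assumes a RISC-V toolchain with Zalrsc (lr/sc) support. */
static inline unsigned int lrsc_fetch_add(volatile unsigned int *p,
					  unsigned int val)
{
	unsigned int old, rc;

	__asm__ __volatile__(
		/* Retry until the store-conditional succeeds. */
		"1:	lr.w.aqrl	%0, %2\n"
		"	add	%1, %0, %3\n"
		"	sc.w.aqrl	%1, %1, %2\n"
		"	bnez	%1, 1b\n"
		: "=&r"(old), "=&r"(rc), "+A"(*p)
		: "r"(val)
		: "memory");

	return old;	/* value before the addition */
}

This is the same retry pattern spin_lock() below uses to take the next ticket
when __riscv_atomic is not defined.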
		
			
				
	
	
		
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2021 Christoph Müllner <cmuellner@linux.com>
 */

#include <sbi/riscv_barrier.h>
#include <sbi/riscv_locks.h>

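/*
 * Ticket lock helper: the lock is free when the owner (now-serving)
 * counter has caught up with the next (ticket-dispenser) counter.
 */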
static inline bool spin_lock_unlocked(spinlock_t lock)
{
	return lock.owner == lock.next;
}

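/* Report whether the lock is currently held, without acquiring it. */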
bool spin_lock_check(spinlock_t *lock)
{
	RISCV_FENCE(r, rw);
	return !spin_lock_unlocked(*lock);
}

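/*
 * Single acquisition attempt: take the next ticket only if the lock is
 * currently free. Returns true when the lock was acquired.
 */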
bool spin_trylock(spinlock_t *lock)
{
	unsigned long inc = 1u << TICKET_SHIFT;
	unsigned long mask = 0xffffu << TICKET_SHIFT;
	u32 l0, tmp1, tmp2;

	__asm__ __volatile__(
		/* Get the current lock counters. */
		"1:	lr.w.aq	%0, %3\n"
		"	slli	%2, %0, %6\n"
		"	and	%2, %2, %5\n"
		"	and	%1, %0, %5\n"
		/* Is the lock free right now? */
		"	bne	%1, %2, 2f\n"
		"	add	%0, %0, %4\n"
		/* Acquire the lock. */
		"	sc.w.rl	%0, %0, %3\n"
		"	bnez	%0, 1b\n"
		"2:"
		: "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock)
		: "r"(inc), "r"(mask), "I"(TICKET_SHIFT)
		: "memory");

	return l0 == 0;
}

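/*
 * Acquire the lock: atomically grab the next ticket (amoadd.w with the
 * A extension, an lr/sc loop with Zalrsc) and spin until the owner
 * counter reaches that ticket.
 */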
void spin_lock(spinlock_t *lock)
{
	unsigned long inc = 1u << TICKET_SHIFT;
	unsigned long mask = 0xffffu;
	u32 l0, tmp1, tmp2;

	__asm__ __volatile__(
		/* Atomically increment the next ticket. */
#ifdef __riscv_atomic
		"	amoadd.w.aqrl	%0, %4, %3\n"
#elif __riscv_zalrsc
		"3:	lr.w.aqrl	%0, %3\n"
		"	addw	%1, %0, %4\n"
		"	sc.w.aqrl	%1, %1, %3\n"
		"	bnez	%1, 3b\n"
#else
#error "need a or zalrsc"
#endif

		/* Did we get the lock? */
		"	srli	%1, %0, %6\n"
		"	and	%1, %1, %5\n"
		"1:	and	%2, %0, %5\n"
		"	beq	%1, %2, 2f\n"

		/* If not, then spin on the lock. */
		"	lw	%0, %3\n"
		RISCV_ACQUIRE_BARRIER
		"	j	1b\n"
		"2:"
		: "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock)
		: "r"(inc), "r"(mask), "I"(TICKET_SHIFT)
		: "memory");
}

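/*
 * Release the lock by advancing the owner counter with release
 * semantics, handing it to the next ticket holder.
 */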
void spin_unlock(spinlock_t *lock)
{
	__smp_store_release(&lock->owner, lock->owner + 1);
}