mirror of
				https://github.com/riscv-software-src/opensbi
				synced 2025-11-03 21:48:45 +00:00 
			
		
		
		
	The sbi_scratch_last_hartid() macro is not of much use on platforms with really sparse hartids, so let us replace uses of this macro with other approaches. Signed-off-by: Anup Patel <apatel@ventanamicro.com>
		
			
				
	
	
		
			122 lines
		
	
	
		
			2.6 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			122 lines
		
	
	
		
			2.6 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
 /*
 | 
						|
 * SPDX-License-Identifier: BSD-2-Clause
 | 
						|
 *
 | 
						|
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 | 
						|
 *
 | 
						|
 * Authors:
 | 
						|
 *   Anup Patel <anup.patel@wdc.com>
 | 
						|
 */
 | 
						|
 | 
						|
#include <sbi/riscv_locks.h>
 | 
						|
#include <sbi/sbi_hart.h>
 | 
						|
#include <sbi/sbi_hartmask.h>
 | 
						|
#include <sbi/sbi_platform.h>
 | 
						|
#include <sbi/sbi_scratch.h>
 | 
						|
#include <sbi/sbi_string.h>
 | 
						|
 | 
						|
/* Index of the last hart for which a scratch area is available */
u32 last_hartindex_having_scratch = 0;

/*
 * Map from hart index to hart ID. Only element 0 is explicitly set to
 * -1U (invalid hart ID); the remaining elements are zero-initialized
 * and become meaningful once sbi_scratch_init() fills them in.
 */
u32 hartindex_to_hartid_table[SBI_HARTMASK_MAX_BITS + 1] = { -1U };

/* Map from hart index to that hart's scratch area (NULL if absent) */
struct sbi_scratch *hartindex_to_scratch_table[SBI_HARTMASK_MAX_BITS + 1] = { 0 };

/* Protects extra_offset against concurrent allocations */
static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER;

/* Next free offset within the scratch extra space (bump allocator) */
static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET;
 | 
						|
 | 
						|
/*
 * Translate a hart ID into its hart index.
 *
 * Scans the hartindex-to-hartid mapping table for a matching entry.
 *
 * @param hartid hart ID to look up
 * @return hart index on success, -1U when the hart ID is unknown
 */
u32 sbi_hartid_to_hartindex(u32 hartid)
{
	u32 idx = 0;

	while (idx <= last_hartindex_having_scratch) {
		if (hartindex_to_hartid_table[idx] == hartid)
			return idx;
		idx++;
	}

	return -1U;
}
 | 
						|
 | 
						|
typedef struct sbi_scratch *(*hartid2scratch)(ulong hartid, ulong hartindex);
 | 
						|
 | 
						|
int sbi_scratch_init(struct sbi_scratch *scratch)
 | 
						|
{
 | 
						|
	u32 i, h;
 | 
						|
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
 | 
						|
 | 
						|
	for (i = 0; i < plat->hart_count; i++) {
 | 
						|
		h = (plat->hart_index2id) ? plat->hart_index2id[i] : i;
 | 
						|
		hartindex_to_hartid_table[i] = h;
 | 
						|
		hartindex_to_scratch_table[i] =
 | 
						|
			((hartid2scratch)scratch->hartid_to_scratch)(h, i);
 | 
						|
	}
 | 
						|
 | 
						|
	last_hartindex_having_scratch = plat->hart_count - 1;
 | 
						|
 | 
						|
	return 0;
 | 
						|
}
 | 
						|
 | 
						|
unsigned long sbi_scratch_alloc_offset(unsigned long size)
 | 
						|
{
 | 
						|
	u32 i;
 | 
						|
	void *ptr;
 | 
						|
	unsigned long ret = 0;
 | 
						|
	struct sbi_scratch *rscratch;
 | 
						|
 | 
						|
	/*
 | 
						|
	 * We have a simple brain-dead allocator which never expects
 | 
						|
	 * anything to be free-ed hence it keeps incrementing the
 | 
						|
	 * next allocation offset until it runs-out of space.
 | 
						|
	 *
 | 
						|
	 * In future, we will have more sophisticated allocator which
 | 
						|
	 * will allow us to re-claim free-ed space.
 | 
						|
	 */
 | 
						|
 | 
						|
	if (!size)
 | 
						|
		return 0;
 | 
						|
 | 
						|
	size += __SIZEOF_POINTER__ - 1;
 | 
						|
	size &= ~((unsigned long)__SIZEOF_POINTER__ - 1);
 | 
						|
 | 
						|
	spin_lock(&extra_lock);
 | 
						|
 | 
						|
	if (SBI_SCRATCH_SIZE < (extra_offset + size))
 | 
						|
		goto done;
 | 
						|
 | 
						|
	ret = extra_offset;
 | 
						|
	extra_offset += size;
 | 
						|
 | 
						|
done:
 | 
						|
	spin_unlock(&extra_lock);
 | 
						|
 | 
						|
	if (ret) {
 | 
						|
		for (i = 0; i <= sbi_scratch_last_hartindex(); i++) {
 | 
						|
			rscratch = sbi_hartindex_to_scratch(i);
 | 
						|
			if (!rscratch)
 | 
						|
				continue;
 | 
						|
			ptr = sbi_scratch_offset_ptr(rscratch, ret);
 | 
						|
			sbi_memset(ptr, 0, size);
 | 
						|
		}
 | 
						|
	}
 | 
						|
 | 
						|
	return ret;
 | 
						|
}
 | 
						|
 | 
						|
void sbi_scratch_free_offset(unsigned long offset)
 | 
						|
{
 | 
						|
	if ((offset < SBI_SCRATCH_EXTRA_SPACE_OFFSET) ||
 | 
						|
	    (SBI_SCRATCH_SIZE <= offset))
 | 
						|
		return;
 | 
						|
 | 
						|
	/*
 | 
						|
	 * We don't actually free-up because it's a simple
 | 
						|
	 * brain-dead allocator.
 | 
						|
	 */
 | 
						|
}
 | 
						|
 | 
						|
unsigned long sbi_scratch_used_space(void)
 | 
						|
{
 | 
						|
	unsigned long ret = 0;
 | 
						|
 | 
						|
	spin_lock(&extra_lock);
 | 
						|
	ret = extra_offset;
 | 
						|
	spin_unlock(&extra_lock);
 | 
						|
 | 
						|
	return ret;
 | 
						|
}
 |