Mirror of https://github.com/smaeul/u-boot.git (synced 2025-10-22 08:38:15 +01:00)
At present U-Boot SPL fails to boot on the SiFive Unleashed board, due to a load address misaligned exception that happens when loading the FIT image in spl_load_simple_fit(). The exception happens in memmove(), which is called by fdt_splice_().

Commit 8f0dc4cfd106 introduces an assembly version of memmove, but it does not take misalignment into account: it checks whether the length is a multiple of the machine word size, but the pointers also need to be aligned. As a result it generates misaligned loads/stores for the majority of cases, causing a significant performance regression on hardware that traps misaligned loads/stores and emulates them in firmware.

The current behaviour of memcpy is that it checks whether the src and dest pointers are co-aligned (i.e. congruent modulo SZREG). If they are, it copies data word by word after first aligning the pointers to a word boundary. If src and dest are not co-aligned, a byte-wise copy is performed instead.

This patch is taken from the Linux kernel patch [1], which has not been applied upstream at the time of writing. It fixes memmove and optimises memcpy for the misaligned cases. It first aligns the destination pointer to a word boundary regardless of whether src and dest are co-aligned. If they are co-aligned, a word-wise copy is performed. If they are not, it loads two adjacent words from src and uses shifts to assemble a full machine word. Some additional assembly-level micro-optimisation is also performed so that more instructions can be compressed (e.g. prefer a0 over t6).

With this patch, U-Boot boots again on the SiFive Unleashed board.

[1] https://patchwork.kernel.org/project/linux-riscv/patch/20210216225555.4976-1-gary@garyguo.net/

Fixes: 8f0dc4cfd106 ("riscv: assembler versions of memcpy, memmove, memset")
Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
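As background for the "load two adjacent words and use shifts" description above, here is a minimal C sketch of the technique. It is a hypothetical illustration, not part of this patch: it assumes a little-endian machine with sizeof(uintptr_t) == SZREG, a destination that is already word-aligned, and a non-zero source misalignment (the co-aligned case is handled by the plain word-wise loop). It is shown as a forward copy for simplicity; the memmove path in the file below walks backwards.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical illustration only: copy nwords machine words to a
 * word-aligned dst from a src that is NOT word-aligned, using only
 * aligned word loads plus shifts (little-endian). */
static void copy_words_misaligned(uintptr_t *dst, const uint8_t *src,
				  size_t nwords)
{
	size_t off = (uintptr_t)src % sizeof(uintptr_t);     /* 1..SZREG-1 here */
	const uintptr_t *s = (const uintptr_t *)(src - off); /* align down */
	unsigned lo = 8 * off;                       /* bits taken from lower word */
	unsigned hi = 8 * (sizeof(uintptr_t) - off); /* bits taken from upper word */
	uintptr_t cur = *s++;                        /* aligned word containing src[0] */

	while (nwords--) {
		uintptr_t next = *s++;
		/* low bytes of the result come from cur, high bytes from next */
		*dst++ = (cur >> lo) | (next << hi);
		cur = next;
	}
}

This mirrors the srl/sll/or sequence in .Lmisaligned_word_copy below, where t3 = 8 * (a1 & (SZREG-1)) and its negation t4 (only the low shift bits matter) play the roles of the two shift amounts.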
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/linkage.h>
#include <asm/asm.h>

ENTRY(__memmove)
WEAK(memmove)
	/*
	 * Here we determine if forward copy is possible. Forward copy is
	 * preferred to backward copy as it is more cache friendly.
	 *
	 * If a0 >= a1, t0 gives their distance, if t0 >= a2 then we can
	 *   copy forward.
	 * If a0 < a1, we can always copy forward. This will make t0 negative,
	 *   so a *unsigned* comparison will always have t0 >= a2.
	 *
	 * For forward copy we just delegate the task to memcpy.
	 */
	sub	t0, a0, a1
	bltu	t0, a2, 1f
	tail	__memcpy
1:

	/*
	 * Register allocation for code below:
	 * a0 - end of uncopied dst
	 * a1 - end of uncopied src
	 * t0 - start of uncopied dst
	 */
	mv	t0, a0
	add	a0, a0, a2
	add	a1, a1, a2

	/*
	 * Use bytewise copy if too small.
	 *
	 * This threshold must be at least 2*SZREG to ensure at least one
	 * wordwise copy is performed. It is chosen to be 16 because it will
	 * save at least 7 iterations of bytewise copy, which pays off the
	 * fixed overhead.
	 */
	li	a3, 16
	bltu	a2, a3, .Lbyte_copy_tail

	/*
	 * Bytewise copy first to align t0 to word boundary.
	 */
	andi	a2, a0, ~(SZREG-1)
	beq	a0, a2, 2f
1:
	addi	a1, a1, -1
	lb	a5, 0(a1)
	addi	a0, a0, -1
	sb	a5, 0(a0)
	bne	a0, a2, 1b
2:

	/*
	 * Now a0 is word-aligned. If a1 is also word aligned, we could perform
	 * aligned word-wise copy. Otherwise we need to perform misaligned
	 * word-wise copy.
	 */
	andi	a3, a1, SZREG-1
	bnez	a3, .Lmisaligned_word_copy

	/* Wordwise copy */
	addi	t0, t0, SZREG-1
	bleu	a0, t0, 2f
1:
	addi	a1, a1, -SZREG
	REG_L	a5, 0(a1)
	addi	a0, a0, -SZREG
	REG_S	a5, 0(a0)
	bgtu	a0, t0, 1b
2:
	addi	t0, t0, -(SZREG-1)

.Lbyte_copy_tail:
	/*
	 * Bytewise copy anything left.
	 */
	beq	a0, t0, 2f
1:
	addi	a1, a1, -1
	lb	a5, 0(a1)
	addi	a0, a0, -1
	sb	a5, 0(a0)
	bne	a0, t0, 1b
2:

	mv	a0, t0
	ret

.Lmisaligned_word_copy:
	/*
	 * Misaligned word-wise copy.
	 * For misaligned copy we still perform word-wise copy, but we need to
	 * use the value fetched from the previous iteration and do some shifts.
	 * This is safe because we wouldn't access more words than necessary.
	 */

	/* Calculate shifts */
	slli	t3, a3, 3
	sub	t4, x0, t3 /* negate is okay as shift will only look at LSBs */

	/* Load the initial value and align a1 */
	andi	a1, a1, ~(SZREG-1)
	REG_L	a5, 0(a1)

	addi	t0, t0, SZREG-1
	/* At least one iteration will be executed here, no check */
1:
	sll	a4, a5, t4
	addi	a1, a1, -SZREG
	REG_L	a5, 0(a1)
	srl	a2, a5, t3
	or	a2, a2, a4
	addi	a0, a0, -SZREG
	REG_S	a2, 0(a0)
	bgtu	a0, t0, 1b

	/* Update pointers to correct value */
	addi	t0, t0, -(SZREG-1)
	add	a1, a1, a3

	j	.Lbyte_copy_tail

END(__memmove)
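For reference, the forward-vs-backward decision at the top of __memmove ("sub t0, a0, a1; bltu t0, a2; tail __memcpy") can be restated in C as the check below. The helper name is made up for illustration and is not part of the patch.

#include <stdint.h>
#include <stddef.h>

/* Mirrors the overlap test at the top of __memmove: dst - src is computed
 * modulo 2^XLEN, so when dst < src the difference wraps to a large unsigned
 * value and is always >= len, meaning a forward copy (memcpy) never
 * overwrites source bytes before they are read. Only when dst lands inside
 * [src, src + len) is the backward copy above required. */
static int forward_copy_is_safe(const void *dst, const void *src, size_t len)
{
	return (uintptr_t)dst - (uintptr_t)src >= len;
}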