mirror of
				https://github.com/smaeul/u-boot.git
				synced 2025-10-31 12:08:19 +00:00 
			
		
		
		
	The branch_if_master macro jumps to a label if the CPU is the "master" core, which we define as having all affinity levels set to 0. To check for this condition, we need to mask off some bits from the MPIDR register, then compare the remaining register value against zero. The implementation of this was slighly broken (it preserved the upper RES0 bits), overly complicated and hard to understand, especially since it lacked comments. The same was true for the very similar branch_if_slave macro. Use a much shorter assembly sequence for those checks, use the same masking for both macros (just negate the final branch), and put some comments on them, to make it clear what the code does. This allows to drop the second temporary register for branch_if_master, so we adjust all call sites as well. Also use the opportunity to remove a misleading comment: the macro works fine on SoCs with multiple clusters. Judging by the commit message, the original problem with the Juno SoC stems from the fact that the master CPU *can* be configured to be from cluster 1, so the assumption that the master CPU has all affinity values set to 0 does not hold there. But this is already mentioned above in a comment, so remove the extra comment. Signed-off-by: Andre Przywara <andre.przywara@arm.com>
		
			
				
	
	
		
			77 lines
		
	
	
		
			1.5 KiB
		
	
	
	
		
			ArmAsm
		
	
	
	
	
	
			
		
		
	
	
			77 lines
		
	
	
		
			1.5 KiB
		
	
	
	
		
			ArmAsm
		
	
	
	
	
	
| /*
 | |
|  * Copyright (C) 2020 Intel Corporation. All rights reserved
 | |
|  *
 | |
|  * SPDX-License-Identifier:    GPL-2.0
 | |
|  */
 | |
| 
 | |
| #include <asm-offsets.h>
 | |
| #include <config.h>
 | |
| #include <linux/linkage.h>
 | |
| #include <asm/macro.h>
 | |
| 
 | |
| ENTRY(lowlevel_init)
 | |
| 	mov	x29, lr			/* Save LR */
 | |
| 
 | |
| #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
 | |
| #if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_ATF)
 | |
| wait_for_atf:
 | |
| 	ldr	x4, =CPU_RELEASE_ADDR
 | |
| 	ldr	x5, [x4]
 | |
| 	cbz	x5, slave_wait_atf
 | |
| 	br	x5
 | |
| slave_wait_atf:
 | |
| 	branch_if_slave x0, wait_for_atf
 | |
| #else
 | |
| 	branch_if_slave x0, 1f
 | |
| #endif
 | |
| 	ldr	x0, =GICD_BASE
 | |
| 	bl	gic_init_secure
 | |
| 1:
 | |
| #if defined(CONFIG_GICV3)
 | |
| 	ldr	x0, =GICR_BASE
 | |
| 	bl	gic_init_secure_percpu
 | |
| #elif defined(CONFIG_GICV2)
 | |
| 	ldr	x0, =GICD_BASE
 | |
| 	ldr	x1, =GICC_BASE
 | |
| 	bl	gic_init_secure_percpu
 | |
| #endif
 | |
| #endif
 | |
| 
 | |
| #ifdef CONFIG_ARMV8_MULTIENTRY
 | |
| 	branch_if_master x0, 2f
 | |
| 
 | |
| 	/*
 | |
| 	 * Slave should wait for master clearing spin table.
 | |
| 	 * This sync prevent slaves observing incorrect
 | |
| 	 * value of spin table and jumping to wrong place.
 | |
| 	 */
 | |
| #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
 | |
| #ifdef CONFIG_GICV2
 | |
| 	ldr	x0, =GICC_BASE
 | |
| #endif
 | |
| 	bl	gic_wait_for_interrupt
 | |
| #endif
 | |
| 
 | |
| 	/*
 | |
| 	 * All slaves will enter EL2 and optionally EL1.
 | |
| 	 */
 | |
| 	adr	x4, lowlevel_in_el2
 | |
| 	ldr	x5, =ES_TO_AARCH64
 | |
| 	bl	armv8_switch_to_el2
 | |
| 
 | |
| lowlevel_in_el2:
 | |
| #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
 | |
| 	adr	x4, lowlevel_in_el1
 | |
| 	ldr	x5, =ES_TO_AARCH64
 | |
| 	bl	armv8_switch_to_el1
 | |
| 
 | |
| lowlevel_in_el1:
 | |
| #endif
 | |
| 
 | |
| #endif /* CONFIG_ARMV8_MULTIENTRY */
 | |
| 
 | |
| 2:
 | |
| 	mov	lr, x29			/* Restore LR */
 | |
| 	ret
 | |
| ENDPROC(lowlevel_init)
 |