diff --git a/xen/arch/arm/include/asm/current.h b/xen/arch/arm/include/asm/current.h
index 6973eeb1d13f..51d1c8efa83d 100644
--- a/xen/arch/arm/include/asm/current.h
+++ b/xen/arch/arm/include/asm/current.h
@@ -53,7 +53,7 @@ static inline struct cpu_info *get_cpu_info(void)
 
 DECLARE_PER_CPU(unsigned int, cpu_id);
 
-#define get_processor_id() this_cpu(cpu_id)
+#define smp_processor_id() this_cpu(cpu_id)
 #define set_processor_id(id)                            \
 do {                                                    \
     WRITE_SYSREG(__per_cpu_offset[(id)], TPIDR_EL2);    \
diff --git a/xen/arch/arm/include/asm/smp.h b/xen/arch/arm/include/asm/smp.h
index 4fabdf5310d8..22e12f38db6b 100644
--- a/xen/arch/arm/include/asm/smp.h
+++ b/xen/arch/arm/include/asm/smp.h
@@ -11,8 +11,6 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
 
 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
 
-#define smp_processor_id() get_processor_id()
-
 /*
  * Do we, for platform reasons, need to actually keep CPUs online when we
  * would otherwise prefer them to be off?
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index ec76de3cac12..5533aed455e7 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -401,7 +401,7 @@ void start_secondary(void)
 /* Shut down the current CPU */
 void __cpu_disable(void)
 {
-    unsigned int cpu = get_processor_id();
+    unsigned int cpu = smp_processor_id();
 
     local_irq_disable();
     gic_disable_cpu();
diff --git a/xen/arch/x86/include/asm/current.h b/xen/arch/x86/include/asm/current.h
index da5e152a10cc..35cca5cbe448 100644
--- a/xen/arch/x86/include/asm/current.h
+++ b/xen/arch/x86/include/asm/current.h
@@ -99,7 +99,7 @@ static inline struct cpu_info *get_cpu_info(void)
 #define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
 #define current               (get_current())
 
-#define get_processor_id()    (get_cpu_info()->processor_id)
+#define smp_processor_id()    (get_cpu_info()->processor_id)
 #define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
 
 /*
diff --git a/xen/arch/x86/include/asm/smp.h b/xen/arch/x86/include/asm/smp.h
index 41a3b6a0dadf..c0b5d7cdd8dd 100644
--- a/xen/arch/x86/include/asm/smp.h
+++ b/xen/arch/x86/include/asm/smp.h
@@ -48,13 +48,6 @@ extern void cpu_exit_clear(unsigned int cpu);
 extern void cpu_uninit(unsigned int cpu);
 int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm);
 
-/*
- * This function is needed by all SMP systems. It must _always_ be valid
- * from the initial startup. We map APIC_BASE very early in page_setup(),
- * so this is correct in the x86 case.
- */
-#define smp_processor_id() get_processor_id()
-
 void __stop_this_cpu(void);
 
 long cf_check cpu_up_helper(void *data);
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 5a3f96fbaadd..00b1bc402d6d 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -122,7 +122,7 @@ static inline void mm_rwlock_init(mm_rwlock_t *l)
 
 static inline int mm_write_locked_by_me(mm_rwlock_t *l)
 {
-    return (l->locker == get_processor_id());
+    return (l->locker == smp_processor_id());
 }
 
 static inline void _mm_write_lock(const struct domain *d, mm_rwlock_t *l,
@@ -132,7 +132,7 @@ static inline void _mm_write_lock(const struct domain *d, mm_rwlock_t *l,
 {
     _check_lock_level(d, level);
     percpu_write_lock(p2m_percpu_rwlock, &l->lock);
-    l->locker = get_processor_id();
+    l->locker = smp_processor_id();
     l->locker_function = func;
     l->unlock_level = _get_lock_level();
     _set_lock_level(_lock_level(d, level));