Automatic merge of 'next' into merge (2024-09-17 22:19)
mpe committed Sep 17, 2024
2 parents 48eb0b9 + 39190ac; commit 93a0594
Showing 5 changed files with 23 additions and 14 deletions.
MAINTAINERS (1 addition, 0 deletions)

@@ -12976,6 +12976,7 @@ M: Michael Ellerman <[email protected]>
 R: Nicholas Piggin <[email protected]>
 R: Christophe Leroy <[email protected]>
 R: Naveen N Rao <[email protected]>
+R: Madhavan Srinivasan <[email protected]>
 L: [email protected]
 S: Supported
 W: https://github.com/linuxppc/wiki/wiki
arch/powerpc/include/asm/asm-compat.h (6 additions, 0 deletions)

@@ -37,6 +37,12 @@
 #define STDX_BE stringify_in_c(stdbrx)
 #endif

+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
 #else /* 32-bit */

 /* operations for longs and pointers */
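Why the constraint differs by compiler: on 64-bit Power, ld and std are DS-form instructions, so their 16-bit displacement must be a multiple of four. GCC's "Y" machine constraint describes exactly such a memory operand, "Z" allows only indexed or indirect addressing, and the trailing "<>" additionally permits pre-increment/pre-decrement forms; clang does not implement "Y", hence the narrower string under CONFIG_CC_IS_CLANG. A minimal sketch of how the macro is consumed (a freestanding ppc64 illustration, not kernel code; load64 is a hypothetical name):

#include <stdint.h>

#ifdef __clang__                        /* stand-in for CONFIG_CC_IS_CLANG */
#define DS_FORM_CONSTRAINT "Z<>"
#else
#define DS_FORM_CONSTRAINT "YZ<>"
#endif

/* Same template the atomic.h hunk below uses: %U1 appends "u" for
 * update forms and %X1 appends "x" for indexed forms, so the
 * constraint string decides which ld variant the compiler may pick. */
static inline int64_t load64(const int64_t *p)
{
        int64_t val;

        __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(val) : DS_FORM_CONSTRAINT (*p));
        return val;
}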
arch/powerpc/include/asm/atomic.h (3 additions, 2 deletions)

@@ -11,6 +11,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 #include <asm/asm-const.h>
+#include <asm/asm-compat.h>

 /*
  * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
 	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
 		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
 	else
-		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));

 	return t;
 }
@@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
 	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
 		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
 	else
-		__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+		__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
 }

 #define ATOMIC64_OP(op, asm_op) \
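For orientation, a hedged usage sketch of the accessors patched above (a hypothetical counter, not part of this commit). On a non-prefixed ppc64 kernel the read compiles to the ld%U1%X1 template, which is exactly where the constraint change takes effect:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic64_t bytes_rx = ATOMIC64_INIT(0);

static void record_rx(size_t n)
{
        atomic64_add(n, &bytes_rx);        /* lock-free 64-bit add */
}

static s64 snapshot_rx(void)
{
        return atomic64_read(&bytes_rx);   /* the ld template above */
}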
arch/powerpc/include/asm/uaccess.h (1 addition, 6 deletions)

@@ -6,6 +6,7 @@
 #include <asm/page.h>
 #include <asm/extable.h>
 #include <asm/kup.h>
+#include <asm/asm-compat.h>

 #ifdef __powerpc64__
 /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
@@ -92,12 +93,6 @@ __pu_failed: \
 		: label)
 #endif

-#ifdef CONFIG_CC_IS_CLANG
-#define DS_FORM_CONSTRAINT "Z<>"
-#else
-#define DS_FORM_CONSTRAINT "YZ<>"
-#endif
-
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_KERNEL_PREFIXED
 #define __put_user_asm2_goto(x, ptr, label) \
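The macro body is unchanged by this hunk; the definition simply moved to asm-compat.h so uaccess.h and atomic.h share one copy. For context, a sketch in the style of the non-prefixed __put_user_asm2_goto (the exact body is an assumption, not quoted from this commit): the DS-form constraint describes the user-space destination, and EX_TABLE registers a fixup that jumps to label if the store faults.

#define put_user64_sketch(x, ptr, label)                 \
        asm goto("1:    std%U1%X1 %0,%1 # put_user\n"    \
                 EX_TABLE(1b, %l2)                       \
                 :                                       \
                 : "r" (x), DS_FORM_CONSTRAINT (*(ptr))  \
                 :                                       \
                 : label)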
arch/powerpc/kvm/book3s_hv.c (12 additions, 6 deletions)

@@ -1922,14 +1922,22 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,

 	r = EMULATE_FAIL;
 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-		if (cause == FSCR_MSGP_LG)
+		switch (cause) {
+		case FSCR_MSGP_LG:
 			r = kvmppc_emulate_doorbell_instr(vcpu);
-		if (cause == FSCR_PM_LG)
+			break;
+		case FSCR_PM_LG:
 			r = kvmppc_pmu_unavailable(vcpu);
-		if (cause == FSCR_EBB_LG)
+			break;
+		case FSCR_EBB_LG:
 			r = kvmppc_ebb_unavailable(vcpu);
-		if (cause == FSCR_TM_LG)
+			break;
+		case FSCR_TM_LG:
 			r = kvmppc_tm_unavailable(vcpu);
+			break;
+		default:
+			break;
+		}
 	}
 	if (r == EMULATE_FAIL) {
 		kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
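The conversion preserves behavior: cause holds a single value, so at most one body of the old if chain could run, but all four comparisons were still evaluated. The switch tests cause once, every case ends in an explicit break, and an unmatched cause falls to default, leaving r at the EMULATE_FAIL assigned just above. A freestanding before/after sketch of the pattern (hypothetical handlers, not the KVM code):

static int handle_a(void) { return 0; }
static int handle_b(void) { return 0; }

static int dispatch_old(int cause)
{
        int r = -1;                /* stand-in for EMULATE_FAIL */

        if (cause == 1)            /* every test runs, even after a match */
                r = handle_a();
        if (cause == 2)
                r = handle_b();
        return r;
}

static int dispatch_new(int cause)
{
        int r = -1;

        switch (cause) {           /* one evaluation, explicit breaks */
        case 1:
                r = handle_a();
                break;
        case 2:
                r = handle_b();
                break;
        default:
                break;             /* unhandled: keep the failure value */
        }
        return r;
}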
@@ -4049,7 +4057,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	/* Return to whole-core mode if we split the core earlier */
 	if (cmd_bit) {
 		unsigned long hid0 = mfspr(SPRN_HID0);
-		unsigned long loops = 0;

 		hid0 &= ~HID0_POWER8_DYNLPARDIS;
 		stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
@@ -4061,7 +4068,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		if (!(hid0 & stat_bit))
 			break;
 		cpu_relax();
-		++loops;
 	}
 	split_info.do_nap = 0;
 }
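The deleted loops counter was incremented but never read, so removing it is a pure cleanup; what survives is an ordinary poll-until-clear busy-wait on the HID0 LPAR-mode status bits. A freestanding sketch of that pattern (read_status and relax are hypothetical stand-ins for mfspr(SPRN_HID0) and cpu_relax()):

extern unsigned long read_status(void);        /* stand-in for mfspr(SPRN_HID0) */

static inline void relax(void)                 /* stand-in for cpu_relax() */
{
        __asm__ __volatile__("" ::: "memory");  /* compiler barrier only */
}

/* Spin until none of the bits in mask remain set, as the split-core
 * unwind above does with HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE. */
static void wait_bits_clear(unsigned long mask)
{
        while (read_status() & mask)
                relax();
}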
