Skip to content

Commit fdbb058

Browse files
ozbenhstewartsmith
authored and committed
xive: Implement cache watch and use it for EQs
We need to do cache coherent updates of the EQs when modifying escalation interrupts. Use the cache watch facility for that. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
1 parent eba2c3b commit fdbb058

File tree

2 files changed

+135
-7
lines changed

2 files changed

+135
-7
lines changed

hw/xive.c

Lines changed: 99 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -686,6 +686,8 @@ static int64_t __xive_cache_scrub(struct xive *x, enum xive_cache_type ctype,
686686
mreg = PC_VPC_SCRUB_MASK;
687687
mregx = X_PC_VPC_SCRUB_MASK;
688688
break;
689+
default:
690+
return OPAL_PARAMETER;
689691
}
690692
if (ctype == xive_cache_vpc) {
691693
mval = PC_SCRUB_BLOCK_ID | PC_SCRUB_OFFSET;
@@ -719,6 +721,86 @@ static int64_t xive_ivc_scrub(struct xive *x, uint64_t block, uint64_t idx)
719721
return __xive_cache_scrub(x, xive_cache_ivc, block, idx, false, false);
720722
}
721723

724+
static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
725+
uint64_t block, uint64_t idx,
726+
uint32_t start_dword, uint32_t dword_count,
727+
void *new_data, bool light_watch)
728+
{
729+
uint64_t sreg, sregx, dreg0, dreg0x;
730+
uint64_t dval0, sval, status, i;
731+
732+
switch (ctype) {
733+
case xive_cache_eqc:
734+
sreg = VC_EQC_CWATCH_SPEC;
735+
sregx = X_VC_EQC_CWATCH_SPEC;
736+
dreg0 = VC_EQC_CWATCH_DAT0;
737+
dreg0x = X_VC_EQC_CWATCH_DAT0;
738+
sval = SETFIELD(VC_EQC_CWATCH_BLOCKID, idx, block);
739+
break;
740+
case xive_cache_vpc:
741+
sreg = PC_VPC_CWATCH_SPEC;
742+
sregx = X_PC_VPC_CWATCH_SPEC;
743+
dreg0 = PC_VPC_CWATCH_DAT0;
744+
dreg0x = X_PC_VPC_CWATCH_DAT0;
745+
sval = SETFIELD(PC_VPC_CWATCH_BLOCKID, idx, block);
746+
break;
747+
default:
748+
return OPAL_PARAMETER;
749+
}
750+
751+
/* The full bit is in the same position for EQC and VPC */
752+
if (!light_watch)
753+
sval |= VC_EQC_CWATCH_FULL;
754+
755+
do {
756+
/* Write the cache watch spec */
757+
__xive_regw(x, sreg, sregx, sval, NULL);
758+
759+
/* Load data0 register to populate the watch */
760+
dval0 = __xive_regr(x, dreg0, dreg0x, NULL);
761+
762+
/* Write the words into the watch facility. We write in reverse
763+
* order in case word 0 is part of it as it must be the last
764+
* one written.
765+
*/
766+
for (i = start_dword + dword_count - 1; i >= start_dword ;i--) {
767+
uint64_t dw = ((uint64_t *)new_data)[i - start_dword];
768+
__xive_regw(x, dreg0 + i * 8, dreg0x + i, dw, NULL);
769+
}
770+
771+
/* Write data0 register to trigger the update if word 0 wasn't
772+
* written above
773+
*/
774+
if (start_dword > 0)
775+
__xive_regw(x, dreg0, dreg0x, dval0, NULL);
776+
777+
/* This may not be necessary for light updates (it's possible\
778+
* that a sync in sufficient, TBD). Ensure the above is
779+
* complete and check the status of the watch.
780+
*/
781+
status = __xive_regr(x, sreg, sregx, NULL);
782+
783+
/* XXX Add timeout ? */
784+
785+
/* Bits FULL and CONFLICT are in the same position in
786+
* EQC and VPC
787+
*/
788+
} while((status & VC_EQC_CWATCH_FULL) &&
789+
(status & VC_EQC_CWATCH_CONFLICT));
790+
791+
return 0;
792+
}
793+
794+
static int64_t xive_eqc_cache_update(struct xive *x, uint64_t block,
795+
uint64_t idx, uint32_t start_dword,
796+
uint32_t dword_count, void *new_data,
797+
bool light_watch)
798+
{
799+
return __xive_cache_watch(x, xive_cache_eqc, block, idx,
800+
start_dword, dword_count,
801+
new_data, light_watch);
802+
}
803+
722804
static bool xive_set_vsd(struct xive *x, uint32_t tbl, uint32_t idx, uint64_t v)
723805
{
724806
/* Set VC version */
@@ -1518,6 +1600,7 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
15181600
struct xive_ive *ive;
15191601
uint32_t eq_blk, eq_idx;
15201602
bool is_escalation = GIRQ_IS_ESCALATION(isn);
1603+
uint64_t new_ive;
15211604

15221605
/* Find XIVE on which the IVE resides */
15231606
x = xive_from_isn(isn);
@@ -1534,15 +1617,17 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
15341617

15351618
lock(&x->lock);
15361619

1620+
/* Read existing IVE */
1621+
new_ive = ive->w;
1622+
15371623
/* Are we masking ? */
15381624
if (prio == 0xff) {
15391625
/* Masking, just set the M bit */
15401626
if (!is_escalation)
1541-
ive->w |= IVE_MASKED;
1627+
new_ive |= IVE_MASKED;
15421628

15431629
xive_vdbg(x, "ISN %x masked !\n", isn);
15441630
} else {
1545-
uint64_t new_ive;
15461631

15471632
/* Unmasking, re-target the IVE. First find the EQ
15481633
* corresponding to the target
@@ -1560,15 +1645,22 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
15601645
new_ive = ive->w & ~IVE_MASKED;
15611646
new_ive = SETFIELD(IVE_EQ_BLOCK, new_ive, eq_blk);
15621647
new_ive = SETFIELD(IVE_EQ_INDEX, new_ive, eq_idx);
1563-
sync();
1564-
ive->w = new_ive;
15651648

15661649
xive_vdbg(x,"ISN %x routed to eq %x/%x IVE=%016llx !\n",
1567-
isn, eq_blk, eq_idx, new_ive);
1650+
isn, eq_blk, eq_idx, new_ive);
15681651
}
15691652

1570-
/* Scrub IVE from cache */
1571-
xive_ivc_scrub(x, x->chip_id, GIRQ_TO_IDX(isn));
1653+
/* Updating the cache differs between real IVEs and escalation
1654+
* IVEs inside an EQ
1655+
*/
1656+
if (is_escalation) {
1657+
xive_eqc_cache_update(x, x->chip_id, GIRQ_TO_IDX(isn),
1658+
2, 1, &new_ive, true);
1659+
} else {
1660+
sync();
1661+
ive->w = new_ive;
1662+
xive_ivc_scrub(x, x->chip_id, GIRQ_TO_IDX(isn));
1663+
}
15721664

15731665
unlock(&x->lock);
15741666
return true;

include/xive.h

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,28 @@
107107
#define PC_SCRUB_WANT_INVAL PPC_BIT(2)
108108
#define PC_SCRUB_BLOCK_ID PPC_BITMASK(27,31)
109109
#define PC_SCRUB_OFFSET PPC_BITMASK(45,63)
110+
#define X_PC_VPC_CWATCH_SPEC 0x167
111+
#define PC_VPC_CWATCH_SPEC 0x738
112+
#define PC_VPC_CWATCH_CONFLICT PPC_BIT(0)
113+
#define PC_VPC_CWATCH_FULL PPC_BIT(8)
114+
#define PC_VPC_CWATCH_BLOCKID PPC_BITMASK(27,31)
115+
#define PC_VPC_CWATCH_OFFSET PPC_BITMASK(45,63)
116+
#define X_PC_VPC_CWATCH_DAT0 0x168
117+
#define PC_VPC_CWATCH_DAT0 0x740
118+
#define X_PC_VPC_CWATCH_DAT1 0x169
119+
#define PC_VPC_CWATCH_DAT1 0x748
120+
#define X_PC_VPC_CWATCH_DAT2 0x16a
121+
#define PC_VPC_CWATCH_DAT2 0x750
122+
#define X_PC_VPC_CWATCH_DAT3 0x16b
123+
#define PC_VPC_CWATCH_DAT3 0x758
124+
#define X_PC_VPC_CWATCH_DAT4 0x16c
125+
#define PC_VPC_CWATCH_DAT4 0x760
126+
#define X_PC_VPC_CWATCH_DAT5 0x16d
127+
#define PC_VPC_CWATCH_DAT5 0x768
128+
#define X_PC_VPC_CWATCH_DAT6 0x16e
129+
#define PC_VPC_CWATCH_DAT6 0x770
130+
#define X_PC_VPC_CWATCH_DAT7 0x16f
131+
#define PC_VPC_CWATCH_DAT7 0x778
110132

111133
/* VC0 register offsets */
112134
#define X_VC_GLOBAL_CONFIG 0x200
@@ -144,6 +166,20 @@
144166
#define VC_EQC_SCRUB_TRIG 0x910
145167
#define X_VC_EQC_SCRUB_MASK 0x213
146168
#define VC_EQC_SCRUB_MASK 0x918
169+
#define X_VC_EQC_CWATCH_SPEC 0x215
170+
#define VC_EQC_CWATCH_SPEC 0x928
171+
#define VC_EQC_CWATCH_CONFLICT PPC_BIT(0)
172+
#define VC_EQC_CWATCH_FULL PPC_BIT(8)
173+
#define VC_EQC_CWATCH_BLOCKID PPC_BITMASK(28,31)
174+
#define VC_EQC_CWATCH_OFFSET PPC_BITMASK(40,63)
175+
#define X_VC_EQC_CWATCH_DAT0 0x216
176+
#define VC_EQC_CWATCH_DAT0 0x930
177+
#define X_VC_EQC_CWATCH_DAT1 0x217
178+
#define VC_EQC_CWATCH_DAT1 0x938
179+
#define X_VC_EQC_CWATCH_DAT2 0x218
180+
#define VC_EQC_CWATCH_DAT2 0x940
181+
#define X_VC_EQC_CWATCH_DAT3 0x219
182+
#define VC_EQC_CWATCH_DAT3 0x948
147183
#define X_VC_IVC_SCRUB_TRIG 0x222
148184
#define VC_IVC_SCRUB_TRIG 0x990
149185
#define X_VC_IVC_SCRUB_MASK 0x223

0 commit comments

Comments
 (0)