@@ -686,6 +686,8 @@ static int64_t __xive_cache_scrub(struct xive *x, enum xive_cache_type ctype,
 		mreg = PC_VPC_SCRUB_MASK;
 		mregx = X_PC_VPC_SCRUB_MASK;
 		break;
+	default:
+		return OPAL_PARAMETER;
 	}
 	if (ctype == xive_cache_vpc) {
 		mval = PC_SCRUB_BLOCK_ID | PC_SCRUB_OFFSET;
@@ -719,6 +721,86 @@ static int64_t xive_ivc_scrub(struct xive *x, uint64_t block, uint64_t idx)
 	return __xive_cache_scrub(x, xive_cache_ivc, block, idx, false, false);
 }
 
+static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
+				  uint64_t block, uint64_t idx,
+				  uint32_t start_dword, uint32_t dword_count,
+				  void *new_data, bool light_watch)
+{
+	uint64_t sreg, sregx, dreg0, dreg0x;
+	uint64_t dval0, sval, status, i;
+
+	switch (ctype) {
+	case xive_cache_eqc:
+		sreg = VC_EQC_CWATCH_SPEC;
+		sregx = X_VC_EQC_CWATCH_SPEC;
+		dreg0 = VC_EQC_CWATCH_DAT0;
+		dreg0x = X_VC_EQC_CWATCH_DAT0;
+		sval = SETFIELD(VC_EQC_CWATCH_BLOCKID, idx, block);
+		break;
+	case xive_cache_vpc:
+		sreg = PC_VPC_CWATCH_SPEC;
+		sregx = X_PC_VPC_CWATCH_SPEC;
+		dreg0 = PC_VPC_CWATCH_DAT0;
+		dreg0x = X_PC_VPC_CWATCH_DAT0;
+		sval = SETFIELD(PC_VPC_CWATCH_BLOCKID, idx, block);
+		break;
+	default:
+		return OPAL_PARAMETER;
+	}
+
+	/* The full bit is in the same position for EQC and VPC */
+	if (!light_watch)
+		sval |= VC_EQC_CWATCH_FULL;
+
+	do {
+		/* Write the cache watch spec */
+		__xive_regw(x, sreg, sregx, sval, NULL);
+
+		/* Load data0 register to populate the watch */
+		dval0 = __xive_regr(x, dreg0, dreg0x, NULL);
+
+		/* Write the words into the watch facility. We write in reverse
+		 * order in case word 0 is part of it as it must be the last
+		 * one written.
+		 */
+		for (i = start_dword + dword_count - 1; i >= start_dword; i--) {
+			uint64_t dw = ((uint64_t *)new_data)[i - start_dword];
+			__xive_regw(x, dreg0 + i * 8, dreg0x + i, dw, NULL);
+		}
+
+		/* Write data0 register to trigger the update if word 0 wasn't
+		 * written above
+		 */
+		if (start_dword > 0)
+			__xive_regw(x, dreg0, dreg0x, dval0, NULL);
+
+		/* This may not be necessary for light updates (it's possible
+		 * that a sync is sufficient, TBD). Ensure the above is
+		 * complete and check the status of the watch.
+		 */
+		status = __xive_regr(x, sreg, sregx, NULL);
+
+		/* XXX Add timeout ? */
+
+		/* Bits FULL and CONFLICT are in the same position in
+		 * EQC and VPC
+		 */
+	} while ((status & VC_EQC_CWATCH_FULL) &&
+		 (status & VC_EQC_CWATCH_CONFLICT));
+
+	return 0;
+}
+
+static int64_t xive_eqc_cache_update(struct xive *x, uint64_t block,
+				     uint64_t idx, uint32_t start_dword,
+				     uint32_t dword_count, void *new_data,
+				     bool light_watch)
+{
+	return __xive_cache_watch(x, xive_cache_eqc, block, idx,
+				  start_dword, dword_count,
+				  new_data, light_watch);
+}
+
 static bool xive_set_vsd(struct xive *x, uint32_t tbl, uint32_t idx, uint64_t v)
 {
 	/* Set VC version */
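
As a rough illustration of how the new helper might be called (not part of this patch; the wrapper name and the choice of dword are only an example that mirrors the escalation-IVE update further down), a caller could push a single 64-bit word of an EQ entry through the watch facility like this:

/* Illustrative sketch only: update dword 2 of EQ entry (blk, idx) with
 * new_w, using a light (data-only) watch.  The wrapper itself is
 * hypothetical; only xive_eqc_cache_update comes from this patch.
 */
static int64_t example_update_eq_dword2(struct xive *x, uint64_t blk,
					uint64_t idx, uint64_t new_w)
{
	/* start_dword = 2, dword_count = 1, light_watch = true */
	return xive_eqc_cache_update(x, blk, idx, 2, 1, &new_w, true);
}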
@@ -1518,6 +1600,7 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
 	struct xive_ive *ive;
 	uint32_t eq_blk, eq_idx;
 	bool is_escalation = GIRQ_IS_ESCALATION(isn);
+	uint64_t new_ive;
 
 	/* Find XIVE on which the IVE resides */
 	x = xive_from_isn(isn);
@@ -1534,15 +1617,17 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
 
 	lock(&x->lock);
 
+	/* Read existing IVE */
+	new_ive = ive->w;
+
 	/* Are we masking ? */
 	if (prio == 0xff) {
 		/* Masking, just set the M bit */
 		if (!is_escalation)
-			ive->w |= IVE_MASKED;
+			new_ive |= IVE_MASKED;
 
 		xive_vdbg(x, "ISN %x masked !\n", isn);
 	} else {
-		uint64_t new_ive;
 
 		/* Unmasking, re-target the IVE. First find the EQ
 		 * corresponding to the target
@@ -1560,15 +1645,22 @@ static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
 		new_ive = ive->w & ~IVE_MASKED;
 		new_ive = SETFIELD(IVE_EQ_BLOCK, new_ive, eq_blk);
 		new_ive = SETFIELD(IVE_EQ_INDEX, new_ive, eq_idx);
-		sync();
-		ive->w = new_ive;
 
 		xive_vdbg(x, "ISN %x routed to eq %x/%x IVE=%016llx !\n",
-			  isn, eq_blk, eq_idx, new_ive);
+			   isn, eq_blk, eq_idx, new_ive);
 	}
 
-	/* Scrub IVE from cache */
-	xive_ivc_scrub(x, x->chip_id, GIRQ_TO_IDX(isn));
+	/* Updating the cache differs between real IVEs and escalation
+	 * IVEs inside an EQ
+	 */
+	if (is_escalation) {
+		xive_eqc_cache_update(x, x->chip_id, GIRQ_TO_IDX(isn),
+				      2, 1, &new_ive, true);
+	} else {
+		sync();
+		ive->w = new_ive;
+		xive_ivc_scrub(x, x->chip_id, GIRQ_TO_IDX(isn));
+	}
 
 	unlock(&x->lock);
 	return true;
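
Since __xive_cache_watch also handles xive_cache_vpc, a matching wrapper for the VP cache could be written the same way as xive_eqc_cache_update. The sketch below is only an illustration of that second branch and is not something this patch adds:

/* Hypothetical companion to xive_eqc_cache_update, shown to illustrate the
 * xive_cache_vpc branch of __xive_cache_watch; not part of this patch.
 */
static int64_t example_vpc_cache_update(struct xive *x, uint64_t block,
					uint64_t idx, uint32_t start_dword,
					uint32_t dword_count, void *new_data,
					bool light_watch)
{
	return __xive_cache_watch(x, xive_cache_vpc, block, idx,
				  start_dword, dword_count,
				  new_data, light_watch);
}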