 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
 #define ITS_FLAGS_FORCE_NON_SHAREABLE		(1ULL << 3)
+#define ITS_FLAGS_WORKAROUND_HISILICON_162100801	(1ULL << 4)
 
 #define RD_LOCAL_LPI_ENABLED			BIT(0)
 #define RD_LOCAL_PENDTABLE_PREALLOCATED		BIT(1)
@@ -64,6 +65,7 @@ static u32 lpi_id_bits;
 #define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
 
 static u8 __ro_after_init lpi_prop_prio;
+static struct its_node *find_4_1_its(void);
 
 /*
  * Collection structure - just an ID, and a redistributor address to
@@ -3883,13 +3885,28 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
 	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
 }
 
+static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
+{
+	void __iomem *rdbase;
+	u64 val;
+
+	val  = GICR_INVALLR_V;
+	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+
+	guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock);
+	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+	gic_write_lpir(val, rdbase + GICR_INVALLR);
+	wait_for_syncr(rdbase);
+}
+
 static int its_vpe_set_affinity(struct irq_data *d,
 				const struct cpumask *mask_val,
 				bool force)
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	unsigned int from, cpu = nr_cpu_ids;
 	struct cpumask *table_mask;
+	struct its_node *its;
 	unsigned long flags;
 
 	/*
@@ -3952,6 +3969,11 @@ static int its_vpe_set_affinity(struct irq_data *d,
 	vpe->col_idx = cpu;
 
 	its_send_vmovp(vpe);
+
+	its = find_4_1_its();
+	if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801)
+		its_vpe_4_1_invall_locked(cpu, vpe);
+
 	its_vpe_db_proxy_move(vpe, from, cpu);
 
 out:
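After the VMOVP, an ITS flagged with this workaround gets an extra redistributor-level invalidate via the new its_vpe_4_1_invall_locked() helper, which holds the target redistributor's rd_lock for its whole body through the kernel's scoped guard(raw_spinlock). For readers unfamiliar with that scoped-guard idiom, here is a minimal, self-contained userspace sketch of the same pattern built on the compiler cleanup attribute; the mutex, macro, and function names below are illustrative assumptions, not the linux/cleanup.h implementation:

/* Illustrative sketch of a scoped lock guard, similar in spirit to the
 * kernel's guard(raw_spinlock)(); not the kernel implementation itself. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlock callback run automatically when the guarded variable goes out
 * of scope (GCC/Clang cleanup attribute). */
static void guard_unlock(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
}

#define scoped_guard(lock)						\
	__attribute__((cleanup(guard_unlock), unused))			\
	pthread_mutex_t *__guard = ({ pthread_mutex_lock(lock); (lock); })

static void update_shared_state(void)
{
	scoped_guard(&demo_lock);	/* held for the rest of the scope */
	puts("critical section");
}					/* lock dropped here, even on early return */

int main(void)
{
	update_shared_state();
	return 0;
}

The point of the idiom, in the kernel helper as in this sketch, is that every exit path from the function releases the lock without an explicit unlock call.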
@@ -4259,22 +4281,12 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
 
 static void its_vpe_4_1_invall(struct its_vpe *vpe)
 {
-	void __iomem *rdbase;
 	unsigned long flags;
-	u64 val;
 	int cpu;
 
-	val  = GICR_INVALLR_V;
-	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
-
 	/* Target the redistributor this vPE is currently known on */
 	cpu = vpe_to_cpuid_lock(vpe, &flags);
-	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
-	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
-	gic_write_lpir(val, rdbase + GICR_INVALLR);
-
-	wait_for_syncr(rdbase);
-	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	its_vpe_4_1_invall_locked(cpu, vpe);
 	vpe_to_cpuid_unlock(vpe, flags);
 }
 
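Both the refactored its_vpe_4_1_invall() and the new helper build the GICR_INVALLR value the same way: set the valid bit, then insert the vPE ID into the VPEID field with FIELD_PREP(). The standalone sketch below shows what that style of mask-based field insertion amounts to; the DEMO_* masks are made-up stand-ins for the example, not the real GICR_INVALLR layout, and DEMO_FIELD_PREP is a simplified re-derivation rather than the kernel macro:

/* Illustrative sketch of mask-based field insertion, in the spirit of
 * FIELD_PREP(); the register layout below is assumed for the example. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD_PREP(mask, val) \
	((((uint64_t)(val)) << __builtin_ctzll(mask)) & (mask))

#define DEMO_INVALLR_V		(1ULL << 63)		/* assumed bit position */
#define DEMO_INVALLR_VPEID	(0xffffULL << 32)	/* assumed field mask  */

int main(void)
{
	uint16_t vpe_id = 42;
	uint64_t val;

	val  = DEMO_INVALLR_V;
	val |= DEMO_FIELD_PREP(DEMO_INVALLR_VPEID, vpe_id);

	printf("register value: 0x%016llx\n", (unsigned long long)val);
	return 0;
}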
@@ -4867,6 +4879,14 @@ static bool its_set_non_coherent(void *data)
 	return true;
 }
 
+static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
+{
+	struct its_node *its = data;
+
+	its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801;
+	return true;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
 	{
@@ -4913,6 +4933,14 @@ static const struct gic_quirk its_quirks[] = {
 		.init	= its_enable_quirk_hip07_161600802,
 	},
 #endif
+#ifdef CONFIG_HISILICON_ERRATUM_162100801
+	{
+		.desc	= "ITS: Hip09 erratum 162100801",
+		.iidr	= 0x00051736,
+		.mask	= 0xffffffff,
+		.init	= its_enable_quirk_hip09_162100801,
+	},
+#endif
 #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
 	{
 		.desc	= "ITS: Rockchip erratum RK3588001",