@@ -59,37 +59,28 @@ static void __iomem *plic_regs;
 
 struct plic_handler {
 	bool			present;
-	int			ctxid;
+	void __iomem		*hart_base;
+	/*
+	 * Protect mask operations on the registers given that we can't
+	 * assume atomic memory operations work on them.
+	 */
+	raw_spinlock_t		enable_lock;
+	void __iomem		*enable_base;
 };
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
-	return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
-	return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+				int hwirq, int enable)
 {
-	u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
 	u32 hwirq_mask = 1 << (hwirq % 32);
 
-	raw_spin_lock(&plic_toggle_lock);
+	raw_spin_lock(&handler->enable_lock);
 	if (enable)
 		writel(readl(reg) | hwirq_mask, reg);
 	else
 		writel(readl(reg) & ~hwirq_mask, reg);
-	raw_spin_unlock(&plic_toggle_lock);
+	raw_spin_unlock(&handler->enable_lock);
 }
 
 static inline void plic_irq_toggle(struct irq_data *d, int enable)
@@ -101,7 +92,7 @@ static inline void plic_irq_toggle(struct irq_data *d, int enable)
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
 		if (handler->present)
-			plic_toggle(handler->ctxid, d->hwirq, enable);
+			plic_toggle(handler, d->hwirq, enable);
 	}
 }
 
@@ -150,7 +141,7 @@ static struct irq_domain *plic_irqdomain;
 static void plic_handle_irq(struct pt_regs *regs)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
 	irq_hw_number_t hwirq;
 
 	WARN_ON_ONCE(!handler->present);
@@ -244,12 +235,16 @@ static int __init plic_init(struct device_node *node,
 
 		handler = per_cpu_ptr(&plic_handlers, cpu);
 		handler->present = true;
-		handler->ctxid = i;
+		handler->hart_base =
+			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+		raw_spin_lock_init(&handler->enable_lock);
+		handler->enable_base =
+			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
 
 		/* priority must be > threshold to trigger an interrupt */
-		writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+		writel(0, handler->hart_base + CONTEXT_THRESHOLD);
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
-			plic_toggle(i, hwirq, 0);
+			plic_toggle(handler, hwirq, 0);
 		nr_mapped++;
 	}
 
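A note on the `* sizeof(u32)` in the new `plic_toggle()`, which is easy to misread as a behavior change: `handler->enable_base` is a `void __iomem *`, and with the void-pointer arithmetic extension the kernel builds with, offsets on it are counted in bytes. The old `plic_enable_base()` returned a `u32 __iomem *`, so `+ (hwirq / 32)` was scaled by 4 automatically; the explicit `sizeof(u32)` keeps the two forms equivalent. Below is a minimal, user-space sketch of the same bit-toggle arithmetic, with plain `uint32_t` loads and stores standing in for `readl()`/`writel()` and no locking (the driver serializes the read-modify-write with the per-handler `enable_lock`, since MMIO RMW is not atomic); the names here mirror the driver but the code is illustrative only:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for a context's enable registers: one bit per interrupt
	 * source, 32 sources per 32-bit word. */
	static uint32_t enable_words[4];

	/* Byte-addressed base, mirroring the driver's void __iomem *enable_base. */
	static void *enable_base = enable_words;

	/* Same arithmetic as the patched plic_toggle(): a byte offset into
	 * the enable array, so the word index is scaled by sizeof(uint32_t). */
	static void plic_toggle(int hwirq, int enable)
	{
		uint32_t *reg = (uint32_t *)((char *)enable_base +
					     (hwirq / 32) * sizeof(uint32_t));
		uint32_t hwirq_mask = 1U << (hwirq % 32);

		if (enable)
			*reg |= hwirq_mask;	/* set the source's enable bit */
		else
			*reg &= ~hwirq_mask;	/* clear it */
	}

	int main(void)
	{
		plic_toggle(33, 1);	/* hwirq 33 -> bit 1 of word 1 */
		printf("word 1 = 0x%08x\n", enable_words[1]);	/* 0x00000002 */
		plic_toggle(33, 0);
		printf("word 1 = 0x%08x\n", enable_words[1]);	/* 0x00000000 */
		return 0;
	}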