 #include <linux/radix-tree.h>
 #include <linux/bitmap.h>
 #include <linux/irqdomain.h>
+#include <linux/sysfs.h>
 
 #include "internals.h"
 
@@ -123,6 +124,181 @@ static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
+static void irq_kobj_release(struct kobject *kobj);
+
+#ifdef CONFIG_SYSFS
+static struct kobject *irq_kobj_base;
+
+#define IRQ_ATTR_RO(_name) \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+static ssize_t per_cpu_count_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	int cpu, irq = desc->irq_data.irq;
+	ssize_t ret = 0;
+	char *p = "";
+
+	for_each_possible_cpu(cpu) {
+		unsigned int c = kstat_irqs_cpu(irq, cpu);
+
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
+		p = ",";
+	}
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	return ret;
+}
+IRQ_ATTR_RO(per_cpu_count);
+
+static ssize_t chip_name_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->irq_data.chip && desc->irq_data.chip->name) {
+		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
+				desc->irq_data.chip->name);
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(chip_name);
+
+static ssize_t hwirq_show(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->irq_data.domain)
+		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(hwirq);
+
+static ssize_t type_show(struct kobject *kobj,
+			 struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	ret = sprintf(buf, "%s\n",
+		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+
+}
+IRQ_ATTR_RO(type);
+
+static ssize_t name_show(struct kobject *kobj,
+			 struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->name)
+		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(name);
+
+static ssize_t actions_show(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	struct irqaction *action;
+	ssize_t ret = 0;
+	char *p = "";
+
+	raw_spin_lock_irq(&desc->lock);
+	for (action = desc->action; action != NULL; action = action->next) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
+				 p, action->name);
+		p = ",";
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	if (ret)
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+	return ret;
+}
+IRQ_ATTR_RO(actions);
+
+static struct attribute *irq_attrs[] = {
+	&per_cpu_count_attr.attr,
+	&chip_name_attr.attr,
+	&hwirq_attr.attr,
+	&type_attr.attr,
+	&name_attr.attr,
+	&actions_attr.attr,
+	NULL
+};
+
+static struct kobj_type irq_kobj_type = {
+	.release	= irq_kobj_release,
+	.sysfs_ops	= &kobj_sysfs_ops,
+	.default_attrs	= irq_attrs,
+};
+
+static void irq_sysfs_add(int irq, struct irq_desc *desc)
+{
+	if (irq_kobj_base) {
+		/*
+		 * Continue even in case of failure as this is nothing
+		 * crucial.
+		 */
+		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
+			pr_warn("Failed to add kobject for irq %d\n", irq);
+	}
+}
+
+static int __init irq_sysfs_init(void)
+{
+	struct irq_desc *desc;
+	int irq;
+
+	/* Prevent concurrent irq alloc/free */
+	irq_lock_sparse();
+
+	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
+	if (!irq_kobj_base) {
+		irq_unlock_sparse();
+		return -ENOMEM;
+	}
+
+	/* Add the already allocated interrupts */
+	for_each_irq_desc(irq, desc)
+		irq_sysfs_add(irq, desc);
+	irq_unlock_sparse();
+
+	return 0;
+}
+postcore_initcall(irq_sysfs_init);
+
+#else /* !CONFIG_SYSFS */
+
+static struct kobj_type irq_kobj_type = {
+	.release	= irq_kobj_release,
+};
+
+static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+
+#endif /* CONFIG_SYSFS */
+
 static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
 
 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
@@ -187,6 +363,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
+	kobject_init(&desc->kobj, &irq_kobj_type);
 
 	return desc;
 
@@ -197,15 +374,22 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 	return NULL;
 }
 
-static void delayed_free_desc(struct rcu_head *rhp)
+static void irq_kobj_release(struct kobject *kobj)
 {
-	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
 
 	free_masks(desc);
 	free_percpu(desc->kstat_irqs);
 	kfree(desc);
 }
 
+static void delayed_free_desc(struct rcu_head *rhp)
+{
+	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+
+	kobject_put(&desc->kobj);
+}
+
 static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -217,8 +401,12 @@ static void free_desc(unsigned int irq)
 	 * kstat_irq_usr(). Once we deleted the descriptor from the
 	 * sparse tree we can free it. Access in proc will fail to
 	 * lookup the descriptor.
+	 *
+	 * The sysfs entry must be serialized against a concurrent
+	 * irq_sysfs_init() as well.
 	 */
 	mutex_lock(&sparse_irq_lock);
+	kobject_del(&desc->kobj);
 	delete_irq_desc(irq);
 	mutex_unlock(&sparse_irq_lock);
 
@@ -261,6 +449,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 			goto err;
 		mutex_lock(&sparse_irq_lock);
 		irq_insert_desc(start + i, desc);
+		irq_sysfs_add(start + i, desc);
 		mutex_unlock(&sparse_irq_lock);
 	}
 	return start;
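For reference (not part of the patch itself): the hunks above create one directory per allocated interrupt under /sys/kernel/irq/<irq>/, holding the read-only attributes per_cpu_count, chip_name, hwirq, type, name and actions. Below is a minimal userspace sketch that dumps them for a single interrupt; the path layout is assumed only from kobject_create_and_add("irq", kernel_kobj) and kobject_add(&desc->kobj, irq_kobj_base, "%d", irq) in the diff.

/*
 * Illustration only, not part of the patch: read the sysfs attributes
 * added above for one IRQ number given on the command line.
 */
#include <stdio.h>
#include <stdlib.h>

static void show(int irq, const char *attr)
{
	char path[256], line[512];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/irq/%d/%s", irq, attr);
	f = fopen(path, "r");
	if (!f)
		return;	/* no entry, e.g. CONFIG_SYSFS=n or irq not allocated */
	if (fgets(line, sizeof(line), f))	/* empty file: the *_show handler returned 0 */
		printf("%-14s %s", attr, line);
	fclose(f);
}

int main(int argc, char **argv)
{
	static const char *attrs[] = {
		"chip_name", "hwirq", "type", "name", "actions", "per_cpu_count",
	};
	int irq = argc > 1 ? atoi(argv[1]) : 1;	/* IRQ number, default 1 */
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		show(irq, attrs[i]);
	return 0;
}

Attributes whose show handler returns 0 in the patch (hwirq without an irqdomain, name or actions when unset) read back as empty files, so the reader above simply skips them.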