@@ -252,7 +252,8 @@ static void ring_work(struct work_struct *work)
 		 * Do not hold on to it.
 		 */
 		list_del_init(&frame->list);
-		frame->callback(ring, frame, canceled);
+		if (frame->callback)
+			frame->callback(ring, frame, canceled);
 	}
 }
 
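Context for the hunk above: with the polling mode introduced below, a consumer can read completed frames back with tb_ring_poll() instead of registering a per-frame completion callback, so a queued ring_frame may legitimately have ->callback left NULL; the added check keeps ring_work() from dereferencing it when the ring is torn down with frames still in flight. A minimal, hypothetical consumer-side sketch (queue_rx_buffer() and its arguments are illustrative, not part of this patch):

static int queue_rx_buffer(struct tb_ring *ring, struct ring_frame *frame,
                           dma_addr_t dma)
{
        frame->buffer_phy = dma;   /* DMA address mapped by the caller */
        frame->callback = NULL;    /* completions are drained via tb_ring_poll() */

        return __tb_ring_enqueue(ring, frame);
}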
@@ -273,11 +274,106 @@ int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
 }
 EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
 
+/**
+ * tb_ring_poll() - Poll one completed frame from the ring
+ * @ring: Ring to poll
+ *
+ * This function can be called when @start_poll callback of the @ring
+ * has been called. It will read one completed frame from the ring and
+ * return it to the caller. Returns %NULL if there are no more completed
+ * frames.
+ */
+struct ring_frame *tb_ring_poll(struct tb_ring *ring)
+{
+        struct ring_frame *frame = NULL;
+        unsigned long flags;
+
+        spin_lock_irqsave(&ring->lock, flags);
+        if (!ring->running)
+                goto unlock;
+        if (ring_empty(ring))
+                goto unlock;
+
+        if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
+                frame = list_first_entry(&ring->in_flight, typeof(*frame),
+                                         list);
+                list_del_init(&frame->list);
+
+                if (!ring->is_tx) {
+                        frame->size = ring->descriptors[ring->tail].length;
+                        frame->eof = ring->descriptors[ring->tail].eof;
+                        frame->sof = ring->descriptors[ring->tail].sof;
+                        frame->flags = ring->descriptors[ring->tail].flags;
+                }
+
+                ring->tail = (ring->tail + 1) % ring->size;
+        }
+
+unlock:
+        spin_unlock_irqrestore(&ring->lock, flags);
+        return frame;
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll);
+
+static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
+{
+        int idx = ring_interrupt_index(ring);
+        int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
+        int bit = idx % 32;
+        u32 val;
+
+        val = ioread32(ring->nhi->iobase + reg);
+        if (mask)
+                val &= ~BIT(bit);
+        else
+                val |= BIT(bit);
+        iowrite32(val, ring->nhi->iobase + reg);
+}
+
+/* Both @nhi->lock and @ring->lock should be held */
+static void __ring_interrupt(struct tb_ring *ring)
+{
+        if (!ring->running)
+                return;
+
+        if (ring->start_poll) {
+                __ring_interrupt_mask(ring, true);
+                ring->start_poll(ring->poll_data);
+        } else {
+                schedule_work(&ring->work);
+        }
+}
+
+/**
+ * tb_ring_poll_complete() - Re-start interrupt for the ring
+ * @ring: Ring to re-start the interrupt
+ *
+ * This will re-start (unmask) the ring interrupt once the user is done
+ * with polling.
+ */
+void tb_ring_poll_complete(struct tb_ring *ring)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&ring->nhi->lock, flags);
+        spin_lock(&ring->lock);
+        if (ring->start_poll)
+                __ring_interrupt_mask(ring, false);
+        spin_unlock(&ring->lock);
+        spin_unlock_irqrestore(&ring->nhi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
+
 static irqreturn_t ring_msix(int irq, void *data)
 {
         struct tb_ring *ring = data;
 
-        schedule_work(&ring->work);
+        spin_lock(&ring->nhi->lock);
+        spin_lock(&ring->lock);
+        __ring_interrupt(ring);
+        spin_unlock(&ring->lock);
+        spin_unlock(&ring->nhi->lock);
+
         return IRQ_HANDLED;
 }
 
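Taken together, tb_ring_poll(), __ring_interrupt()/__ring_interrupt_mask() and tb_ring_poll_complete() implement a deferred-polling flow: the interrupt path masks the ring interrupt and invokes @start_poll, the consumer drains completed frames at its own pace, then re-enables the interrupt. A hedged usage sketch (my_start_poll(), my_rx_work(), my_handle_frame() and struct my_consumer are hypothetical, not part of this patch):

/* Runs in hard-IRQ context with the ring interrupt already masked. */
static void my_start_poll(void *data)
{
        struct my_consumer *c = data;

        schedule_work(&c->rx_work);
}

static void my_rx_work(struct work_struct *work)
{
        struct my_consumer *c = container_of(work, struct my_consumer, rx_work);
        struct ring_frame *frame;

        /* Drain every completed frame, then unmask the ring interrupt. */
        while ((frame = tb_ring_poll(c->rx_ring)) != NULL)
                my_handle_frame(c, frame);      /* hypothetical frame handler */

        tb_ring_poll_complete(c->rx_ring);
}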
@@ -317,7 +413,9 @@ static void ring_release_msix(struct tb_ring *ring)
 
 static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
                                      bool transmit, unsigned int flags,
-                                     u16 sof_mask, u16 eof_mask)
+                                     u16 sof_mask, u16 eof_mask,
+                                     void (*start_poll)(void *),
+                                     void *poll_data)
 {
         struct tb_ring *ring = NULL;
         dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
@@ -346,6 +444,8 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
         ring->head = 0;
         ring->tail = 0;
         ring->running = false;
+        ring->start_poll = start_poll;
+        ring->poll_data = poll_data;
 
         ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
                         size * sizeof(*ring->descriptors),
@@ -399,7 +499,7 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
                                  unsigned int flags)
 {
-        return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0);
+        return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
 
@@ -411,11 +511,17 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
  * @flags: Flags for the ring
  * @sof_mask: Mask of PDF values that start a frame
  * @eof_mask: Mask of PDF values that end a frame
+ * @start_poll: If not %NULL the ring will call this function when an
+ *              interrupt is triggered and masked, instead of the callback
+ *              for each Rx frame.
+ * @poll_data: Optional data passed to @start_poll
  */
 struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
-                                 unsigned int flags, u16 sof_mask, u16 eof_mask)
+                                 unsigned int flags, u16 sof_mask, u16 eof_mask,
+                                 void (*start_poll)(void *), void *poll_data)
 {
-        return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask);
+        return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
+                             start_poll, poll_data);
 }
 EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
 
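A consumer opts into the polling mode simply by passing its @start_poll callback and @poll_data to tb_ring_alloc_rx(); passing NULL keeps the old per-frame-callback behaviour, which is what tb_ring_alloc_tx() above now does explicitly. A hedged allocation sketch (my_alloc_rx_ring(), the hop number, ring size, flag and PDF masks are illustrative only):

static int my_alloc_rx_ring(struct my_consumer *c, struct tb_nhi *nhi)
{
        c->rx_ring = tb_ring_alloc_rx(nhi, 1, 256, RING_FLAG_FRAME,
                                      0xffff, 0xffff, my_start_poll, c);
        return c->rx_ring ? 0 : -ENOMEM;
}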
@@ -556,6 +662,7 @@ void tb_ring_free(struct tb_ring *ring)
                 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
                          RING_TYPE(ring), ring->hop);
         }
+        spin_unlock_irq(&ring->nhi->lock);
 
         ring_release_msix(ring);
 
@@ -572,7 +679,6 @@ void tb_ring_free(struct tb_ring *ring)
                  RING_TYPE(ring),
                  ring->hop);
 
-        spin_unlock_irq(&ring->nhi->lock);
         /**
          * ring->work can no longer be scheduled (it is scheduled only
          * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
@@ -682,8 +788,10 @@ static void nhi_interrupt_work(struct work_struct *work)
                                  hop);
                         continue;
                 }
-                /* we do not check ring->running, this is done in ring->work */
-                schedule_work(&ring->work);
+
+                spin_lock(&ring->lock);
+                __ring_interrupt(ring);
+                spin_unlock(&ring->lock);
         }
         spin_unlock_irq(&nhi->lock);
 }