 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,294 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	smp_mb();
 }
 
-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	; --- scond fail delay ---		\n"			\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2:	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	asl.f	%[delay], %[delay], 1	\n"	/* delay *= 2 */	\
-	"	mov.z	%[delay], 1		\n"	/* handle overflow */	\
-	"	b	1b			\n"	/* start over */	\
-	"						\n"			\
-	"4: ; --- done ---			\n"	\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 *	if (rw->counter > 0) {
-	 *		rw->counter--;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
-	"	sub	%[val], %[val], 1	\n"	/* reader lock */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
-	"	sub	%[val], %[val], 1	\n"	/* counter-- */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *		rw->counter = 0;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	add	%[val], %[val], 1	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	scond	%[UNLOCKED], [%[rwlock]]\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
|
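
The rwlock hunks rely entirely on the counter convention spelled out in the removed comments: a counter of __ARCH_RW_LOCK_UNLOCKED__ means free, each reader decrements it, and a writer swings it from fully-unlocked straight to 0. A hedged C11 sketch of the two trylock paths follows; the names are illustrative and the RW_UNLOCKED value is an assumed stand-in for __ARCH_RW_LOCK_UNLOCKED__ (the real constant lives in the ARC spinlock_types header).

#include <stdatomic.h>
#include <stdbool.h>

#define RW_UNLOCKED	0x01000000u	/* assumed stand-in for __ARCH_RW_LOCK_UNLOCKED__ */

/* counter > 0: that many more readers may enter; counter == 0: write-locked */
static bool read_trylock_sketch(atomic_uint *counter)
{
	unsigned int val = atomic_load_explicit(counter, memory_order_relaxed);

	while (val != 0) {	/* "brls ... 4f": write-locked, bail */
		/* "sub + scond": publish counter-1 iff it hasn't changed */
		if (atomic_compare_exchange_weak_explicit(counter, &val,
							  val - 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;	/* got it */
		/* CAS failure reloads val; loop re-checks for write-lock */
	}
	return false;
}

static bool write_trylock_sketch(atomic_uint *counter)
{
	unsigned int val = RW_UNLOCKED;

	/* a writer only succeeds from the fully-unlocked state, taking it to 0 */
	return atomic_compare_exchange_strong_explicit(counter, &val, 0u,
						       memory_order_acquire,
						       memory_order_relaxed);
}

Consistent with this scheme, the removed arch_read_unlock and arch_write_unlock are plain llock/scond loops (an increment, and a store of the unlocked value) and never pull in the SCOND_FAIL_RETRY backoff macros at all.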