
Commit dc0ee26

herbertx authored and davem330 committed
rhashtable: Rip out obsolete out-of-line interface
Now that all rhashtable users have been converted over to the inline
interface, this patch removes the unused out-of-line interface.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 6cca728 commit dc0ee26
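For reference, the conversion this commit completes replaces each out-of-line call with its inline `_fast` counterpart, which takes the table's struct rhashtable_params as a compile-time constant. A minimal caller-side sketch of that mapping; `params`, `obj`, and the `node` member are illustrative names, not part of this commit:

	/* Before: out-of-line interface (removed by this commit). */
	void *found    = rhashtable_lookup(ht, key);
	bool  inserted = rhashtable_lookup_insert(ht, &obj->node);

	/* After: inline interface; constant params let the compiler
	 * specialise the hash/compare paths, and failures come back as
	 * errno codes rather than bool. */
	void *found2 = rhashtable_lookup_fast(ht, key, params);
	int   err    = rhashtable_lookup_insert_fast(ht, &obj->node, params);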

2 files changed: +3, -300 lines

include/linux/rhashtable.h

Lines changed: 3 additions & 16 deletions
@@ -1,14 +1,13 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
- * Based on the following paper by Josh Triplett, Paul E. McKenney
- * and Jonathan Walpole:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
  * Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -282,22 +281,10 @@ int rhashtable_init(struct rhashtable *ht,
 int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *old_tbl);
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
 
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
 
-void *rhashtable_lookup(struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
-				bool (*compare)(void *, void *), void *arg);
-
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
-				      struct rhash_head *obj,
-				      bool (*compare)(void *, void *),
-				      void *arg);
-
 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
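With these prototypes gone, all access goes through the inline interface, driven by a constant struct rhashtable_params. A minimal post-conversion usage sketch; the object type, parameter values, and function below are illustrative, not taken from this commit:

	#include <linux/rhashtable.h>

	struct test_obj {
		u32			key;
		struct rhash_head	node;	/* linkage lives in the object */
	};

	/* Hypothetical parameters: the offsets and key length let the
	 * inline code hash and memcmp() the key without indirect calls. */
	static const struct rhashtable_params test_params = {
		.head_offset	= offsetof(struct test_obj, node),
		.key_offset	= offsetof(struct test_obj, key),
		.key_len	= sizeof(u32),
	};

	static int test_use(struct rhashtable *ht, struct test_obj *obj, u32 key)
	{
		int err = rhashtable_insert_fast(ht, &obj->node, test_params);

		if (err)
			return err;

		rcu_read_lock();	/* keep the returned pointer valid */
		obj = rhashtable_lookup_fast(ht, &key, test_params);
		rcu_read_unlock();

		return obj ? rhashtable_remove_fast(ht, &obj->node, test_params)
			   : -ENOENT;
	}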

lib/rhashtable.c

Lines changed: 0 additions & 284 deletions
@@ -339,290 +339,6 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
-static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-				bool (*compare)(void *, void *), void *arg)
-{
-	struct bucket_table *tbl, *old_tbl;
-	struct rhash_head *head;
-	bool no_resize_running;
-	unsigned hash;
-	spinlock_t *old_lock;
-	bool success = true;
-
-	rcu_read_lock();
-
-	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	hash = head_hashfn(ht, old_tbl, obj);
-	old_lock = rht_bucket_lock(old_tbl, hash);
-
-	spin_lock_bh(old_lock);
-
-	/* Because we have already taken the bucket lock in old_tbl,
-	 * if we find that future_tbl is not yet visible then that
-	 * guarantees all other insertions of the same entry will
-	 * also grab the bucket lock in old_tbl because until the
-	 * rehash completes ht->tbl won't be changed.
-	 */
-	tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
-	if (tbl != old_tbl) {
-		hash = head_hashfn(ht, tbl, obj);
-		spin_lock_nested(rht_bucket_lock(tbl, hash),
-				 SINGLE_DEPTH_NESTING);
-	}
-
-	if (compare &&
-	    rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
-				      compare, arg)) {
-		success = false;
-		goto exit;
-	}
-
-	no_resize_running = tbl == old_tbl;
-
-	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
-
-	if (rht_is_a_nulls(head))
-		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
-	else
-		RCU_INIT_POINTER(obj->next, head);
-
-	rcu_assign_pointer(tbl->buckets[hash], obj);
-
-	atomic_inc(&ht->nelems);
-	if (no_resize_running && rht_grow_above_75(ht, tbl))
-		schedule_work(&ht->run_work);
-
-exit:
-	if (tbl != old_tbl)
-		spin_unlock(rht_bucket_lock(tbl, hash));
-
-	spin_unlock_bh(old_lock);
-
-	rcu_read_unlock();
-
-	return success;
-}
-
-/**
- * rhashtable_insert - insert object into hash table
- * @ht:		hash table
- * @obj:	pointer to hash head inside object
- *
- * Will take a per bucket spinlock to protect against mutual mutations
- * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
-{
-	__rhashtable_insert(ht, obj, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(rhashtable_insert);
-
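The __rhashtable_insert() removed above was the subtle part of this file: it serialises against a concurrent rehash by taking the bucket lock in the current table first and, only if a future table is already visible, the matching bucket lock there with lockdep nesting. The core of that ordering, condensed from the removed code (unlocks and error paths elided):

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	spin_lock_bh(rht_bucket_lock(old_tbl, head_hashfn(ht, old_tbl, obj)));

	/* A rehash must take this old-table bucket lock too, so if
	 * future_tbl is still unset here, no rehash can slip past us. */
	tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
	if (tbl != old_tbl)
		spin_lock_nested(rht_bucket_lock(tbl, head_hashfn(ht, tbl, obj)),
				 SINGLE_DEPTH_NESTING);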
-static bool __rhashtable_remove(struct rhashtable *ht,
-				struct bucket_table *tbl,
-				struct rhash_head *obj)
-{
-	struct rhash_head __rcu **pprev;
-	struct rhash_head *he;
-	spinlock_t *lock;
-	unsigned hash;
-	bool ret = false;
-
-	hash = head_hashfn(ht, tbl, obj);
-	lock = rht_bucket_lock(tbl, hash);
-
-	spin_lock_bh(lock);
-
-	pprev = &tbl->buckets[hash];
-	rht_for_each(he, tbl, hash) {
-		if (he != obj) {
-			pprev = &he->next;
-			continue;
-		}
-
-		rcu_assign_pointer(*pprev, obj->next);
-		ret = true;
-		break;
-	}
-
-	spin_unlock_bh(lock);
-
-	return ret;
-}
-
-/**
- * rhashtable_remove - remove object from hash table
- * @ht:		hash table
- * @obj:	pointer to hash head inside object
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * The caller must ensure that no concurrent table mutations occur. It is
- * however valid to have concurrent lookups if they are RCU protected.
- */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
-{
-	struct bucket_table *tbl;
-	bool ret;
-
-	rcu_read_lock();
-
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-
-	/* Because we have already taken (and released) the bucket
-	 * lock in old_tbl, if we find that future_tbl is not yet
-	 * visible then that guarantees the entry to still be in
-	 * the old tbl if it exists.
-	 */
-	while (!(ret = __rhashtable_remove(ht, tbl, obj)) &&
-	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
-		;
-
-	if (ret) {
-		atomic_dec(&ht->nelems);
-		if (rht_shrink_below_30(ht, tbl))
-			schedule_work(&ht->run_work);
-	}
-
-	rcu_read_unlock();
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(rhashtable_remove);
-
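Converted callers of the removed rhashtable_remove() use rhashtable_remove_fast(), which keeps the walk across linked tables but reports an errno instead of a bool. A sketch under the illustrative test_obj/test_params names from earlier; the rcu member is an assumption:

	/* 0 on success, -ENOENT if the object is in none of the tables. */
	if (rhashtable_remove_fast(ht, &obj->node, test_params) == 0)
		kfree_rcu(obj, rcu);	/* assumes a struct rcu_head 'rcu' member */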
-/**
- * rhashtable_lookup - lookup key in hash table
- * @ht:		hash table
- * @key:	pointer to key
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- */
-void *rhashtable_lookup(struct rhashtable *ht, const void *key)
-{
-	return rhashtable_lookup_fast(ht, key, ht->p);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup);
-
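As the removed body shows, rhashtable_lookup() had already become a thin wrapper; the point of converting callers is that passing their own constant params copy, rather than the runtime ht->p, lets the compiler inline the hash and compare paths:

	/* Equivalent call at a converted call site (test_params as above). */
	struct test_obj *found = rhashtable_lookup_fast(ht, &key, test_params);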
-/**
- * rhashtable_lookup_compare - search hash table with compare function
- * @ht:		hash table
- * @key:	the pointer to the key
- * @compare:	compare function, must return true on match
- * @arg:	argument passed on to compare function
- *
- * Traverses the bucket chain behind the provided hash value and calls the
- * specified compare function for each entry.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Returns the first entry on which the compare function returned true.
- */
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
-				bool (*compare)(void *, void *),
-				void *arg)
-{
-	const struct bucket_table *tbl;
-	struct rhash_head *he;
-	u32 hash;
-
-	rcu_read_lock();
-
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-restart:
-	hash = rht_key_hashfn(ht, tbl, key, ht->p);
-	rht_for_each_rcu(he, tbl, hash) {
-		if (!compare(rht_obj(ht, he), arg))
-			continue;
-		rcu_read_unlock();
-		return rht_obj(ht, he);
-	}
-
-	/* Ensure we see any new tables. */
-	smp_rmb();
-
-	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-	if (unlikely(tbl))
-		goto restart;
-	rcu_read_unlock();
-
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
-
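The inline interface has no per-call compare hook like the one removed above; instead, a custom match is supplied once through the obj_cmpfn member of struct rhashtable_params and applied by the ordinary inline lookups. A sketch, reusing the illustrative test_obj from earlier:

	/* memcmp()-style contract: return 0 on match, nonzero otherwise. */
	static int test_obj_cmpfn(struct rhashtable_compare_arg *arg,
				  const void *ptr)
	{
		const struct test_obj *obj = ptr;

		return obj->key != *(const u32 *)arg->key;
	}

	static const struct rhashtable_params test_cmp_params = {
		.head_offset	= offsetof(struct test_obj, node),
		.key_offset	= offsetof(struct test_obj, key),
		.key_len	= sizeof(u32),
		.obj_cmpfn	= test_obj_cmpfn,
	};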
-/**
- * rhashtable_lookup_insert - lookup and insert object into hash table
- * @ht:		hash table
- * @obj:	pointer to hash head inside object
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
-{
-	return rhashtable_lookup_insert_fast(ht, obj, ht->p);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
-
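Likewise already a wrapper; converted callers invoke the inline variant directly and handle the errno-style result, where -EEXIST replaces the old false return. Sketch (the cleanup shown is illustrative):

	int err = rhashtable_lookup_insert_fast(ht, &obj->node, test_params);

	if (err == -EEXIST)
		kfree(obj);	/* duplicate key present; obj was not inserted */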
-/**
- * rhashtable_lookup_compare_insert - search and insert object to hash table
- *                                    with compare function
- * @ht:		hash table
- * @obj:	pointer to hash head inside object
- * @compare:	compare function, must return true on match
- * @arg:	argument passed on to compare function
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
-				      struct rhash_head *obj,
-				      bool (*compare)(void *, void *),
-				      void *arg)
-{
-	BUG_ON(!ht->p.key_len);
-
-	return __rhashtable_insert(ht, obj, compare, arg);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
-
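For search-and-insert with a custom match there is likewise no bool-compare entry point; with obj_cmpfn set in the params, the combined operation can go through rhashtable_lookup_insert_key(), which takes the key explicitly. A sketch under the same illustrative names:

	/* Atomic search-and-insert, matching via test_cmp_params.obj_cmpfn;
	 * returns 0 on success or -EEXIST if a matching entry exists. */
	int err = rhashtable_lookup_insert_key(ht, &key, &obj->node,
					       test_cmp_params);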
 
 /**
  * rhashtable_walk_init - Initialise an iterator
  * @ht:		Table to walk over
