redis 是一个存储键值对的内存数据库，其存储键值的方式和 Java 中的 HashMap 相似。表征 redis 数据库的结构体是 redisDb（定义在 server.h 文件中），redis 服务器默认有 16 个数据库，编号从 0 到 15。
- /* Per-database state: one redisDb exists for each of the server's
-  * numbered databases (default 16, ids 0..15). Defined in server.h. */
- typedef struct redisDb {
- dict *dict; /* The keyspace: key -> value */
- dict *expires; /* key -> expire time, only for keys with a TTL set */
- dict *blocking_keys; /* Keys clients are blocked waiting on (BLPOP) */
- dict *ready_keys; /* Blocked keys that received a PUSH */
- dict *watched_keys; /* WATCHED keys for MULTI/EXEC CAS */
- struct evictionPoolEntry *eviction_pool; /* Eviction pool of keys */
- int id; /* Database ID */
- long long avg_ttl; /* Average TTL, just for stats */
- } redisDb;
dict 中存储的是 key -> value，而 expires 中存储的是 key -> 过期时间
dict 是 dict.h 文件中定义的结构体:
- /* Hash table of key -> value mappings (defined in dict.h). Two dictht
-  * tables are kept so the table can be resized incrementally: ht[0] is
-  * the table in normal use, ht[1] is only populated while rehashing. */
- typedef struct dict {
- dictType *type; /* type-specific callbacks (hashing, key compare, ...) */
- void *privdata; /* opaque argument passed through to the type callbacks */
- dictht ht[2]; /* ht[0] in normal use; ht[1] only during rehash */
- long rehashidx; /* rehashing not in progress if rehashidx == -1 */
- unsigned long iterators; /* number of iterators currently running */
- } dict;
- /* A single hash table: an array of bucket heads plus bookkeeping. */
- typedef struct dictht {
- dictEntry **table; /* bucket array; each slot heads a chain of entries */
- unsigned long size; /* number of slots in table (a power of two, see dictExpand) */
- unsigned long sizemask; /* always size-1; masks a hash into a slot index */
- unsigned long used; /* number of key/value pairs stored in the table */
- } dictht;
- /* One key/value node in a bucket's singly linked chain. */
- typedef struct dictEntry {
- void *key;
- union { /* the value: either a pointer or an inline scalar */
- void *val;
- uint64_t u64;
- int64_t s64;
- double d;
- } v;
- struct dictEntry *next; /* next entry in the same bucket (separate chaining) */
- } dictEntry;
dict 可以类比为 java 中的 HashMap，dictEntry 对应 java.util.HashMap.Entry&lt;K, V&gt;。稍微不同的是，dict 对 entry 的 table 做了一层简单的封装（即 dictht），而且 dict 中持有两个 dictht，用于 rehash。
分析 dict 的 dictReplace(dict.c 文件中), 类似于 HashMap 的 put:
- /* Add or Overwrite:
- * Add an element, discarding the old value if the key already exists.
- * Return 1 if the key was added from scratch, 0 if there was already an
- * element with such key and dictReplace() just performed a value update
- * operation. */
- int dictReplace(dict *d, void *key, void *val)
- {
- dictEntry *entry, *existing, auxentry;
- /* Try to add the element. If the key
- * does not exist, dictAddRaw will succeed and return the new entry. */
- entry = dictAddRaw(d,key,&existing);
- if (entry) {
- dictSetVal(d, entry, val);
- return 1;
- }
- /* The key already exists: overwrite the value of 'existing'.
- * Set the new value and free the old one. Note that it is important
- * to do that in this order, as the value may just be exactly the same
- * as the previous one. In this context, think to reference counting,
- * you want to increment (set), and then decrement (free), and not the
- * reverse. */
- auxentry = *existing; /* copy the entry so the old value can be freed after the set */
- dictSetVal(d, existing, val);
- dictFreeVal(d, &auxentry);
- return 0;
- }
- /* Low-level add: inserts 'key' (value left unset) and returns the new
- * entry, or returns NULL when the key already exists; in that case the
- * existing entry is stored into *existing (when non-NULL). */
- dictEntry *dictAddRaw(dict *d, void *key, dictEntry **existing)
- {
- int index;
- dictEntry *entry;
- dictht *ht;
- /* Every write performs one incremental rehash step while rehashing. */
- if (dictIsRehashing(d)) _dictRehashStep(d);
- /* Get the index of the new element, or -1 if
- * the element already exists. */
- if ((index = _dictKeyIndex(d, key, dictHashKey(d,key), existing)) == -1)
- return NULL;
- /* Allocate the memory and store the new entry.
- * Insert the element in top, with the assumption that in a database
- * system it is more likely that recently added entries are accessed
- * more frequently. */
- ht = dictIsRehashing(d) ? &d->ht[1] : &d->ht[0]; /* new keys go to ht[1] during a rehash */
- entry = zmalloc(sizeof(*entry));
- entry->next = ht->table[index]; /* head insertion into the bucket chain */
- ht->table[index] = entry;
- ht->used++;
- /* Set the hash entry fields. */
- dictSetKey(d, entry, key);
- return entry;
- }
主要逻辑在 dictAddRaw 中, 也是先取得 table 中 index, 然后使用头插法插入到 table 的链表中
如果 dict 处于 rehash 状态（即 rehashidx != -1），则在插入时会先调用 _dictRehashStep 推进一步 rehash；对于 rehash 中的 dict，新键会被插入到 ht[1] 中
- /* Performs a single step of rehashing, but only when no iterators are
- * currently running on the dict (entries must not be moved between the
- * two tables while an iterator is walking them). */
- static void _dictRehashStep(dict *d) {
- if (d->iterators == 0) dictRehash(d,1);
- }
- /* Performs n steps of incremental rehashing: migrates up to n non-empty
- * buckets from ht[0] to ht[1]. Returns 1 when there are still keys left
- * to move, 0 when the whole table has been rehashed. To bound the cost
- * of a single call, at most n*10 empty buckets are visited. */
- int dictRehash(dict *d, int n) {
- int empty_visits = n*10; /* Max number of empty buckets to visit. */
- if (!dictIsRehashing(d)) return 0;
- while(n-- && d->ht[0].used != 0) {
- dictEntry *de, *nextde;
- /* Note that rehashidx can't overflow as we are sure there are more
- * elements because ht[0].used != 0 */
- assert(d->ht[0].size > (unsigned long)d->rehashidx);
- /* Skip empty buckets, giving up after empty_visits of them so one
- * step stays cheap even in a sparse table. */
- while(d->ht[0].table[d->rehashidx] == NULL) {
- d->rehashidx++;
- if (--empty_visits == 0) return 1;
- }
- de = d->ht[0].table[d->rehashidx];
- /* Move all the keys in this bucket from the old to the new hash HT */
- while(de) {
- unsigned int h;
- nextde = de->next;
- /* Get the index in the new hash table */
- h = dictHashKey(d, de->key) & d->ht[1].sizemask;
- de->next = d->ht[1].table[h]; /* head insertion into ht[1]'s bucket */
- d->ht[1].table[h] = de;
- d->ht[0].used--;
- d->ht[1].used++;
- de = nextde;
- }
- d->ht[0].table[d->rehashidx] = NULL;
- d->rehashidx++;
- }
- /* Check if we already rehashed the whole table... */
- if (d->ht[0].used == 0) {
- /* Rehash complete: ht[1] takes ht[0]'s place and rehashidx goes
- * back to -1, the "not rehashing" marker. */
- zfree(d->ht[0].table);
- d->ht[0] = d->ht[1];
- _dictReset(&d->ht[1]);
- d->rehashidx = -1;
- return 0;
- }
- /* More to rehash... */
- return 1;
- }
从代码中可以看出: rehashidx 标记了 ht[0]中正在 rehash 的链表的 index
那么, 在什么情况下, redis 会对 dict 进行 rehash 呢?
调用栈为 _dictKeyIndex -&gt; _dictExpandIfNeeded -&gt; dictExpand：在计算键的 index 时，会判断是否需要扩展 dict；如果需要扩展，则分配好新表 ht[1]，并把 dict 的 rehashidx 置为 0，标志 rehash 开始
- /* Returns the index of the bucket where 'key' can be inserted, or -1
- * when the key is already present (in that case *existing is set to the
- * matching entry, if 'existing' is non-NULL). May expand the table as a
- * side effect, which is what starts an incremental rehash. */
- static int _dictKeyIndex(dict *d, const void *key, unsigned int hash, dictEntry **existing)
- {
- unsigned int idx, table;
- dictEntry *he;
- if (existing) *existing = NULL;
- /* Expand the hash table if needed */
- if (_dictExpandIfNeeded(d) == DICT_ERR)
- return -1;
- /* While rehashing the key may live in either table, so both are
- * scanned; otherwise only ht[0] is checked (see the break below). */
- for (table = 0; table <= 1; table++) {
- idx = hash & d->ht[table].sizemask;
- /* Search if this slot does not already contain the given key */
- he = d->ht[table].table[idx];
- while(he) {
- if (key==he->key || dictCompareKeys(d, key, he->key)) {
- if (existing) *existing = he;
- return -1;
- }
- he = he->next;
- }
- if (!dictIsRehashing(d)) break;
- }
- /* idx comes from the last table scanned: ht[1] while rehashing
- * (new keys are inserted there), ht[0] otherwise. */
- return idx;
- }
- /* Expand the hash table if needed. Called on the insert path (via
- * _dictKeyIndex); this is the policy that decides when a rehash starts. */
- static int _dictExpandIfNeeded(dict *d)
- {
- /* Incremental rehashing already in progress. Return. */
- if (dictIsRehashing(d)) return DICT_OK;
- /* If the hash table is empty expand it to the initial size. */
- if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);
- /* If we reached the 1:1 ratio, and we are allowed to resize the hash
- * table (global setting) or we should avoid it but the ratio between
- * elements/buckets is over the "safe" threshold, we resize doubling
- * the number of buckets. */
- if (d->ht[0].used >= d->ht[0].size &&
- (dict_can_resize ||
- d->ht[0].used/d->ht[0].size > dict_force_resize_ratio))
- {
- return dictExpand(d, d->ht[0].used*2);
- }
- return DICT_OK;
- }
- /* Expand or create the hash table. 'size' is the minimum bucket count
- * wanted; the real size is the next power of two >= size. Returns
- * DICT_ERR when expanding is not possible (a rehash is already running,
- * or 'size' is smaller than the current number of elements). */
- int dictExpand(dict *d, unsigned long size)
- {
- dictht n; /* the new hash table */
- unsigned long realsize = _dictNextPower(size);
- /* the size is invalid if it is smaller than the number of
- * elements already inside the hash table */
- if (dictIsRehashing(d) || d->ht[0].used > size)
- return DICT_ERR;
- /* Rehashing to the same table size is not useful. */
- if (realsize == d->ht[0].size) return DICT_ERR;
- /* Allocate the new hash table and initialize all pointers to NULL */
- n.size = realsize;
- n.sizemask = realsize-1; /* valid as an index mask because realsize is a power of two */
- n.table = zcalloc(realsize*sizeof(dictEntry*));
- n.used = 0;
- /* Is this the first initialization? If so it's not really a rehashing
- * we just set the first hash table so that it can accept keys. */
- if (d->ht[0].table == NULL) {
- d->ht[0] = n;
- return DICT_OK;
- }
- /* Prepare a second hash table for incremental rehashing */
- d->ht[1] = n;
- d->rehashidx = 0; /* marks the dict as "rehashing in progress" */
- return DICT_OK;
- }
从数据结构的角度来看，redis 的 dict 和 java 的 HashMap 很像，区别在于 rehash：HashMap 在 resize 时是把旧数组一次性拷贝到新数组后再使用新数组；而 dict 维持了 2 个 dictht，平常只使用 ht[0]，一旦开始 rehash 则同时使用 ht[0] 和 ht[1]，rehash 的开销被分摊到每次的 dictAdd 和 dictFind 等操作中
- /* Looks up 'key' and returns its entry, or NULL when not found. Also
- * performs one incremental rehash step when a rehash is in progress. */
- dictEntry *dictFind(dict *d, const void *key)
- {
- dictEntry *he;
- unsigned int h, idx, table;
- if (d->ht[0].used + d->ht[1].used == 0) return NULL; /* dict is empty */
- if (dictIsRehashing(d)) _dictRehashStep(d);
- h = dictHashKey(d, key);
- for (table = 0; table <= 1; table++) { // scans both d->ht[0] and d->ht[1]
- idx = h & d->ht[table].sizemask;
- he = d->ht[table].table[idx];
- while(he) {
- if (key==he->key || dictCompareKeys(d, key, he->key))
- return he; // return as soon as the key is found
- he = he->next;
- }
- /* Not rehashing: all keys live in ht[0], no need to check ht[1]. */
- if (!dictIsRehashing(d)) return NULL;
- }
- return NULL;
- }
redis 为什么要如此设计?
试想一下, 如果和 java 的 HashMap 一样, redis 也是一次性拷贝, 那么当这个 dict 非常大时, 拷贝就会比较耗时, 而在这段时间内, redis 就无法对外提供服务了
这种设计增加了复杂度, 开始 rehash 后, dict 的数据分散在 ht[0]和 ht[1]中, 对于查询 (dictFind) 和删除 (dictDelete) 和设置 (dictReplace), 则会遍历 ht[0] 和 ht[1]
来源: http://www.linuxidc.com/Linux/2018-03/151173.htm