翻倍扩容(增量扩容):当 map 中 key-value 总数 / 桶的数量 > 6.5 时触发
看一下源码:
// overLoadFactor reports whether count key-value pairs exceed the
// load-factor threshold (loadFactorNum/loadFactorDen = 13/2 = 6.5
// entries per bucket) for a table of 2^B buckets. Maps holding at
// most bucketCnt (8) entries fit in a single bucket and never
// trigger a doubling grow.
func overLoadFactor(count int, B uint8) bool {
	if count <= bucketCnt {
		return false
	}
	return uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
- count:当前map中key-value对数量
- B:当前桶的对数
- bucketCnt:每个桶能存的key-value数量,恒定是8
- loadFactorNum:负载因子的分子,值为 13(定义好的常量)
- loadFactorDen:负载因子的分母,值为 2(定义好的常量)
- bucketShift(B):返回桶总数 1 << B,也就是2的B次方
其实换算一下就是:
count > 8 && count > (13*2^B)/2
count > 6.5 * 2^B
还有一个理解:
装载因子:loadFactor := count / (2^B)
源码里阈值是 6.5
装载因子 > 源码里的阈值 时就会进行扩容
也就是说,当map中的key-value对的数量与桶的数量的比值大于6.5(源码中是严格大于 >,不含等于)的时候会发生增量扩容,即 (count/2^B) > 6.5 时触发增量扩容。
等量扩容(桶的数量不变)
- 当溢出桶(overflow bucket)的数量大于等于桶(bucket)的数量时会触发(源码中是 >=)
- overflow bucket >= bucket
- B指数如果超过15按15来算
// tooManyOverflowBuckets reports whether the map has accumulated roughly
// as many overflow buckets as regular buckets, which triggers a
// same-size grow to compact entries and release the overflow chains.
//
// The threshold is a compromise: too low and we do extraneous work,
// too high and maps that grow and shrink can hold on to lots of unused
// memory. "Too many" means (approximately) one overflow bucket per
// regular bucket. See incrnoverflow for more details.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// Cap the exponent at 15 so the comparison threshold fits in uint16.
	shift := B
	if shift > 15 {
		shift = 15
	}
	// The compiler cannot prove shift < 16 on its own; masking to four
	// bits lets it generate shorter shift code.
	threshold := uint16(1) << (shift & 15)
	return noverflow >= threshold
}
- noverflow:这里是hmap中的一个字段,是记录溢出桶的数量。
接下来还是看看源码吧
// hashGrow begins a grow of the map's bucket array: it decides between a
// doubling grow and a same-size grow, allocates the new buckets, and wires
// up the bookkeeping. The actual copying of entries is NOT done here — it
// happens incrementally via growWork()/evacuate() on subsequent writes.
//
// t: type descriptor of the map (key/elem layout, hash function, ...).
// h: pointer to the map header (hmap) being grown.
func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	// Default: doubling grow — B increases by 1 (bucket count doubles).
	bigger := uint8(1)
	// Including the entry about to be inserted (h.count+1), are we over
	// the load factor? If not, the trigger was overflow buckets instead.
	if !overLoadFactor(h.count+1, h.B) {
		// Not overloaded: perform a same-size grow — the bucket count
		// stays unchanged and entries are compacted out of overflow chains.
		bigger = 0
		// Record the same-size grow in the map's flags.
		h.flags |= sameSizeGrow
	}
	// The current buckets become the old generation.
	oldbuckets := h.buckets
	// Allocate the new bucket array of 2^(h.B+bigger) buckets;
	// nextOverflow is a preallocated run of overflow buckets.
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
	// If an iterator is active, move its flag from iterator to
	// oldIterator: live iterators now walk the old bucket array.
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0 // evacuation progress restarts from bucket 0
	h.noverflow = 0 // new generation starts with no overflow buckets
	// Promote the current overflow-bucket list to the old generation
	// and clear the current list.
	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	// Stash the preallocated overflow buckets for later use.
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}
	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}
// growWork makes incremental progress on an in-flight map grow.
//
// t: type descriptor of the map (key/elem layout, hash function, ...).
// h: pointer to the map header (hmap), including grow state.
// bucket: index of the bucket the caller is about to use.
func growWork(t *maptype, h *hmap, bucket uintptr) {
	// Make sure we evacuate the old bucket corresponding to the bucket
	// we're about to use, so the caller never touches unevacuated data.
	oldBucket := bucket & h.oldbucketmask()
	evacuate(t, h, oldBucket)

	// Evacuate one more old bucket (the next unevacuated one) so the
	// grow always makes forward progress; h.growing() reports whether
	// a grow is still in flight.
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}