源码分析go map

380 阅读6分钟

map结构

  • hmap.B
    在map中,桶的个数 n=2^B 次方,但是为什么规定是2^B呢,因为hash%n可以定位桶,但是%操作速度没有位运算快. 当n为2的B次方时,有如下替换公式:
    hash%n = hash&(n-1) = hash&(2^B-1) = hash&(1<<B-1)
    (1<<B-1)就是源码中的bucketMask

map初始化

// makemap implements Go map creation for make(map[k]v, hint).
// If h != nil, the map can be created directly in h.
// Returns an initialized hmap; the bucket array may be allocated
// lazily later (in mapassign) when B == 0.
func makemap(t *maptype, hint int, h *hmap) *hmap {
   // Clamp hint to 0 if the requested bucket memory would overflow
   // uintptr or exceed the maximum allocation size.
   mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
   if overflow || mem > maxAlloc {
      hint = 0
   }

   // initialize Hmap
   if h == nil {
      h = new(hmap)
   }
   // Random per-map hash seed; makes collisions harder to force externally.
   h.hash0 = fastrand()

   // Find the size parameter B which will hold the requested # of elements.
   // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
   B := uint8(0)
   for overLoadFactor(hint, B) {
      B++
   }
   h.B = B

   // allocate initial hash table
   // if B == 0, the buckets field is allocated lazily later (in mapassign)
   // If hint is large zeroing this memory could take a while.
   if h.B != 0 {
      var nextOverflow *bmap
      h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
      if nextOverflow != nil {
         // Track the preallocated overflow buckets in h.extra.
         h.extra = new(mapextra)
         h.extra.nextOverflow = nextOverflow
      }
   }

   return h
}

首先通过fastrand创建一个hash种子,再利用overLoadFactor计算出能够容纳hint(创建map传入的个数参数)的最小的B,

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
// Small maps (count <= bucketCnt) always fit in a single bucket.
func overLoadFactor(count int, B uint8) bool {
   if count <= bucketCnt {
      return false
   }
   return uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

以上代码可转化成:
元素个数 count > 6.5*2^B (同时要求 count > 8,即超过单个桶的容量)
比如传入16,则:
B = 0, 16 > 6.5*1 => 16 > 6.5,返回true,继续执行for循环
B = 1, 16 > 6.5*2 => 16 > 13,返回true,继续执行for循环
B = 2, 16 > 6.5*4 => 16 > 26,返回false,结束for循环
这个map就有2^2=4个桶

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be alloced and
// otherwise dirtyalloc will be cleared and reused as backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
   base := bucketShift(b)
   // For b >= 4, nbuckets will end up larger than base.
   nbuckets := base
   // With b >= 4 overflow buckets are likely to be needed, so a few are
   // preallocated now to save allocation work later.
   if b >= 4 {
      // Add on the estimated number of overflow buckets
      // required to insert the median number of elements
      // used with this value of b.
      nbuckets += bucketShift(b - 4)
      sz := t.bucket.size * nbuckets
      up := roundupsize(sz)
      // Rounding up to a size class may leave extra space; use it for
      // additional buckets rather than wasting it.
      if up != sz {
         nbuckets = up / t.bucket.size
      }
   }

   if dirtyalloc == nil {
      // Nothing to reuse: allocate one contiguous array of buckets.
      buckets = newarray(t.bucket, int(nbuckets))
   } else {
      // dirtyalloc was previously generated by
      // the above newarray(t.bucket, int(nbuckets))
      // but may not be empty.
      buckets = dirtyalloc
      size := t.bucket.size * nbuckets
      if t.bucket.ptrdata != 0 {
         memclrHasPointers(buckets, size)
      } else {
         memclrNoHeapPointers(buckets, size)
      }
   }
   // Wire up the preallocated overflow buckets, if any.
   if base != nbuckets {
      // We preallocated some overflow buckets.
      // To keep the overhead of tracking these overflow buckets to a minimum,
      // we use the convention that if a preallocated overflow bucket's overflow
      // pointer is nil, then there are more available by bumping the pointer.
      // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
      // The extra buckets live past the hash-addressable range (hash&bucketMask
      // can never reach them), so their start address is stashed in
      // mapextra.nextOverflow instead.
      nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
      // last points at the final bucket of the preallocated overflow region.
      last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
      // Point the last bucket's overflow field back at the array start:
      // when handing out preallocated overflow buckets later, a nil
      // overflow pointer means "not the last one" (see hmap.newoverflow).
      last.setoverflow(t, (*bmap)(buckets))
   }
   return buckets, nextOverflow
}

makeBucketArray做的事为:
如果多申请了用于处理溢出情况的bucket,那么计算出溢出空间的最后一块bucket地址,并将此bucket的overflow指针指向整个bucket连续空间的头(获取预分配的溢出桶里bmap时,可以通过判断overflow是不是为nil判断是不是最后一个)
下图为初始化时,为hint=208,B=5的情况,此时nbuckets = 2^5 + 2^(5-4) = 32 + 2 = 34,多出来两个bucket放到nextoverflow字段下

image.png

map访问

// mapaccess2 implements the two-result map read: v, ok := m[key].
// On a miss it returns a pointer to the type's zero value and false.
// (The "(...)" below marks code elided from this excerpt.)
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
   (...)
   // Nil map or empty map: return the zero value and false.
   if h == nil || h.count == 0 {
      if t.hashMightPanic() {
         t.hasher(key, 0) // see issue 23734
      }
      return unsafe.Pointer(&zeroVal[0]), false
   }
   // A concurrent writer is active: fail loudly.
   if h.flags&hashWriting != 0 {
      throw("concurrent map read and map write")
   }
   // Hash the key with this map's seed.
   hash := t.hasher(key, uintptr(h.hash0))
   // m masks the hash down to its low B bits to pick a bucket:
   // B = 3 -> m is binary  111
   // B = 4 -> m is binary 1111
   m := bucketMask(h.B)
   // b is the address of the selected bucket.
   b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
   // A non-nil oldbuckets means the map is growing: read from the old
   // bucket unless it has already been evacuated to the new array.
   if c := h.oldbuckets; c != nil {
      if !h.sameSizeGrow() {
         // There used to be half as many buckets; mask down one more power of two.
         m >>= 1
      }
      oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
      if !evacuated(oldb) {
         b = oldb
      }
   }
   // Top 8 bits of the hash, used as a cheap per-slot pre-filter.
   top := tophash(hash)
bucketloop:
   // Outer loop walks the bucket's overflow chain.
   for ; b != nil; b = b.overflow(t) {
      // Inner loop scans the tophash entries of the current bucket.
      for i := uintptr(0); i < bucketCnt; i++ {
         // Slot's tophash does not match.
         if b.tophash[i] != top {
            if b.tophash[i] == emptyRest {
               // No entries after this slot (including overflow buckets);
               // emptyRest is set by delete operations.
               break bucketloop
            }
            continue
         }
         // Compute the key's address within the bucket.
         k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
         // Keys may be stored indirectly (as pointers); dereference.
         if t.indirectkey() {
            k = *((*unsafe.Pointer)(k))
         }
         // Full key comparison rules out tophash collisions.
         if t.key.equal(key, k) {
            // Element address: the elems array follows the keys array.
            e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
            if t.indirectelem() {
               e = *((*unsafe.Pointer)(e))
            }
            return e, true
         }
      }
   }
   return unsafe.Pointer(&zeroVal[0]), false
}

dataOffset其实就是bmap的大小,所以dataOffset + i * keySize就可以计算出key在bmap的位置,要访问的value地址也是如此。

// dataOffset is the offset of the keys array within a bucket: the size
// of bmap (the tophash header) rounded up to the alignment an int64
// field requires (i.e. maximal alignment). Keys start at
// bucket+dataOffset; elems follow the keys array.
dataOffset = unsafe.Offsetof(struct {
   b bmap
   v int64
}{}.v)

总结

  • 计算出key的hash,计算在buckets中要访问的bucket的位置
  • 若正在扩容,则计算出在oldbuckets中要访问bucket的位置
  • 遍历得到的bucket,以及bucket所在的链表
  • 遍历当前bucket,若bucket的tophash中存在emptyRest,则代表后续无元素,直接返回零值
  • 若hash匹配成功,则匹配key是否相等,不相等则继续遍历,相等就计算value的位置,并返回

map赋值

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
// Returns a pointer to the element slot; the compiler-generated caller
// stores the value through it.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   if h == nil {
      panic(plainError("assignment to entry in nil map"))
   }
   if raceenabled {
      callerpc := getcallerpc()
      pc := funcPC(mapassign)
      racewritepc(unsafe.Pointer(h), callerpc, pc)
      raceReadObjectPC(t.key, key, callerpc, pc)
   }
   if msanenabled {
      msanread(key, t.key.size)
   }
   // Another goroutine is already writing: fail loudly.
   if h.flags&hashWriting != 0 {
      throw("concurrent map writes")
   }
   // Hash the key with this map's seed.
   hash := t.hasher(key, uintptr(h.hash0))

   // Set hashWriting after calling t.hasher, since t.hasher may panic,
   // in which case we have not actually done a write.
   // Mark the map as being written.
   h.flags ^= hashWriting
   // Lazily allocate the first bucket (the B == 0 case of makemap).
   if h.buckets == nil {
      h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
   }

again:
   // Bucket index: the low B bits of the hash.
   bucket := hash & bucketMask(h.B)
   if h.growing() {
      growWork(t, h, bucket)
   }
   // Address of the target bucket.
   b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
   // Top 8 bits of the hash.
   top := tophash(hash)

   var inserti *uint8
   var insertk unsafe.Pointer
   var elem unsafe.Pointer
bucketloop:
   for {
      for i := uintptr(0); i < bucketCnt; i++ {
         // Slot's tophash does not match top.
         if b.tophash[i] != top {
            if isEmpty(b.tophash[i]) && inserti == nil {
               // Remember the first empty slot in case the key is absent.
               inserti = &b.tophash[i]
               insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
               elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
            }
            // Nothing is stored past this slot: stop searching.
            if b.tophash[i] == emptyRest {
               break bucketloop
            }
            continue
         }
         // tophash matched; fetch the stored key for a full comparison.
         k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
         // Keys may be stored indirectly (as pointers); dereference.
         if t.indirectkey() {
            k = *((*unsafe.Pointer)(k))
         }
         // Different key with the same tophash: keep scanning.
         if !t.key.equal(key, k) {
            continue
         }
         // Key already present: update in place.
         // already have a mapping for key. Update it.
         if t.needkeyupdate() {
            typedmemmove(t.key, k, key)
         }
         elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
         goto done
      }
      ovf := b.overflow(t)
      if ovf == nil {
         break
      }
      b = ovf
   }

   // Did not find mapping for key. Allocate new cell & add entry.

   // If we hit the max load factor or we have too many overflow buckets,
   // and we're not already in the middle of growing, start growing.
   if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
      hashGrow(t, h)
      goto again // Growing the table invalidates everything, so try again
   }
   // No free slot was found above, so the bucket and its whole overflow
   // chain are full. Get a new overflow bucket, preferring the
   // preallocated ones (h.extra.nextOverflow) before allocating fresh.
   if inserti == nil {
      // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
      newb := h.newoverflow(t, b)
      inserti = &newb.tophash[0]
      insertk = add(unsafe.Pointer(newb), dataOffset)
      elem = add(insertk, bucketCnt*uintptr(t.keysize))
   }
   // Write the new entry.
   // store new key/elem at insert position
   if t.indirectkey() {
      kmem := newobject(t.key)
      *(*unsafe.Pointer)(insertk) = kmem
      insertk = kmem
   }
   if t.indirectelem() {
      vmem := newobject(t.elem)
      *(*unsafe.Pointer)(elem) = vmem
   }
   typedmemmove(t.key, insertk, key)
   *inserti = top
   h.count++

done:
   if h.flags&hashWriting == 0 {
      throw("concurrent map writes")
   }
   // Clear the writing flag.
   h.flags &^= hashWriting
   if t.indirectelem() {
      elem = *((*unsafe.Pointer)(elem))
   }
   return elem
}
// newoverflow returns an overflow bucket and chains it onto b, taking a
// preallocated bucket from h.extra.nextOverflow when one is available
// and allocating a fresh bucket otherwise.
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
   var ovf *bmap
   if h.extra != nil && h.extra.nextOverflow != nil {
      // We have preallocated overflow buckets available.
      // See makeBucketArray for more details.
      ovf = h.extra.nextOverflow
      // A nil overflow pointer means ovf is not the last preallocated bucket.
      if ovf.overflow(t) == nil {
         // We're not at the end of the preallocated overflow buckets. Bump the pointer.
         // Advance h.extra.nextOverflow by one bmap.
         h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
      } else {
         // This is the last preallocated overflow bucket.
         // Reset the overflow pointer on this bucket,
         // which was set to a non-nil sentinel value.

         // Clear the sentinel pointer (it pointed back at the start of
         // the bucket array; see makeBucketArray).
         ovf.setoverflow(t, nil)
         // The preallocated overflow buckets are now used up.
         h.extra.nextOverflow = nil
      }
   } else {
      // No preallocated overflow buckets left: allocate a new one.
      ovf = (*bmap)(newobject(t.bucket))
   }
   // Bump hmap's (approximate) overflow bucket counter.
   h.incrnoverflow()
   if t.bucket.ptrdata == 0 {
      // Lazily initialize the extra field.
      h.createOverflow()
      // Record the new overflow bucket in the global h.extra.overflow
      // list (keeps pointer-free buckets alive for the GC).
      *h.extra.overflow = append(*h.extra.overflow, ovf)
   }
   b.setoverflow(t, ovf)
   return ovf
}

对于newoverflow,有两种情况(以B=5进行分析,并且key的后B位落在bucket1上,bucket1无可用位置了)

情况一: h.extra.nextOverflow != nil

image.png 情况二: h.extra.nextOverflow == nil

image.png

总结

  • 判断是否被其他的goroutine写,是则报错
  • 更新写flags为写状态
  • 若当前buckets为空,则新建一个
  • 计算hash值,并得到要进行写的bucket的位置
  • 遍历bucket的tophash,遍历过程中
    • tophash不匹配,保存第一个值为空的位置,若遍历中发现emptyRest,则取消遍历
    • tophash匹配,判断key值是否相等,相等则直接更新,不相等则继续遍历
  • inserti == nil,则代表bucket都满了,通过newoverflow获取一个新的,并写入数据
  • 去掉flags的写状态

map扩容

发生扩容有两种情况:

overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
// Small maps (count <= bucketCnt) always fit in a single bucket.
func overLoadFactor(count int, B uint8) bool {
   if count <= bucketCnt {
      return false
   }
   return uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

可以简化成:count > 6.5*2^B

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
   // If the threshold is too low, we do extraneous work.
   // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
   // "too many" means (approximately) as many overflow buckets as regular buckets.
   // See incrnoverflow for more details.
   // Cap the shift at 15 so the uint16 threshold below cannot overflow.
   shift := B
   if shift > 15 {
      shift = 15
   }
   // The compiler can't see that shift < 16 here; masking it generates
   // shorter shift code.
   threshold := uint16(1) << (shift & 15)
   return noverflow >= threshold
}

情况一:说明大多数桶都快满了,再插入新数据,大概率会放在overflow的桶上
情况二:
a. 当bucket总数 <= 2^15时候,若overflow桶数量 >= 2^B数量,则认为overflow太多了
b. 当bucket > 2^15,直接和2^15比

针对情况一:将B+1,桶数量增加一倍 针对情况二:申请同等大小bucket,将旧的内容移动到新的bucket里

// hashGrow starts a map grow: doubling (B+1) when over the load factor,
// same-size otherwise (too many overflow buckets). Only the new arrays
// are set up here; the actual copying is done incrementally by
// growWork and evacuate.
func hashGrow(t *maptype, h *hmap) {
   // If we've hit the load factor, get bigger.
   // Otherwise, there are too many overflow buckets,
   // so keep the same number of buckets and "grow" laterally.
   bigger := uint8(1)
   // Not over the load factor: grow to the same size.
   if !overLoadFactor(h.count+1, h.B) {
      bigger = 0
      // Mark this as a same-size grow; capacity stays unchanged.
      h.flags |= sameSizeGrow
   }
   oldbuckets := h.buckets
   // Allocate the new bucket array plus preallocated overflow buckets.
   newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

   flags := h.flags &^ (iterator | oldIterator)
   if h.flags&iterator != 0 {
      flags |= oldIterator
   }
   // commit the grow (atomic wrt gc)
   // Update the hmap fields to point at the new generation.
   h.B += bigger
   h.flags = flags
   h.oldbuckets = oldbuckets
   h.buckets = newbuckets
   h.nevacuate = 0
   h.noverflow = 0

   if h.extra != nil && h.extra.overflow != nil {
      // Promote current overflow buckets to the old generation.
      if h.extra.oldoverflow != nil {
         throw("oldoverflow is not nil")
      }
      // Move the current overflow list to oldoverflow.
      h.extra.oldoverflow = h.extra.overflow
      h.extra.overflow = nil
   }
   if nextOverflow != nil {
      if h.extra == nil {
         h.extra = new(mapextra)
      }
      h.extra.nextOverflow = nextOverflow
   }

   // the actual copying of the hash table data is done incrementally
   // by growWork() and evacuate().
}
// growWork performs a slice of the incremental grow: it evacuates the
// old bucket the upcoming operation depends on, plus one extra old
// bucket while a grow is still in progress, so the grow always makes
// forward progress.
func growWork(t *maptype, h *hmap, bucket uintptr) {
   // The old bucket corresponding to the bucket we're about to use
   // must be evacuated first.
   oldbucket := bucket & h.oldbucketmask()
   evacuate(t, h, oldbucket)

   // Still growing: evacuate one more old bucket per call.
   if h.growing() {
      evacuate(t, h, h.nevacuate)
   }
}

// evacuated reports whether bucket b has already been migrated to the
// new bucket array. The evacuation markers (evacuatedX, evacuatedY,
// evacuatedEmpty) all lie strictly between emptyOne and minTopHash,
// and evacuate records them in tophash[0].
func evacuated(b *bmap) bool {
   marker := b.tophash[0]
   return emptyOne < marker && marker < minTopHash
}

当前bucket是否搬迁完是通过检查tophash[0]的值来完成的,所以在下面的搬迁过程中会设置这个值

// evacuate moves the contents of old bucket `oldbucket` (including its
// overflow chain) into the new bucket array, recording each slot's
// evacuation destination in its tophash entry.
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
   // Address of the old bucket.
   b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   newbit := h.noldbuckets()
   // Skip buckets that have already been evacuated.
   if !evacuated(b) {
      // TODO: reuse overflow buckets instead of using new ones, if there
      // is no iterator using the old buckets.  (If !oldIterator.)

      // xy contains the x and y (low and high) evacuation destinations.
      var xy [2]evacDst
      // x is the destination in the lower half of the new bucket array;
      // y (set below) is the upper half, used only for doubling grows.
      x := &xy[0]
      x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
      x.k = add(unsafe.Pointer(x.b), dataOffset)
      x.e = add(x.k, bucketCnt*uintptr(t.keysize))

      if !h.sameSizeGrow() {
         // Doubling grow: also compute the upper-half destination.
         // Only calculate y pointers if we're growing bigger.
         // Otherwise GC can see bad pointers.
         y := &xy[1]
         y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
         y.k = add(unsafe.Pointer(y.b), dataOffset)
         y.e = add(y.k, bucketCnt*uintptr(t.keysize))
      }

      // Walk the old bucket and its overflow chain.
      for ; b != nil; b = b.overflow(t) {
         // Address of the first key in this bucket.
         k := add(unsafe.Pointer(b), dataOffset)
         // Address of the first element (elems follow the keys array).
         e := add(k, bucketCnt*uintptr(t.keysize))
         for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
            top := b.tophash[i]
            // Empty slot: mark it evacuated-empty and move on.
            if isEmpty(top) {
               b.tophash[i] = evacuatedEmpty
               continue
            }
            if top < minTopHash {
               throw("bad map state")
            }
            k2 := k
            if t.indirectkey() {
               k2 = *((*unsafe.Pointer)(k2))
            }
            var useY uint8
            if !h.sameSizeGrow() {
               // Compute hash to make our evacuation decision (whether we need
               // to send this key/elem to bucket x or bucket y).
               hash := t.hasher(k2, uintptr(h.hash0))
               if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
                  // Why the reflexivekey check? See:
                  // https://go-review.googlesource.com/c/go/+/1480
                  // key != key only happens for float NaN keys, e.g.:
                  // n1 := math.NaN()
                  // n2 := math.NaN()
                  // fmt.Println(n1, n2)
                  // fmt.Println(n1 == n2)   // false
                  // Such keys also rehash to completely different values
                  // each time, so they are unreachable by lookup and only
                  // observable through iterators (hence the iterator-flag
                  // condition). Their tophash is meaningless, so we are
                  // free to send them anywhere: use the low bit of the
                  // old tophash for a 50% chance of the Y half, spreading
                  // these keys evenly, and recompute a fresh tophash for
                  // the new slot.
                  useY = top & 1
                  top = tophash(hash)
               } else {
                  // Trick: suppose there are 8 old buckets, so newbit is
                  // 8 (binary 1000). hash&newbit != 0 means the hash has
                  // the shape xxx1xxx. After doubling, bucketMask becomes
                  // 1111, so xxx1xxx & 1000 != 0 means the newly-exposed
                  // mask bit is set and the entry must land in the upper
                  // (Y) half of the new array — hence useY.
                  if hash&newbit != 0 {
                     useY = 1
                  }
               }
            }

            if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
               throw("bad evacuatedN")
            }

            b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
            dst := &xy[useY]                 // evacuation destination

            if dst.i == bucketCnt {
               dst.b = h.newoverflow(t, dst.b)
               dst.i = 0
               dst.k = add(unsafe.Pointer(dst.b), dataOffset)
               dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
            }
            dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
            if t.indirectkey() {
               *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
            } else {
               typedmemmove(t.key, dst.k, k) // copy elem
            }
            if t.indirectelem() {
               *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
            } else {
               typedmemmove(t.elem, dst.e, e)
            }
            dst.i++
            // These updates might push these pointers past the end of the
            // key or elem arrays.  That's ok, as we have the overflow pointer
            // at the end of the bucket to protect against pointing past the
            // end of the bucket.
            dst.k = add(dst.k, uintptr(t.keysize))
            dst.e = add(dst.e, uintptr(t.elemsize))
         }
      }
      // Unlink the overflow buckets & clear key/elem to help GC.
      if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
         b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
         // Preserve b.tophash because the evacuation
         // state is maintained there.
         ptr := add(b, dataOffset)
         n := uintptr(t.bucketsize) - dataOffset
         memclrHasPointers(ptr, n)
      }
   }

   if oldbucket == h.nevacuate {
      advanceEvacuationMark(h, t, newbit)
   }
}

map删除

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   if raceenabled && h != nil {
      callerpc := getcallerpc()
      pc := funcPC(mapdelete)
      racewritepc(unsafe.Pointer(h), callerpc, pc)
      raceReadObjectPC(t.key, key, callerpc, pc)
   }
   if msanenabled && h != nil {
      msanread(key, t.key.size)
   }
   if h == nil || h.count == 0 {
      if t.hashMightPanic() {
         t.hasher(key, 0) // see issue 23734
      }
      return
   }
   // 状态检查
   if h.flags&hashWriting != 0 {
      throw("concurrent map writes")
   }
   // 计算key的hash
   hash := t.hasher(key, uintptr(h.hash0))

   // Set hashWriting after calling t.hasher, since t.hasher may panic,
   // in which case we have not actually done a write (delete).
   // 写入位置1
   h.flags ^= hashWriting
    // 计算出key的后B位
   bucket := hash & bucketMask(h.B)
   if h.growing() {
      growWork(t, h, bucket)
   }
   // 计算出bucket的位置
   b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
   bOrig := b
   // 找到对应的top
   top := tophash(hash)
search:
   // 遍历当前的bucket以及overflow
   for ; b != nil; b = b.overflow(t) {
      for i := uintptr(0); i < bucketCnt; i++ {
         if b.tophash[i] != top {
            // emptyRest代表本位置以及后面都是空的,没必要遍历了
            if b.tophash[i] == emptyRest {
               break search
            }
            continue
         }
         k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
         k2 := k
         if t.indirectkey() {
            k2 = *((*unsafe.Pointer)(k2))
         }
         // key值不相等,继续遍历
         if !t.key.equal(key, k2) {
            continue
         }
         // Only clear key if there are pointers in it.
         if t.indirectkey() {
            *(*unsafe.Pointer)(k) = nil
         } else if t.key.ptrdata != 0 {
            memclrHasPointers(k, t.key.size)
         }
         e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
         if t.indirectelem() {
            *(*unsafe.Pointer)(e) = nil
         } else if t.elem.ptrdata != 0 {
            memclrHasPointers(e, t.elem.size)
         } else {
            memclrNoHeapPointers(e, t.elem.size)
         }
         // emptyOne只代表本位置是空的
         b.tophash[i] = emptyOne
         // If the bucket now ends in a bunch of emptyOne states,
         // change those to emptyRest states.
         // It would be nice to make this a separate function, but
         // for loops are not currently inlineable.
         // ====这一块代码判断当前位置之后是否都是空的====
         
         if i == bucketCnt-1 {
            if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
               goto notLast
            }
         } else {
            if b.tophash[i+1] != emptyRest {
               goto notLast
            }
         }
         // =========================================
         
         
         // 走到这里说明当前位置以及后面的位置都是空的
         // 从后向前,处理
         for {
            // 将当前top设置为emptyRest
            b.tophash[i] = emptyRest
            if i == 0 {
               if b == bOrig {
                  break // beginning of initial bucket, we're done.
               }
               // Find previous bucket, continue at its last entry.
               c := b
               // 找到上一个bucket
               for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
               }
               i = bucketCnt - 1
            } else {
               i--
            }
            if b.tophash[i] != emptyOne {
               break
            }
         }
      notLast:
         h.count--
         // Reset the hash seed to make it more difficult for attackers to
         // repeatedly trigger hash collisions. See issue 25237.
         if h.count == 0 {
            h.hash0 = fastrand()
         }
         break search
      }
   }