// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
//
// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. The value
// 6.5 (loadFactorNum/loadFactorDen below) is a measured
// compromise between the fraction of buckets that overflow,
// the overhead bytes per entry, and the average number of
// entries probed on lookup hits and misses, for maximally
// loaded tables. Typical tables are somewhat less loaded.

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth is 6.5.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorNum = 13
	loadFactorDen = 2

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in cmd/compile/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for
	// special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*sys.PtrSize) - 1
)

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
	// Don't change this structure without also changing that code!
	count     int // # live cells == size of map. Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow
	// and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and value do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows storing a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end.
	value       unsafe.Pointer // Must be in second position.
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	if sys.GoarchAmd64|sys.GoarchAmd64p32|sys.Goarch386 != 0 {
		b &= sys.PtrSize*8 - 1 // help x86 archs skip the instruction for bounded shifts
	}
	return uintptr(1) << b
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
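
// Worked example (illustrative): on a 64-bit platform tophash keeps the top
// 8 bits of the hash, so a hash beginning 0xA1... yields top == 0xA1. A top
// byte below minTopHash (0..3) would collide with the empty/evacuated
// markers defined above, so it is bumped up by minTopHash; the markers can
// therefore never be confused with the tophash of a live cell.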

// evacuated reports whether b's old bucket has been moved to the new table.
// The evacuation state is stored in tophash[0].
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

// overflow returns b's overflow bucket pointer, which is stored
// in the last word of the bucket.
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and fastrand & 7 == 0 with probability 1/8.
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}
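
// To see why the sampled count works: with increment probability
// p = 1/(1<<(h.B-15)) per overflow bucket, noverflow reaches about 1<<15
// only after roughly (1<<15)/p = 1<<h.B overflow buckets exist, i.e. about
// one per regular bucket, which is exactly the tooManyOverflowBuckets
// threshold. The estimate is noisy, but an early or late trigger only
// shifts when a (harmless) same-size grow happens.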

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.bucket))
	}
	h.incrnoverflow()
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h may be non-nil.
// If h != nil, the map can be created directly in h.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	// The size of hmap should be 48 bytes on 64 bit
	// and 28 bytes on 32 bit platforms.
	if sz := unsafe.Sizeof(hmap{}); sz != 8+5*sys.PtrSize {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || hint > int(maxSliceCap(t.bucket.size)) {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.hash0 = fastrand()

	// Find the size parameter B which will hold the requested # of elements.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
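
// Worked example of the B selection above (illustrative): for
// make(map[string]int, 100), overLoadFactor reports 100 > 6.5*2^B for
// B = 0..3, so the loop settles on B = 4: 16 buckets, which can hold up to
// 6.5*16 = 104 entries before the load factor forces a growth.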

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
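
// For reference: the compiler lowers v := m[k] to a call to mapaccess1 (or a
// type-specialized fast variant) and v, ok := m[k] to mapaccess2 below. The
// zeroVal reference returned for a missing key is what lets a failed lookup
// yield the zero value without any allocation.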

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	v := mapaccess1(t, h, key)
	if v == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return v
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	v := mapaccess1(t, h, key)
	if v == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return v, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	// Set hashWriting after calling alg.hash, since alg.hash may panic,
	// in which case we have not actually done a write.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if !alg.equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate {
				typedmemmove(t.key, k, key)
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(val) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectvalue {
		val = *((*unsafe.Pointer)(val))
	}
	return val
}
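
// For reference: mapassign only returns a pointer to the value slot; the
// store of the value itself is emitted by the compiler at the call site
// (and done with typedmemmove in reflect_mapassign below). That is why the
// function never takes the value as an argument.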

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	// Set hashWriting after calling alg.hash, since alg.hash may panic,
	// in which case we have not actually done a write (delete).
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.indirectkey {
				*(*unsafe.Pointer)(k) = nil
			} else if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.indirectvalue || t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					*(*unsafe.Pointer)(v) = nil
				} else {
					memclrHasPointers(v, t.elem.size)
				}
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
		throw("hash_iter size incorrect") // see cmd/compile/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
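
// The random startBucket and offset above are why ranging over a Go map
// visits keys in a different order on each iteration: the walk starts at a
// random bucket and at a random slot within each bucket. Code that needs a
// stable order must collect the keys and sort them itself.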

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if b.tophash[offi] == empty || b.tophash[offi] == evacuatedEmpty {
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		if t.indirectkey {
			k = *((*unsafe.Pointer)(k))
		}
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. Or at least, it wasn't
			// evacuated when we started the bucket. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow).
			if t.reflexivekey || alg.equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := alg.hash(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.reflexivekey || alg.equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.indirectvalue {
				v = *((*unsafe.Pointer)(v))
			}
			it.value = v
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, rv := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.value = rv
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// makeBucketArray allocates the backing array for map buckets,
// preallocating some overflow buckets for larger maps.
func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.bucket.size * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.bucket.size
		}
	}
	buckets = newarray(t.bucket, int(nbuckets))
	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
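
// A rough sizing example (illustrative): for b == 5 the base array is 32
// buckets and bucketShift(5-4) == 2 overflow buckets are tacked on, so 34
// buckets are requested; roundupsize may then grow that further so the
// allocation fills out its malloc size class and no requested byte is
// wasted.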

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
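
// Note the integer trick above: loadFactorNum*(bucketShift(B)/loadFactorDen)
// is 13*(2^B/2) = 6.5*2^B, computed without floating point (and dividing
// before multiplying keeps the product small). For example, at B == 5 the
// threshold is 13*(32/2) = 208, so the map grows when the 209th element is
// about to be inserted. The count > bucketCnt guard means a map that fits
// in a single bucket never grows at all.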

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}
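
// Evacuating at most two old buckets per write keeps growth incremental: no
// single insert or delete pays for more than a constant amount of copying,
// yet the grow is guaranteed to finish after at most noldbuckets() writes,
// which in practice is well before enough new elements can arrive to
// trigger the next grow.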

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/val index into b
	k unsafe.Pointer // pointer to current key storage
	v unsafe.Pointer // pointer to current value storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*uintptr(t.keysize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuation destination
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.indirectkey {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.key, dst.k, k) // copy value
				}
				if t.indirectvalue {
					*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
				} else {
					typedmemmove(t.elem, dst.v, v)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.keysize))
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
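
// During a doubling grow, each old bucket splits into exactly two new
// buckets: entries whose hash has a 0 in the new bit keep their index
// ("X"), entries with a 1 move to index oldbucket+newbit ("Y"). The
// evacuatedX/evacuatedY marks left behind in the old tophash array are what
// let iterators that started before the grow work out where each entry
// went.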

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflects math.
	if sz := unsafe.Sizeof(hmap{}); sz != t.hmap.size {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}
	if !ismapkey(t.key) {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if t.key.size%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.elem.size%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	return makemap(t, cap, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.elem, p, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}

const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte