Concurrency-safe sync.Map
https://mp.weixin.qq.com/s/MqPm7QH3_D9roVkpTs9Xpw
On concurrency safety in Go
package main

import "fmt"

func main() {
    ch1 := make(chan bool)
    go func() {
        for i := 0; i < 10; i++ {
            fmt.Println("sub-goroutine, i:", i)
        }
        ch1 <- true
        fmt.Println("sub-goroutine-DONE")
    }()
    data := <-ch1
    fmt.Println("main-goroutine", data)
    fmt.Println("main-goroutine:done")
}

Here an unbuffered channel makes the main goroutine wait for the worker. Note that the final "sub-goroutine-DONE" line is not guaranteed to print: once main receives from ch1 it may exit before the worker goroutine runs again.
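The channel above is used purely as a completion signal. A sync.WaitGroup expresses the same intent more directly; a minimal equivalent sketch (not from the original post):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done() // signal completion when the goroutine returns
        for i := 0; i < 10; i++ {
            fmt.Println("sub-goroutine, i:", i)
        }
    }()
    wg.Wait() // block until the sub-goroutine has finished
    fmt.Println("main-goroutine: done")
}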
sync.Map source code analysis – 陈思敏捷 https://www.chenjie.info/2303
sync.Map source code analysis
Overview
Go's built-in map is not safe for concurrent use. Before Go 1.6, concurrent reads and writes could silently return corrupted data; since Go 1.6 the runtime detects the race and aborts the program with a fatal error. So before Go 1.9 the usual workaround was to pair the map with a lock, either wrapped together in a new struct or used alongside it. Go 1.9 then introduced sync.Map, which trades extra space for time to reduce how often a lock is needed.
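As a rough sketch of that pre-1.9 lock-plus-map approach (the type and method names here are illustrative, not from any library):

package main

import (
    "fmt"
    "sync"
)

// lockedMap is a hypothetical wrapper: a plain map guarded by a RWMutex.
type lockedMap struct {
    mu sync.RWMutex
    m  map[string]int
}

func (l *lockedMap) Get(k string) (int, bool) {
    l.mu.RLock()
    defer l.mu.RUnlock()
    v, ok := l.m[k]
    return v, ok
}

func (l *lockedMap) Set(k string, v int) {
    l.mu.Lock()
    defer l.mu.Unlock()
    l.m[k] = v
}

func main() {
    lm := &lockedMap{m: make(map[string]int)}
    lm.Set("a", 1)
    fmt.Println(lm.Get("a")) // 1 true
}

The RWMutex lets many readers proceed in parallel while a writer gets exclusive access.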
How it works
To reduce blocking caused by lock contention, sync.Map splits its data across two maps, read and dirty, both of which store pointers to the same entries. Load, Store, and Delete all try read first, which is protected only by atomic operations and is therefore fast; only when read cannot satisfy the request do they fall back to dirty, which is protected by a Mutex and is slower.
Source code walkthrough
Core structures
// entry holds a pointer to the value interface; it is manipulated with atomic operations
type entry struct {
    p unsafe.Pointer // *interface{}
}
// Map.read stores a readOnly; it is always accessed through atomic operations
type readOnly struct {
    m       map[interface{}]*entry // the stored data
    amended bool                   // true when Map.dirty contains keys that are not in m
}
// the main sync.Map structure
type Map struct {
    mu     Mutex                  // lock used when touching dirty
    read   atomic.Value           // holds a readOnly struct; accessed with atomic operations, no lock needed
    dirty  map[interface{}]*entry // accessed under mu; overlaps with read, promoted to read once misses reaches len(dirty)
    misses int                    // number of lookups that missed read
}
Core methods
// Load looks up the value for key; ok reports whether it was found
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
    read, _ := m.read.Load().(readOnly)
    e, ok := read.m[key]
    // not in read, and dirty holds keys that read does not
    if !ok && read.amended {
        m.mu.Lock() // take the lock
        // Double check: while we were waiting for the lock, m.dirty may have been
        // promoted, in which case m.read has been replaced.
        read, _ = m.read.Load().(readOnly)
        e, ok = read.m[key]
        if !ok && read.amended {
            e, ok = m.dirty[key]
            // Count the miss whether or not the key is in m.dirty;
            // once missLocked() sees enough misses it promotes m.dirty.
            m.missLocked()
        }
        m.mu.Unlock()
    }
    if !ok {
        return nil, false
    }
    return e.load() // read the value with an atomic load
}
// missLocked increments misses; once misses == len(dirty), dirty is promoted to read and reset to nil
func (m *Map) missLocked() {
    m.misses++
    if m.misses < len(m.dirty) {
        return
    }
    m.read.Store(readOnly{m: m.dirty})
    m.dirty = nil
    m.misses = 0
}
// Store updates or inserts an entry
func (m *Map) Store(key, value interface{}) {
    read, _ := m.read.Load().(readOnly)
    // If m.read already has this key and the entry is not marked expunged, try to store directly.
    // m.dirty points at the same entry, so it automatically sees the new value as well.
    if e, ok := read.m[key]; ok && e.tryStore(&value) {
        return
    }
    // Either the key is not in m.read or the entry has been expunged
    m.mu.Lock()
    read, _ = m.read.Load().(readOnly)
    if e, ok := read.m[key]; ok { // the key exists in m.read (its entry may be expunged)
        if e.unexpungeLocked() { // clear the expunged mark
            m.dirty[key] = e // the key was missing from m.dirty, so add it back
        }
        e.storeLocked(&value) // update the value
    } else if e, ok := m.dirty[key]; ok { // the key exists only in m.dirty
        e.storeLocked(&value) // update the value
    } else { // the key is in neither read nor dirty
        if !read.amended { // amended == false means dirty has not been (re)initialized yet
            m.dirtyLocked() // copy every non-deleted entry from read into dirty; nil entries in read are marked expunged
            m.read.Store(readOnly{m: read.m, amended: true}) // flip amended to true
        }
        m.dirty[key] = newEntry(value) // store the new value in dirty
    }
    m.mu.Unlock()
}
func (m *Map) dirtyLocked() {
    if m.dirty != nil {
        return
    }
    read, _ := m.read.Load().(readOnly)
    m.dirty = make(map[interface{}]*entry, len(read.m))
    for k, e := range read.m {
        if !e.tryExpungeLocked() {
            m.dirty[k] = e
        }
    }
}
func (e *entry) tryExpungeLocked() (isExpunged bool) {
    p := atomic.LoadPointer(&e.p)
    for p == nil {
        // upgrade entries already deleted (nil) to expunged
        if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
            return true
        }
        p = atomic.LoadPointer(&e.p)
    }
    return p == expunged
}
// Delete removes a key
func (m *Map) Delete(key interface{}) {
    read, _ := m.read.Load().(readOnly)
    e, ok := read.m[key]
    if !ok && read.amended { // not in read, and dirty holds extra keys
        m.mu.Lock()
        // double check
        read, _ = m.read.Load().(readOnly)
        e, ok = read.m[key]
        if !ok && read.amended {
            delete(m.dirty, key) // delete straight from dirty
        }
        m.mu.Unlock()
    }
    if ok {
        // The key is in read: mark the entry as deleted instead of removing the data.
        e.delete()
    }
}
// Range iterates over the map via a callback
func (m *Map) Range(f func(key, value interface{}) bool) {
    read, _ := m.read.Load().(readOnly)
    if read.amended { // amended == true means dirty has keys read lacks, so dirty is the complete view
        m.mu.Lock()
        read, _ = m.read.Load().(readOnly)
        if read.amended { // double check in case it changed before we got the lock
            // promote dirty to read
            read = readOnly{m: m.dirty}
            m.read.Store(read)
            m.dirty = nil
            m.misses = 0
        }
        m.mu.Unlock()
    }
    // iterate over read.m, passing each pair to the callback f
    for k, e := range read.m {
        v, ok := e.load()
        if !ok {
            continue
        }
        if !f(k, v) {
            break
        }
    }
}
A word of caution
Whenever dirty is (re)initialized, every non-deleted entry in read is copied into it. If new keys are inserted frequently, dirty keeps accumulating keys that read lacks, the hit rate of read drops, and lookups keep falling back to the mutex-protected path. Once the miss count reaches len(dirty), dirty is promoted to read, and the next insertion triggers the whole copy again. This cycle of copying and promotion is expensive and hurts overall performance.
Suitable scenarios
In short, sync.Map works best for workloads with many reads and many updates to existing keys, but few insertions of new keys.
Note: "many updates" specifically means updates to keys that already exist in read and are not marked deleted; that case is handled with an atomic update and never takes the lock.
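A small illustrative program for that fast path (all names here are just for demonstration): once the keys have been promoted into read, concurrent Store calls on those keys resolve through an atomic compare-and-swap and never touch the mutex.

package main

import (
    "fmt"
    "sync"
)

func main() {
    var m sync.Map
    // Pre-populate the keys; at this point they live only in dirty.
    for k := 0; k < 4; k++ {
        m.Store(k, 0)
    }
    // Range promotes dirty to read (see the Range analysis above),
    // so the updates below are served from read.
    m.Range(func(_, _ interface{}) bool { return true })

    var wg sync.WaitGroup
    for k := 0; k < 4; k++ {
        k := k
        wg.Add(1)
        go func() {
            defer wg.Done()
            // Updates to an existing, non-deleted key go through entry.tryStore,
            // an atomic CAS on the entry pointer, without locking mu.
            for i := 0; i < 1000; i++ {
                m.Store(k, i)
            }
        }()
    }
    wg.Wait()

    m.Range(func(k, v interface{}) bool {
        fmt.Println(k, v)
        return true
    })
}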
Golang has a built-in map type, but it is not thread-safe. Starting with Go 1.9, the standard library added sync.Map, a map designed for concurrent use.
The concurrency problem with plain map
Concurrent reads and writes on a plain map
func main() {
    m := make(map[int]int)
    go func() {
        for {
            _ = m[1] // read
        }
    }()
    go func() {
        for {
            m[2] = 2 // write
        }
    }()
    select {} // keep the main goroutine alive
}
This code reads the map from one goroutine while another goroutine keeps writing to it. Even though the two goroutines use different keys and no map growth is involved, the program still crashes:
fatal error: concurrent map read and map write
Lock + map
Can a plain map be made concurrency-safe? Yes: pair the map with a lock (sync.Mutex or sync.RWMutex) and wrap both in a new struct.
Define a type M that embeds the lock
type M struct {
    sync.RWMutex
    Map map[int]int
}
Run concurrent reads and writes
func main() {
    m := M{Map: make(map[int]int)}
    go func() {
        for {
            m.RLock()
            v := m.Map[2] // read
            fmt.Println(v)
            m.RUnlock()
        }
    }()
    go func() {
        for i := 1; i > 0; i++ {
            m.Lock()
            m.Map[2] = i // write
            m.Unlock()
        }
    }()
    select {}
}
With the read lock held while reading and the write lock held while writing, the program runs safely; the output looks like this:
...
1123
1124
1125
...
sync.Map
If wrapping a map with a lock already solves the concurrency problem, and an RWMutex (rather than a Mutex) further reduces the cost of locking for readers, why does sync.Map exist at all?
When the map holds a lot of data and many goroutines hammer it at once, they all fight over the same lock, and performance drops sharply. Java faces the same issue: HashMap is not thread-safe, so the JDK provides ConcurrentHashMap, whose classic answer is lock striping, several internal locks with each lock covering one segment of the table, so threads touching different segments do not contend. (More recent JDKs use even finer-grained per-bin locking.) So what strategy does sync.Map use to improve concurrent performance?
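For comparison, lock striping can also be hand-rolled in Go. The sketch below is hypothetical (not part of any standard library): it splits the key space across several independently locked shards, so goroutines touching different shards do not contend.

package main

import (
    "fmt"
    "hash/fnv"
    "sync"
)

const shardCount = 16

type shard struct {
    mu sync.RWMutex
    m  map[string]int
}

type shardedMap [shardCount]*shard

func newShardedMap() *shardedMap {
    var s shardedMap
    for i := range s {
        s[i] = &shard{m: make(map[string]int)}
    }
    return &s
}

// shardFor hashes the key to pick one of the shards.
func (s *shardedMap) shardFor(key string) *shard {
    h := fnv.New32a()
    h.Write([]byte(key))
    return s[h.Sum32()%shardCount]
}

func (s *shardedMap) Set(key string, v int) {
    sh := s.shardFor(key)
    sh.mu.Lock()
    sh.m[key] = v
    sh.mu.Unlock()
}

func (s *shardedMap) Get(key string) (int, bool) {
    sh := s.shardFor(key)
    sh.mu.RLock()
    defer sh.mu.RUnlock()
    v, ok := sh.m[key]
    return v, ok
}

func main() {
    sm := newShardedMap()
    sm.Set("hello", 1)
    fmt.Println(sm.Get("hello")) // 1 true
}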
The structure of sync.Map (based on Go 1.14.1)
type Map struct {
    // mu protects the dirty map
    mu Mutex
    // read holds the data served to readers; reading it never conflicts with writers
    read atomic.Value // readOnly
    // dirty holds the most recently written entries; when it is created, the non-deleted
    // entries from read are copied into it. It is a plain built-in map that gets written to,
    // so it must be accessed under mu.
    dirty map[interface{}]*entry
    // misses counts lookups that were not found in read and had to try dirty
    // (whether or not dirty had the key). When misses reaches len(dirty), dirty is
    // promoted to read and cleared, which restores fast reads.
    misses int
}
sync.Map keeps two overlapping data structures, read and dirty. read has type atomic.Value; its contents are retrieved with atomic.Value's Load method and asserted to a readOnly value:
read, _ := m.read.Load().(readOnly) // m is a sync.Map
So the concrete type behind read is readOnly, defined as follows.
type readOnly struct {
    // m is an ordinary built-in map, but it never needs a lock
    m map[interface{}]*entry
    // amended is true when sync.Map.dirty contains keys that are not in m
    amended bool
}
The amended flag says whether dirty holds data that readOnly.m does not; when a read misses in read and amended is true, the lookup continues in dirty.
Both readOnly.m and Map.dirty store values of type *entry, which wraps a single pointer p to the value the user stored.
type entry struct {
    p unsafe.Pointer // *interface{}
}
entry.p can be in one of three states:
nil: the entry has been deleted and m.dirty is nil
expunged: the entry has been deleted, m.dirty is non-nil, and the entry is absent from m.dirty (see the declaration below)
anything else: the entry is live; it is recorded in m.read, and also in m.dirty if dirty is non-nil
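The expunged sentinel is simply a unique pointer value declared in the same source file (lightly abridged from the Go 1.14 source):

// expunged is an arbitrary pointer that marks entries which have been
// deleted from read and are not present in dirty.
var expunged = unsafe.Pointer(new(interface{}))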
Although read and dirty overlap, they share the same *entry values, so the duplication costs only a pointer per key; even if the stored values are large, the extra memory is negligible.
That covers the data structures. Next, the four main methods: Load, Store, Delete, and Range.
Load
Load looks up the value for a given key.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
    // First load the readOnly snapshot from m.read
    read, _ := m.read.Load().(readOnly)
    // Look the key up in read's map
    e, ok := read.m[key]
    // If it is not in read and dirty has extra keys (read.amended is true),
    // we have to look in dirty, which requires the lock.
    if !ok && read.amended {
        m.mu.Lock()
        // Double check: another goroutine may have promoted dirty into read
        // while we were waiting for the lock. That can happen because the
        // check above and the Lock call are not one atomic step:
        // if !ok && read.amended {
        //     m.mu.Lock()
        // }
        // m.read itself stays safe because it is only ever replaced through
        // atomic operations, so we simply re-read it here.
        read, _ = m.read.Load().(readOnly)
        e, ok = read.m[key]
        // If the key is still not in m.read and dirty has extra keys, check dirty.
        if !ok && read.amended {
            e, ok = m.dirty[key]
            // Whether or not dirty had the key, count the miss.
            m.missLocked()
        }
        m.mu.Unlock()
    }
    if !ok {
        return nil, false
    }
    // entry.load atomically reads the pointer and returns the value it points to
    return e.load()
}
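The entry.load method called at the end is small: it reads the pointer atomically and filters out the two deleted states (paraphrased from the same source file):

func (e *entry) load() (value interface{}, ok bool) {
    p := atomic.LoadPointer(&e.p)
    if p == nil || p == expunged {
        // nil and expunged both mean "deleted"; report a miss.
        return nil, false
    }
    return *(*interface{})(p), true
}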
Map.missLocked is key to sync.Map's performance: it moves the data that currently lives behind the lock in dirty into the lock-free read.
func (m *Map) missLocked() {
    m.misses++ // count the miss
    if m.misses < len(m.dirty) {
        return
    }
    m.read.Store(readOnly{m: m.dirty}) // promote dirty to read
    m.dirty = nil                      // clear dirty
    m.misses = 0                       // reset the miss counter
}
Store
Store inserts or updates a key-value pair.
func (m *Map) Store(key, value interface{}) {
    // If m.read already holds the key and the entry is not marked expunged,
    // try to store directly (see entry.tryStore).
    // Note: if m.dirty also holds this key, both maps point at the same entry,
    // so m.dirty sees the new value too.
    read, _ := m.read.Load().(readOnly)
    if e, ok := read.m[key]; ok && e.tryStore(&value) {
        return
    }
    // Otherwise the key is missing from m.read or the entry is expunged
    m.mu.Lock()
    read, _ = m.read.Load().(readOnly)
    if e, ok := read.m[key]; ok { // read has the key
        if e.unexpungeLocked() { // was the entry marked expunged?
            // It was, which means it is missing from m.dirty; add it back.
            m.dirty[key] = e
        }
        // Point the entry at the new value
        e.storeLocked(&value)
    } else if e, ok := m.dirty[key]; ok { // only dirty has the key: update it
        e.storeLocked(&value)
    } else { // neither read nor dirty has the key: insert
        if !read.amended { // dirty has no extra keys yet; this is the first new key
            m.dirtyLocked() // copy the non-deleted entries from m.read into dirty
            m.read.Store(readOnly{m: read.m, amended: true})
        }
        m.dirty[key] = newEntry(value) // add the new entry to dirty
    }
    m.mu.Unlock()
}
Every Store also starts from read and only locks and touches dirty when it has to. But because inserting a new key can force a full copy of read into dirty (for example right after dirty was promoted to read and a new key arrives), Store can get expensive when read is large.
Delete
Delete removes a key-value pair.
func (m *Map) Delete(key interface{}) {
    read, _ := m.read.Load().(readOnly)
    e, ok := read.m[key]
    if !ok && read.amended {
        m.mu.Lock()
        read, _ = m.read.Load().(readOnly)
        e, ok = read.m[key]
        if !ok && read.amended {
            delete(m.dirty, key)
        }
        m.mu.Unlock()
    }
    if ok {
        e.delete()
    }
}
// If read has the key, the entry is "deleted" with an atomic operation rather than removed
func (e *entry) delete() (hadValue bool) {
    for {
        p := atomic.LoadPointer(&e.p)
        // Already nil or marked expunged: nothing to do
        if p == nil || p == expunged {
            return false
        }
        // Atomically set e.p to nil to mark the entry as deleted
        if atomic.CompareAndSwapPointer(&e.p, p, nil) {
            return true
        }
    }
}
Delete follows the same pattern as Store: start from read, and only if the key is absent from read while dirty has extra keys, take the lock and delete from dirty. Like Load and Store, it needs the double check.
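As an aside (beyond the Go 1.14 source this article follows): if the caller also needs the value that was removed, Go 1.15 added a LoadAndDelete method. A brief usage sketch:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var sm sync.Map
    sm.Store("b", 2)
    // LoadAndDelete (Go 1.15+) removes the key and reports whether it was present.
    if v, loaded := sm.LoadAndDelete("b"); loaded {
        fmt.Println("removed:", v)
    }
}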
Range
You cannot iterate over a sync.Map with for range; instead it provides a Range method that walks the map through a callback.
func (m *Map) Range(f func(key, value interface{}) bool) {
    read, _ := m.read.Load().(readOnly)
    // Does dirty hold keys that read does not?
    if read.amended {
        m.mu.Lock()
        // double check
        read, _ = m.read.Load().(readOnly)
        if read.amended {
            // promote dirty to read
            read = readOnly{m: m.dirty}
            m.read.Store(read)
            m.dirty = nil
            m.misses = 0
        }
        m.mu.Unlock()
    }
    // Iterate over read, which now includes everything dirty had
    for k, e := range read.m {
        v, ok := e.load()
        if !ok {
            continue
        }
        if !f(k, v) {
            break
        }
    }
}
What makes sync.Map fast
1. Space for time: two overlapping structures (read and dirty) reduce how often the lock is needed.
2. Reads go through read, so they never conflict with writers.
3. Dynamic adjustment: the misses counter keeps dirty from holding too much data that read lacks.
4. Double-checking after taking the lock guards against the non-atomic check-then-lock window.
5. Lazy deletion: deleting a key only marks the entry; the data is actually dropped when dirty is promoted (copied into read and cleared).
6. read is tried first for reads, updates, and deletes; those operations need no lock, which is where most of the speedup comes from.
An example of using sync.Map
func main() {
    var sm sync.Map
    // Unlike a plain map, different entries in the same sync.Map may use different
    // key and value types, as long as each key is hashable (comparable).
    sm.Store(1, "a")
    sm.Store("b", 2)
    sm.Store("c", 3)
    // Reading a key works much like a plain map lookup
    if v, ok := sm.Load("b"); ok {
        fmt.Println(v)
    }
    // Delete the entry for a key
    sm.Delete(1)
    // Range's callback receives each key and value and returns a bool:
    // return true to keep iterating, false to stop.
    sm.Range(func(key, value interface{}) bool {
        fmt.Println(key, value)
        return true
    })
}
Output (note that Range iteration order is not guaranteed)
2
b 2
c 3
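Besides Store, Load, Delete, and Range, sync.Map also offers LoadOrStore, which is handy for get-or-create patterns such as caches; a small example:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var cache sync.Map
    // LoadOrStore returns the existing value if the key is present,
    // otherwise it stores and returns the given value.
    v, loaded := cache.LoadOrStore("answer", 42)
    fmt.Println(v, loaded) // 42 false (newly stored)
    v, loaded = cache.LoadOrStore("answer", 100)
    fmt.Println(v, loaded) // 42 true (already present)
}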
The performance of sync.Map
The Go source tree ships test code under $GOROOT/src/sync:
map_reference_test.go: defines the mapInterface used by the tests; sync.Map, RWMutexMap, and DeepCopyMap implement it.
map_test.go: correctness tests for the three implementations.
map_bench_test.go: benchmarks comparing the three implementations.
On 小菜刀's machine, the benchmarks produce the following results.
$ go test -bench=LoadMostlyHits -benchmem
BenchmarkLoadMostlyHits/*sync_test.DeepCopyMap-8 80252629 13.5 ns/op 7 B/op 0 allocs/op
BenchmarkLoadMostlyHits/*sync_test.RWMutexMap-8 23025050 51.8 ns/op 7 B/op 0 allocs/op
BenchmarkLoadMostlyHits/*sync.Map-8 67718686 14.9 ns/op 7 B/op 0 allocs/op
$ go test -bench=LoadMostlyMisses -benchmem
BenchmarkLoadMostlyMisses/*sync_test.DeepCopyMap-8 128480215 11.2 ns/op 7 B/op 0 allocs/op
BenchmarkLoadMostlyMisses/*sync_test.RWMutexMap-8 23989224 47.4 ns/op 7 B/op 0 allocs/op
BenchmarkLoadMostlyMisses/*sync.Map-8 132403878 9.30 ns/op 7 B/op 0 allocs/op
$ go test -bench=LoadOrStoreBalanced -benchmem
BenchmarkLoadOrStoreBalanced/*sync_test.RWMutexMap-8 3909409 553 ns/op 99 B/op 2 allocs/op
BenchmarkLoadOrStoreBalanced/*sync.Map-8 3574923 368 ns/op 97 B/op 3 allocs/op
$ go test -bench=LoadOrStoreUnique -benchmem
BenchmarkLoadOrStoreUnique/*sync_test.RWMutexMap-8 2053806 647 ns/op 174 B/op 2 allocs/op
BenchmarkLoadOrStoreUnique/*sync.Map-8 2456720 577 ns/op 140 B/op 4 allocs/op
$ go test -bench=LoadOrStoreCollision -benchmem
BenchmarkLoadOrStoreCollision/*sync_test.DeepCopyMap-8 153679003 8.18 ns/op 0 B/op 0 allocs/op
BenchmarkLoadOrStoreCollision/*sync_test.RWMutexMap-8 13718534 87.9 ns/op 0 B/op 0 allocs/op
BenchmarkLoadOrStoreCollision/*sync.Map-8 175620835 7.08 ns/op 0 B/op 0 allocs/op
$ go test -bench=Range -benchmem
BenchmarkRange/*sync_test.DeepCopyMap-8 416906 2947 ns/op 0 B/op 0 allocs/op
BenchmarkRange/*sync_test.RWMutexMap-8 22784 52370 ns/op 16384 B/op 1 allocs/op
BenchmarkRange/*sync.Map-8 369955 3194 ns/op 0 B/op 0 allocs/op
$ go test -bench=AdversarialAlloc -benchmem
BenchmarkAdversarialAlloc/*sync_test.DeepCopyMap-8 1875109 646 ns/op 539 B/op 1 allocs/op
BenchmarkAdversarialAlloc/*sync_test.RWMutexMap-8 19454866 61.6 ns/op 8 B/op 1 allocs/op
BenchmarkAdversarialAlloc/*sync.Map-8 3712470 320 ns/op 51 B/op 1 allocs/op
$ go test -bench=AdversarialDelete -benchmem
BenchmarkAdversarialDelete/*sync_test.DeepCopyMap-8 6749067 215 ns/op 168 B/op 1 allocs/op
BenchmarkAdversarialDelete/*sync_test.RWMutexMap-8 16046545 76.9 ns/op 25 B/op 1 allocs/op
BenchmarkAdversarialDelete/*sync.Map-8 18678104 64.2 ns/op 18 B/op 1 allocs/op
How to choose a map
The benchmark results show that sync.Map is not a wholesale replacement for lock + map; it is designed so that, in certain concurrent workloads, it loses less performance than the locked alternative.
The package documentation ($GOROOT/src/sync/map.go) spells out when sync.Map is appropriate:
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
Two situations favor sync.Map:
Keys are written once and read many times (write-rarely, read-often workloads, such as append-only caches).
Multiple goroutines read, write, and overwrite entries for disjoint sets of keys.
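A minimal illustration of case (2), where each goroutine works only on its own key set (illustrative sketch):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var m sync.Map
    var wg sync.WaitGroup
    for w := 0; w < 4; w++ {
        w := w
        wg.Add(1)
        go func() {
            defer wg.Done()
            // Each worker reads and writes only its own keys ("w0-*", "w1-*", ...),
            // so the key sets of the goroutines are disjoint.
            for i := 0; i < 100; i++ {
                key := fmt.Sprintf("w%d-%d", w, i)
                m.Store(key, i)
                if v, ok := m.Load(key); ok && v.(int) != i {
                    panic("unexpected value")
                }
            }
        }()
    }
    wg.Wait()
}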
For reference, here is the JDK documentation for the ConcurrentHashMap class mentioned earlier.

Class ConcurrentHashMap<K,V>
- java.lang.Object
- java.util.AbstractMap<K,V>
- java.util.concurrent.ConcurrentHashMap<K,V>
- Type Parameters: K - the type of keys maintained by this map; V - the type of mapped values
- All Implemented Interfaces: Serializable, ConcurrentMap<K,V>, Map<K,V>

public class ConcurrentHashMap<K,V> extends AbstractMap<K,V> implements ConcurrentMap<K,V>, Serializable

A hash table supporting full concurrency of retrievals and high expected concurrency for updates. This class obeys the same functional specification as Hashtable, and includes versions of methods corresponding to each method of Hashtable. However, even though all operations are thread-safe, retrieval operations do not entail locking, and there is not any support for locking the entire table in a way that prevents all access. This class is fully interoperable with Hashtable in programs that rely on its thread safety but not on its synchronization details.

Retrieval operations (including get) generally do not block, so may overlap with update operations (including put and remove). Retrievals reflect the results of the most recently completed update operations holding upon their onset. (More formally, an update operation for a given key bears a happens-before relation with any (non-null) retrieval for that key reporting the updated value.) For aggregate operations such as putAll and clear, concurrent retrievals may reflect insertion or removal of only some entries. Similarly, Iterators, Spliterators and Enumerations return elements reflecting the state of the hash table at some point at or since the creation of the iterator/enumeration. They do not throw ConcurrentModificationException. However, iterators are designed to be used by only one thread at a time. Bear in mind that the results of aggregate status methods including size, isEmpty, and containsValue are typically useful only when a map is not undergoing concurrent updates in other threads. Otherwise the results of these methods reflect transient states that may be adequate for monitoring or estimation purposes, but not for program control.

The table is dynamically expanded when there are too many collisions (i.e., keys that have distinct hash codes but fall into the same slot modulo the table size), with the expected average effect of maintaining roughly two bins per mapping (corresponding to a 0.75 load factor threshold for resizing). There may be much variance around this average as mappings are added and removed, but overall, this maintains a commonly accepted time/space tradeoff for hash tables. However, resizing this or any other kind of hash table may be a relatively slow operation. When possible, it is a good idea to provide a size estimate as an optional initialCapacity constructor argument. An additional optional loadFactor constructor argument provides a further means of customizing initial table capacity by specifying the table density to be used in calculating the amount of space to allocate for the given number of elements. Also, for compatibility with previous versions of this class, constructors may optionally specify an expected concurrencyLevel as an additional hint for internal sizing. Note that using many keys with exactly the same hashCode() is a sure way to slow down performance of any hash table. To ameliorate impact, when keys are Comparable, this class may use comparison order among keys to help break ties.

A Set projection of a ConcurrentHashMap may be created (using newKeySet() or newKeySet(int)), or viewed (using keySet(Object) when only keys are of interest, and the mapped values are (perhaps transiently) not used or all take the same mapping value.

A ConcurrentHashMap can be used as a scalable frequency map (a form of histogram or multiset) by using LongAdder values and initializing via computeIfAbsent. For example, to add a count to a ConcurrentHashMap<String,LongAdder> freqs, you can use freqs.computeIfAbsent(k -> new LongAdder()).increment();

This class and its views and iterators implement all of the optional methods of the Map and Iterator interfaces.

Like Hashtable but unlike HashMap, this class does not allow null to be used as a key or value.

ConcurrentHashMaps support a set of sequential and parallel bulk operations that, unlike most Stream methods, are designed to be safely, and often sensibly, applied even with maps that are being concurrently updated by other threads; for example, when computing a snapshot summary of the values in a shared registry. There are three kinds of operation, each with four forms, accepting functions with Keys, Values, Entries, and (Key, Value) arguments and/or return values. Because the elements of a ConcurrentHashMap are not ordered in any particular way, and may be processed in different orders in different parallel executions, the correctness of supplied functions should not depend on any ordering, or on any other objects or values that may transiently change while computation is in progress; and except for forEach actions, should ideally be side-effect-free. Bulk operations on Map.Entry objects do not support method setValue.

- forEach: Perform a given action on each element. A variant form applies a given transformation on each element before performing the action.
- search: Return the first available non-null result of applying a given function on each element; skipping further search when a result is found.
- reduce: Accumulate each element. The supplied reduction function cannot rely on ordering (more formally, it should be both associative and commutative). There are five variants:
  - Plain reductions. (There is not a form of this method for (key, value) function arguments since there is no corresponding return type.)
  - Mapped reductions that accumulate the results of a given function applied to each element.
  - Reductions to scalar doubles, longs, and ints, using a given basis value.

These bulk operations accept a parallelismThreshold argument. Methods proceed sequentially if the current map size is estimated to be less than the given threshold. Using a value of Long.MAX_VALUE suppresses all parallelism. Using a value of 1 results in maximal parallelism by partitioning into enough subtasks to fully utilize the ForkJoinPool.commonPool() that is used for all parallel computations. Normally, you would initially choose one of these extreme values, and then measure performance of using in-between values that trade off overhead versus throughput.

The concurrency properties of bulk operations follow from those of ConcurrentHashMap: Any non-null result returned from get(key) and related access methods bears a happens-before relation with the associated insertion or update. The result of any bulk operation reflects the composition of these per-element relations (but is not necessarily atomic with respect to the map as a whole unless it is somehow known to be quiescent). Conversely, because keys and values in the map are never null, null serves as a reliable atomic indicator of the current lack of any result. To maintain this property, null serves as an implicit basis for all non-scalar reduction operations. For the double, long, and int versions, the basis should be one that, when combined with any other value, returns that other value (more formally, it should be the identity element for the reduction). Most common reductions have these properties; for example, computing a sum with basis 0 or a minimum with basis MAX_VALUE.

Search and transformation functions provided as arguments should similarly return null to indicate the lack of any result (in which case it is not used). In the case of mapped reductions, this also enables transformations to serve as filters, returning null (or, in the case of primitive specializations, the identity basis) if the element should not be combined. You can create compound transformations and filterings by composing them yourself under this "null means there is nothing there now" rule before using them in search or reduce operations.

Methods accepting and/or returning Entry arguments maintain key-value associations. They may be useful for example when finding the key for the greatest value. Note that "plain" Entry arguments can be supplied using new AbstractMap.SimpleEntry(k,v).

Bulk operations may complete abruptly, throwing an exception encountered in the application of a supplied function. Bear in mind when handling such exceptions that other concurrently executing functions could also have thrown exceptions, or would have done so if the first exception had not occurred.

Speedups for parallel compared to sequential forms are common but not guaranteed. Parallel operations involving brief functions on small maps may execute more slowly than sequential forms if the underlying work to parallelize the computation is more expensive than the computation itself. Similarly, parallelization may not lead to much actual parallelism if all processors are busy performing unrelated tasks.

All arguments to all task methods must be non-null.

This class is a member of the Java Collections Framework.