ConcurrentDictionary源码概读
ConcurrentDictionary的数据结构主要由Tables和Node组成,其中Tables包括桶(Node,节点)数组、局部锁(Local lock)、每个锁保护的元素数量(PerLock)。Node包含用户实际操作的key和value,以及为实现链表数据结构的下一个节点(Next Node)的引用和当前节点key的原始(未取正)散列值。
private class Tables
{
    /// <summary>
    /// The hash buckets; each slot is the head of a singly-linked list of nodes.
    /// </summary>
    internal readonly Node[] m_buckets;

    /// <summary>
    /// The striped locks; each lock guards a contiguous stripe of the bucket table.
    /// </summary>
    internal readonly object[] m_locks;

    /// <summary>
    /// The number of elements guarded by each lock.
    /// </summary>
    internal volatile int[] m_countPerLock;

    /// <summary>
    /// The comparer used for the keys.
    /// </summary>
    internal readonly IEqualityComparer<TKey> m_comparer;

    internal Tables(Node[] buckets, object[] locks, int[] countPerLock, IEqualityComparer<TKey> comparer)
    {
        m_buckets = buckets;
        m_locks = locks;
        m_countPerLock = countPerLock;
        m_comparer = comparer;
    }
}

private class Node
{
    internal TKey m_key;            // the user's key
    internal TValue m_value;        // the user's value
    internal volatile Node m_next;  // next node in the bucket's singly-linked list
    internal int m_hashcode;        // raw (not yet made non-negative) hash code of m_key

    internal Node(TKey key, TValue value, int hashcode, Node next)
    {
        m_key = key;
        m_value = value;
        m_next = next;
        m_hashcode = hashcode;
    }
}
当new一个ConcurrentDictionary时,默认调用无参构造函数,给定默认的并发数量(Environment.ProcessorCount)、默认的键比较器、默认的容量(桶的初始容量,为31)。该容量是在小字典的内存占用与构造大字典时的扩容次数之间权衡得到的,并且不能被较小的素数整除。之后再处理容量与并发数的关系、容量与锁的关系以及每个锁的最大元素数。将桶、锁对象、每个锁保护的元素计数封装在一个对象中,并初始化。
/// <summary>
/// Initializes a new instance of the ConcurrentDictionary class that is empty, has the
/// default concurrency level and the default initial capacity, and uses the default
/// comparer for the key type.
/// </summary>
public ConcurrentDictionary() :
    this(DefaultConcurrencyLevel, DEFAULT_CAPACITY, true, EqualityComparer<TKey>.Default) { }

/// <summary>
/// The constructor that the parameterless constructor actually delegates to.
/// </summary>
/// <param name="concurrencyLevel">The estimated number of threads that will update the dictionary concurrently. Must be at least 1.</param>
/// <param name="capacity">The initial number of buckets. Must not be negative.</param>
/// <param name="growLockArray">Whether to dynamically grow the size of the striped lock array.</param>
/// <param name="comparer">The comparer to use for the keys. Must not be null.</param>
internal ConcurrentDictionary(int concurrencyLevel, int capacity, bool growLockArray, IEqualityComparer<TKey> comparer)
{
    if (concurrencyLevel < 1)
    {
        throw new ArgumentOutOfRangeException("concurrencyLevel", GetResource("ConcurrentDictionary_ConcurrencyLevelMustBePositive"));
    }
    if (capacity < 0)
    {
        throw new ArgumentOutOfRangeException("capacity", GetResource("ConcurrentDictionary_CapacityMustNotBeNegative"));
    }
    if (comparer == null) throw new ArgumentNullException("comparer");

    // The capacity should be at least as large as the concurrency level;
    // otherwise some lock objects would never guard any bucket.
    if (capacity < concurrencyLevel)
    {
        capacity = concurrencyLevel;
    }

    // One lock object per expected concurrent writer.
    object[] locks = new object[concurrencyLevel];
    for (int i = 0; i < locks.Length; i++)
    {
        locks[i] = new object();
    }

    // Per-lock element counts.
    int[] countPerLock = new int[locks.Length];

    // The bucket array; each bucket is the head of a singly-linked list of nodes.
    Node[] buckets = new Node[capacity];

    // Buckets, locks and counts are packaged into one Tables instance so the whole
    // internal state can be published/replaced with a single atomic reference write.
    m_tables = new Tables(buckets, locks, countPerLock, comparer);

    // Whether to dynamically grow the striped lock array.
    m_growLockArray = growLockArray;

    // The (estimated) maximum number of elements per lock before a resize is triggered;
    // by default the buckets are divided evenly among the locks.
    m_budget = buckets.Length / locks.Length;
}
当调用TryAdd时,实际调用的是内部公共方法TryAddInternal。如果存在key,则始终返回false,如果updateIfExists为true,则更新value,如果不存在key,则始终返回true,并且添加value。详细解读见代码。
/// <summary>
/// Attempts to add the specified key and value to the dictionary.
/// </summary>
/// <param name="key">The key of the element to add. Must not be null.</param>
/// <param name="value">The value of the element to add; may be null for reference types.</param>
/// <returns>true if the key/value pair was added; false if the key already exists.</returns>
/// 异常:
// T:System.ArgumentNullException:
// key 为 null。
// T:System.OverflowException:
// 字典中已包含元素的最大数量(System.Int32.MaxValue)。
public bool TryAdd(TKey key, TValue value)
{
    if (key == null) throw new ArgumentNullException("key");
    TValue dummy;
    return TryAddInternal(key, value, false, true, out dummy);
}

/// <summary>
/// Shared implementation for add and update.
/// If the key exists: always returns false, and updates the value when updateIfExists is true.
/// If the key does not exist: always returns true, and adds the key/value pair.
/// </summary>
[SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
private bool TryAddInternal(TKey key, TValue value, bool updateIfExists, bool acquireLock, out TValue resultingValue)
{
    while (true)
    {
        // Bucket index and lock index for this key.
        int bucketNo, lockNo;
        int hashcode;

        Tables tables = m_tables;
        IEqualityComparer<TKey> comparer = tables.m_comparer;
        hashcode = comparer.GetHashCode(key);
        GetBucketAndLockNo(hashcode, out bucketNo, out lockNo, tables.m_buckets.Length, tables.m_locks.Length);

        bool resizeDesired = false;
        bool lockTaken = false;
#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
        bool resizeDueToCollisions = false;
#endif // !FEATURE_CORECLR
#endif

        try
        {
            if (acquireLock)
                // Acquire the lock that guards this bucket's stripe. Keys hash to
                // different lock objects, which lowers the chance of lock contention
                // between threads operating on different keys.
                Monitor.Enter(tables.m_locks[lockNo], ref lockTaken);

            // If the tables were replaced (resized) before we took the lock, we may have
            // locked the wrong stripe; retry against the current tables.
            if (tables != m_tables)
            {
                continue;
            }

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            int collisionCount = 0;
#endif // !FEATURE_CORECLR
#endif

            // Try to find this key in the bucket.
            Node prev = null;
            for (Node node = tables.m_buckets[bucketNo]; node != null; node = node.m_next)
            {
                Assert((prev == null && node == tables.m_buckets[bucketNo]) || prev.m_next == node);

                // The key already exists.
                if (comparer.Equals(node.m_key, key))
                {
                    // Update the pair's value if the caller asked for it.
                    if (updateIfExists)
                    {
                        // If TValue can be written atomically, assign in place.
                        if (s_isValueWriteAtomic)
                        {
                            node.m_value = value;
                        }
                        // Otherwise build a replacement node, because lock-free reads may
                        // happen concurrently and must never observe a torn write.
                        else
                        {
                            // The new node points at the old node's successor...
                            Node newNode = new Node(node.m_key, value, hashcode, node.m_next);
                            if (prev == null)
                            {
                                tables.m_buckets[bucketNo] = newNode;
                            }
                            else
                            {
                                // ...and the predecessor now points at the new node,
                                // completing the old/new node swap in the linked list.
                                prev.m_next = newNode;
                            }
                        }
                        resultingValue = value;
                    }
                    else
                    {
                        resultingValue = node.m_value;
                    }
                    return false;
                }

                // When the loop ends, prev is the last node (node.m_next == null).
                prev = node;

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
                collisionCount++;
#endif // !FEATURE_CORECLR
#endif
            }

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            if(collisionCount > HashHelpers.HashCollisionThreshold && HashHelpers.IsWellKnownEqualityComparer(comparer))
            {
                resizeDesired = true;
                resizeDueToCollisions = true;
            }
#endif // !FEATURE_CORECLR
#endif

            // The key was not found: insert the pair at the head of the bucket with a
            // volatile write so lock-free readers only ever see a fully-initialized node.
            Volatile.Write<Node>(ref tables.m_buckets[bucketNo], new Node(key, value, hashcode, tables.m_buckets[bucketNo]));
            checked
            {
                // Count the element against lock #lockNo; 'checked' turns a counter
                // overflow into an OverflowException instead of silently wrapping.
                tables.m_countPerLock[lockNo]++;
            }

            //
            // If the number of elements guarded by this lock has exceeded the budget, resize the bucket table.
            // It is also possible that GrowTable will increase the budget but won't resize the bucket table.
            // That happens if the bucket table is found to be poorly utilized due to a bad hash function.
            //
            if (tables.m_countPerLock[lockNo] > m_budget)
            {
                resizeDesired = true;
            }
        }
        finally
        {
            if (lockTaken)
                // Release lock #lockNo.
                Monitor.Exit(tables.m_locks[lockNo]);
        }

        //
        // The fact that we got here means that we just performed an insertion. If necessary, we will grow the table.
        //
        // Concurrency notes:
        // - Notice that we are not holding any locks when calling GrowTable. This is necessary to prevent deadlocks.
        // - As a result, it is possible that GrowTable will be called unnecessarily. But, GrowTable will obtain lock 0
        //   and then verify that the table we passed to it as the argument is still the current table.
        //
        if (resizeDesired)
        {
#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            if (resizeDueToCollisions)
            {
                GrowTable(tables, (IEqualityComparer<TKey>)HashHelpers.GetRandomizedEqualityComparer(comparer), true, m_keyRehashCount);
            }
            else
#endif // !FEATURE_CORECLR
            {
                GrowTable(tables, tables.m_comparer, false, m_keyRehashCount);
            }
#else
            GrowTable(tables, tables.m_comparer, false, m_keyRehashCount);
#endif
        }

        resultingValue = value;
        return true;
    }
}
需要特别指出的是,ConcurrentDictionary在插入、更新、获取键值对时,遍历桶内链表是直接调用比较器的Equals来判断key是否相等(默认比较器下,未重写Equals的引用类型即为引用相等);而Dictionary在调用Equals之前会先比较节点中缓存的散列值,只有散列值与Equals都一致才视为相等。
/// <summary>
/// Attempts to get the value associated with the specified key from the <see
/// cref="ConcurrentDictionary{TKey,TValue}"/>.
/// </summary>
/// <param name="key">The key of the value to get.</param>
/// <param name="value">When this method returns, contains the value found for
/// <paramref name="key"/>, or the default value of <typeparamref name="TValue"/>
/// if the operation failed.</param>
/// <returns>true if the key was found in the <see cref="ConcurrentDictionary{TKey,TValue}"/>;
/// otherwise, false.</returns>
/// <exception cref="T:System.ArgumentNullException"><paramref name="key"/> is a null reference
/// (Nothing in Visual Basic).</exception>
[SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
public bool TryGetValue(TKey key, out TValue value)
{
    if (key == null) throw new ArgumentNullException("key");

    int bucketNo, lockNoUnused;

    // Capture m_tables in a local: the field is replaced by a brand-new Tables
    // instance on every table resize.
    Tables tables = m_tables;
    IEqualityComparer<TKey> comparer = tables.m_comparer;
    GetBucketAndLockNo(comparer.GetHashCode(key), out bucketNo, out lockNoUnused, tables.m_buckets.Length, tables.m_locks.Length);

    // No lock is needed for reading. The Volatile.Read ensures that loads of the
    // node's fields cannot be reordered before the load from the bucket array.
    for (Node current = Volatile.Read<Node>(ref tables.m_buckets[bucketNo]); current != null; current = current.m_next)
    {
        // Key equality is delegated to the comparer's Equals.
        if (comparer.Equals(current.m_key, key))
        {
            value = current.m_value;
            return true;
        }
    }

    value = default(TValue);
    return false;
}
其它一些需要知道的内容,比如默认并发数、如何为指定key计算桶号和锁号等
#if !FEATURE_CORECLR
[NonSerialized]
#endif
private volatile Tables m_tables; // Internal tables of the dictionary

// NOTE: this is only used for compat reasons to serialize the comparer.
// This should not be accessed from anywhere else outside of the serialization methods.
internal IEqualityComparer<TKey> m_comparer;

#if !FEATURE_CORECLR
[NonSerialized]
#endif
private readonly bool m_growLockArray; // Whether to dynamically increase the size of the striped lock

// How many times we resized because of collisions.
// This is used to make sure we don't resize the dictionary because of multi-threaded Add() calls
// that generate collisions. GrowTable() should be the only place that changes this.
#if !FEATURE_CORECLR
// The field should have been marked as NonSerialized, but because we shipped it without that attribute in 4.5.1
// we can't add it back without breaking compat. To maximize compat we are going to keep the OptionalField attribute.
// This will prevent cases where the field was not serialized.
[OptionalField]
#endif
private int m_keyRehashCount;

#if !FEATURE_CORECLR
[NonSerialized]
#endif
private int m_budget; // The maximum number of elements per lock before a resize operation is triggered

#if !FEATURE_CORECLR // These fields are not used in CoreCLR
private KeyValuePair<TKey, TValue>[] m_serializationArray; // Used for custom serialization
private int m_serializationConcurrencyLevel; // used to save the concurrency level in serialization
private int m_serializationCapacity; // used to save the capacity in serialization
#endif

// The default capacity, i.e. the initial # of buckets. When choosing this value, we are making
// a trade-off between the size of a very small dictionary, and the number of resizes when
// constructing a large dictionary. Also, the capacity should not be divisible by a small prime.
private const int DEFAULT_CAPACITY = 31;

// The maximum size of the striped lock that will not be exceeded when locks are automatically
// added as the dictionary grows. However, the user is allowed to exceed this limit by passing
// a concurrency level larger than MAX_LOCK_NUMBER into the constructor.
private const int MAX_LOCK_NUMBER = 1024;

private const int PROCESSOR_COUNT_REFRESH_INTERVAL_MS = 30000; // How often to refresh the count, in milliseconds.
private static volatile int s_processorCount; // The last count seen.
private static volatile int s_lastProcessorCountRefreshTicks; // The last time we refreshed.

/// <summary>
/// Gets the number of available processors, re-querying the OS at most once every
/// PROCESSOR_COUNT_REFRESH_INTERVAL_MS milliseconds and caching the result.
/// </summary>
private static int ProcessorCount
{
    get
    {
        int now = Environment.TickCount;
        int procCount = s_processorCount;
        if (procCount == 0 || (now - s_lastProcessorCountRefreshTicks) >= PROCESSOR_COUNT_REFRESH_INTERVAL_MS)
        {
            s_processorCount = procCount = Environment.ProcessorCount;
            s_lastProcessorCountRefreshTicks = now;
        }

        Contract.Assert(procCount > 0 && procCount <= 64,
            "Processor count not within the expected range (1 - 64).");

        return procCount;
    }
}

// Whether TValue is a type that can be written atomically (i.e., with no danger of torn reads)
private static readonly bool s_isValueWriteAtomic = IsValueWriteAtomic();

/// <summary>
/// The number of concurrent writes for which to optimize by default.
/// </summary>
private static int DefaultConcurrencyLevel
{
    get { return ProcessorCount; }
}
/// <summary>
/// Replaces the bucket table with a larger one. To prevent multiple threads from resizing the
/// table as a result of races, the Tables instance that holds the table of buckets deemed too
/// small is passed in as an argument to GrowTable(). GrowTable() obtains a lock, and then checks
/// whether the Tables instance has been replaced in the meantime or not.
/// The <paramref name="rehashCount"/> will be used to ensure that we don't do two subsequent resizes
/// because of a collision.
/// </summary>
private void GrowTable(Tables tables, IEqualityComparer<TKey> newComparer, bool regenerateHashKeys, int rehashCount)
{
    int locksAcquired = 0;
    try
    {
        // The thread that first obtains m_locks[0] will be the one doing the resize operation.
        AcquireLocks(0, 1, ref locksAcquired);

        if (regenerateHashKeys && rehashCount == m_keyRehashCount)
        {
            // This method is called with regenerateHashKeys==true when we detected
            // more than HashHelpers.HashCollisionThreshold collisions when adding a new element.
            // In that case we are in the process of switching to another (randomized) comparer
            // and we have to re-hash all the keys in the table.
            // We are only going to do this if we did not just rehash the entire table while waiting for the lock.
            tables = m_tables;
        }
        else
        {
            // If we don't require a regeneration of hash keys we want to make sure we don't do work when
            // we don't have to.
            if (tables != m_tables)
            {
                // We assume that since the table reference is different, it was already resized (or the budget
                // was adjusted). If we ever decide to do table shrinking, or replace the table for other reasons,
                // we will have to revisit this logic.
                return;
            }

            // Compute the (approx.) total size. Use an Int64 accumulation variable to avoid an overflow.
            long approxCount = 0;
            for (int i = 0; i < tables.m_countPerLock.Length; i++)
            {
                approxCount += tables.m_countPerLock[i];
            }

            //
            // If the bucket array is too empty, double the budget instead of resizing the table.
            //
            if (approxCount < tables.m_buckets.Length / 4)
            {
                m_budget = 2 * m_budget;
                if (m_budget < 0)
                {
                    // Doubling overflowed; pin the budget at the maximum.
                    m_budget = int.MaxValue;
                }
                return;
            }
        }

        // Compute the new table size. We find the smallest integer larger than twice the previous table size, and not divisible by
        // 2, 3, 5 or 7. We can consider a different table-sizing policy in the future.
        int newLength = 0;
        bool maximizeTableSize = false;
        try
        {
            checked
            {
                // Double the size of the buckets table and add one, so that we have an odd integer.
                newLength = tables.m_buckets.Length * 2 + 1;

                // Now, we only need to check odd integers, and find the first that is not divisible
                // by 3, 5 or 7.
                while (newLength % 3 == 0 || newLength % 5 == 0 || newLength % 7 == 0)
                {
                    newLength += 2;
                }

                Assert(newLength % 2 != 0);

                if (newLength > Array.MaxArrayLength)
                {
                    maximizeTableSize = true;
                }
            }
        }
        catch (OverflowException)
        {
            maximizeTableSize = true;
        }

        if (maximizeTableSize)
        {
            newLength = Array.MaxArrayLength;

            // We want to make sure that GrowTable will not be called again, since table is at the maximum size.
            // To achieve that, we set the budget to int.MaxValue.
            //
            // (There is one special case that would allow GrowTable() to be called in the future:
            // calling Clear() on the ConcurrentDictionary will shrink the table and lower the budget.)
            m_budget = int.MaxValue;
        }

        // Now acquire all other locks for the table.
        AcquireLocks(1, tables.m_locks.Length, ref locksAcquired);

        object[] newLocks = tables.m_locks;

        // Add more locks.
        if (m_growLockArray && tables.m_locks.Length < MAX_LOCK_NUMBER)
        {
            newLocks = new object[tables.m_locks.Length * 2];
            Array.Copy(tables.m_locks, newLocks, tables.m_locks.Length);

            for (int i = tables.m_locks.Length; i < newLocks.Length; i++)
            {
                newLocks[i] = new object();
            }
        }

        Node[] newBuckets = new Node[newLength];
        int[] newCountPerLock = new int[newLocks.Length];

        // Copy all data into a new table, creating new nodes for all elements.
        for (int i = 0; i < tables.m_buckets.Length; i++)
        {
            Node current = tables.m_buckets[i];
            while (current != null)
            {
                Node next = current.m_next;
                int newBucketNo, newLockNo;
                int nodeHashCode = current.m_hashcode;

                if (regenerateHashKeys)
                {
                    // Recompute the hash from the key.
                    nodeHashCode = newComparer.GetHashCode(current.m_key);
                }

                GetBucketAndLockNo(nodeHashCode, out newBucketNo, out newLockNo, newBuckets.Length, newLocks.Length);

                newBuckets[newBucketNo] = new Node(current.m_key, current.m_value, nodeHashCode, newBuckets[newBucketNo]);

                checked
                {
                    newCountPerLock[newLockNo]++;
                }

                current = next;
            }
        }

        // If this resize regenerated the hashkeys, increment the count.
        if (regenerateHashKeys)
        {
            // We use unchecked here because we don't want to throw an exception if
            // an overflow happens.
            unchecked
            {
                m_keyRehashCount++;
            }
        }

        // Adjust the budget.
        m_budget = Math.Max(1, newBuckets.Length / newLocks.Length);

        // Replace tables with the new versions.
        m_tables = new Tables(newBuckets, newLocks, newCountPerLock, newComparer);
    }
    finally
    {
        // Release all locks that we took earlier.
        ReleaseLocks(0, locksAcquired);
    }
}

/// <summary>
/// Computes the bucket index and lock index for the given hash code.
/// </summary>
/// <param name="hashcode">The key's hash code.</param>
/// <param name="bucketNo">Receives the bucket index.</param>
/// <param name="lockNo">Receives the lock index.</param>
/// <param name="bucketCount">The number of buckets.</param>
/// <param name="lockCount">The number of locks.</param>
private void GetBucketAndLockNo(int hashcode, out int bucketNo, out int lockNo, int bucketCount, int lockCount)
{
    // Strip the sign bit so the modulo result is always non-negative and < bucketCount.
    bucketNo = (hashcode & 0x7fffffff) % bucketCount;
    // Buckets whose index is congruent modulo lockCount share the same (striped) lock.
    lockNo = bucketNo % lockCount;

    Assert(bucketNo >= 0 && bucketNo < bucketCount);
    Assert(lockNo >= 0 && lockNo < lockCount);
}

/// <summary>
/// Determines whether type TValue can be written atomically.
/// </summary>
private static bool IsValueWriteAtomic()
{
    Type valueType = typeof(TValue);

    //
    // Section 12.6.6 of ECMA CLI explains which types can be read and written atomically without
    // the risk of tearing.
    //
    // See http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-335.pdf
    //
    if (valueType.IsClass)
    {
        return true;
    }
    switch (Type.GetTypeCode(valueType))
    {
        case TypeCode.Boolean:
        case TypeCode.Byte:
        case TypeCode.Char:
        case TypeCode.Int16:
        case TypeCode.Int32:
        case TypeCode.SByte:
        case TypeCode.Single:
        case TypeCode.UInt16:
        case TypeCode.UInt32:
            return true;
        case TypeCode.Int64:
        case TypeCode.Double:
        case TypeCode.UInt64:
            // 64-bit values are only written atomically on a 64-bit platform.
            return IntPtr.Size == 8;
        default:
            return false;
    }
}
ConcurrentDictionary源码概读的更多相关文章
- Spark 源码浅读-SparkSubmit
Spark 源码浅读-任务提交SparkSubmit main方法 main方法主要用于初始化日志,然后接着调用doSubmit方法. override def main(args: Array[St ...
- spring-cloud-square源码速读(spring-cloud-square-okhttp篇)
欢迎访问我的GitHub https://github.com/zq2599/blog_demos 内容:所有原创文章分类汇总及配套源码,涉及Java.Docker.Kubernetes.DevOPS ...
- spring-cloud-square源码速读(retrofit + okhttp篇)
欢迎访问我的GitHub 这里分类和汇总了欣宸的全部原创(含配套源码):https://github.com/zq2599/blog_demos spring-cloud-square系列文章 五分钟 ...
- Handlebars模板引擎中的each嵌套及源码浅读
若显示效果不佳,可移步到愚安的小窝 Handlebars模板引擎作为时下最流行的模板引擎之一,已然在开发中为我们提供了无数便利.作为一款无语义的模板引擎,Handlebars只提供极少的helper函 ...
- [源码分析]读写锁ReentrantReadWriteLock
一.简介 读写锁. 读锁之间是共享的. 写锁是独占的. 首先声明一点: 我在分析源码的时候, 把jdk源码复制出来进行中文的注释, 有时还进行编译调试什么的, 为了避免和jdk原生的类混淆, 我在类前 ...
- 【源码分析】HashMap源码再读-基于Java8
最近工作不是太忙,准备再读读一些源码,想来想去,还是先从JDK的源码读起吧,毕竟很久不去读了,很多东西都生疏了.当然,还是先从炙手可热的HashMap,每次读都会有一些收获.当然,JDK8对HashM ...
- AQS源码泛读,梳理设计流程(jdk8)
一.AQS介绍 AQS(AbstractQueuedSynchronizer)抽象队列同步器,属于多线程编程的基本工具:JDK对其定义得很详细,并提供了多种常用的工具类(重入锁,读写锁,信号量,Cyc ...
- MyBatis 之源码浅读
环境简介与入口 记录一下尝试阅读Mybatis源码的过程,这篇笔记是我一边读,一遍记录下来的,虽然内容也不多,对Mybatis整体的架构体系也没有摸的很清楚,起码也能把这个过程整理下来,这也是我比较喜 ...
- 读源码【读mybatis的源码的思路】
✿ 需要掌握的编译器知识 ★ 编译器为eclipse为例子 调试准备工作(步骤:Window -> Show View ->...): □ 打开调试断点Breakpoint: □ 打开变量 ...
随机推荐
- webdriervAPI(多表单切换)
讲三个方法 driver.switch_to.frame("第一个iframe标签属性值") driver.switch_to.frame(" 第二个iframe标签属性 ...
- 记录下关于RabbitMQ常用知识点(持续更新)
1.端口及说明: 4369 -- erlang发现口 5672 --client端通信口 15672 -- 管理界面ui端口 25672 -- server间内部通信口 举例说明 我们访问Rabbit ...
- 对ysoserial工具及java反序列化的一个阶段性理解【未完成】
经过一段时间的琢磨与反思,以及重读了大量之前看不懂的反序列化文章,目前为止算是对java反序列化这块有了一个阶段性的小理解. 目前为止,发送的所有java反序列化的漏洞中.主要需要两个触发条件: 1. ...
- NDK学习笔记-多线程与生产消费模式
在做NDK开发的时候,很多情况下都是需要使用多线程的,一方面是提高程序运行效率,另一方面就是防止主线程阻塞 C的多线程 在C语言里,可以通过对于POSIX标准的运用,使得C语言执行多线程 提高程序的执 ...
- 最新 完美世界java校招面经 (含整理过的面试题大全)
从6月到10月,经过4个月努力和坚持,自己有幸拿到了网易雷火.京东.去哪儿.完美世界等10家互联网公司的校招Offer,因为某些自身原因最终选择了完美世界.6.7月主要是做系统复习.项目复盘.Leet ...
- CQRS1
CQRS之旅——旅程1(我们的领域:Contoso会议管理系统) 旅程1:我们的领域:Contoso会议管理系统 起点:我们从哪里来,我们带来了什么,谁将与我们同行?“ 只要前进,我愿意去任何地方 ...
- vue项目富文本编辑器vue-quill-editor之自定义图片上传
使用富文本编辑器的第一步肯定是先安装依赖 npm i vue-quill-editor 1.如果按照官网富文本编辑器中的图片上传是将图片转为base64格式的,如果需要上传图片到自己的服务器,需要修改 ...
- Rocketmq-简单部署
一.准备环境 1.系统:Centos7.3(无硬性要求) 2. jdk:1.8 3.maven:3.5(无硬性要求) 4.git 5.rocketmq 4.2 二.环境部署 1.jdk1.8以及mav ...
- eXosip的register注册
转载于:http://blog.sina.com.cn/s/blog_4868f98601018ioh.html 这个测试程序是从eXosip原有的测试程序改造的.原程序是tools 目录下的 sip ...
- Vue代码分割懒加载的实现方法
什么是懒加载 懒加载也叫延迟加载,即在需要的时候进行加载,随用随载. 为什么需要懒加载 在单页应用中,如果没有应用懒加载,运用webpack打包后的文件将会异常的大,造成进入首页时,需要加载的内容过多 ...