ConcurrentDictionary的数据结构主要由Tables和Node组成,其中Tables包括桶(Node,节点)数组、局部锁(Local lock)、每个锁保护的元素数量(PerLock)。Node包含用户实际操作的key和value,以及为实现链表数据结构的下一个节点(Next Node)的引用和当前节点key的原始(未取正)散列值。以及其它一些标识。

  1. private class Tables
  2. {
  3. /// <summary>
  4. /// 每个桶的单链表
  5. /// </summary>
  6. internal readonly Node[] m_buckets;
  7.  
  8. /// <summary>
  9. /// 锁数组,每个锁都锁住table的一部分
  10. /// </summary>
  11. internal readonly object[] m_locks;
  12.  
  13. /// <summary>
  14. /// 每个锁保护的元素的数量
  15. /// </summary>
  16. internal volatile int[] m_countPerLock;
  17.  
  18. /// <summary>
  19. /// key的比较器
  20. /// </summary>
  21. internal readonly IEqualityComparer<TKey> m_comparer;
  22.  
  23. internal Tables(Node[] buckets, object[] locks, int[] countPerLock, IEqualityComparer<TKey> comparer)
  24. {
  25. m_buckets = buckets;
  26. m_locks = locks;
  27. m_countPerLock = countPerLock;
  28. m_comparer = comparer;
  29. }
  30. }
  31.  
  32. private class Node
  33. {
  34. internal TKey m_key;
  35. internal TValue m_value;
  36. internal volatile Node m_next;
  37. internal int m_hashcode;
  38.  
  39. internal Node(TKey key, TValue value, int hashcode, Node next)
  40. {
  41. m_key = key;
  42. m_value = value;
  43. m_next = next;
  44. m_hashcode = hashcode;
  45. }
  46. }

  当new一个ConcurrentDictionary时,默认调用无参构造函数,给定默认的并发数量(Environment.ProcessorCount)、默认的键比较器、默认的容量(桶的初始容量,为31),该容量是经过权衡得到,且不能被较小的素数(2、3、5、7)整除。之后再处理容量与并发数的关系、容量与锁的关系以及每个锁的最大元素数。将桶、锁对象、锁保护封装在一个对象中,并初始化。

  1. //初始化 ConcurrentDictionary 类的新实例,
  2. //该实例为空,具有默认的并发级别和默认的初始容量,并为键类型使用默认比较器。
  3. public ConcurrentDictionary() :
  4. this(DefaultConcurrencyLevel, DEFAULT_CAPACITY, true, EqualityComparer<TKey>.Default) { }
  5.  
  6. /// <summary>
  7. /// 无参构造函数真正调用的函数
  8. /// </summary>
  9. /// <param name="concurrencyLevel">并发线程的可能数量(更改字典的线程可能数量)</param>
  10. /// <param name="capacity">容量</param>
  11. /// <param name="growLockArray">是否动态增加 striped lock 的大小</param>
  12. /// <param name="comparer">比较器</param>
  13. internal ConcurrentDictionary(int concurrencyLevel, int capacity, bool growLockArray, IEqualityComparer<TKey> comparer)
  14. {
  15. if (concurrencyLevel < 1)
  16. {
  17. throw new ArgumentOutOfRangeException("concurrencyLevel", GetResource("ConcurrentDictionary_ConcurrencyLevelMustBePositive"));
  18. }
  19. if (capacity < 0)
  20. {
  21. throw new ArgumentOutOfRangeException("capacity", GetResource("ConcurrentDictionary_CapacityMustNotBeNegative"));
  22. }
  23. if (comparer == null) throw new ArgumentNullException("comparer");
  24.  
  25. //容量应当至少与并发数一致,否则会有锁对象浪费
  26. if (capacity < concurrencyLevel)
  27. {
  28. capacity = concurrencyLevel;
  29. }
  30.  
  31. //锁对象数组,大小为 并发线程的可能数量
  32. object[] locks = new object[concurrencyLevel];
  33. for (int i = 0; i < locks.Length; i++)
  34. {
  35. locks[i] = new object();
  36. }
  37.  
  38. //每个锁保护的元素的数量
  39. int[] countPerLock = new int[locks.Length];
  40. //单链表中的节点,表示特定的哈希存储桶(桶:Node类型的数组)。
  41. Node[] buckets = new Node[capacity];
  42. //可以保持字典内部状态的表,将桶、锁对象、锁保护封装在一个对象中,以便一次原子操作
  43. m_tables = new Tables(buckets, locks, countPerLock, comparer);
  44. //是否动态增加 striped lock 的大小
  45. m_growLockArray = growLockArray;
  46. //在调整大小操作被触发之前,每个锁可锁住的最大(预计)元素数
  47. //默认按锁个数平均分配,即Node总个数除以锁总个数
  48. m_budget = buckets.Length / locks.Length;
  49. }

  当调用TryAdd时,实际调用的是内部公共方法TryAddInternal。如果存在key,则始终返回false,如果updateIfExists为true,则更新value,如果不存在key,则始终返回true,并且添加value。详细解读见代码。

  1. /// <summary>
  2. /// 尝试将指定的键和值添加到字典
  3. /// </summary>
  4. /// <param name="key">要添加的元素的键</param>
  5. /// <param name="value">要添加的元素的值。对于引用类型,该值可以是空引用</param>
  6. /// <returns>键值对添加成功则返回true,否则false</returns>
  7. /// 异常:
  8. // T:System.ArgumentNullException:
  9. // key 为 null。
  10. // T:System.OverflowException:
  11. // 字典中已包含元素的最大数量(System.Int32.MaxValue)。
  12. public bool TryAdd(TKey key, TValue value)
  13. {
  14. if (key == null) throw new ArgumentNullException("key");
  15. TValue dummy;
  16. return TryAddInternal(key, value, false, true, out dummy);
  17. }
  18.  
  19. /// <summary>
  20. /// 对字典添加和修改的内部公共方法
  21. /// 如果存在key,则始终返回false,如果updateIfExists为true,则更新value
  22. /// 如果不存在key,则始终返回true,并且添加value
  23. /// </summary>
  24. [SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
  25. private bool TryAddInternal(TKey key, TValue value, bool updateIfExists, bool acquireLock, out TValue resultingValue)
  26. {
  27. while (true)
  28. {
  29. //桶序号(下标),锁序号(下标)
  30. int bucketNo, lockNo;
  31. int hashcode;
  32.  
  33. Tables tables = m_tables;
  34. IEqualityComparer<TKey> comparer = tables.m_comparer;
  35. hashcode = comparer.GetHashCode(key);
  36.  
  37. //获取桶下标、锁下标
  38. GetBucketAndLockNo(hashcode, out bucketNo, out lockNo, tables.m_buckets.Length, tables.m_locks.Length);
  39.  
  40. bool resizeDesired = false;
  41. bool lockTaken = false;
  42. #if FEATURE_RANDOMIZED_STRING_HASHING
  43. #if !FEATURE_CORECLR
  44. bool resizeDueToCollisions = false;
  45. #endif // !FEATURE_CORECLR
  46. #endif
  47.  
  48. try
  49. {
  50. if (acquireLock)
  51. //根据上面得到的锁的下标(lockNo),获取对应(lockNo)的对象锁
  52. //hash落在不同的锁对象上,因此不同线程获取锁的对象可能不同,降低了“抢锁”概率
  53. Monitor.Enter(tables.m_locks[lockNo], ref lockTaken);
  54.  
  55. //在这之前如果tables被修改则有可能未正确锁定,此时需要重试
  56. if (tables != m_tables)
  57. {
  58. continue;
  59. }
  60.  
  61. #if FEATURE_RANDOMIZED_STRING_HASHING
  62. #if !FEATURE_CORECLR
  63. int collisionCount = 0;
  64. #endif // !FEATURE_CORECLR
  65. #endif
  66.  
  67. // Try to find this key in the bucket
  68. Node prev = null;
  69. for (Node node = tables.m_buckets[bucketNo]; node != null; node = node.m_next)
  70. {
  71. Assert((prev == null && node == tables.m_buckets[bucketNo]) || prev.m_next == node);
  72. //如果key已经存在
  73. if (comparer.Equals(node.m_key, key))
  74. {
  75. //如果允许更新,则更新该键值对的值
  76. if (updateIfExists)
  77. {
  78. //如果可以原子操作则直接赋值
  79. if (s_isValueWriteAtomic)
  80. {
  81. node.m_value = value;
  82. }
  83. //否则需要为更新创建一个新的节点,以便支持不能以原子方式写的类型,
  84. //因为无锁读取也可能在此时发生
  85. else
  86. {
  87. //node.m_next 新节点指向下一个节点
  88. Node newNode = new Node(node.m_key, value, hashcode, node.m_next);
  89. if (prev == null)
  90. {
  91. tables.m_buckets[bucketNo] = newNode;
  92. }
  93. else
  94. {
  95. //上一个节点指向新节点。此时完成单链表的新旧节点替换
  96. prev.m_next = newNode;
  97. }
  98. }
  99. resultingValue = value;
  100. }
  101. else
  102. {
  103. resultingValue = node.m_value;
  104. }
  105. return false;
  106. }
  107. //循环到最后时,prev是最后一个node(node.m_next==null)
  108. prev = node;
  109.  
  110. #if FEATURE_RANDOMIZED_STRING_HASHING
  111. #if !FEATURE_CORECLR
  112. collisionCount++;
  113. #endif // !FEATURE_CORECLR
  114. #endif
  115. }
  116.  
  117. #if FEATURE_RANDOMIZED_STRING_HASHING
  118. #if !FEATURE_CORECLR
  119. if(collisionCount > HashHelpers.HashCollisionThreshold && HashHelpers.IsWellKnownEqualityComparer(comparer))
  120. {
  121. resizeDesired = true;
  122. resizeDueToCollisions = true;
  123. }
  124. #endif // !FEATURE_CORECLR
  125. #endif
  126.  
  127. //使用可变内存操作插入键值对
  128. Volatile.Write<Node>(ref tables.m_buckets[bucketNo], new Node(key, value, hashcode, tables.m_buckets[bucketNo]));
  129. checked
  130. {
  131. //第lockNo个锁保护的元素数量加1,并检查是否溢出
  132. tables.m_countPerLock[lockNo]++;
  133. }
  134.  
  135. //
  136. // If the number of elements guarded by this lock has exceeded the budget, resize the bucket table.
  137. // It is also possible that GrowTable will increase the budget but won't resize the bucket table.
  138. // That happens if the bucket table is found to be poorly utilized due to a bad hash function.
  139. //如果第lockNo个锁要锁的元素超出预计,则需要调整
  140. if (tables.m_countPerLock[lockNo] > m_budget)
  141. {
  142. resizeDesired = true;
  143. }
  144. }
  145. finally
  146. {
  147. if (lockTaken)
  148. //释放第lockNo个锁
  149. Monitor.Exit(tables.m_locks[lockNo]);
  150. }
  151.  
  152. //
  153. // The fact that we got here means that we just performed an insertion. If necessary, we will grow the table.
  154. //
  155. // Concurrency notes:
  156. // - Notice that we are not holding any locks at when calling GrowTable. This is necessary to prevent deadlocks.
  157. // - As a result, it is possible that GrowTable will be called unnecessarily. But, GrowTable will obtain lock 0
  158. // and then verify that the table we passed to it as the argument is still the current table.
  159. //
  160. if (resizeDesired)
  161. {
  162. #if FEATURE_RANDOMIZED_STRING_HASHING
  163. #if !FEATURE_CORECLR
  164. if (resizeDueToCollisions)
  165. {
  166. GrowTable(tables, (IEqualityComparer<TKey>)HashHelpers.GetRandomizedEqualityComparer(comparer), true, m_keyRehashCount);
  167. }
  168. else
  169. #endif // !FEATURE_CORECLR
  170. {
  171. GrowTable(tables, tables.m_comparer, false, m_keyRehashCount);
  172. }
  173. #else
  174. GrowTable(tables, tables.m_comparer, false, m_keyRehashCount);
  175. #endif
  176. }
  177.  
  178. resultingValue = value;
  179. return true;
  180. }
  181. }

  需要特别指出的是,ConcurrentDictionary在插入、更新、获取键值对时,先用key的散列值定位桶,再用比较器的Equals判断key是否相等;默认比较器为EqualityComparer&lt;TKey&gt;.Default,对于未重写Equals的引用类型,其效果即引用相等。Dictionary在遍历桶内链表时则会先比较节点中存储的散列值、再调用Equals,两者都一致才视为相等——这只是减少Equals调用的性能优化,判等的逻辑结果与ConcurrentDictionary相同。

  1. /// <summary>
  2. /// Attempts to get the value associated with the specified key from the <see
  3. /// cref="ConcurrentDictionary{TKey,TValue}"/>.
  4. /// </summary>
  5. /// <param name="key">The key of the value to get.</param>
  6. /// <param name="value">When this method returns, <paramref name="value"/> contains the object from
  7. /// the
  8. /// <see cref="ConcurrentDictionary{TKey,TValue}"/> with the specified key or the default value of
  9. /// <typeparamref name="TValue"/>, if the operation failed.</param>
  10. /// <returns>true if the key was found in the <see cref="ConcurrentDictionary{TKey,TValue}"/>;
  11. /// otherwise, false.</returns>
  12. /// <exception cref="T:System.ArgumentNullException"><paramref name="key"/> is a null reference
  13. /// (Nothing in Visual Basic).</exception>
  14. [SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
  15. public bool TryGetValue(TKey key, out TValue value)
  16. {
  17. if (key == null) throw new ArgumentNullException("key");
  18.  
  19. int bucketNo, lockNoUnused;
  20.  
  21. // We must capture the m_buckets field in a local variable. It is set to a new table on each table resize.
  22. Tables tables = m_tables;
  23. IEqualityComparer<TKey> comparer = tables.m_comparer;
  24. GetBucketAndLockNo(comparer.GetHashCode(key), out bucketNo, out lockNoUnused, tables.m_buckets.Length, tables.m_locks.Length);
  25.  
  26. // We can get away w/out a lock here.
  27. // The Volatile.Read ensures that the load of the fields of 'n' doesn't move before the load from buckets[i].
  28. Node n = Volatile.Read<Node>(ref tables.m_buckets[bucketNo]);
  29.  
  30. while (n != null)
  31. {
  32. //用比较器判断key是否相等(默认为EqualityComparer<TKey>.Default,未重写Equals的引用类型即比较引用)
  33. if (comparer.Equals(n.m_key, key))
  34. {
  35. value = n.m_value;
  36. return true;
  37. }
  38. n = n.m_next;
  39. }
  40.  
  41. value = default(TValue);
  42. return false;
  43. }

  其它一些需要知道的内容,比如默认并发数、如何为指定key计算桶号和锁号等

  1. #if !FEATURE_CORECLR
  2. [NonSerialized]
  3. #endif
  4. private volatile Tables m_tables; // Internal tables of the dictionary
  5. // NOTE: this is only used for compat reasons to serialize the comparer.
  6. // This should not be accessed from anywhere else outside of the serialization methods.
  7. internal IEqualityComparer<TKey> m_comparer;
  8. #if !FEATURE_CORECLR
  9. [NonSerialized]
  10. #endif
  11. private readonly bool m_growLockArray; // Whether to dynamically increase the size of the striped lock
  12.  
  13. // How many times we resized becaused of collisions.
  14. // This is used to make sure we don't resize the dictionary because of multi-threaded Add() calls
  15. // that generate collisions. Whenever a GrowTable() should be the only place that changes this
  16. #if !FEATURE_CORECLR
  17. // The field should be have been marked as NonSerialized but because we shipped it without that attribute in 4.5.1.
  18. // we can't add it back without breaking compat. To maximize compat we are going to keep the OptionalField attribute
  19. // This will prevent cases where the field was not serialized.
  20. [OptionalField]
  21. #endif
  22. private int m_keyRehashCount;
  23.  
  24. #if !FEATURE_CORECLR
  25. [NonSerialized]
  26. #endif
  27. private int m_budget; // The maximum number of elements per lock before a resize operation is triggered
  28.  
  29. #if !FEATURE_CORECLR // These fields are not used in CoreCLR
  30. private KeyValuePair<TKey, TValue>[] m_serializationArray; // Used for custom serialization
  31.  
  32. private int m_serializationConcurrencyLevel; // used to save the concurrency level in serialization
  33.  
  34. private int m_serializationCapacity; // used to save the capacity in serialization
  35. #endif
  36.  
  37. // The default capacity, i.e. the initial # of buckets. When choosing this value, we are making
  38. // a trade-off between the size of a very small dictionary, and the number of resizes when
  39. // constructing a large dictionary. Also, the capacity should not be divisible by a small prime.
  40. private const int DEFAULT_CAPACITY = 31;
  41.  
  42. // The maximum size of the striped lock that will not be exceeded when locks are automatically
  43. // added as the dictionary grows. However, the user is allowed to exceed this limit by passing
  44. // a concurrency level larger than MAX_LOCK_NUMBER into the constructor.
  45. private const int MAX_LOCK_NUMBER = 1024;
  46.  
  47. private const int PROCESSOR_COUNT_REFRESH_INTERVAL_MS = 30000; // How often to refresh the count, in milliseconds.
  48. private static volatile int s_processorCount; // The last count seen.
  49. private static volatile int s_lastProcessorCountRefreshTicks; // The last time we refreshed.
  50.  
  51. /// <summary>
  52. /// Gets the number of available processors
  53. /// </summary>
  54. private static int ProcessorCount
  55. {
  56. get
  57. {
  58. int now = Environment.TickCount;
  59. int procCount = s_processorCount;
  60. if (procCount == 0 || (now - s_lastProcessorCountRefreshTicks) >= PROCESSOR_COUNT_REFRESH_INTERVAL_MS)
  61. {
  62. s_processorCount = procCount = Environment.ProcessorCount;
  63. s_lastProcessorCountRefreshTicks = now;
  64. }
  65.  
  66. Contract.Assert(procCount > 0 && procCount <= 64,
  67. "Processor count not within the expected range (1 - 64).");
  68.  
  69. return procCount;
  70. }
  71. }
  72.  
  73. // Whether TValue is a type that can be written atomically (i.e., with no danger of torn reads)
  74. private static readonly bool s_isValueWriteAtomic = IsValueWriteAtomic();
  75. /// <summary>
  76. /// The number of concurrent writes for which to optimize by default.
  77. /// </summary>
  78. private static int DefaultConcurrencyLevel
  79. {
  80. get { return ProcessorCount; }
  81. }
  82. /// <summary>
  83. /// Replaces the bucket table with a larger one. To prevent multiple threads from resizing the
  84. /// table as a result of ----s, the Tables instance that holds the table of buckets deemed too
  85. /// small is passed in as an argument to GrowTable(). GrowTable() obtains a lock, and then checks
  86. /// the Tables instance has been replaced in the meantime or not.
  87. /// The <paramref name="rehashCount"/> will be used to ensure that we don't do two subsequent resizes
  88. /// because of a collision
  89. /// </summary>
  90. private void GrowTable(Tables tables, IEqualityComparer<TKey> newComparer, bool regenerateHashKeys, int rehashCount)
  91. {
  92. int locksAcquired = 0;
  93. try
  94. {
  95. // The thread that first obtains m_locks[0] will be the one doing the resize operation
  96. AcquireLocks(0, 1, ref locksAcquired);
  97.  
  98. if (regenerateHashKeys && rehashCount == m_keyRehashCount)
  99. {
  100. // This method is called with regenerateHashKeys==true when we detected
  101. // more than HashHelpers.HashCollisionThreshold collisions when adding a new element.
  102. // In that case we are in the process of switching to another (randomized) comparer
  103. // and we have to re-hash all the keys in the table.
  104. // We are only going to do this if we did not just rehash the entire table while waiting for the lock
  105. tables = m_tables;
  106. }
  107. else
  108. {
  109. // If we don't require a regeneration of hash keys we want to make sure we don't do work when
  110. // we don't have to
  111. if (tables != m_tables)
  112. {
  113. // We assume that since the table reference is different, it was already resized (or the budget
  114. // was adjusted). If we ever decide to do table shrinking, or replace the table for other reasons,
  115. // we will have to revisit this logic.
  116. return;
  117. }
  118.  
  119. // Compute the (approx.) total size. Use an Int64 accumulation variable to avoid an overflow.
  120. long approxCount = 0;
  121. for (int i = 0; i < tables.m_countPerLock.Length; i++)
  122. {
  123. approxCount += tables.m_countPerLock[i];
  124. }
  125.  
  126. //
  127. // If the bucket array is too empty, double the budget instead of resizing the table
  128. //
  129. if (approxCount < tables.m_buckets.Length / 4)
  130. {
  131. m_budget = 2 * m_budget;
  132. if (m_budget < 0)
  133. {
  134. m_budget = int.MaxValue;
  135. }
  136.  
  137. return;
  138. }
  139. }
  140. // Compute the new table size. We find the smallest integer larger than twice the previous table size, and not divisible by
  141. // 2,3,5 or 7. We can consider a different table-sizing policy in the future.
  142. int newLength = 0;
  143. bool maximizeTableSize = false;
  144. try
  145. {
  146. checked
  147. {
  148. // Double the size of the buckets table and add one, so that we have an odd integer.
  149. newLength = tables.m_buckets.Length * 2 + 1;
  150.  
  151. // Now, we only need to check odd integers, and find the first that is not divisible
  152. // by 3, 5 or 7.
  153. while (newLength % 3 == 0 || newLength % 5 == 0 || newLength % 7 == 0)
  154. {
  155. newLength += 2;
  156. }
  157.  
  158. Assert(newLength % 2 != 0);
  159.  
  160. if (newLength > Array.MaxArrayLength)
  161. {
  162. maximizeTableSize = true;
  163. }
  164. }
  165. }
  166. catch (OverflowException)
  167. {
  168. maximizeTableSize = true;
  169. }
  170.  
  171. if (maximizeTableSize)
  172. {
  173. newLength = Array.MaxArrayLength;
  174.  
  175. // We want to make sure that GrowTable will not be called again, since table is at the maximum size.
  176. // To achieve that, we set the budget to int.MaxValue.
  177. //
  178. // (There is one special case that would allow GrowTable() to be called in the future:
  179. // calling Clear() on the ConcurrentDictionary will shrink the table and lower the budget.)
  180. m_budget = int.MaxValue;
  181. }
  182.  
  183. // Now acquire all other locks for the table
  184. AcquireLocks(1, tables.m_locks.Length, ref locksAcquired);
  185.  
  186. object[] newLocks = tables.m_locks;
  187.  
  188. // Add more locks
  189. if (m_growLockArray && tables.m_locks.Length < MAX_LOCK_NUMBER)
  190. {
  191. newLocks = new object[tables.m_locks.Length * 2];
  192. Array.Copy(tables.m_locks, newLocks, tables.m_locks.Length);
  193.  
  194. for (int i = tables.m_locks.Length; i < newLocks.Length; i++)
  195. {
  196. newLocks[i] = new object();
  197. }
  198. }
  199.  
  200. Node[] newBuckets = new Node[newLength];
  201. int[] newCountPerLock = new int[newLocks.Length];
  202.  
  203. // Copy all data into a new table, creating new nodes for all elements
  204. for (int i = 0; i < tables.m_buckets.Length; i++)
  205. {
  206. Node current = tables.m_buckets[i];
  207. while (current != null)
  208. {
  209. Node next = current.m_next;
  210. int newBucketNo, newLockNo;
  211. int nodeHashCode = current.m_hashcode;
  212.  
  213. if (regenerateHashKeys)
  214. {
  215. // Recompute the hash from the key
  216. nodeHashCode = newComparer.GetHashCode(current.m_key);
  217. }
  218.  
  219. GetBucketAndLockNo(nodeHashCode, out newBucketNo, out newLockNo, newBuckets.Length, newLocks.Length);
  220.  
  221. newBuckets[newBucketNo] = new Node(current.m_key, current.m_value, nodeHashCode, newBuckets[newBucketNo]);
  222.  
  223. checked
  224. {
  225. newCountPerLock[newLockNo]++;
  226. }
  227.  
  228. current = next;
  229. }
  230. }
  231.  
  232. // If this resize regenerated the hashkeys, increment the count
  233. if (regenerateHashKeys)
  234. {
  235. // We use unchecked here because we don't want to throw an exception if
  236. // an overflow happens
  237. unchecked
  238. {
  239. m_keyRehashCount++;
  240. }
  241. }
  242.  
  243. // Adjust the budget
  244. m_budget = Math.Max(1, newBuckets.Length / newLocks.Length);
  245.  
  246. // Replace tables with the new versions
  247. m_tables = new Tables(newBuckets, newLocks, newCountPerLock, newComparer);
  248. }
  249. finally
  250. {
  251. // Release all locks that we took earlier
  252. ReleaseLocks(0, locksAcquired);
  253. }
  254. }
  255.  
  256. /// <summary>
  257. /// 为指定key计算桶号和锁号
  258. /// </summary>
  259. /// <param name="hashcode">key的hashcode</param>
  260. /// <param name="bucketNo"></param>
  261. /// <param name="lockNo"></param>
  262. /// <param name="bucketCount">桶数量</param>
  263. /// <param name="lockCount">锁数量</param>
  264. private void GetBucketAndLockNo(int hashcode, out int bucketNo, out int lockNo, int bucketCount, int lockCount)
  265. {
  266. //取正hashcode,余数恒小于除数
  267. bucketNo = (hashcode & 0x7fffffff) % bucketCount;
  268. //若桶下标与锁个数的余数相同,则这一簇数据都使用同一个锁(局部锁)
  269. lockNo = bucketNo % lockCount;
  270.  
  271. Assert(bucketNo >= 0 && bucketNo < bucketCount);
  272. Assert(lockNo >= 0 && lockNo < lockCount);
  273. }
  274.  
  275. /// <summary>
  276. /// Determines whether type TValue can be written atomically
  277. /// </summary>
  278. private static bool IsValueWriteAtomic()
  279. {
  280. Type valueType = typeof(TValue);
  281.  
  282. //
  283. // Section 12.6.6 of ECMA CLI explains which types can be read and written atomically without
  284. // the risk of tearing.
  285. //
  286. // See http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-335.pdf
  287. //
  288. if (valueType.IsClass)
  289. {
  290. return true;
  291. }
  292. switch (Type.GetTypeCode(valueType))
  293. {
  294. case TypeCode.Boolean:
  295. case TypeCode.Byte:
  296. case TypeCode.Char:
  297. case TypeCode.Int16:
  298. case TypeCode.Int32:
  299. case TypeCode.SByte:
  300. case TypeCode.Single:
  301. case TypeCode.UInt16:
  302. case TypeCode.UInt32:
  303. return true;
  304. case TypeCode.Int64:
  305. case TypeCode.Double:
  306. case TypeCode.UInt64:
  307. return IntPtr.Size == 8;
  308. default:
  309. return false;
  310. }
  311. }

ConcurrentDictionary源码概读的更多相关文章

  1. Spark 源码浅读-SparkSubmit

    Spark 源码浅读-任务提交SparkSubmit main方法 main方法主要用于初始化日志,然后接着调用doSubmit方法. override def main(args: Array[St ...

  2. spring-cloud-square源码速读(spring-cloud-square-okhttp篇)

    欢迎访问我的GitHub https://github.com/zq2599/blog_demos 内容:所有原创文章分类汇总及配套源码,涉及Java.Docker.Kubernetes.DevOPS ...

  3. spring-cloud-square源码速读(retrofit + okhttp篇)

    欢迎访问我的GitHub 这里分类和汇总了欣宸的全部原创(含配套源码):https://github.com/zq2599/blog_demos spring-cloud-square系列文章 五分钟 ...

  4. Handlebars模板引擎中的each嵌套及源码浅读

    若显示效果不佳,可移步到愚安的小窝 Handlebars模板引擎作为时下最流行的模板引擎之一,已然在开发中为我们提供了无数便利.作为一款无语义的模板引擎,Handlebars只提供极少的helper函 ...

  5. [源码分析]读写锁ReentrantReadWriteLock

    一.简介 读写锁. 读锁之间是共享的. 写锁是独占的. 首先声明一点: 我在分析源码的时候, 把jdk源码复制出来进行中文的注释, 有时还进行编译调试什么的, 为了避免和jdk原生的类混淆, 我在类前 ...

  6. 【源码分析】HashMap源码再读-基于Java8

    最近工作不是太忙,准备再读读一些源码,想来想去,还是先从JDK的源码读起吧,毕竟很久不去读了,很多东西都生疏了.当然,还是先从炙手可热的HashMap,每次读都会有一些收获.当然,JDK8对HashM ...

  7. AQS源码泛读,梳理设计流程(jdk8)

    一.AQS介绍 AQS(AbstractQueuedSynchronizer)抽象队列同步器,属于多线程编程的基本工具:JDK对其定义得很详细,并提供了多种常用的工具类(重入锁,读写锁,信号量,Cyc ...

  8. MyBatis 之源码浅读

    环境简介与入口 记录一下尝试阅读Mybatis源码的过程,这篇笔记是我一边读,一遍记录下来的,虽然内容也不多,对Mybatis整体的架构体系也没有摸的很清楚,起码也能把这个过程整理下来,这也是我比较喜 ...

  9. 读源码【读mybatis的源码的思路】

    ✿ 需要掌握的编译器知识 ★ 编译器为eclipse为例子 调试准备工作(步骤:Window -> Show View ->...): □ 打开调试断点Breakpoint: □ 打开变量 ...

随机推荐

  1. Spark2.4源码阅读1-Shuffle机制概述

    本文参考: a. https://www.jianshu.com/p/c46bfaa5dd15 1. shuffle及历史简介 shuffle,即"洗牌",所有采用map-redu ...

  2. centos7:ssh免密登陆设置

    1.使用root用户登录,进入到目录/root/.ssh 2.执行命令:ssh-keygen -t rsa 一路回车,完成后会在目录/root/.ssh下面生成文件 id_rsa和id_rsa.pub ...

  3. AssassinGo: 基于Go的高并发可拓展式Web渗透框架

    转载自FreeBuf.COM AssassinGo是一款使用Golang开发,集成了信息收集.基础攻击探测.Google-Hacking域名搜索和PoC批量检测等功能的Web渗透框架,并且有着基于Vu ...

  4. 物联网安全himqtt防火墙数据结构之ringbuffer环形缓冲区

    物联网安全himqtt防火墙数据结构之ringbuffer环形缓冲区 随着5G的普及,物联网安全显得特别重要,himqtt是首款完整源码的高性能MQTT物联网防火墙 - MQTT Applicatio ...

  5. 通过js获取本机的IP地址

    参考链接:https://blog.csdn.net/qq_39327418/article/details/90052668

  6. docker部署zabbix并设置自动发现规则

      docker部署zabbix比源码安装简单一些,特此记录: 机器准备: zabbix-server: 192.168.0.150 homeserver zabbix-agent: 192.168. ...

  7. loadrunner如何监控linux,以及重点指标分析

    监控UNIX一. lr监控UNIX ,UNIX先启动一rstatd服务 以下是在IBM AIX系统中启动rstatd服务的方法: 1. 使用telnet以root用户的身份登录入AIX系统 2. 在命 ...

  8. python-day5(正式学习)

    格式化输出 符合某种输出规范的print函数的应用 第一种方式 使用占位符.漫威里有个人叫斯塔克,他平时站在人堆里(print函数引号内的内容)我们看不出来和其他人有什么异常(print的终端显示), ...

  9. 8.perf top系统性能分析工具

    perf 是一个调查 Linux 中各种性能问题的有力工具. # perf --help  usage: perf [--version] [--help] COMMAND [ARGS]  The m ...

  10. Istio技术与实践01: 源码解析之Pilot多云平台服务发现机制

    服务模型 首先,Istio作为一个(微)服务治理的平台,和其他的微服务模型一样也提供了Service,ServiceInstance这样抽象服务模型.如Service的定义中所表达的,一个服务有一个全 ...