基于folly的AtomicIntrusiveLinkedList无锁队列进行简单封装的多生产多消费模型
1.基于folly的AtomicIntrusiveLinkedList略微修改的无锁队列代码:
#ifndef FOLLY_REVISE_H
#define FOLLY_REVISE_H namespace folly {
/**
* A very simple atomic single-linked list primitive
*
*/ template <typename T>
struct node
{
T data;
node* next; node(const T& data) : data(data), next(nullptr) { }
node(T&& data): data(std::move(data)), next(nullptr) { }
}; template <class T>
class AtomicForwardList
{
public:
AtomicForwardList() { }
AtomicForwardList(const AtomicForwardList&) = delete;
AtomicForwardList& operator=(const AtomicForwardList&) = delete; AtomicForwardList(AtomicForwardList&& other) noexcept
: head_(other.head_.load())
{
other.head_ = nullptr;
} AtomicForwardList& operator=(AtomicForwardList&& other) noexcept
{
AtomicForwardList tmp(std::move(other));
swap(*this, tmp); return *this;
} /**
* Note: list must be empty on destruction.
*/
~AtomicForwardList()
{
assert(empty());
} bool empty() const
{
return head_.load() == nullptr;
} /**
* Atomically insert t at the head of the list.
* @return True if the inserted element is the only one in the list
* after the call.
*/
bool insertHead(T* t)
{
assert(t->next == nullptr); auto oldHead = head_.load(std::memory_order_relaxed);
do
{
t->next = oldHead;
/* oldHead is updated by the call below.
NOTE: we don't use next(t) instead of oldHead directly due to
compiler bugs (GCC prior to 4.8.3 (bug 60272), clang (bug 18899),
MSVC (bug 819819); source:
http://en.cppreference.com/w/cpp/atomic/atomic/compare_exchange */
} while (!head_.compare_exchange_weak(oldHead, t,
std::memory_order_release, std::memory_order_relaxed)); return oldHead == nullptr;
} /**
* Replaces the head with nullptr,
* and calls func() on the removed elements in the order from tail to head
* Returns false if the list was empty.
*/
template <typename F>
bool sweepOnce(F&& func)
{
if (auto head = head_.exchange(nullptr)) // why is memory_order_seq_cst
{
auto rhead = reverse(head);
unlinkAll(rhead, std::forward<F>(func));
return true;
}
return false;
} /**
* Repeatedly replaces the head with nullptr
* and calls func() on the removed elements in the order from tail to head.
* Stops when the list is empty.
*/
template <typename F>
void sweep(F&& func)
{
while (sweepOnce(std::forward<F>(func)))
{
}
} /**
* Similar to sweepOnce() but calls func() on elements in LIFO order
*
* func() is called for all elements in the list at the moment
* reverseSweepOnce() is called.
*/
template <typename F>
bool reverseSweepOnce(F&& func)
{
// We don't loop like sweep() does because the overall order of callbacks
// would be strand-wise LIFO which is meanless to callers.
if (auto head = head_.exchange(nullptr))
{
unlinkAll(head, std::forward<F>(func));
return true;
}
return false;
} /**
* Replaces the head with nullptr,
* and get the member list pointed by head in input order
*/
T* getInputList()
{
if (auto head = head_.exchange(nullptr, std::memory_order_acquire)) // why is memory_order_seq_cst
{
auto rhead = reverse(head);
return rhead;
} return nullptr;
} /**
* Replaces the head with nullptr
* and get the member list pointed by head in reversed input order
*/
T* getList()
{
return head_.exchange(nullptr);
} private:
std::atomic<T*> head_{ nullptr }; /* Reverses a linked list, returning the pointer to the new head
(old tail) */
static T* reverse(T* head)
{
T* rhead = nullptr; while (head != nullptr)
{
auto t = head;
head = t->next;
t->next = rhead;
rhead = t;
} return rhead;
} /* Unlinks all elements in the linked list fragment pointed to by 'head',
* calling func() on every element */
template <typename F>
void unlinkAll(T* head, F&& func)
{
while (head != nullptr)
{
auto t = head;
head = t->next;
t->next = nullptr;
func(t);
}
}
};
} #endif // FOLLY_REVISE_H
2.基于上面无锁队列的封装
#ifndef COMPOSITE_ATOMIC_LIST_H
#define COMPOSITE_ATOMIC_LIST_H

/**
 * Compose a multiple-producers / multiple-consumers atomic list out of
 * `consumerNum` independent AtomicForwardList shards — one shard per
 * consumer. Producers round-robin their insertions across the shards so
 * that work is spread evenly among consumers.
 */

#include <cassert>
#include <vector>

#include "folly_revise.h"

namespace folly
{

template <class T>
class CompositeAtomicList
{
public:
    using size_type = typename std::vector<AtomicForwardList<T>>::size_type;

public:
    /**
     * @param producerNum number of producers; each must use its own id and
     *        call the producer functions from a single thread only
     * @param consumerNum number of consumers == number of internal shards
     */
    CompositeAtomicList(size_type producerNum, size_type consumerNum)
        : m_producerNum(producerNum), m_consumerNum(consumerNum)
    {
        // it is meaningless if there is no producer or no consumer
        assert(producerNum > 0);
        assert(consumerNum > 0);
        // the number of composite lists is equal to the consumer number
        m_compositeList.resize(consumerNum);
        // initialize each producer's first insertion index, staggered so
        // producers start on different shards
        m_producerIdxs.resize(producerNum);
        for (size_type si = 0; si != m_producerIdxs.size(); ++si)
        {
            m_producerIdxs[si] = si % consumerNum;
        }
    }

    CompositeAtomicList(const CompositeAtomicList&) = delete;
    CompositeAtomicList& operator=(const CompositeAtomicList&) = delete;

    CompositeAtomicList(CompositeAtomicList&&) noexcept = default;
    CompositeAtomicList& operator=(CompositeAtomicList&&) noexcept = default;

    ~CompositeAtomicList() = default;

    // producer count
    size_type getProducerNum() const
    {
        return m_producerNum;
    }

    // consumer count
    size_type getConsumerNum() const
    {
        return m_consumerNum;
    }

    bool empty() const
    {
        // the composite list is empty only if every consumer shard is empty
        for (const auto& item : m_compositeList)
        {
            if (!item.empty())
            {
                return false;
            }
        }
        return true;
    }

    /**
     * Insert node t on behalf of producer `producer_num`.
     * @return true if the shard receiving t was empty before the insertion.
     *
     * NOTE: each producer id must be driven from one thread only — the
     * per-producer round-robin index in m_producerIdxs is not synchronized.
     */
    bool insertHead(size_type producer_num, T* t)
    {
        assert(producer_num < m_producerNum);
        auto ret = m_compositeList[m_producerIdxs[producer_num]].insertHead(t);
        // advance to the next shard so production spreads evenly
        m_producerIdxs[producer_num] = (m_producerIdxs[producer_num] + 1) % m_consumerNum;
        return ret;
    }

    /**
     * A consumer function.
     * Consume the nodes of shard `consumer_num` once, invoking func on every
     * node in FIFO order. All consumer functions for a particular consumer_num
     * should be invoked from one thread only, to evenly distribute the tasks.
     *
     * Recommend calling std::this_thread::yield() when this returns false.
     */
    template <typename F>
    bool sweepOnce(size_type consumer_num, F&& func)
    {
        return m_compositeList[consumer_num].sweepOnce(std::forward<F>(func));
    }

    /**
     * A consumer function.
     * Repeatedly consume the nodes of shard `consumer_num` until it is empty,
     * invoking func on every node. Same single-thread-per-consumer rule as
     * sweepOnce().
     *
     * Recommend calling std::this_thread::yield() after calling this function.
     */
    template <typename F>
    void sweep(size_type consumer_num, F&& func)
    {
        m_compositeList[consumer_num].sweep(std::forward<F>(func));
    }

    /**
     * A consumer function.
     * Consume every shard once, invoking func on every node. Intended as a
     * final drain after all worker threads have terminated, to ensure every
     * node has been consumed.
     */
    template <typename F>
    void sweepAll(F&& func)
    {
        for (size_type si = 0; si != m_consumerNum; ++si)
        {
            // pass func by lvalue: forwarding an rvalue into a loop body more
            // than once would be unsafe if func were move-consumed
            sweepOnce(si, func);
        }
    }

    /**
     * A consumer function.
     * Similar to sweepOnce() but calls func() on elements in LIFO order.
     *
     * Recommend calling std::this_thread::yield() when this returns false.
     */
    template <typename F>
    bool reverseSweepOnce(size_type consumer_num, F&& func)
    {
        return m_compositeList[consumer_num].reverseSweepOnce(std::forward<F>(func));
    }

    /**
     * A consumer function.
     * Detach and return all nodes of shard `consumer_num` in input order.
     * @return the detached node list, or nullptr if the shard was empty.
     *
     * Recommend calling std::this_thread::yield() when this returns nullptr.
     */
    T* getInputList(size_type consumer_num)
    {
        return m_compositeList[consumer_num].getInputList();
    }

    /**
     * A consumer function.
     * Detach and return all nodes of shard `consumer_num` in reversed input
     * order.
     * @return the detached node list, or nullptr if the shard was empty.
     *
     * NOTE: the original referenced `consumer_num` without declaring it (a
     * compile error); the parameter is required, matching getInputList().
     *
     * Recommend calling std::this_thread::yield() when this returns nullptr.
     */
    T* getList(size_type consumer_num)
    {
        return m_compositeList[consumer_num].getList();
    }

private:
    // the producer and consumer counts
    size_type m_producerNum;
    size_type m_consumerNum;
    // the next insertion shard for each producer (round-robin cursor)
    std::vector<size_type> m_producerIdxs;
    // the composite atomic lists, one shard per consumer
    std::vector<AtomicForwardList<T>> m_compositeList;
};

} // namespace folly

#endif // COMPOSITE_ATOMIC_LIST_H
3.测试用代码:
#include <memory>
#include <cassert> #include <iostream>
#include <vector>
#include <thread>
#include <future>
#include <random>
#include <cmath> #include "folly_revise.h" #include "composite_atomic_list.h" using namespace folly; struct student_name
{
student_name(int age = )
: age(age), next(nullptr)
{ } int age; student_name* next;
}; using ATOMIC_STUDENT_LIST = CompositeAtomicList<student_name>; constexpr int PRODUCE_THREAD_NUM = ; // producing thread number
constexpr int CONSUME_THREAD_NUM = ; // consuming thread number ATOMIC_STUDENT_LIST g_students(PRODUCE_THREAD_NUM, CONSUME_THREAD_NUM); std::atomic<int> g_inserts; // insert num (successful)
std::atomic<int> g_drops; // drop num (successful) std::atomic<int> g_printNum; // as same as g_drops std::atomic<long> g_ageInSum; // age sum when producing student_name
std::atomic<long> g_ageOutSum; // age sum when consuming student_name std::atomic<bool> goOn(true); constexpr int ONE_THREAD_PRODUCE_NUM = ; // when testing, no more than this number, you know 20,000,00 * 100 * 10 ~= MAX_INT if thread num <= 10 inline void printOne(student_name* t)
{
g_printNum.fetch_add(, std::memory_order_relaxed);
g_ageOutSum.fetch_add(t->age, std::memory_order_relaxed);
g_drops.fetch_add(, std::memory_order_relaxed);
delete t;
} void insert_students(int idNo)
{
std::default_random_engine dre(time(nullptr));
std::uniform_int_distribution<int> ageDi(, ); for (int i = ; i < ONE_THREAD_PRODUCE_NUM; ++i)
{
int newAge = ageDi(dre);
g_ageInSum.fetch_add(newAge, std::memory_order_relaxed); g_students.insertHead(idNo, new student_name(newAge));
// use memory_order_relaxed avoiding affect folly memory order
g_inserts.fetch_add(, std::memory_order_relaxed);
}
} void drop_students(int idNo)
{
while (goOn.load(std::memory_order_relaxed))
{
//auto st = g_students.getInputList();
//while (st)
//{
// auto next = st->next; // printOne(st);
// // use memory_order_relaxed avoiding affect folly memory order
// g_drops.fetch_add(1, std::memory_order_relaxed); // st = next;
//} g_students.sweep(idNo, printOne);
std::this_thread::yield();
}
} int main()
{
std::vector<std::future<void>> insert_threads;
for (int i = ; i != PRODUCE_THREAD_NUM; ++i)
{
insert_threads.push_back(std::async(std::launch::async, insert_students, i));
} std::vector<std::future<void>> drop_threads;
for (int i = ; i != CONSUME_THREAD_NUM; ++i)
{
drop_threads.push_back(std::async(std::launch::async, drop_students, i));
} for (auto& item : insert_threads)
{
item.get();
} goOn.store(std::memory_order_relaxed); for (auto& item : drop_threads)
{
item.get();
} g_students.sweepAll(printOne); std::cout << "insert count1: " << g_inserts.load() << std::endl;
std::cout << "drop count1: " << g_drops.load() << std::endl;
std::cout << "print num1: " << g_printNum.load() << std::endl; std::cout << "age in1: " << g_ageInSum.load() << std::endl;
std::cout << "age out1: " << g_ageOutSum.load() << std::endl; std::cout << std::endl;
}
4. AtomicIntrusiveLinkedList的插入操作一次插入一个节点，而移出操作则会一次移出多个节点。如果每个消费队列都使用一个AtomicIntrusiveLinkedList来存储，只要生产均匀分布到各个消费队列中，应该可以实现比较好的效果。不过，让生产均匀分布到各个消费队列中并不那么容易实现；通过使用随机化之类的方式，可以防止人为导致的不均匀，但都不能从根本上解决问题。所以，上述方法只有在比较容易实现生产均匀分布到各个消费队列时，才适合采用。
基于folly的AtomicIntrusiveLinkedList无锁队列进行简单封装的多生产多消费模型的更多相关文章
- folly无锁队列,尝试添加新的函数(续)
基于上一篇文章,dropHead取出节点后,删除节点,会出现内存访问的问题.按照这个逻辑,如果将移出的节点保存到一个无锁队列中,然后在需要节点的时候,从这个备用的无锁队列中取出节点,那么应该就可以避开 ...
- folly无锁队列正确性说明
folly无锁队列是facebook开源的一个无所队列,使用的是单向链表,通过compare_exchange语句实现的多生产多消费的队列,我曾经花了比较多的时间学习memory_order的说明,对 ...
- folly无锁队列,尝试添加新的函数
1. folly是facebook开源的关于无锁队列的库,实现过程很精妙.folly向队列中添加节点过程,符合标准库中的队列的设计,而取出节点的过程,则会造成多个线程的分配不均.我曾经试着提供一次 取 ...
- 无锁队列--基于linuxkfifo实现
一直想写一个无锁队列,为了提高项目的背景效率. 有机会看到linux核心kfifo.h 原则. 所以这个实现自己仿照,眼下linux我们应该能够提供外部接口. #ifndef _NO_LOCK_QUE ...
- 基于无锁队列和c++11的高性能线程池
基于无锁队列和c++11的高性能线程池线程使用c++11库和线程池之间的消息通讯使用一个简单的无锁消息队列适用于linux平台,gcc 4.6以上 标签: <无> 代码片段(6)[ ...
- boost 无锁队列
一哥们翻译的boost的无锁队列的官方文档 原文地址:http://blog.csdn.net/great3779/article/details/8765103 Boost_1_53_0终于迎来了久 ...
- 一个可无限伸缩且无ABA问题的无锁队列
关于无锁队列,详细的介绍请参考陈硕先生的<无锁队列的实现>一文.然进一步,如何实现一个不限node数目即能够无限伸缩的无锁队列,即是本文的要旨. 无锁队列有两种实现形式,分别是数组与链表. ...
- DIOCP开源项目-高效稳定的服务端解决方案(DIOCP + 无锁队列 + ZeroMQ + QWorkers) 出炉了
[概述] 自从上次发布了[DIOCP开源项目-利用队列+0MQ+多进程逻辑处理,搭建稳定,高效,分布式的服务端]文章后,得到了很多朋友的支持和肯定.这加大了我的开发动力,经过几个晚上的熬夜,终于在昨天 ...
- 高性能无锁队列 Disruptor 初体验
原文地址: haifeiWu和他朋友们的博客 博客地址:www.hchstudio.cn 欢迎转载,转载请注明作者及出处,谢谢! 最近一直在研究队列的一些问题,今天楼主要分享一个高性能的队列 Disr ...
随机推荐
- Odoo9以后的社区版本和企业版功能上的区别
Odoo9以后的社区版本和企业版除了授权模式的区别外,整理功能上的区别 透过功能设置菜单整理的区别如下,主要功能模块. 未包括所有模块,毕竟模块太多了. 以下是企业版有,而社区版没有的功能.
- Codeforces Div3 #501 A-E(2) F以后补
感觉自己有点强迫症 不都写出来就找理由不写题解 http://codeforces.com/contest/1015 题目链接 A. Points in Segments 题目意思 n个线段 ...
- linux----别名
经常一些命令太长,输入太麻烦,给该命令起个别名,直接执行,简单又方便. 1.查看别名 alias 2.编辑别名 vi ~/.brashrc 3.添加自己的别名 例如:重启网卡 alias netres ...
- SQL将时间格式化为year-month-day
SQL将输出年月日格式化为:2017-12-3-28 CONVERT(varchar(100),RunDate, 23) AS RunDate,
- day39机器学习
2 Numpy快速上手 2.1. 什么是Numpy Numpy是Python的一个科学计算的库 主要提供矩阵运算的功能,而矩阵运算在机器学习领域应用非常广泛 Numpy一般与Scipy.matplot ...
- Windows 2008 R2环境下DHCP服务的安装部署使用
(第一版本) 这个实验好像需要在部署了activity directory服务的基础上的,给个直达链接 http://blog.csdn.net/qq_34829953/article/details ...
- oracle-锁概念
http://liwenshui322.iteye.com/blog/1166934 ORACLE DDL锁介绍 在DDL操作中会自动为对象加DDL锁(DDL Lock),从而保护这些对象不会被其他会 ...
- MySQL 安全整理
MySQL 安全整理 关闭外网的端口访问. 使用高位的端口号. 如果需要外网访问不给最高的权限. 如果需要外网访问也是绑定客户端. To be continued
- MongoDB之 写安全(Write Concern)
MongoDB Write Concern,简称MongoDB写入安全机制,是一种客户端设置,用于控制写入安全的级别.Write Concern 描述了MongoDB写入到mongod单实例,副本集, ...
- vue-cli 2.x 项目优化之:引入本地静态库文件
demo地址:https://github.com/cag2050/vue_cli_optimize_static_resource vue-cli 将静态资源文件放到 static 文件夹下并引用: ...