A C++11 Thread Pool Implementation


    The concept and uses of thread pools are skipped here.


    A well-rounded thread pool generally provides the following features:

  • Basic task scheduling
  • Starting & shutting down the pool
  • Automatic growth and shrinking of the pool (left as a future extension; Meituan's pooling practice at https://tech.meituan.com/2020/04/02/java-pooling-pratice-in-meituan.html is a useful reference)


A minimal version of the thread pool:

//
// Created by daniel on 2020/6/1.
//
#ifndef THREADPOOL_H
#define THREADPOOL_H
#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>
#include <atomic>
#include <iostream>  // std::cout in the worker exit log
class ThreadPool {

public:
    ThreadPool(size_t = std::thread::hardware_concurrency());
    void StartWork();
    // Submit a task and get a future for its result.
    template<class F, class... Args>
    auto Commit(F&& f, Args&&... args) -> std::future<decltype(f(args...))>;
    // Fire-and-forget submission; returns false if the pool is stopped.
    template<class F, class... Args>
    bool AsyncCommit(F&& f, Args&&... args);
    ~ThreadPool();
private:
    void ShutDown();
private:
    std::vector< std::thread > _workers;
    typedef std::function<void()> TASK_ITEM;
    std::queue<std::shared_ptr<TASK_ITEM>> _tasks;
    std::mutex _workMutex;
    std::condition_variable _condition;
    std::atomic<bool> _stop;
    size_t _threadCount;
};
ThreadPool::ThreadPool(size_t threads)
        : _stop(true)
        , _threadCount(0)
{
    _threadCount = threads < 1 ? 1 : threads;
}

void ThreadPool::StartWork() {
    // Only start if the pool is currently stopped; atomically flip the flag.
    if (!std::atomic_exchange(&_stop, false))
    {
        return;
    }

    // Drop handles of any previously joined workers so the pool can be restarted.
    _workers.clear();

    for(size_t i = 0; i < _threadCount; ++i)
        _workers.emplace_back(
                [this] {
                    for(;;)
                    {
                        std::function<void()> task;
                        {
                            std::unique_lock<std::mutex> lock(_workMutex);
                            // Sleep until there is work to do or the pool is shutting down.
                            _condition.wait(lock,
                                                 [this]{ return _stop.load() || !_tasks.empty(); });
                            // Exit only after the remaining queue has been drained.
                            if(_stop.load() && _tasks.empty())
                            {
                                std::cout << "exit thread " << std::this_thread::get_id() << std::endl;
                                return;
                            }

                            task = std::move(*_tasks.front());
                            _tasks.pop();
                        }

                        // Run the task outside the lock so other workers are not blocked.
                        task();
                    }
                }
        );
}

void ThreadPool::ShutDown() {
    // Only shut down once; atomically flip the flag back to stopped.
    if (std::atomic_exchange(&_stop, true))
    {
        return;
    }

    // Wake every worker so it can observe _stop and exit.
    _condition.notify_all();
    for(std::thread &worker: _workers)
    {
        if (worker.joinable())
            worker.join();
    }
}

template<class F, class... Args>
auto ThreadPool::Commit(F&& f, Args&&... args)
-> std::future<decltype(f(args...))>
{
    using return_type = decltype(f(args...));
    // Wrap the callable in a packaged_task so the caller can wait on its future.
    auto task = std::make_shared< std::packaged_task<return_type()> >(
            std::bind(std::forward<F>(f), std::forward<Args>(args)...)
    );
    std::future<return_type> res = task->get_future();
    {
        std::unique_lock<std::mutex> lock(_workMutex);
        if (_stop.load())
            throw std::runtime_error("Commit on stopped ThreadPool");
        _tasks.emplace(std::make_shared<TASK_ITEM>([task](){ (*task)(); }));
    }

    _condition.notify_one();
    return res;
}

template<class F, class... Args>
bool ThreadPool::AsyncCommit(F&& f, Args&&... args)
{
    // Fire-and-forget submission: no future is returned, so rejection is reported via the bool.
    if (_stop.load())
    {
        return false;
    }

    auto task = std::make_shared<TASK_ITEM>(
            std::bind(std::forward<F>(f), std::forward<Args>(args)...)
    );
    {
        std::unique_lock<std::mutex> lock(_workMutex);
        // Re-check under the lock in case the pool was stopped after the first check.
        if (_stop.load())
        {
            return false;
        }
        _tasks.emplace(task);
    }

    _condition.notify_one();
    return true;
}

ThreadPool::~ThreadPool()
{
    ShutDown();
}

#endif //THREADPOOL_H
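
A minimal usage sketch (assuming the header above is saved as ThreadPool.h; the HeavyAdd task and the main.cpp below are only illustrative, not part of the pool itself). It starts the pool, submits work through Commit when a result is needed, uses AsyncCommit for a fire-and-forget job, and lets the destructor call ShutDown:

#include "ThreadPool.h"
#include <iostream>
#include <vector>

// Hypothetical task used only for illustration.
int HeavyAdd(int a, int b) {
    return a + b;
}

int main() {
    ThreadPool pool(4);
    pool.StartWork();   // workers are not created until StartWork() is called

    // Commit: returns a std::future so the caller can collect the result.
    std::vector<std::future<int>> results;
    for (int i = 0; i < 8; ++i)
        results.emplace_back(pool.Commit(HeavyAdd, i, i));

    // AsyncCommit: fire-and-forget, only reports whether the task was queued.
    bool queued = pool.AsyncCommit([] { std::cout << "side job done" << std::endl; });

    for (auto& r : results)
        std::cout << r.get() << " ";
    std::cout << std::endl << "queued: " << std::boolalpha << queued << std::endl;

    return 0;   // ~ThreadPool() calls ShutDown(), draining the queue and joining workers
}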