/*! \file
* \brief Thread pool core.
*
 * This file contains the threadpool's core class: pool_core<Task, SchedulingPolicy, ...>.
*
* Thread pools are a mechanism for asynchronous and parallel processing
* within the same process. The pool class provides a convenient way
 * for dispatching asynchronous tasks as function objects. The scheduling
* of these tasks can be easily controlled by using customized schedulers.
*
* Copyright (c) 2005-2007 Philipp Henkel
*
 * Use, modification, and distribution are subject to the
* Boost Software License, Version 1.0. (See accompanying  file
* LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*
* http://threadpool.sourceforge.net
*
*/


#ifndef THREADPOOL_POOL_CORE_HPP_INCLUDED
#define THREADPOOL_POOL_CORE_HPP_INCLUDED




#include "locking_ptr.hpp"
#include "worker_thread.hpp"

#include "../task_adaptors.hpp"

#include <boost/thread.hpp>
#include <boost/thread/exceptions.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include <boost/smart_ptr.hpp>
#include <boost/bind.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits.hpp>

#include <vector>


/// The namespace threadpool contains a thread pool and related utility classes.
namespace boost { namespace threadpool { namespace detail
{

  /*! \brief Thread pool.
  *
  * Thread pools are a mechanism for asynchronous and parallel processing
  * within the same process. The pool class provides a convenient way
  * for dispatching asynchronous tasks as function objects. The scheduling
  * of these tasks can be easily controlled by using customized schedulers.
  * A task must not throw an exception.
  *
  * A pool_impl is DefaultConstructible and NonCopyable.
  *
  * \param Task A function object which implements the operator 'void operator() (void) const'. The operator () is called by the pool to execute the task. Exceptions are ignored.
  * \param Scheduler A task container which determines how tasks are scheduled. It is guaranteed that this container is accessed only by one thread at a time. The scheduler shall not throw exceptions.
  *
  * \remarks The pool class is thread-safe.
  *
  * \see Tasks: task_func, prio_task_func
  * \see Scheduling policies: fifo_scheduler, lifo_scheduler, prio_scheduler
  */
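  // Illustrative usage only (not part of this header): a minimal sketch of
  // driving a pool built on this core, assuming the library's convenience
  // header and its public fifo pool typedef:
  //
  //   #include "boost/threadpool.hpp"
  //
  //   void work();                    // a nullary task that must not throw
  //
  //   boost::threadpool::pool tp(2);  // pool with two worker threads
  //   tp.schedule(&work);             // dispatch the task asynchronously
  //   tp.wait();                      // block until all tasks have finished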
  template <
    typename Task,

    template <typename> class SchedulingPolicy,
    template <typename> class SizePolicy,
    template <typename> class SizePolicyController,
    template <typename> class ShutdownPolicy
  >
  class pool_core
  : public enable_shared_from_this< pool_core<Task, SchedulingPolicy, SizePolicy, SizePolicyController, ShutdownPolicy > >
  , private noncopyable
  {

  public: // Type definitions
    typedef Task task_type;                                 //!< Indicates the task's type.
    typedef SchedulingPolicy<task_type> scheduler_type;     //!< Indicates the scheduler's type.
    typedef pool_core<Task,
                      SchedulingPolicy,
                      SizePolicy,
                      SizePolicyController,
                      ShutdownPolicy > pool_type;           //!< Indicates the thread pool's type.
    typedef SizePolicy<pool_type> size_policy_type;         //!< Indicates the sizer's type.
    //typedef typename size_policy_type::size_controller size_controller_type;

    typedef SizePolicyController<pool_type> size_controller_type;

//    typedef SizePolicy<pool_type>::size_controller size_controller_type;
    typedef ShutdownPolicy<pool_type> shutdown_policy_type;//!< Indicates the shutdown policy's type.

    typedef worker_thread<pool_type> worker_type;

    // The task is required to be a nullary function.
    BOOST_STATIC_ASSERT(function_traits<task_type()>::arity == 0);

    // The task function's result type is required to be void.
    BOOST_STATIC_ASSERT(is_void<typename result_of<task_type()>::type >::value);
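    // For example, the library's task_func (a boost::function0<void>)
    // satisfies both requirements: it is nullary and returns void.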


  private:  // Friends
    friend class worker_thread<pool_type>;

#if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x580)  // Tested with CC: Sun C++ 5.8 Patch 121018-08 2006/12/06
   friend class SizePolicy;
   friend class ShutdownPolicy;
#else
   friend class SizePolicy<pool_type>;
   friend class ShutdownPolicy<pool_type>;
#endif

  private: // The following members may be accessed by _multiple_ threads at the same time:
    volatile size_t m_worker_count;
    volatile size_t m_target_worker_count;
    volatile size_t m_active_worker_count;



  private: // The following members are accessed only by _one_ thread at the same time:
    scheduler_type  m_scheduler;
    scoped_ptr<size_policy_type> m_size_policy; // is never null

    bool  m_terminate_all_workers;								// Indicates if termination of all workers was triggered.
    std::vector<shared_ptr<worker_type> > m_terminated_workers; // List of workers which are terminated but not fully destructed.

  private: // The following members are implemented thread-safe:
    mutable recursive_mutex  m_monitor;
    mutable condition m_worker_idle_or_terminated_event;	// A worker is idle or was terminated.
    mutable condition m_task_or_terminate_workers_event;  // Task is available OR total worker count should be reduced.

  public:
    /// Constructor.
    pool_core()
      : m_worker_count(0)
      , m_target_worker_count(0)
      , m_active_worker_count(0)
      , m_terminate_all_workers(false)
    {
      pool_type volatile & self_ref = *this;
      m_size_policy.reset(new size_policy_type(self_ref));

      m_scheduler.clear();
    }


    /// Destructor.
    ~pool_core()
    {
    }

    /*! Gets the size controller which manages the number of threads in the pool.
    * \return The size controller.
    * \see SizePolicy
    */
    size_controller_type size_controller()
    {
      return size_controller_type(*m_size_policy, this->shared_from_this());
    }
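    // Illustrative only: with a resize_controller-style size policy the pool
    // can be grown through the controller object (interface assumed here):
    //
    //   size_controller_type ctrl = core->size_controller();
    //   ctrl.resize(8);   // request eight worker threads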

    /*! Gets the number of threads in the pool.
    * \return The number of threads.
    */
    size_t size()	const volatile
    {
      return m_worker_count;
    }

// TODO: this is called only once
    void shutdown()
    {
      ShutdownPolicy<pool_type>::shutdown(*this);
    }

    /*! Schedules a task for asynchronous execution. The task will be executed once only.
    * \param task The task function object. It should not throw exceptions.
    * \return true if the task could be scheduled, false otherwise.
    */
    bool schedule(task_type const & task) volatile
    {
      locking_ptr<pool_type, recursive_mutex> lockedThis(*this, m_monitor);
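      // locking_ptr locks m_monitor and casts volatile away, so the members
      // below are accessed under the lock through a non-volatile pointer.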

      if(lockedThis->m_scheduler.push(task))
      {
        lockedThis->m_task_or_terminate_workers_event.notify_one();
        return true;
      }
      else
      {
        return false;
      }
    }
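    // Illustrative only; my_task is a placeholder for any nullary functor:
    //
    //   shared_ptr<pool_type> core(new pool_type); // must be shared_ptr-owned
    //   bool accepted = core->schedule(my_task);   // false if the scheduler rejects it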


    /*! Returns the number of tasks which are currently being executed.
    * \return The number of active tasks.
    */
    size_t active() const volatile
    {
      return m_active_worker_count;
    }


    /*! Returns the number of tasks which are ready for execution.
    * \return The number of pending tasks.
    */
    size_t pending() const volatile
    {
      locking_ptr<const pool_type, recursive_mutex> lockedThis(*this, m_monitor);
      return lockedThis->m_scheduler.size();
    }


    /*! Removes all pending tasks from the pool's scheduler.
    */
    void clear() volatile
    {
      locking_ptr<pool_type, recursive_mutex> lockedThis(*this, m_monitor);
      lockedThis->m_scheduler.clear();
    }


    /*! Indicates whether there are no tasks pending.
    * \return true if there are no tasks ready for execution.
    * \remarks This function is more efficient than the check 'pending() == 0'.
    */
    bool empty() const volatile
    {
      locking_ptr<const pool_type, recursive_mutex> lockedThis(*this, m_monitor);
      return lockedThis->m_scheduler.empty();
    }


    /*! The current thread of execution is blocked until the sum of all active
    *  and pending tasks is less than or equal to a given threshold.
    * \param task_threshold The maximum number of tasks in pool and scheduler.
    */
    void wait(size_t const task_threshold = 0) const volatile
    {
      const pool_type* self = const_cast<const pool_type*>(this);
      recursive_mutex::scoped_lock lock(self->m_monitor);

      if(0 == task_threshold)
      {
        while(0 != self->m_active_worker_count || !self->m_scheduler.empty())
        {
          self->m_worker_idle_or_terminated_event.wait(lock);
        }
      }
      else
      {
        while(task_threshold < self->m_active_worker_count + self->m_scheduler.size())
        {
          self->m_worker_idle_or_terminated_event.wait(lock);
        }
      }
    }
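    // Illustrative threshold wait (core as in the sketches above): block
    // until at most four tasks remain active or pending:
    //
    //   core->wait(4);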

    /*! The current thread of execution is blocked until the timestamp is reached
    * or the sum of all active and pending tasks is less than or equal to
    * a given threshold.
    * \param timestamp The latest time at which the function returns.
    * \param task_threshold The maximum number of tasks in pool and scheduler.
    * \return true if the task sum is less than or equal to the threshold, false otherwise.
    */
    bool wait(xtime const & timestamp, size_t const task_threshold = 0) const volatile
    {
      const pool_type* self = const_cast<const pool_type*>(this);
      recursive_mutex::scoped_lock lock(self->m_monitor);

      if(0 == task_threshold)
      {
        while(0 != self->m_active_worker_count || !self->m_scheduler.empty())
        {
          if(!self->m_worker_idle_or_terminated_event.timed_wait(lock, timestamp)) return false;
        }
      }
      else
      {
        while(task_threshold < self->m_active_worker_count + self->m_scheduler.size())
        {
          if(!self->m_worker_idle_or_terminated_event.timed_wait(lock, timestamp)) return false;
        }
      }

      return true;
    }
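    // Illustrative timed wait using the Boost.Thread xtime API of this era
    // (note: TIME_UTC was renamed TIME_UTC_ in later Boost releases):
    //
    //   boost::xtime deadline;
    //   boost::xtime_get(&deadline, boost::TIME_UTC);
    //   deadline.sec += 5;                  // allow up to five seconds
    //   bool done = core->wait(deadline);   // false if the timeout expired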


  private:


    void terminate_all_workers(bool const wait) volatile
    {
      pool_type* self = const_cast<pool_type*>(this);
      recursive_mutex::scoped_lock lock(self->m_monitor);

      self->m_terminate_all_workers = true;

      m_target_worker_count = 0;
      self->m_task_or_terminate_workers_event.notify_all();

      if(wait)
      {
        while(m_active_worker_count > 0)
        {
          self->m_worker_idle_or_terminated_event.wait(lock);
        }

        for(typename std::vector<shared_ptr<worker_type> >::iterator it = self->m_terminated_workers.begin();
          it != self->m_terminated_workers.end();
          ++it)
        {
          (*it)->join();
        }
        self->m_terminated_workers.clear();
      }
    }


    /*! Changes the number of worker threads in the pool. The resizing
    *  is handled by the SizePolicy.
    * \param worker_count The new number of worker threads.
    * \return true if the pool will be resized, false otherwise.
    */
    bool resize(size_t const worker_count) volatile
    {
      locking_ptr<pool_type, recursive_mutex> lockedThis(*this, m_monitor);

      if(!m_terminate_all_workers)
      {
        m_target_worker_count = worker_count;
      }
      else
      {
        return false;
      }


      if(m_worker_count <= m_target_worker_count)
      { // increase worker count
        while(m_worker_count < m_target_worker_count)
        {
          try
          {
            worker_thread<pool_type>::create_and_attach(lockedThis->shared_from_this());
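            // A newly created worker counts as active until it first goes
            // idle inside execute_task().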
            m_worker_count++;
            m_active_worker_count++;
          }
          catch(thread_resource_error&)
          {
            return false;
          }
        }
      }
      else
      { // decrease worker count
        lockedThis->m_task_or_terminate_workers_event.notify_all();   // TODO: Optimize number of notified workers
      }

      return true;
    }


    // worker died with unhandled exception
    void worker_died_unexpectedly(shared_ptr<worker_type> worker) volatile
    {
      locking_ptr<pool_type, recursive_mutex> lockedThis(*this, m_monitor);

      m_worker_count--;
      m_active_worker_count--;
      lockedThis->m_worker_idle_or_terminated_event.notify_all();

      if(m_terminate_all_workers)
      {
        lockedThis->m_terminated_workers.push_back(worker);
      }
      else
      {
        lockedThis->m_size_policy->worker_died_unexpectedly(m_worker_count);
      }
    }

    void worker_destructed(shared_ptr<worker_type> worker) volatile
    {
      locking_ptr<pool_type, recursive_mutex> lockedThis(*this, m_monitor);
      m_worker_count--;
      m_active_worker_count--;
      lockedThis->m_worker_idle_or_terminated_event.notify_all();

      if(m_terminate_all_workers)
      {
        lockedThis->m_terminated_workers.push_back(worker);
      }
    }


    bool execute_task() volatile
    {
      function0<void> task;

      { // fetch task
        pool_type* lockedThis = const_cast<pool_type*>(this);
        recursive_mutex::scoped_lock lock(lockedThis->m_monitor);

        // decrease number of threads if necessary
        if(m_worker_count > m_target_worker_count)
        {
          return false;	// terminate worker
        }


        // wait for tasks
        while(lockedThis->m_scheduler.empty())
        {
          // decrease number of workers if necessary
          if(m_worker_count > m_target_worker_count)
          {
            return false;	// terminate worker
          }
          else
          {
            m_active_worker_count--;
            lockedThis->m_worker_idle_or_terminated_event.notify_all();
            lockedThis->m_task_or_terminate_workers_event.wait(lock);
            m_active_worker_count++;
          }
        }

        task = lockedThis->m_scheduler.top();
        lockedThis->m_scheduler.pop();
      }

      // call task function
      if(task)
      {
        task();
      }

      //guard->disable();
      return true;
    }
  };




} } } // namespace boost::threadpool::detail

#endif // THREADPOOL_POOL_CORE_HPP_INCLUDED