LCOV - code coverage report
Current view: top level - lib/support - Pool.h (source / functions)
Test: lcov_final.info
Date: 2024-02-15 08:20:41
                 Hit      Total    Coverage
Lines:            85         93      91.4 %
Functions:       279        377      74.0 %

          Line data    Source code
       1             : /*
       2             :  *    Copyright (c) 2020 Project CHIP Authors
       3             :  *    Copyright (c) 2013 Nest Labs, Inc.
       4             :  *    All rights reserved.
       5             :  *
       6             :  *    Licensed under the Apache License, Version 2.0 (the "License");
       7             :  *    you may not use this file except in compliance with the License.
       8             :  *    You may obtain a copy of the License at
       9             :  *
      10             :  *        http://www.apache.org/licenses/LICENSE-2.0
      11             :  *
      12             :  *    Unless required by applicable law or agreed to in writing, software
      13             :  *    distributed under the License is distributed on an "AS IS" BASIS,
      14             :  *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      15             :  *    See the License for the specific language governing permissions and
      16             :  *    limitations under the License.
      17             :  */
      18             : 
      19             : /**
      20             :  * Defines memory pool classes.
      21             :  */
      22             : 
      23             : #pragma once
      24             : 
      25             : #include <lib/support/CHIPMem.h>
      26             : #include <lib/support/CodeUtils.h>
      27             : #include <system/SystemConfig.h>
      28             : 
      29             : #include <lib/support/Iterators.h>
      30             : 
      31             : #include <atomic>
      32             : #include <limits>
      33             : #include <new>
      34             : #include <stddef.h>
      35             : #include <utility>
      36             : 
      37             : namespace chip {
      38             : 
      39             : namespace internal {
      40             : 
      41             : class Statistics
      42             : {
      43             : public:
      44         277 :     Statistics() : mAllocated(0), mHighWaterMark(0) {}
      45             : 
      46        3303 :     size_t Allocated() const { return mAllocated; }
      47             :     size_t HighWaterMark() const { return mHighWaterMark; }
      48     3059127 :     void IncreaseUsage()
      49             :     {
      50     3059127 :         if (++mAllocated > mHighWaterMark)
      51             :         {
      52        1349 :             mHighWaterMark = mAllocated;
      53             :         }
      54     3059127 :     }
      55     1382772 :     void DecreaseUsage() { --mAllocated; }
      56             : 
      57             : protected:
      58             :     size_t mAllocated;
      59             :     size_t mHighWaterMark;
      60             : };
      61             : 
      62             : class StaticAllocatorBase : public Statistics
      63             : {
      64             : public:
      65          80 :     StaticAllocatorBase(size_t capacity) : mCapacity(capacity) {}
      66      188048 :     size_t Capacity() const { return mCapacity; }
      67          82 :     bool Exhausted() const { return mAllocated == mCapacity; }
      68             : 
      69             : protected:
      70             :     const size_t mCapacity;
      71             : };
      72             : 
      73             : class StaticAllocatorBitmap : public internal::StaticAllocatorBase
      74             : {
      75             : protected:
      76             :     /**
       77             :      * Use the largest data type supported by `std::atomic`. Putting multiple atomics inside a single cache line won't improve
       78             :      * concurrency, while using a larger data type can improve performance by reducing the number of outer-loop iterations.
      79             :      */
      80             :     using tBitChunkType                         = unsigned long;
      81             :     static constexpr const tBitChunkType kBit1  = 1; // make sure bitshifts produce the right type
      82             :     static constexpr const size_t kBitChunkSize = std::numeric_limits<tBitChunkType>::digits;
      83             :     static_assert(ATOMIC_LONG_LOCK_FREE, "StaticAllocatorBitmap is not lock free");
      84             : 
      85             : public:
      86             :     StaticAllocatorBitmap(void * storage, std::atomic<tBitChunkType> * usage, size_t capacity, size_t elementSize);
      87             : 
      88             : protected:
      89             :     void * Allocate();
      90             :     void Deallocate(void * element);
      91       60298 :     void * At(size_t index) { return static_cast<uint8_t *>(mElements) + mElementSize * index; }
      92             :     size_t IndexOf(void * element);
      93             : 
      94             :     using Lambda = Loop (*)(void * context, void * object);
      95             :     Loop ForEachActiveObjectInner(void * context, Lambda lambda);
      96           0 :     Loop ForEachActiveObjectInner(void * context, Loop lambda(void * context, const void * object)) const
      97             :     {
      98           0 :         return const_cast<StaticAllocatorBitmap *>(this)->ForEachActiveObjectInner(context, reinterpret_cast<Lambda>(lambda));
      99             :     }
     100             : 
     101             : private:
     102             :     void * mElements;
     103             :     const size_t mElementSize;
     104             :     std::atomic<tBitChunkType> * mUsage;
     105             : };
     106             : 
     107             : template <typename T, typename Function>
     108             : class LambdaProxy
     109             : {
     110             : public:
     111      248862 :     LambdaProxy(Function && function) : mFunction(std::move(function)) {}
     112     3651473 :     static Loop Call(void * context, void * target)
     113             :     {
     114     3651473 :         return static_cast<LambdaProxy *>(context)->mFunction(static_cast<T *>(target));
     115             :     }
     116         172 :     static Loop ConstCall(void * context, const void * target)
     117             :     {
     118         172 :         return static_cast<LambdaProxy *>(context)->mFunction(static_cast<const T *>(target));
     119             :     }
     120             : 
     121             : private:
     122             :     Function mFunction;
     123             : };
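
// Illustrative sketch of the type-erasure step LambdaProxy performs: an arbitrary
// (possibly capturing) functor is funneled through a plain function pointer plus an
// opaque context pointer. `Payload` and `InvokeThroughProxy` are hypothetical names;
// real callers go through ForEachActiveObject instead of using LambdaProxy directly
// (assumes <lib/support/Pool.h> is included).

struct Payload
{
    int mValue;
};

chip::Loop InvokeThroughProxy(Payload & target)
{
    int threshold = 10;
    auto visitor  = [threshold](Payload * obj) {
        return (obj->mValue > threshold) ? chip::Loop::Break : chip::Loop::Continue;
    };
    // The pools store &proxy as the opaque `void * context` and hand Call (or ConstCall)
    // to the C-style iteration entry points shown below.
    chip::internal::LambdaProxy<Payload, decltype(visitor)> proxy(std::move(visitor));
    return chip::internal::LambdaProxy<Payload, decltype(visitor)>::Call(&proxy, &target);
}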
     124             : 
     125             : #if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
     126             : 
     127             : struct HeapObjectListNode
     128             : {
     129     1382639 :     void Remove()
     130             :     {
     131     1382639 :         mNext->mPrev = mPrev;
     132     1382639 :         mPrev->mNext = mNext;
     133     1382639 :     }
     134             : 
     135             :     void * mObject;
     136             :     HeapObjectListNode * mNext;
     137             :     HeapObjectListNode * mPrev;
     138             : };
     139             : 
     140             : struct HeapObjectList : HeapObjectListNode
     141             : {
     142          74 :     HeapObjectList() { mNext = mPrev = this; }
     143             : 
     144     1683022 :     void Append(HeapObjectListNode * node)
     145             :     {
     146     1683022 :         node->mNext  = this;
     147     1683022 :         node->mPrev  = mPrev;
     148     1683022 :         mPrev->mNext = node;
     149     1683022 :         mPrev        = node;
     150     1683022 :     }
     151             : 
     152             :     HeapObjectListNode * FindNode(void * object) const;
     153             : 
     154             :     using Lambda = Loop (*)(void *, void *);
     155             :     Loop ForEachNode(void * context, Lambda lambda);
     156          75 :     Loop ForEachNode(void * context, Loop lambda(void * context, const void * object)) const
     157             :     {
     158          75 :         return const_cast<HeapObjectList *>(this)->ForEachNode(context, reinterpret_cast<Lambda>(lambda));
     159             :     }
     160             : 
     161             :     size_t mIterationDepth         = 0;
     162             :     bool mHaveDeferredNodeRemovals = false;
     163             : };
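
// Illustrative sketch of the list invariants (assumes this header is included and
// CHIP_SYSTEM_CONFIG_POOL_USE_HEAP is enabled): HeapObjectList is a circular,
// doubly-linked list whose sentinel is the list object itself, so Append() and
// Remove() are O(1) splices. `ListSpliceDemo` is a hypothetical name.

#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
void ListSpliceDemo()
{
    chip::internal::HeapObjectList list;     // empty: list.mNext == list.mPrev == &list
    chip::internal::HeapObjectListNode node;
    node.mObject = nullptr;

    list.Append(&node);                      // now list.mPrev == &node and node.mNext == &list
    node.Remove();                           // unlinks node; the list is empty again
}
#endif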
     164             : 
     165             : #endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
     166             : 
     167             : } // namespace internal
     168             : 
     169             : /**
     170             :  * @class ObjectPool
     171             :  *
     172             :  * Depending on build configuration, ObjectPool is either a fixed-size static pool or a heap-allocated pool.
     173             :  *
     174             :  * @tparam T    Type of element to be allocated.
     175             :  * @tparam N    Number of elements in the pool, in the fixed-size case.
     176             :  *
     177             :  * @fn CreateObject
     178             :  * @memberof ObjectPool
     179             :  *
     180             :  * Create an object from the pool. Forwards its arguments to construct a T.
     181             :  *
     182             :  * @fn ReleaseObject
     183             :  * @memberof ObjectPool
     184             :  * @param object   Pointer to object to release (or return to the pool). Its destructor runs.
     185             :  *
     186             :  * @fn ForEachActiveObject
     187             :  * @memberof ObjectPool
     188             :  * @param visitor   A function that takes a T* and returns Loop::Continue to continue iterating or Loop::Break to stop iterating.
     189             :  * @returns Loop::Break if a visitor call returned Loop::Break, Loop::Finish otherwise.
     190             :  *
     191             :  * Iteration may be nested. ReleaseObject() can be called during iteration, on the current object or any other.
     192             :  * CreateObject() can be called, but it is undefined whether or not a newly created object will be visited.
     193             :  */
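
// Illustrative usage sketch of the interface documented above (assumes
// <lib/support/Pool.h> is included). `Widget` and `WidgetDemo` are hypothetical
// names, and error handling is kept minimal.

struct Widget
{
    explicit Widget(int id) : mId(id) {}
    int mId;
};

void WidgetDemo()
{
    chip::ObjectPool<Widget, 8> pool;

    Widget * w = pool.CreateObject(7); // forwards 7 to Widget's constructor
    if (w == nullptr)
        return;                        // pool exhausted (inline case) or allocation failed (heap case)

    pool.ForEachActiveObject([](Widget * obj) {
        // Returning Break stops the iteration and makes the call return Loop::Break.
        return (obj->mId == 7) ? chip::Loop::Break : chip::Loop::Continue;
    });

    pool.ReleaseObject(w); // runs ~Widget and returns its storage to the pool
}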
     194             : 
     195             : /**
     196             :  * A class template used for allocating objects from a fixed-size static pool.
     197             :  *
     198             :  *  @tparam     T   type of element to be allocated.
      199             :  *  @tparam     N   the maximum number of elements the pool provides (a positive integer).
     200             :  */
     201             : template <class T, size_t N>
     202             : class BitMapObjectPool : public internal::StaticAllocatorBitmap
     203             : {
     204             : public:
     205         112 :     BitMapObjectPool() : StaticAllocatorBitmap(mData.mMemory, mUsage, N, sizeof(T)) {}
     206          56 :     ~BitMapObjectPool() { VerifyOrDie(Allocated() == 0); }
     207             : 
     208             :     template <typename... Args>
     209         109 :     T * CreateObject(Args &&... args)
     210             :     {
     211         109 :         T * element = static_cast<T *>(Allocate());
     212         109 :         if (element != nullptr)
     213         109 :             return new (element) T(std::forward<Args>(args)...);
     214           0 :         return nullptr;
     215             :     }
     216             : 
     217         109 :     void ReleaseObject(T * element)
     218             :     {
     219         109 :         if (element == nullptr)
     220           0 :             return;
     221             : 
     222         109 :         element->~T();
     223         109 :         Deallocate(element);
     224             :     }
     225             : 
     226        1602 :     void ReleaseAll() { ForEachActiveObjectInner(this, ReleaseObject); }
     227             : 
     228             :     /**
     229             :      * @brief
     230             :      *   Run a functor for each active object in the pool
     231             :      *
      232             :      *  @param     function A functor callable as `Loop(T *)`.
     233             :      *                      Return Loop::Break to break the iteration.
     234             :      *                      The only modification the functor is allowed to make
     235             :      *                      to the pool before returning is releasing the
     236             :      *                      object that was passed to the functor.  Any other
     237             :      *                      desired changes need to be made after iteration
     238             :      *                      completes.
     239             :      *  @return    Loop     Returns Break if some call to the functor returned
     240             :      *                      Break.  Otherwise returns Finish.
     241             :      *
      242             :      * Caution:
      243             :      *   This function is not thread-safe. Make sure all usage of the
      244             :      *   pool is protected by a lock, or else avoid using this function.
     245             :      */
     246             :     template <typename Function>
     247        1055 :     Loop ForEachActiveObject(Function && function)
     248             :     {
     249             :         static_assert(std::is_same<Loop, decltype(function(std::declval<T *>()))>::value,
     250             :                       "The function must take T* and return Loop");
     251        1055 :         internal::LambdaProxy<T, Function> proxy(std::forward<Function>(function));
     252        1055 :         return ForEachActiveObjectInner(&proxy, &internal::LambdaProxy<T, Function>::Call);
     253             :     }
     254             :     template <typename Function>
     255           0 :     Loop ForEachActiveObject(Function && function) const
     256             :     {
     257             :         static_assert(std::is_same<Loop, decltype(function(std::declval<const T *>()))>::value,
     258             :                       "The function must take const T* and return Loop");
     259           0 :         internal::LambdaProxy<T, Function> proxy(std::forward<Function>(function));
     260           0 :         return ForEachActiveObjectInner(&proxy, &internal::LambdaProxy<T, Function>::ConstCall);
     261             :     }
     262             : 
     263             : private:
     264         109 :     static Loop ReleaseObject(void * context, void * object)
     265             :     {
     266         109 :         static_cast<BitMapObjectPool *>(context)->ReleaseObject(static_cast<T *>(object));
     267         109 :         return Loop::Continue;
     268             :     }
     269             : 
     270             :     std::atomic<tBitChunkType> mUsage[(N + kBitChunkSize - 1) / kBitChunkSize];
     271             :     union Data
     272             :     {
     273          56 :         Data() {}
     274          56 :         ~Data() {}
     275             :         alignas(alignof(T)) uint8_t mMemory[N * sizeof(T)];
     276             :         T mMemoryViewForDebug[N]; // Just for debugger
     277             :     } mData;
     278             : };
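
// Illustrative sketch of the fixed-size pool's exhaustion behavior (assumes this
// header is included). `Session` and `ExhaustionDemo` are hypothetical names.

struct Session
{
};

void ExhaustionDemo()
{
    chip::BitMapObjectPool<Session, 2> pool;

    Session * a = pool.CreateObject();
    pool.CreateObject();               // occupies the second (and last) slot
    Session * c = pool.CreateObject(); // nullptr: every slot is already in use
    VerifyOrDie(c == nullptr && pool.Exhausted());

    pool.ReleaseObject(a);             // frees one slot, so Exhausted() becomes false
    pool.ReleaseAll();                 // releases the remaining object
    // The pool must be empty here: ~BitMapObjectPool() dies if Allocated() != 0.
}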
     279             : 
     280             : #if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
     281             : 
     282             : class HeapObjectPoolExitHandling
     283             : {
     284             : public:
      285             :     // If IgnoreLeaksOnExit is called (at some point after all static initializers
      286             :     // have run), HeapObjectPool will not assert that everything in it has been
      287             :     // released when its destructor runs under exit() (i.e. when the application
      288             :     // is quitting anyway).
     289             :     static void IgnoreLeaksOnExit();
     290             : 
     291             : protected:
     292             :     static bool sIgnoringLeaksOnExit;
     293             : 
     294             : private:
     295             :     static void ExitHandler();
     296             :     static bool sExitHandlerRegistered;
     297             : };
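
// Illustrative sketch of opting out of the shutdown leak check (assumes
// CHIP_SYSTEM_CONFIG_POOL_USE_HEAP is enabled); calling it at the start of main()
// is a hypothetical but sufficient choice of call site, since all static
// initializers have run by then.

#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
int main()
{
    // After this call, HeapObjectPool destructors that run under exit() no longer
    // die on objects that were never released.
    chip::HeapObjectPoolExitHandling::IgnoreLeaksOnExit();
    // ... application code ...
    return 0;
}
#endif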
     298             : 
     299             : /**
     300             :  * A class template used for allocating objects from the heap.
     301             :  *
     302             :  *  @tparam     T   type to be allocated.
     303             :  */
     304             : template <class T>
     305             : class HeapObjectPool : public internal::Statistics, public HeapObjectPoolExitHandling
     306             : {
     307             : public:
     308         578 :     HeapObjectPool() {}
     309         536 :     ~HeapObjectPool()
     310             :     {
     311             : #ifndef __SANITIZE_ADDRESS__
     312             : #ifdef __clang__
     313             : #if __has_feature(address_sanitizer)
     314             : #define __SANITIZE_ADDRESS__ 1
     315             : #else
     316             : #define __SANITIZE_ADDRESS__ 0
     317             : #endif // __has_feature(address_sanitizer)
     318             : #else
     319             : #define __SANITIZE_ADDRESS__ 0
     320             : #endif // __clang__
     321             : #endif // __SANITIZE_ADDRESS__
     322             : #if __SANITIZE_ADDRESS__
     323             :         // Free all remaining objects so that ASAN can catch specific use-after-free cases.
     324             :         ReleaseAll();
     325             : #else  // __SANITIZE_ADDRESS__
     326         536 :         if (!sIgnoringLeaksOnExit)
     327             :         {
     328             :             // Verify that no live objects remain, to prevent potential use-after-free.
     329         536 :             VerifyOrDie(Allocated() == 0);
     330             :         }
     331             : #endif // __SANITIZE_ADDRESS__
     332         536 :     }
     333             : 
     334             :     template <typename... Args>
     335     4282565 :     T * CreateObject(Args &&... args)
     336             :     {
     337     4282565 :         T * object = Platform::New<T>(std::forward<Args>(args)...);
     338     4282565 :         if (object != nullptr)
     339             :         {
     340     4282565 :             auto node = Platform::New<internal::HeapObjectListNode>();
     341     4282565 :             if (node != nullptr)
     342             :             {
     343     4282565 :                 node->mObject = object;
     344     4282565 :                 mObjects.Append(node);
     345     4282565 :                 IncreaseUsage();
     346     4282565 :                 return object;
     347             :             }
     348             :         }
     349           0 :         return nullptr;
     350             :     }
     351             : 
     352             :     /*
     353             :      * This method exists purely to line up with the static allocator version.
      354             :      * Consequently, it returns a nonsensically large number so that comparisons
      355             :      * against this value behave consistently.
     356             :      */
     357             :     size_t Capacity() const { return SIZE_MAX; }
     358             : 
     359             :     /*
      360             :      * This method exists purely to line up with the static allocator version. A heap-based object pool is never exhausted.
     361             :      */
     362             :     bool Exhausted() const { return false; }
     363             : 
     364     4282149 :     void ReleaseObject(T * object)
     365             :     {
     366     4282149 :         if (object != nullptr)
     367             :         {
     368     4282149 :             internal::HeapObjectListNode * node = mObjects.FindNode(object);
     369             :             // Releasing an object that is not allocated indicates likely memory
      370             :             // corruption; it is safer to crash deliberately than to proceed at this point.
     371     4282149 :             VerifyOrDie(node != nullptr);
     372             : 
     373     4282149 :             node->mObject = nullptr;
     374     4282149 :             Platform::Delete(object);
     375             : 
     376             :             // The node needs to be released immediately if we are not in the middle of iteration.
     377             :             // Otherwise cleanup is deferred until all iteration on this pool completes and it's safe to release nodes.
     378     4282149 :             if (mObjects.mIterationDepth == 0)
     379             :             {
     380     4270020 :                 node->Remove();
     381     4270020 :                 Platform::Delete(node);
     382             :             }
     383             :             else
     384             :             {
     385       12129 :                 mObjects.mHaveDeferredNodeRemovals = true;
     386             :             }
     387             : 
     388     4282149 :             DecreaseUsage();
     389             :         }
     390     4282149 :     }
     391             : 
     392        2401 :     void ReleaseAll() { mObjects.ForEachNode(this, ReleaseObject); }
     393             : 
     394             :     /**
     395             :      * @brief
     396             :      *   Run a functor for each active object in the pool
     397             :      *
      398             :      *  @param     function A functor callable as `Loop(T *)`.
     399             :      *                      Return Loop::Break to break the iteration.
     400             :      *                      The only modification the functor is allowed to make
     401             :      *                      to the pool before returning is releasing the
     402             :      *                      object that was passed to the functor.  Any other
     403             :      *                      desired changes need to be made after iteration
     404             :      *                      completes.
     405             :      *  @return    Loop     Returns Break if some call to the functor returned
     406             :      *                      Break.  Otherwise returns Finish.
     407             :      */
     408             :     template <typename Function>
     409      247732 :     Loop ForEachActiveObject(Function && function)
     410             :     {
     411             :         static_assert(std::is_same<Loop, decltype(function(std::declval<T *>()))>::value,
     412             :                       "The function must take T* and return Loop");
     413      247732 :         internal::LambdaProxy<T, Function> proxy(std::forward<Function>(function));
     414      247732 :         return mObjects.ForEachNode(&proxy, &internal::LambdaProxy<T, Function>::Call);
     415         365 :     }
     416             :     template <typename Function>
     417          75 :     Loop ForEachActiveObject(Function && function) const
     418             :     {
     419             :         static_assert(std::is_same<Loop, decltype(function(std::declval<const T *>()))>::value,
     420             :                       "The function must take const T* and return Loop");
     421          75 :         internal::LambdaProxy<const T, Function> proxy(std::forward<Function>(function));
     422          75 :         return mObjects.ForEachNode(&proxy, &internal::LambdaProxy<const T, Function>::ConstCall);
     423             :     }
     424             : 
     425             : private:
     426          66 :     static Loop ReleaseObject(void * context, void * object)
     427             :     {
     428          66 :         static_cast<HeapObjectPool *>(context)->ReleaseObject(static_cast<T *>(object));
     429          66 :         return Loop::Continue;
     430             :     }
     431             : 
     432             :     internal::HeapObjectList mObjects;
     433             : };
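
// Illustrative sketch of releasing objects while iterating, which the heap pool
// supports by deferring list-node cleanup until the outermost iteration completes
// (see mIterationDepth / mHaveDeferredNodeRemovals above). `Connection` and
// `PruneClosed` are hypothetical names; assumes CHIP_SYSTEM_CONFIG_POOL_USE_HEAP.

#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
struct Connection
{
    bool mClosed = false;
};

void PruneClosed(chip::HeapObjectPool<Connection> & pool)
{
    pool.ForEachActiveObject([&pool](Connection * conn) {
        if (conn->mClosed)
        {
            // Releasing the object passed to the functor is allowed during iteration;
            // its list node is cleaned up once iteration finishes.
            pool.ReleaseObject(conn);
        }
        return chip::Loop::Continue;
    });
}
#endif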
     434             : 
     435             : #endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
     436             : 
     437             : /**
     438             :  * Specify ObjectPool storage allocation.
     439             :  */
     440             : enum class ObjectPoolMem
     441             : {
     442             :     /**
     443             :      * Use storage inside the containing scope for both objects and pool management state.
     444             :      */
     445             :     kInline,
     446             : #if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
     447             :     /**
     448             :      * Allocate objects from the heap, with only pool management state in the containing scope.
     449             :      *
     450             :      * For this case, the ObjectPool size parameter is ignored.
     451             :      */
     452             :     kHeap,
     453             :     kDefault = kHeap
     454             : #else  // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
     455             :     kDefault = kInline
     456             : #endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
     457             : };
     458             : 
     459             : template <typename T, size_t N, ObjectPoolMem P = ObjectPoolMem::kDefault>
     460             : class ObjectPool;
     461             : 
     462             : template <typename T, size_t N>
     463             : class ObjectPool<T, N, ObjectPoolMem::kInline> : public BitMapObjectPool<T, N>
     464             : {
     465             : };
     466             : 
     467             : #if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
     468             : template <typename T, size_t N>
     469             : class ObjectPool<T, N, ObjectPoolMem::kHeap> : public HeapObjectPool<T>
     470             : {
     471             : };
     472             : #endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
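
// Illustrative sketch of selecting the storage strategy explicitly (assumes this
// header is included). `Timer` and `StorageSelectionDemo` are hypothetical names.

struct Timer
{
};

void StorageSelectionDemo()
{
    chip::ObjectPool<Timer, 16> defaultPool;                              // ObjectPoolMem::kDefault
    chip::ObjectPool<Timer, 16, chip::ObjectPoolMem::kInline> inlinePool; // always BitMapObjectPool<Timer, 16>
#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
    chip::ObjectPool<Timer, 16, chip::ObjectPoolMem::kHeap> heapPool;     // HeapObjectPool<Timer>; the 16 is ignored
#endif
    // All three pools are empty, so the leak checks in their destructors pass here.
}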
     473             : 
     474             : } // namespace chip

Generated by: LCOV version 1.14