/*
 * Copyright (c) 2020 Project CHIP Authors
 * Copyright (c) 2013 Nest Labs, Inc.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Defines memory pool classes.
 */

#pragma once

#include <lib/support/CHIPMem.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/ObjectDump.h>
#include <system/SystemConfig.h>

#include <lib/support/Iterators.h>

#include <atomic>
#include <limits>
#include <new>
#include <stddef.h>
#include <utility>

namespace chip {

template <class T>
class BitmapActiveObjectIterator;

namespace internal {

class Statistics
{
public:
    Statistics() : mAllocated(0), mHighWaterMark(0) {}

    size_t Allocated() const { return mAllocated; }
    size_t HighWaterMark() const { return mHighWaterMark; }
    void IncreaseUsage()
    {
        if (++mAllocated > mHighWaterMark)
        {
            mHighWaterMark = mAllocated;
        }
    }
    void DecreaseUsage() { --mAllocated; }

protected:
    size_t mAllocated;
    size_t mHighWaterMark;
};

class StaticAllocatorBase : public Statistics
{
public:
    StaticAllocatorBase(size_t capacity) : mCapacity(capacity) {}
    size_t Capacity() const { return mCapacity; }
    bool Exhausted() const { return mAllocated == mCapacity; }

protected:
    const size_t mCapacity;
};

class StaticAllocatorBitmap : public internal::StaticAllocatorBase
{
protected:
    /**
     * Use the largest data type supported by `std::atomic`. Putting multiple atomics inside a single cache line won't improve
     * concurrency, while using a larger data type can improve performance by reducing the number of outer loop iterations.
     */
    using tBitChunkType                         = unsigned long;
    static constexpr const tBitChunkType kBit1  = 1; // make sure bitshifts produce the right type
    static constexpr const size_t kBitChunkSize = std::numeric_limits<tBitChunkType>::digits;
    static_assert(ATOMIC_LONG_LOCK_FREE, "StaticAllocatorBitmap is not lock free");

public:
    StaticAllocatorBitmap(void * storage, std::atomic<tBitChunkType> * usage, size_t capacity, size_t elementSize);

protected:
    void * Allocate();
    void Deallocate(void * element);
    void * At(size_t index) { return static_cast<uint8_t *>(mElements) + mElementSize * index; }
    size_t IndexOf(void * element);

    /// Returns the first index that is active (i.e. contains allocated data).
    ///
    /// If nothing is active, this returns mCapacity.
    size_t FirstActiveIndex();

    /// Returns the next active index after `start`.
    ///
    /// If nothing else is active/allocated, returns mCapacity.
    size_t NextActiveIndexAfter(size_t start);

    using Lambda = Loop (*)(void * context, void * object);
    Loop ForEachActiveObjectInner(void * context, Lambda lambda);
    Loop ForEachActiveObjectInner(void * context, Loop lambda(void * context, const void * object)) const
    {
        return const_cast<StaticAllocatorBitmap *>(this)->ForEachActiveObjectInner(context, reinterpret_cast<Lambda>(lambda));
    }

private:
    void * mElements;
    const size_t mElementSize;
    std::atomic<tBitChunkType> * mUsage;

    /// Allow direct At() calls from the iterator.
    template <class T>
    friend class ::chip::BitmapActiveObjectIterator;
};

template <typename T, typename Function>
class LambdaProxy
{
public:
    LambdaProxy(Function && function) : mFunction(std::move(function)) {}
    static Loop Call(void * context, void * target)
    {
        return static_cast<LambdaProxy *>(context)->mFunction(static_cast<T *>(target));
    }
    static Loop ConstCall(void * context, const void * target)
    {
        return static_cast<LambdaProxy *>(context)->mFunction(static_cast<const T *>(target));
    }

private:
    Function mFunction;
};

#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP

struct HeapObjectListNode
{
    void Remove()
    {
        mNext->mPrev = mPrev;
        mPrev->mNext = mNext;
    }

    void * mObject             = nullptr;
    HeapObjectListNode * mNext = nullptr;
    HeapObjectListNode * mPrev = nullptr;
};

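/// A circular, doubly-linked list of tracking nodes, using the list head itself
/// as the sentinel: in an empty list, mNext and mPrev point back at the head.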
struct HeapObjectList : HeapObjectListNode
{
    HeapObjectList() { mNext = mPrev = this; }

    void Append(HeapObjectListNode * node)
    {
        node->mNext  = this;
        node->mPrev  = mPrev;
        mPrev->mNext = node;
        mPrev        = node;
    }

    HeapObjectListNode * FindNode(void * object) const;

    using Lambda = Loop (*)(void *, void *);
    Loop ForEachNode(void * context, Lambda lambda);
    Loop ForEachNode(void * context, Loop lambda(void * context, const void * object)) const
    {
        return const_cast<HeapObjectList *>(this)->ForEachNode(context, reinterpret_cast<Lambda>(lambda));
    }

    /// Cleans up any deferred releases IFF iteration depth is 0.
    void CleanupDeferredReleases();

    size_t mIterationDepth         = 0;
    bool mHaveDeferredNodeRemovals = false;
};

#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP

} // namespace internal

/// Provides iteration over active objects in a bitmap pool.
///
/// Creating and releasing items within a pool does not invalidate
/// an iterator; however, there are no guarantees about which objects the
/// iterator will return (i.e. objects newly created while iterating
/// may or may not be visible to the iterator, depending on where they are
/// allocated).
///
/// You are not prevented from releasing the object the iterator
/// currently points at. In that case, the iterator should be advanced
/// before the released slot is dereferenced again.
template <class T>
class BitmapActiveObjectIterator
{
public:
    using value_type = T;
    using pointer    = T *;
    using reference  = T &;

    explicit BitmapActiveObjectIterator(internal::StaticAllocatorBitmap * pool, size_t idx) : mPool(pool), mIndex(idx) {}
    BitmapActiveObjectIterator() {}

    bool operator==(const BitmapActiveObjectIterator & other) const
    {
        return (AtEnd() && other.AtEnd()) || ((mPool == other.mPool) && (mIndex == other.mIndex));
    }
    bool operator!=(const BitmapActiveObjectIterator & other) const { return !(*this == other); }
    BitmapActiveObjectIterator & operator++()
    {
        mIndex = mPool->NextActiveIndexAfter(mIndex);
        return *this;
    }
    T * operator*() const { return static_cast<T *>(mPool->At(mIndex)); }

private:
    bool AtEnd() const { return (mPool == nullptr) || (mIndex >= mPool->Capacity()); }

    internal::StaticAllocatorBitmap * mPool = nullptr; // pool that this belongs to
    size_t mIndex                           = std::numeric_limits<size_t>::max();
};
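
// A minimal iteration sketch (Widget is a hypothetical type; BitMapObjectPool
// is defined later in this file). Releasing the object the iterator points at
// is allowed, provided the iterator is advanced before it is dereferenced again:
//
//     BitMapObjectPool<Widget, 8> pool;
//     for (auto it = pool.begin(); it != pool.end();)
//     {
//         Widget * widget = *it;
//         ++it; // advance first, then it is safe to release the object behind us
//         if (widget->IsExpired()) // IsExpired() is a hypothetical predicate
//         {
//             pool.ReleaseObject(widget);
//         }
//     }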

/**
 * @class ObjectPool
 *
 * Depending on build configuration, ObjectPool is either a fixed-size static pool or a heap-allocated pool.
 *
 * @tparam T Type of element to be allocated.
 * @tparam N Number of elements in the pool, in the fixed-size case.
 *
 * @fn CreateObject
 * @memberof ObjectPool
 *
 * Create an object from the pool. Forwards its arguments to construct a T.
 *
 * @fn ReleaseObject
 * @memberof ObjectPool
 * @param object Pointer to object to release (or return to the pool). Its destructor runs.
 *
 * @fn ForEachActiveObject
 * @memberof ObjectPool
 * @param visitor A function that takes a T* and returns Loop::Continue to continue iterating or Loop::Break to stop iterating.
 * @returns Loop::Break if a visitor call returned Loop::Break, Loop::Finish otherwise.
 *
 * Iteration may be nested. ReleaseObject() can be called during iteration, on the current object or any other.
 * CreateObject() can be called, but it is undefined whether or not a newly created object will be visited.
 */
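
// A minimal lifecycle sketch (Timer is a hypothetical type; ObjectPool itself
// is defined at the end of this file):
//
//     struct Timer
//     {
//         explicit Timer(uint32_t timeoutMs) : mTimeoutMs(timeoutMs) {}
//         uint32_t mTimeoutMs;
//     };
//
//     ObjectPool<Timer, 4> sTimers;
//
//     Timer * timer = sTimers.CreateObject(100u); // arguments are forwarded to Timer's constructor
//     if (timer == nullptr)
//     {
//         // Allocation can fail: the inline pool may be exhausted, or heap allocation may fail.
//     }
//     // ... use timer ...
//     sTimers.ReleaseObject(timer); // runs ~Timer() and returns the slot (or frees the memory)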

/**
 * A class template used for allocating objects from a fixed-size static pool.
 *
 * @tparam T type of element to be allocated.
 * @tparam N the maximum number of elements the pool provides; must be a positive integer.
 */
template <class T, size_t N>
class BitMapObjectPool : public internal::StaticAllocatorBitmap
{
public:
    BitMapObjectPool() : StaticAllocatorBitmap(mData.mMemory, mUsage, N, sizeof(T)) {}
    ~BitMapObjectPool() { VerifyOrDieWithObject(Allocated() == 0, this); }

    BitmapActiveObjectIterator<T> begin() { return BitmapActiveObjectIterator<T>(this, FirstActiveIndex()); }
    BitmapActiveObjectIterator<T> end() { return BitmapActiveObjectIterator<T>(this, N); }

    template <typename... Args>
    T * CreateObject(Args &&... args)
    {
        T * element = static_cast<T *>(Allocate());
        if (element != nullptr)
            return new (element) T(std::forward<Args>(args)...);
        return nullptr;
    }

    void ReleaseObject(T * element)
    {
        if (element == nullptr)
            return;

        element->~T();
        Deallocate(element);
    }

    void ReleaseAll() { ForEachActiveObjectInner(this, ReleaseObject); }

    /**
     * @brief
     *   Run a functor for each active object in the pool.
     *
     * @param function A functor of type `Loop (*)(T*)`.
     *                 Return Loop::Break to break out of the iteration.
     *                 The only modification the functor is allowed to make
     *                 to the pool before returning is releasing the
     *                 object that was passed to the functor. Any other
     *                 desired changes need to be made after iteration
     *                 completes.
     * @return Loop    Returns Break if some call to the functor returned
     *                 Break. Otherwise returns Finish.
     *
     * @note
     *   This function is not thread-safe. Make sure all use of the
     *   pool is protected by a lock, or else avoid using this function.
     */
    template <typename Function>
    Loop ForEachActiveObject(Function && function)
    {
        static_assert(std::is_same<Loop, decltype(function(std::declval<T *>()))>::value,
                      "The function must take T* and return Loop");
        internal::LambdaProxy<T, Function> proxy(std::forward<Function>(function));
        return ForEachActiveObjectInner(&proxy, &internal::LambdaProxy<T, Function>::Call);
    }
    template <typename Function>
    Loop ForEachActiveObject(Function && function) const
    {
        static_assert(std::is_same<Loop, decltype(function(std::declval<const T *>()))>::value,
                      "The function must take const T* and return Loop");
        internal::LambdaProxy<T, Function> proxy(std::forward<Function>(function));
        return ForEachActiveObjectInner(&proxy, &internal::LambdaProxy<T, Function>::ConstCall);
    }

    void DumpToLog() const
    {
        ChipLogError(Support, "BitMapObjectPool: %lu allocated", static_cast<unsigned long>(Allocated()));
        if constexpr (IsDumpable<T>::value)
        {
            ForEachActiveObject([](const T * object) {
                object->DumpToLog();
                return Loop::Continue;
            });
        }
    }

private:
    static Loop ReleaseObject(void * context, void * object)
    {
        static_cast<BitMapObjectPool *>(context)->ReleaseObject(static_cast<T *>(object));
        return Loop::Continue;
    }

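    // One usage bit per pool slot, rounded up to a whole number of atomic chunks.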
    std::atomic<tBitChunkType> mUsage[(N + kBitChunkSize - 1) / kBitChunkSize];
    union Data
    {
        Data() {}
        ~Data() {}
        alignas(alignof(T)) uint8_t mMemory[N * sizeof(T)];
        T mMemoryViewForDebug[N]; // Just for the debugger
    } mData;
};
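
// A sketch of visitor-style iteration with early exit (Widget, Matches() and
// `key` are hypothetical). The functor must return Loop and, per the contract
// above, may release only the object it was handed:
//
//     BitMapObjectPool<Widget, 8> pool;
//     Widget * found = nullptr;
//     pool.ForEachActiveObject([&](Widget * widget) {
//         if (widget->Matches(key))
//         {
//             found = widget;
//             return Loop::Break; // makes ForEachActiveObject return Loop::Break
//         }
//         return Loop::Continue;
//     });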

#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP

class HeapObjectPoolExitHandling
{
public:
    // If IgnoreLeaksOnExit is called, some time after all static initializers have
    // run, HeapObjectPool will not assert that everything in it has been
    // released if its destructor runs under exit() (i.e. when the application
    // is quitting anyway).
    static void IgnoreLeaksOnExit();

protected:
    static bool sIgnoringLeaksOnExit;

private:
    static void ExitHandler();
    static bool sExitHandlerRegistered;
};

/**
 * A class template used for allocating objects from the heap.
 *
 * @tparam T type to be allocated.
 */
template <class T>
class HeapObjectPool : public internal::Statistics, public HeapObjectPoolExitHandling
{
public:
    HeapObjectPool() {}
    ~HeapObjectPool()
    {
#ifndef __SANITIZE_ADDRESS__
#ifdef __clang__
#if __has_feature(address_sanitizer)
#define __SANITIZE_ADDRESS__ 1
#else
#define __SANITIZE_ADDRESS__ 0
#endif // __has_feature(address_sanitizer)
#else
#define __SANITIZE_ADDRESS__ 0
#endif // __clang__
#endif // __SANITIZE_ADDRESS__
#if __SANITIZE_ADDRESS__
        // Free all remaining objects so that ASAN can catch specific use-after-free cases.
        ReleaseAll();
#else  // __SANITIZE_ADDRESS__
        if (!sIgnoringLeaksOnExit)
        {
            // Verify that no live objects remain, to prevent potential use-after-free.
            VerifyOrDieWithObject(Allocated() == 0, this);
        }
#endif // __SANITIZE_ADDRESS__
    }

    /// Provides iteration over active objects in the pool.
    ///
    /// NOTE: There is extra logic to allow objects to be released WHILE an iterator is
    ///       active, while still allowing the iterator to advance.
    ///       This is done by tracking an iteration depth whenever an active
    ///       iterator exists. This also means that while a pool iterator exists, release
    ///       of the internal tracking nodes may be deferred until the last active
    ///       iterator is destroyed.
    class ActiveObjectIterator
    {
    public:
        using value_type = T;
        using pointer    = T *;
        using reference  = T &;

        ActiveObjectIterator() {}
        ActiveObjectIterator(const ActiveObjectIterator & other) : mCurrent(other.mCurrent), mEnd(other.mEnd)
        {
            if (mEnd != nullptr)
            {
                // Iteration depth is used to support `Release` while an iterator is active.
                //
                // Code was historically using this functionality, so we support it here
                // as well: while iteration is active, iteration depth is > 0. When it
                // goes to 0, then any deferred `Release()` calls are executed.
                mEnd->mIterationDepth++;
            }
        }

        ActiveObjectIterator & operator=(const ActiveObjectIterator & other)
        {
            if (this == &other)
            {
                return *this;
            }
            // Drop our reference to the old list (running any deferred cleanup),
            // then take a reference to the new one.
            if (mEnd != nullptr)
            {
                mEnd->mIterationDepth--;
                mEnd->CleanupDeferredReleases();
            }
            mCurrent = other.mCurrent;
            mEnd     = other.mEnd;
            if (mEnd != nullptr)
            {
                mEnd->mIterationDepth++;
            }
            return *this;
        }

        ~ActiveObjectIterator()
        {
            if (mEnd != nullptr)
            {
                mEnd->mIterationDepth--;
                mEnd->CleanupDeferredReleases();
            }
        }

        bool operator==(const ActiveObjectIterator & other) const
        {
            // The extra current/end comparison makes all "end" iterators compare
            // as equal (in particular, a default-constructed iterator is an end
            // iterator).
            return (mCurrent == other.mCurrent) || ((mCurrent == mEnd) && (other.mCurrent == other.mEnd));
        }
        bool operator!=(const ActiveObjectIterator & other) const { return !(*this == other); }
        ActiveObjectIterator & operator++()
        {
            do
            {
                mCurrent = mCurrent->mNext;
            } while ((mCurrent != mEnd) && (mCurrent->mObject == nullptr));
            return *this;
        }
        T * operator*() const { return static_cast<T *>(mCurrent->mObject); }

    protected:
        friend class HeapObjectPool<T>;

        explicit ActiveObjectIterator(internal::HeapObjectListNode * current, internal::HeapObjectList * end) :
            mCurrent(current), mEnd(end)
        {
            mEnd->mIterationDepth++;
        }

    private:
        internal::HeapObjectListNode * mCurrent = nullptr;
        internal::HeapObjectList * mEnd         = nullptr;
    };
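
    // A sketch of the deferred-release behavior described above (Widget is a
    // hypothetical type): releasing during iteration only clears the node's
    // object pointer; the node itself is unlinked and freed once the last
    // active iterator goes away.
    //
    //     HeapObjectPool<Widget> pool;
    //     for (Widget * widget : pool)
    //     {
    //         pool.ReleaseObject(widget); // node removal is deferred while the
    //                                     // loop's iterator is still alive
    //     }
    //     // The deferred node removals have been cleaned up by this point.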

    ActiveObjectIterator begin() { return ActiveObjectIterator(mObjects.mNext, &mObjects); }
    ActiveObjectIterator end() { return ActiveObjectIterator(&mObjects, &mObjects); }

    template <typename... Args>
    T * CreateObject(Args &&... args)
    {
        T * object = Platform::New<T>(std::forward<Args>(args)...);
        if (object != nullptr)
        {
            auto node = Platform::New<internal::HeapObjectListNode>();
            if (node != nullptr)
            {
                node->mObject = object;
                mObjects.Append(node);
                IncreaseUsage();
                return object;
            }
            // Avoid leaking the object if its tracking node could not be allocated.
            Platform::Delete(object);
        }
        return nullptr;
    }

    /*
     * This method exists purely to line up with the static allocator version.
     * It returns a nonsensically large value so that comparisons against the
     * capacity behave sensibly.
     */
    size_t Capacity() const { return SIZE_MAX; }

    /*
     * This method exists purely to line up with the static allocator version. A heap-based object pool is never exhausted.
     */
    bool Exhausted() const { return false; }

    void ReleaseObject(T * object)
    {
        if (object != nullptr)
        {
            internal::HeapObjectListNode * node = mObjects.FindNode(object);
            // Releasing an object that is not allocated indicates likely memory
            // corruption; better to safe-crash than proceed at this point.
            VerifyOrDie(node != nullptr);

            node->mObject = nullptr;
            Platform::Delete(object);

            // The node needs to be released immediately if we are not in the middle of iteration.
            // Otherwise cleanup is deferred until all iteration on this pool completes and it's safe to release nodes.
            if (mObjects.mIterationDepth == 0)
            {
                node->Remove();
                Platform::Delete(node);
            }
            else
            {
                mObjects.mHaveDeferredNodeRemovals = true;
            }

            DecreaseUsage();
        }
    }

    void ReleaseAll() { mObjects.ForEachNode(this, ReleaseObject); }

    /**
     * @brief
     *   Run a functor for each active object in the pool.
     *
     * @param function A functor of type `Loop (*)(T*)`.
     *                 Return Loop::Break to break out of the iteration.
     *                 The only modification the functor is allowed to make
     *                 to the pool before returning is releasing the
     *                 object that was passed to the functor. Any other
     *                 desired changes need to be made after iteration
     *                 completes.
     * @return Loop    Returns Break if some call to the functor returned
     *                 Break. Otherwise returns Finish.
     */
    template <typename Function>
    Loop ForEachActiveObject(Function && function)
    {
        static_assert(std::is_same<Loop, decltype(function(std::declval<T *>()))>::value,
                      "The function must take T* and return Loop");
        internal::LambdaProxy<T, Function> proxy(std::forward<Function>(function));
        return mObjects.ForEachNode(&proxy, &internal::LambdaProxy<T, Function>::Call);
    }
    template <typename Function>
    Loop ForEachActiveObject(Function && function) const
    {
        static_assert(std::is_same<Loop, decltype(function(std::declval<const T *>()))>::value,
                      "The function must take const T* and return Loop");
        internal::LambdaProxy<const T, Function> proxy(std::forward<Function>(function));
        return mObjects.ForEachNode(&proxy, &internal::LambdaProxy<const T, Function>::ConstCall);
    }

    void DumpToLog() const
    {
        ChipLogError(Support, "HeapObjectPool: %lu allocated", static_cast<unsigned long>(Allocated()));
        if constexpr (IsDumpable<T>::value)
        {
            ForEachActiveObject([](const T * object) {
                object->DumpToLog();
                return Loop::Continue;
            });
        }
    }

private:
    static Loop ReleaseObject(void * context, void * object)
    {
        static_cast<HeapObjectPool *>(context)->ReleaseObject(static_cast<T *>(object));
        return Loop::Continue;
    }

    internal::HeapObjectList mObjects;
};

#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP

/**
 * Specify ObjectPool storage allocation.
 */
enum class ObjectPoolMem
{
    /**
     * Use storage inside the containing scope for both objects and pool management state.
     */
    kInline,
#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
    /**
     * Allocate objects from the heap, with only pool management state in the containing scope.
     *
     * For this case, the ObjectPool size parameter is ignored.
     */
    kHeap,
    kDefault = kHeap
#else  // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
    kDefault = kInline
#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
};

template <typename T, ObjectPoolMem P = ObjectPoolMem::kDefault>
struct ObjectPoolIterator;

template <typename T>
struct ObjectPoolIterator<T, ObjectPoolMem::kInline>
{
    using Type = BitmapActiveObjectIterator<T>;
};

template <typename T, size_t N, ObjectPoolMem P = ObjectPoolMem::kDefault>
class ObjectPool;

template <typename T, size_t N>
class ObjectPool<T, N, ObjectPoolMem::kInline> : public BitMapObjectPool<T, N>
{
};

#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP

template <typename T>
struct ObjectPoolIterator<T, ObjectPoolMem::kHeap>
{
    using Type = typename HeapObjectPool<T>::ActiveObjectIterator;
};

template <typename T, size_t N>
class ObjectPool<T, N, ObjectPoolMem::kHeap> : public HeapObjectPool<T>
{
};
#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
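
// A sketch of selecting the memory model explicitly instead of relying on
// kDefault (Widget is a hypothetical type). The kInline form always uses the
// fixed-size bitmap pool; the kHeap form ignores the size parameter:
//
//     ObjectPool<Widget, 16, ObjectPoolMem::kInline> sFixedPool; // always static storage
//     ObjectPool<Widget, 16> sDefaultPool;                       // kInline or kHeap, per build configuration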

} // namespace chip
|