|
| 1 | +// Copyright (C) 2020-2026 - DevSH Graphics Programming Sp. z O.O. |
| 2 | +// This file is part of the "Nabla Engine". |
| 3 | +// For conditions of distribution and use, see copyright notice in nabla.h |
1 | 4 | #ifndef _NBL_CORE_C_MEMORY_POOL_H_INCLUDED_ |
2 | 5 | #define _NBL_CORE_C_MEMORY_POOL_H_INCLUDED_ |
3 | 6 |
|
4 | 7 |
|
#include "nbl/core/decl/compile_config.h"
#include "nbl/core/decl/BaseClasses.h"
#include "nbl/core/alloc/SimpleBlockBasedAllocator.h"

#include <concepts>
#include <memory>
#include <type_traits>
|
13 | 16 | namespace nbl::core |
14 | 17 | { |
15 | 18 |
|
16 | | -// TODO: change DataAllocator to PMR |
17 | | -template <class AddressAllocator, template<class> class DataAllocator, bool isThreadSafe, typename... Args> |
18 | | -class CMemoryPool : public Uncopyable |
| 19 | +template<typename C> |
| 20 | +concept MemoryPoolConfig = requires |
19 | 21 | { |
20 | | -public: |
21 | | - using addr_allocator_type = AddressAllocator; |
22 | | - using allocator_type = typename std::conditional<isThreadSafe, |
23 | | - SimpleBlockBasedAllocatorMT<AddressAllocator,DataAllocator, std::recursive_mutex, Args...>, |
24 | | - SimpleBlockBasedAllocatorST<AddressAllocator,DataAllocator, Args...>>::type; |
25 | | - using size_type = typename core::address_allocator_traits<addr_allocator_type>::size_type; |
26 | | - using addr_type = size_type; |
| 22 | +// {C::ThreadSafe} -> std::same_as<bool>; // TODO: how to do it |
| 23 | + typename C::AddressAllocator; // TODO: check its an Address Allocator |
| 24 | +}; |
27 | 25 |
|
28 | | - CMemoryPool(size_type _blockSize, size_type _minBlockCount, size_type _maxBlockCount, Args... args) : // intentionally no && here, i dont wont to do here anything like reference collapsing, `Args` come from class template |
29 | | - m_alctr(_blockSize,_minBlockCount,_maxBlockCount,std::forward<Args>(args)...) |
30 | | - { |
31 | | - } |
32 | | - |
33 | | - void* allocate(size_type s, size_type a) |
34 | | - { |
35 | | - return m_alctr.allocate(s, a); |
36 | | - } |
37 | | - void deallocate(void* _ptr, size_type s) |
38 | | - { |
39 | | - m_alctr.deallocate(_ptr, s); |
40 | | - } |
| 26 | +template<MemoryPoolConfig Config> |
| 27 | +class CMemoryPool final : public Uncopyable |
| 28 | +{ |
| 29 | + using block_allocator_st_type = SimpleBlockBasedAllocatorST<typename Config::AddressAllocator>; |
| 30 | + public: |
| 31 | + using addr_allocator_type = Config::AddressAllocator; |
| 32 | + using size_type = typename core::address_allocator_traits<addr_allocator_type>::size_type; |
41 | 33 |
|
42 | | - template <typename T, typename... FuncArgs> |
43 | | - T* emplace_n(uint32_t n, FuncArgs&&... args) |
44 | | - { |
45 | | - size_type s = static_cast<size_type>(n) * sizeof(T); |
46 | | - size_type a = alignof(T); |
47 | | - void* ptr = allocate(s, a); |
48 | | - if (!ptr) |
49 | | - return nullptr; |
| 34 | + using block_allocator_type = std::conditional_t<Config::ThreadSafe,SimpleBlockBasedAllocatorMT<block_allocator_st_type,std::recursive_mutex>,block_allocator_st_type>; |
| 35 | +// TODO: not appropriate |
| 36 | +// using addr_type = size_type; |
50 | 37 |
|
51 | | - using traits_t = std::allocator_traits<DataAllocator<T>>; |
52 | | - DataAllocator<T> data_alctr; |
53 | | - if constexpr (sizeof...(FuncArgs)!=0u || !std::is_trivial_v<T>) |
| 38 | + inline CMemoryPool(block_allocator_st_type::SCreationParams&& params) : m_block_alctr(std::move(params)) {} |
| 39 | + |
| 40 | + // |
| 41 | + inline void* allocate(const size_type s, const size_type a) |
| 42 | + { |
| 43 | + return m_block_alctr.allocate(s,a); |
| 44 | + } |
| 45 | + inline void deallocate(void* _ptr, const size_type s) |
54 | 46 | { |
55 | | - for (uint32_t i = 0u; i < n; ++i) |
56 | | - traits_t::construct(data_alctr, reinterpret_cast<T*>(ptr) + i, std::forward<FuncArgs>(args)...); |
| 47 | + m_block_alctr.deallocate(_ptr,s); |
57 | 48 | } |
58 | | - return reinterpret_cast<T*>(ptr); |
59 | | - } |
60 | | - template <typename T, typename... FuncArgs> |
61 | | - T* emplace(FuncArgs&&... args) |
62 | | - { |
63 | | - return emplace_n<T,FuncArgs...>(1u, std::forward<FuncArgs>(args)...); |
64 | | - } |
65 | 49 |
|
66 | | - template <typename T> |
67 | | - void free_n(void* _ptr, uint32_t n) |
68 | | - { |
69 | | - using traits_t = std::allocator_traits<DataAllocator<T>>; |
70 | | - DataAllocator<T> data_alctr; |
| 50 | + // |
| 51 | + template <typename T, typename... FuncArgs> requires (!std::is_array_v<T>) // for now until we have a test |
| 52 | + inline T* emplace_n(const uint32_t n, FuncArgs&&... args) |
| 53 | + { |
| 54 | + size_type s = static_cast<size_type>(n)*sizeof(T); |
| 55 | + size_type a = alignof(T); |
| 56 | + T* const ptr = std::launder(reinterpret_cast<T*>(allocate(s,a))); |
| 57 | + if (!ptr) |
| 58 | + return nullptr; |
| 59 | + |
| 60 | + if constexpr (!std::is_trivial_v<T>) |
| 61 | + { |
| 62 | + if constexpr (sizeof...(FuncArgs)!=0u) |
| 63 | + { |
| 64 | + for (uint32_t i=0u; i<n; ++i) |
| 65 | + std::construct_at(ptr+i,std::forward<FuncArgs>(args)...); |
| 66 | + } |
| 67 | + else |
| 68 | + std::uninitialized_default_construct_n(ptr,n); |
| 69 | + } |
| 70 | + return ptr; |
| 71 | + } |
| 72 | + template <typename T, typename... FuncArgs> |
| 73 | + inline T* emplace(FuncArgs&&... args) |
| 74 | + { |
| 75 | + return emplace_n<T,FuncArgs...>(1u,std::forward<FuncArgs>(args)...); |
| 76 | + } |
71 | 77 |
|
72 | | - T* ptr = reinterpret_cast<T*>(_ptr); |
73 | | - if constexpr (!std::is_trivially_destructible_v<T>) |
| 78 | + // You must know the original type, we don't keep track of original size |
| 79 | + // TODO: this shouldn't be called `free` but `delete` |
| 80 | + template <typename T> requires (!std::is_array_v<T>) // for now until we have a test |
| 81 | + inline void free_n(void* _ptr, const uint32_t n) |
| 82 | + { |
| 83 | + T* ptr = reinterpret_cast<T*>(_ptr); |
| 84 | + if constexpr (!std::is_trivially_destructible_v<T>) |
| 85 | + std::destroy_n(ptr,n); |
| 86 | + deallocate(_ptr,sizeof(T)*n); |
| 87 | + } |
| 88 | + template <typename T> |
| 89 | + inline void free(void* ptr) |
74 | 90 | { |
75 | | - for (uint32_t i = 0u; i < n; ++i) |
76 | | - traits_t::destroy(data_alctr, ptr + i); |
| 91 | + return free_n<T>(ptr,1u); |
77 | 92 | } |
78 | | - deallocate(_ptr, sizeof(T)*n); |
79 | | - } |
80 | | - template <typename T> |
81 | | - void free(void* ptr) |
82 | | - { |
83 | | - return free_n<T>(ptr, 1u); |
84 | | - } |
85 | 93 |
|
86 | | -private: |
87 | | - allocator_type m_alctr; |
| 94 | + private: |
| 95 | + block_allocator_type m_block_alctr; |
88 | 96 | }; |
89 | 97 |
|
90 | 98 | } |
91 | | - |
92 | 99 | #endif |
0 commit comments