@@ -18,45 +18,41 @@ namespace nbl::asset::material_compiler3
1818// Class to manage all nodes' backing and hand them out as `uint32_t` handles
1919class CNodePool : public core::IReferenceCounted
2020{
21- public:
22- // everything is handed out by index not pointer
23- struct Handle
21+ struct Config
2422 {
25- using value_t = uint32_t ;
26- constexpr static inline value_t Invalid = ~value_t (0 );
23+ using AddressAllocator = core::LinearAddressAllocator<uint32_t >;
24+ using HandleValue = uint32_t ;
25+ constexpr static inline bool ThreadSafe = false ;
26+ };
2727
28- explicit inline operator bool () const {return value!=Invalid;}
29- inline bool operator ==(const Handle& other) const {return value==other.value ;}
28+ public:
29+ //
30+ using obj_pool_type = nbl::core::CObjectPool<Config>;
3031
31- // also serves as a byte offset into the pool
32- value_t value = Invalid;
33- };
32+ //
33+ inline obj_pool_type& getObjectPool () {return m_composed;}
34+ inline const obj_pool_type& getObjectPool () const {return m_composed;}
35+
36+ //
3437 class INode
3538 {
3639 public:
37- //
3840 virtual const std::string_view getTypeName () const = 0;
39-
40- protected:
41- //
42- friend class CNodePool ;
43-
44- // to not be able to make the variable length stuff on the stack
45- virtual ~INode () = 0 ;
46-
47- // to support variable length stuff
48- virtual uint32_t getSize () const = 0;
4941 };
42+ //
43+ template <typename T> requires std::is_base_of_v<INode,std::remove_const_t <T>>
44+ using typed_pointer_type = obj_pool_type::template typed_pointer_type<T>;
45+
5046 // Debug Info node
51- class CDebugInfo : public INode
47+ class CDebugInfo : public obj_pool_type ::IVariableSize, public INode
5248 {
5349 public:
54- inline const std::string_view getTypeName () const override {return " nbl::CNodePool::CDebugInfo" ;}
54+ 50+ inline const std::string_view getTypeName () const override {return "nbl::asset::material_compiler3::CNodePool::CDebugInfo";}
5551 inline uint32_t getSize () const {return calc_size (nullptr ,m_size);}
5652
5753 static inline uint32_t calc_size (const void * data, const uint32_t size)
5854 {
59- return sizeof (CDebugInfo)+size;
55+ return core::alignUp ( sizeof (CDebugInfo)+size, alignof (CDebugInfo)) ;
6056 }
6157 static inline uint32_t calc_size (const std::string_view& view)
6258 {
@@ -77,229 +73,27 @@ class CNodePool : public core::IReferenceCounted
7773
7874 inline const std::span<const uint8_t > data () const
7975 {
80- return {std::launder ( reinterpret_cast <const uint8_t *>(this +1 ) ),m_size};
76+ return {reinterpret_cast <const uint8_t *>(this +1 ),m_size};
8177 }
8278
8379 protected:
8480 const uint32_t m_size;
8581 };
8682
87- //
88- template <typename T>
89- struct TypedHandle
90- {
91- using node_type = T;
92-
93- explicit inline operator bool () const {return bool (untyped);}
94- inline bool operator ==(const TypedHandle& other) const {return untyped==other.untyped ;}
95-
96- inline operator const TypedHandle<const T>&() const
97- {
98- static_assert (std::is_base_of_v<INode,std::remove_const_t <T>>);
99- return *reinterpret_cast <const TypedHandle<const T>*>(this );
100- }
101- template <typename U> requires (std::is_base_of_v<U,T> && (std::is_const_v<U> || !std::is_const_v<T>))
102- inline operator const TypedHandle<U>&() const
103- {
104- return *reinterpret_cast <const TypedHandle<U>*>(this );
105- }
106-
107- Handle untyped = {};
108- };
109- template <typename T> requires (!std::is_const_v<T>)
110- inline T* deref(const TypedHandle<T> h) {return deref<T>(h.untyped );}
111- template <typename T>
112- inline const T* deref (const TypedHandle<T> h) const {return deref<const T>(h.untyped );}
113-
11483 template <typename T>
115- inline const std::string_view getTypeName (const TypedHandle <T> h) const
84+ inline const std::string_view getTypeName (const typed_pointer_type <T> h) const
11685 {
117- const auto * node = deref<const T>(h. untyped );
86+ const auto * node = getObjectPool (). deref <const T>(h);
11887 return node ? node->getTypeName ():"nullptr";
11988 }
89+
12090
12191 protected:
122- struct HandleHash
123- {
124- inline size_t operator ()(const TypedHandle<const INode> handle) const
125- {
126- return std::hash<Handle::value_t >()(handle.untyped .value );
127- }
128- };
129- // save myself some typing
130- using refctd_pmr_t = core::smart_refctd_ptr<core::refctd_memory_resource>;
131-
132- inline Handle alloc (const uint32_t size, const uint16_t alignment)
133- {
134- Handle retval = {};
135- auto allocFromChunk = [&](Chunk& chunk, const uint32_t chunkIx)
136- {
137- const auto localOffset = chunk.alloc (size,alignment);
138- if (localOffset!=Chunk::allocator_t ::invalid_address)
139- retval.value = localOffset|(chunkIx<<m_chunkSizeLog2);
140- };
141- // try current back chunk
142- if (!m_chunks.empty ())
143- allocFromChunk (m_chunks.back (),m_chunks.size ()-1 );
144- // if fail try new chunk
145- if (!retval)
146- {
147- const auto chunkSize = 0x1u <<m_chunkSizeLog2;
148- const auto chunkAlign = 0x1u <<m_maxNodeAlignLog2;
149- Chunk newChunk;
150- newChunk.getAllocator () = Chunk::allocator_t (nullptr ,0 ,0 ,chunkAlign,chunkSize),
151- newChunk.m_data = reinterpret_cast <uint8_t *>(m_pmr->allocate (chunkSize,chunkAlign));
152- if (newChunk.m_data )
153- {
154- allocFromChunk (newChunk,m_chunks.size ());
155- if (retval)
156- m_chunks.push_back (std::move (newChunk));
157- else
158- m_pmr->deallocate (newChunk.m_data ,chunkSize,chunkAlign);
159- }
160- }
161- return retval;
162- }
163- inline void free (const Handle h, const uint32_t size)
164- {
165- assert (getChunkIx (h)<m_chunks.size ());
166- }
92+ inline CNodePool (typename obj_pool_type::creation_params_type&& params) : m_composed(std::move(params)) {}
16793
168- // new
169- template <typename T, typename ... Args>
170- inline TypedHandle<T> _new (Args&&... args)
171- {
172- const uint32_t size = T::calc_size (args...);
173- const Handle retval = alloc (size,alignof (T));
174- if (retval)
175- new (deref<void >(retval)) T (std::forward<Args>(args)...);
176- return {.untyped =retval};
177- }
178- // delete
179- template <typename T>
180- inline void _delete (const TypedHandle<T> h)
181- {
182- T* ptr = deref<T>(h);
183- const uint32_t size = ptr->getSize ();
184- static_cast <INode*>(ptr)->~INode (); // can't use `std::destroy_at<T>(ptr);` because of destructor being non-public
185- // wipe v-table to mark as dead (so `~CNodePool` doesn't run destructor twice)
186- // NOTE: This won't work if we start reusing memory, even zeroing out the whole node won't work! Then need an accurate record of live nodes!
187- const void * nullVTable = nullptr ;
188- assert (memcmp (ptr,&nullVTable,sizeof (nullVTable))!=0 ); // double free
189- memset (static_cast <INode*>(ptr),0 ,sizeof (nullVTable));
190- free (h.untyped ,size);
191- }
192-
193- inline CNodePool (const uint8_t _chunkSizeLog2, const uint8_t _maxNodeAlignLog2, refctd_pmr_t && _pmr) :
194- m_chunkSizeLog2(_chunkSizeLog2), m_maxNodeAlignLog2(_maxNodeAlignLog2), m_pmr(_pmr ? std::move(_pmr):core::getDefaultMemoryResource())
195- {
196- assert (m_chunkSizeLog2>=14 && m_maxNodeAlignLog2>=4 );
197- }
198- // Destructor performs a form of garbage collection (just to make sure destructors are ran)
199- // NOTE: C++26 reflection would allow us to find all the `Handle` and `TypedHandle<U>` in `T` and do actual mark-and-sweep Garbage Collection
200- inline ~CNodePool ()
201- {
202- const auto chunkSize = 0x1u <<m_chunkSizeLog2;
203- const auto chunkAlign = 0x1u <<m_maxNodeAlignLog2;
204- for (auto & chunk : m_chunks)
205- {
206- for (auto handleOff=chunk.getAllocator ().get_total_size (); handleOff<chunkSize; handleOff+=sizeof (Handle))
207- {
208- const auto pHandle = reinterpret_cast <const Handle*>(chunk.m_data +handleOff);
209- // NOTE: This won't work if we start reusing memory, even zeroing out the whole node won't work! Then need an accurate record of live nodes!
210- if (auto * node=deref<INode>(*pHandle); node)
211- node->~INode (); // can't use `std::destroy_at<T>(ptr);` because of destructor being non-public
212- }
213- m_pmr->deallocate (chunk.m_data ,chunkSize,chunkAlign);
214- }
215- }
216-
217- private:
218- struct Chunk
219- {
220- // for now using KISS, we can use geeneralpupose allocator later
221- // Generalpurpose would require us to store the allocated handle list in a different way, so that handles can be quickly removed from it.
222- // Maybe a doubly linked list around the original allocation?
223- using allocator_t = core::LinearAddressAllocatorST<Handle::value_t >;
224-
225- inline allocator_t & getAllocator ()
226- {
227- return *m_alloc.getStorage ();
228- }
229-
230- inline Handle::value_t alloc (const uint32_t size, const uint16_t alignment)
231- {
232- const auto retval = getAllocator ().alloc_addr (size,alignment);
233- // successful allocation, time for some book keeping
234- constexpr auto invalid_address = allocator_t ::invalid_address;
235- if (retval!=invalid_address)
236- {
237- // we keep a list of all the allocated nodes at the back of a chunk
238- const auto newSize = getAllocator ().get_total_size ()-sizeof (retval);
239- // handle no space left for bookkeeping case
240- if (retval+size>newSize)
241- {
242- free (retval,size);
243- return invalid_address;
244- }
245- // clear vtable to mark as not initialized yet
246- // TODO: this won't work with reusable memory / not bump allocator
247- memset (m_data+retval,0 ,sizeof (INode));
248- *std::launder (reinterpret_cast <Handle::value_t *>(m_data+newSize)) = retval;
249- // shrink allocator
250- getAllocator () = allocator_t (newSize, std::move (getAllocator ()), nullptr );
251- }
252- return retval;
253- }
254- inline void free (const Handle::value_t addr, const uint32_t size)
255- {
256- getAllocator ().free_addr (addr,size);
257- }
258-
259- // make the chunk plain data, it has to get initialized and deinitialized externally anyway
260- core::StorageTrivializer<allocator_t > m_alloc;
261- uint8_t * m_data;
262- };
263- inline uint32_t getChunkIx (const Handle h) {return h.value >>m_chunkSizeLog2;}
264-
265- template <typename T> requires (std::is_base_of_v<INode,T> && !std::is_const_v<T> || std::is_void_v<T>)
266- inline T* deref(const Handle h)
267- {
268- if (!h)
269- return nullptr ;
270- const auto hiAddr = getChunkIx (h);
271- assert (hiAddr<m_chunks.size ());
272- {
273- const auto loAddr = h.value &((0x1u <<m_chunkSizeLog2)-1 );
274- void * ptr = m_chunks[hiAddr].m_data +loAddr;
275- if constexpr (std::is_void_v<T>)
276- return ptr;
277- else
278- {
279- if (*std::launder (reinterpret_cast <const void * const *>(ptr))) // vtable not wiped
280- {
281- auto * base = std::launder (reinterpret_cast <INode*>(ptr));
282- return dynamic_cast <T*>(base);
283- }
284- }
285- }
286- return nullptr ;
287- }
288- template <typename T> requires (std::is_base_of_v<INode,T> && std::is_const_v<T>)
289- inline T* deref(const Handle h) const
290- {
291- return const_cast <CNodePool*>(this )->deref <std::remove_const_t <T>>(h);
292- }
293-
294- core::vector<Chunk> m_chunks;
295- refctd_pmr_t m_pmr;
296- const uint8_t m_chunkSizeLog2; // maybe hardcode chunk sizes to 64kb ?
297- const uint8_t m_maxNodeAlignLog2;
94+ obj_pool_type m_composed;
29895};
29996
300- inline CNodePool::INode::~INode ()
301- {
302- }
30397
30498} // namespace nbl::asset::material_compiler3
30599#endif
0 commit comments