// Copyright (C) 2018-2025 - DevSH Graphics Programming Sp. z O.O.
// This file is part of the "Nabla Engine".
// For conditions of distribution and use, see copyright notice in nabla.h
#ifndef _NBL_ASSET_MATERIAL_COMPILER_V3_C_NODE_POOL_H_INCLUDED_
#define _NBL_ASSET_MATERIAL_COMPILER_V3_C_NODE_POOL_H_INCLUDED_


#include "nbl/core/declarations.h"
#include "nbl/core/definitions.h"

#include <string_view>
#include <type_traits>


// temporary
#define NBL_API

namespace nbl::asset::material_compiler3
{

// Class to manage all nodes' backing and hand them out as `uint32_t` handles
class CNodePool : public core::IReferenceCounted
{
    public:
        // everything is handed out by index not pointer
        struct Handle
        {
            using value_t = uint32_t;
            constexpr static inline value_t Invalid = ~value_t(0);

            inline operator bool() const {return value!=Invalid;}

            // also serves as a byte offset into the pool
            value_t value = Invalid;
        };
        class INode
        {
            public:
                //
                virtual std::string_view getTypeName() const = 0;

                // Only sane child count allowed
                virtual uint8_t getChildCount() const = 0;

            protected:
                //
                friend class CNodePool;

                // non-public so the variable length stuff can't be made on the stack
                virtual ~INode() = default;

                // to support variable length stuff
                virtual uint32_t getSize() const = 0;

                // Children are always at the end of the node, unless overridden
                virtual inline Handle* getChildHandleStorage(const int16_t ix)
                {
                    if (const int16_t childCount=getChildCount(); ix<childCount)
                        return reinterpret_cast<Handle*>(reinterpret_cast<uint8_t*>(this)+getSize())+(ix-childCount);
                    return nullptr;
                }
                inline const Handle* getChildHandleStorage(const int16_t ix) const
                {
                    return const_cast<INode*>(this)->getChildHandleStorage(ix);
                }
        };
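
        // A minimal sketch of a concrete node type (hypothetical `CBinaryOpNode`, purely for
        // illustration; real node classes are declared elsewhere). It shows the contract the
        // pool relies on: a static `calc_size()` taking the same arguments as the constructor
        // (consumed by `_new()` below) and a `getSize()` that also counts the trailing child
        // `Handle` storage which the default `getChildHandleStorage()` indexes into:
        //
        //  class CBinaryOpNode final : public CNodePool::INode
        //  {
        //      public:
        //          constexpr static inline uint8_t ChildCount = 2;
        //          static inline uint32_t calc_size() {return sizeof(CBinaryOpNode)+ChildCount*sizeof(CNodePool::Handle);}
        //
        //          inline CBinaryOpNode()
        //          {
        //              // children start out unset until `CNodePool::setChild()` links them
        //              for (uint8_t i=0; i<ChildCount; i++)
        //                  *getChildHandleStorage(i) = {};
        //          }
        //
        //          inline std::string_view getTypeName() const override {return "CBinaryOpNode";}
        //          inline uint8_t getChildCount() const override {return ChildCount;}
        //
        //      protected:
        //          inline uint32_t getSize() const override {return calc_size();}
        //  };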

        //
        template<typename T> requires std::is_base_of_v<INode,T>
        struct TypedHandle
        {
            using node_type = T;

            Handle untyped;
        };
        template<typename T>
        inline T* deref(const TypedHandle<T>& h) {return deref<T>(h.untyped);}
        template<typename T>
        inline const T* deref(const TypedHandle<const T>& h) const {return deref<const T>(h.untyped);}

        //
        inline Handle getChild(const Handle& parent, const uint8_t ix) const
        {
            const auto* pHandle = deref<const INode>(parent)->getChildHandleStorage(ix);
            return pHandle ? (*pHandle):Handle{};
        }
        inline void setChild(const Handle& parent, const uint8_t ix, const Handle& child)
        {
            if (auto* pHandle=deref<INode>(parent)->getChildHandleStorage(ix); pHandle)
                *pHandle = child;
        }
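
        // Example of a read-only walk over a node's children via untyped handles
        // (hypothetical `pool` and `root` variables, assuming both came from a derived
        // pool; a child slot that was never linked yields an invalid `Handle`):
        //
        //  const CNodePool::TypedHandle<const CNodePool::INode> root = ...;
        //  const auto* node = pool->deref(root);
        //  for (uint8_t i=0; i<node->getChildCount(); i++)
        //  {
        //      const CNodePool::Handle child = pool->getChild(root.untyped,i);
        //      if (child)
        //          ; // recurse, or re-link the slot with `pool->setChild()`
        //  }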

    protected:
        // save myself some typing
        using refctd_pmr_t = core::smart_refctd_ptr<core::refctd_memory_resource>;

        inline Handle alloc(const uint32_t size, const uint16_t alignment)
        {
            Handle retval = {};
            auto allocFromChunk = [&](Chunk& chunk, const uint32_t chunkIx)
            {
                const auto localOffset = chunk.alloc(size,alignment);
                if (localOffset!=decltype(Chunk::m_alloc)::invalid_address)
                    retval.value = localOffset|(chunkIx<<m_chunkSizeLog2);
            };
            // try current back chunk
            if (!m_chunks.empty())
                allocFromChunk(m_chunks.back(),m_chunks.size()-1);
            // if that fails, try a new chunk
            if (!retval)
            {
                const auto chunkSize = 0x1u<<m_chunkSizeLog2;
                const auto chunkAlign = 0x1u<<m_maxNodeAlignLog2;
                Chunk newChunk = {
                    .m_alloc = decltype(Chunk::m_alloc)(nullptr,0,0,chunkAlign,chunkSize),
                    .m_data = reinterpret_cast<uint8_t*>(m_pmr->allocate(chunkSize,chunkAlign))
                };
                if (newChunk.m_data)
                {
                    allocFromChunk(newChunk,m_chunks.size());
                    if (retval)
                        m_chunks.push_back(std::move(newChunk));
                    else
                        m_pmr->deallocate(newChunk.m_data,chunkSize,chunkAlign);
                }
            }
            return retval;
        }
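        // The handle produced above packs (chunk index, intra-chunk byte offset): the low
        // `m_chunkSizeLog2` bits are the byte offset into the chunk's `m_data`, the high
        // bits are the chunk's index in `m_chunks`. For example, assuming m_chunkSizeLog2==16
        // (64kB chunks), a node at byte 0x40 of chunk 2 gets the value (2<<16)|0x40 == 0x00020040;
        // `getChunkIx()` and `deref()` undo the packing with a shift and a mask respectively.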
        inline void free(const Handle& h, const uint32_t size)
        {
            // chunks use a linear allocator for now, so freed nodes are not reclaimed
            // until the whole pool dies; just validate the handle
            assert(getChunkIx(h)<m_chunks.size());
        }

        // new
        template<typename T, typename... Args>
        inline Handle _new(Args&&... args)
        {
            // node types are expected to provide a static `calc_size` matching their constructor arguments
            const uint32_t size = T::calc_size(args...);
            const Handle retval = alloc(size,alignof(T));
            if (retval)
                new (deref<T>(retval)) T(std::forward<Args>(args)...);
            return retval;
        }
        // delete
        template<typename T>
        inline void _delete(const Handle& h)
        {
            T* ptr = deref<T>(h);
            const uint32_t size = ptr->getSize();
            ptr->~T();
            free(h,size);
        }
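
        // Allocation and node construction are protected, so a concrete pool is expected to
        // derive and expose what it needs. A minimal sketch (hypothetical `CMyPool`, reusing
        // the illustrative `CBinaryOpNode` from above):
        //
        //  class CMyPool final : public CNodePool
        //  {
        //      public:
        //          // 64kB chunks, 256-byte max node alignment, default memory resource
        //          inline CMyPool() : CNodePool(16,8,nullptr) {}
        //
        //          template<typename T, typename... Args>
        //          inline TypedHandle<T> createNode(Args&&... args)
        //          {
        //              return {.untyped=_new<T>(std::forward<Args>(args)...)};
        //          }
        //  };
        //
        //  auto pool = core::make_smart_refctd_ptr<CMyPool>();
        //  const auto op = pool->createNode<CBinaryOpNode>();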

        // for now using KISS, we can switch to a general-purpose allocator later
        struct Chunk
        {
            inline Handle::value_t alloc(const uint32_t size, const uint16_t alignment)
            {
                return m_alloc.alloc_addr(size,alignment);
            }
            inline void free(const Handle::value_t addr, const uint32_t size)
            {
                m_alloc.free_addr(addr,size);
            }

            core::LinearAddressAllocatorST<Handle::value_t> m_alloc;
            uint8_t* m_data;
        };
        inline CNodePool(const uint8_t _chunkSizeLog2, const uint8_t _maxNodeAlignLog2, refctd_pmr_t&& _pmr) :
            m_pmr(_pmr ? std::move(_pmr):core::getDefaultMemoryResource()), m_chunkSizeLog2(_chunkSizeLog2), m_maxNodeAlignLog2(_maxNodeAlignLog2)
        {
            assert(m_chunkSizeLog2>=14 && m_maxNodeAlignLog2>=4);
        }
        inline ~CNodePool()
        {
            const auto chunkSize = 0x1u<<m_chunkSizeLog2;
            const auto chunkAlign = 0x1u<<m_maxNodeAlignLog2;
            for (auto& chunk : m_chunks)
                m_pmr->deallocate(chunk.m_data,chunkSize,chunkAlign);
        }

    private:
        inline uint32_t getChunkIx(const Handle& h) const {return h.value>>m_chunkSizeLog2;}

        template<typename T> requires (std::is_base_of_v<INode,T> && !std::is_const_v<T>)
        inline T* deref(const Handle& h)
        {
            const auto loAddr = h.value&((0x1u<<m_chunkSizeLog2)-1);
            return reinterpret_cast<T*>(m_chunks[getChunkIx(h)].m_data+loAddr);
        }
        template<typename T> requires (std::is_base_of_v<INode,T> && std::is_const_v<T>)
        inline T* deref(const Handle& h) const
        {
            return const_cast<CNodePool*>(this)->deref<std::remove_const_t<T>>(h);
        }

        core::vector<Chunk> m_chunks;
        refctd_pmr_t m_pmr;
        const uint8_t m_chunkSizeLog2;
        const uint8_t m_maxNodeAlignLog2;
};

} // namespace nbl::asset::material_compiler3
#endif