@@ -27,18 +27,22 @@ namespace cachelib {
 
 class SlabAllocator;
 
-// the following are for pointer compression for the memory allocator. We
-// compress pointers by storing the slab index and the alloc index of the
-// allocation inside the slab. With slab worth kNumSlabBits of data, if we
-// have the min allocation size as 64 bytes, that requires kNumSlabBits - 6
-// bits for storing the alloc index. This leaves the remaining (32 -
-// (kNumSlabBits - 6)) bits for the slab index. Hence we can index 256 GiB
-// of memory in slabs and index anything more than 64 byte allocations inside
-// the slab using a 32 bit representation.
-//
+template <typename PtrType, typename AllocatorContainer>
+class MultiTierPtrCompressor;
+
 // This CompressedPtr makes decompression fast by staying away from division and
-// modulo arithmetic and doing those during the compression time. We most often
-// decompress a CompressedPtr than compress a pointer while creating one.
+// modulo arithmetic and doing those during the compression time. We decompress
+// a CompressedPtr far more often than we compress one. This is used for
+// pointer compression by the memory allocator.
+
+// We compress pointers by storing the tier index, slab index and alloc index
+// of the allocation inside the slab. With a slab worth kNumSlabBits (22 bits)
+// of data and a minimum allocation size of 64 bytes, storing the alloc index
+// requires kNumSlabBits - 6 = 16 bits. The tier id occupies only the 32nd bit,
+// since its value cannot exceed kMaxTiers (2). This leaves the remaining
+// (32 - (kNumSlabBits - 6) - 1 bit for the tier id) = 15 bits for the slab
+// index. Hence we can index 128 GiB of memory per tier in a multi-tier
+// configuration, or 256 GiB in a single-tier configuration.
 class CACHELIB_PACKED_ATTR CompressedPtr {
  public:
   using PtrType = uint32_t;
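
As a sanity check on the bit arithmetic in the new comment block, here is a
minimal standalone sketch (not part of the patch; kNumSlabBits = 22 and the
64-byte minimum allocation are taken from the comment above):

#include <cstdint>

// Sketch only: replays the bit budget described in the comment.
constexpr unsigned kNumSlabBits = 22;  // each slab holds 4 MiB
constexpr unsigned kMinAllocPower = 6; // 64-byte minimum allocation
constexpr unsigned kNumAllocIdxBits = kNumSlabBits - kMinAllocPower; // 16
constexpr unsigned kTierIdBits = 1;    // one bit suffices for kMaxTiers = 2

// Multi-tier: 32 - 16 - 1 = 15 slab-index bits -> 2^15 slabs * 4 MiB = 128 GiB.
constexpr unsigned kSlabIdxBitsMulti = 32 - kNumAllocIdxBits - kTierIdBits;
static_assert((uint64_t{1} << (kSlabIdxBitsMulti + kNumSlabBits)) ==
                  uint64_t{128} * 1024 * 1024 * 1024,
              "128 GiB addressable per tier");

// Single-tier: the tier bit is reclaimed, giving 16 slab-index bits -> 256 GiB.
static_assert((uint64_t{1} << (kSlabIdxBitsMulti + 1 + kNumSlabBits)) ==
                  uint64_t{256} * 1024 * 1024 * 1024,
              "256 GiB addressable in a single tier");
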
@@ -62,9 +66,10 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
     return static_cast<uint32_t>(1) << (Slab::kMinAllocPower);
   }
 
-  // maximum adressable memory for pointer compression to work.
+  // maximum addressable memory for pointer compression to work.
   static constexpr size_t getMaxAddressableSize() noexcept {
-    return static_cast<size_t>(1) << (kNumSlabIdxBits + Slab::kNumSlabBits);
+    return static_cast<size_t>(1)
+           << (numSlabIdxBits(false) + Slab::kNumSlabBits);
   }
 
   // default construct to nullptr.
@@ -89,8 +94,11 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
   PtrType ptr_{kNull};
 
   // create a compressed pointer for a valid memory allocation.
-  CompressedPtr(uint32_t slabIdx, uint32_t allocIdx)
-      : ptr_(compress(slabIdx, allocIdx)) {}
+  CompressedPtr(uint32_t slabIdx,
+                uint32_t allocIdx,
+                bool isMultiTiered,
+                TierId tid = 0)
+      : ptr_(compress(slabIdx, allocIdx, isMultiTiered, tid)) {}
 
   constexpr explicit CompressedPtr(PtrType ptr) noexcept : ptr_{ptr} {}
 
@@ -100,31 +108,52 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
   static constexpr unsigned int kNumAllocIdxBits =
       Slab::kNumSlabBits - Slab::kMinAllocPower;
 
+  // Use the 32nd bit position for the TierId.
+  static constexpr unsigned int kNumTierIdxOffset = 31;
+
   static constexpr PtrType kAllocIdxMask = ((PtrType)1 << kNumAllocIdxBits) - 1;
 
+  // Mask for the single tier-id bit, the most significant bit of the pointer.
+  static constexpr PtrType kTierIdxMask = (PtrType)1 << kNumTierIdxOffset;
+
   // Number of bits for the slab index. This will be the top 16 bits of the
   // compressed ptr.
-  static constexpr unsigned int kNumSlabIdxBits =
-      NumBits<PtrType>::value - kNumAllocIdxBits;
+  static constexpr unsigned int numSlabIdxBits(bool isMultiTiered) {
+    return kNumTierIdxOffset - kNumAllocIdxBits + (!isMultiTiered);
+  }
 
   // Compress the given slabIdx and allocIdx into a 32-bit compressed
   // pointer.
-  static PtrType compress(uint32_t slabIdx, uint32_t allocIdx) noexcept {
+  static PtrType compress(uint32_t slabIdx,
+                          uint32_t allocIdx,
+                          bool isMultiTiered,
+                          TierId tid) noexcept {
     XDCHECK_LE(allocIdx, kAllocIdxMask);
-    XDCHECK_LT(slabIdx, (1u << kNumSlabIdxBits) - 1);
+    XDCHECK_LT(slabIdx, (1u << numSlabIdxBits(isMultiTiered)) - 1);
     return (slabIdx << kNumAllocIdxBits) + allocIdx;
   }
 
   // Get the slab index of the compressed ptr
-  uint32_t getSlabIdx() const noexcept {
+  uint32_t getSlabIdx(bool isMultiTiered) const noexcept {
     XDCHECK(!isNull());
-    return static_cast<uint32_t>(ptr_ >> kNumAllocIdxBits);
+    auto noTierIdPtr = isMultiTiered ? ptr_ & ~kTierIdxMask : ptr_;
+    return static_cast<uint32_t>(noTierIdPtr >> kNumAllocIdxBits);
   }
 
   // Get the allocation index of the compressed ptr
-  uint32_t getAllocIdx() const noexcept {
+  uint32_t getAllocIdx(bool isMultiTiered) const noexcept {
+    XDCHECK(!isNull());
+    auto noTierIdPtr = isMultiTiered ? ptr_ & ~kTierIdxMask : ptr_;
+    return static_cast<uint32_t>(noTierIdPtr & kAllocIdxMask);
+  }
+
+  uint32_t getTierId(bool isMultiTiered) const noexcept {
     XDCHECK(!isNull());
-    return static_cast<uint32_t>(ptr_ & kAllocIdxMask);
+    return isMultiTiered ? static_cast<uint32_t>(ptr_ >> kNumTierIdxOffset) : 0;
+  }
+
+  void setTierId(TierId tid) noexcept {
+    ptr_ += static_cast<uint64_t>(tid) << kNumTierIdxOffset;
   }
 
   friend SlabAllocator;
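
To see how the accessors above fit together, a hypothetical round trip
(standalone sketch; the constants mirror the patch, and setTierId's += behaves
like |= here because compress() always leaves the tier bit clear):

#include <cassert>
#include <cstdint>

int main() {
  constexpr unsigned kNumAllocIdxBits = 16;
  constexpr unsigned kNumTierIdxOffset = 31;
  constexpr uint32_t kAllocIdxMask = (1u << kNumAllocIdxBits) - 1;
  constexpr uint32_t kTierIdxMask = 1u << kNumTierIdxOffset;

  uint32_t slabIdx = 0x1234; // fits in 15 bits
  uint32_t allocIdx = 0xab;
  uint32_t ptr = (slabIdx << kNumAllocIdxBits) + allocIdx; // compress()
  ptr += 1u << kNumTierIdxOffset;                          // setTierId(1)

  assert(ptr >> kNumTierIdxOffset == 1);              // getTierId(true)
  uint32_t noTierIdPtr = ptr & ~kTierIdxMask;         // strip the tier bit
  assert(noTierIdPtr >> kNumAllocIdxBits == slabIdx); // getSlabIdx(true)
  assert((noTierIdPtr & kAllocIdxMask) == allocIdx);  // getAllocIdx(true)
  return 0;
}
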
@@ -137,11 +166,11 @@ class PtrCompressor {
       : allocator_(allocator) {}
 
   const CompressedPtr compress(const PtrType* uncompressed) const {
-    return allocator_.compress(uncompressed);
+    return allocator_.compress(uncompressed, false);
   }
 
   PtrType* unCompress(const CompressedPtr compressed) const {
-    return static_cast<PtrType*>(allocator_.unCompress(compressed));
+    return static_cast<PtrType*>(allocator_.unCompress(compressed, false));
   }
 
   bool operator==(const PtrCompressor& rhs) const noexcept {
@@ -156,5 +185,53 @@ class PtrCompressor {
   // memory allocator that does the pointer compression.
   const AllocatorT& allocator_;
 };
+
+template <typename PtrType, typename AllocatorContainer>
+class MultiTierPtrCompressor {
+ public:
+  explicit MultiTierPtrCompressor(const AllocatorContainer& allocators) noexcept
+      : allocators_(allocators) {}
+
+  const CompressedPtr compress(const PtrType* uncompressed) const {
+    if (uncompressed == nullptr)
+      return CompressedPtr{};
+
+    TierId tid;
+    for (tid = 0; tid < allocators_.size(); tid++) {
+      if (allocators_[tid]->isMemoryInAllocator(
+              static_cast<const void*>(uncompressed)))
+        break;
+    }
+
+    bool isMultiTiered = allocators_.size() > 1;
+    auto cptr = allocators_[tid]->compress(uncompressed, isMultiTiered);
+    if (isMultiTiered) { // config has multiple tiers
+      cptr.setTierId(tid);
+    }
+    return cptr;
+  }
+
+  PtrType* unCompress(const CompressedPtr compressed) const {
+    if (compressed.isNull()) {
+      return nullptr;
+    }
+    bool isMultiTiered = allocators_.size() > 1;
+    auto& allocator = *allocators_[compressed.getTierId(isMultiTiered)];
+    return static_cast<PtrType*>(
+        allocator.unCompress(compressed, isMultiTiered));
+  }
+
+  bool operator==(const MultiTierPtrCompressor& rhs) const noexcept {
+    return &allocators_ == &rhs.allocators_;
+  }
+
+  bool operator!=(const MultiTierPtrCompressor& rhs) const noexcept {
+    return !(*this == rhs);
+  }
+
+ private:
+  // memory allocators for all the tiers; they do the pointer compression.
+  const AllocatorContainer& allocators_;
+};
 } // namespace cachelib
 } // namespace facebook
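
Finally, a toy sketch of the tier lookup inside MultiTierPtrCompressor::compress;
StubAllocator and its fixed ranges are hypothetical stand-ins for the real
SlabAllocator container, used only to illustrate the linear scan:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

// Hypothetical allocator owning one contiguous memory range (one "tier").
struct StubAllocator {
  const char* begin;
  const char* end;
  bool isMemoryInAllocator(const void* p) const {
    auto a = reinterpret_cast<std::uintptr_t>(p);
    return a >= reinterpret_cast<std::uintptr_t>(begin) &&
           a < reinterpret_cast<std::uintptr_t>(end);
  }
};

int main() {
  static char tier0[64], tier1[64];
  std::vector<std::unique_ptr<StubAllocator>> allocators;
  allocators.push_back(
      std::make_unique<StubAllocator>(StubAllocator{tier0, tier0 + 64}));
  allocators.push_back(
      std::make_unique<StubAllocator>(StubAllocator{tier1, tier1 + 64}));

  // Same linear scan as compress(): find the tier that owns the pointer.
  const void* p = tier1 + 8;
  std::size_t tid = 0;
  for (; tid < allocators.size(); tid++) {
    if (allocators[tid]->isMemoryInAllocator(p))
      break;
  }
  std::cout << "pointer owned by tier " << tid << "\n"; // prints 1
}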