
Commit 645c333

added ability for compressed pointer to use full 32 bits for addressing in single-tier mode and 31 bits for addressing in multi-tier mode

1 parent 519f664 · commit 645c333

7 files changed · +73 −40 lines
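For context on the commit message: with the 4 MiB slabs and 64-byte minimum allocation quoted in the CompressedPtr.h comments below, the addressable-memory arithmetic works out as follows. A back-of-the-envelope sketch, not code from this commit:

#include <cstddef>

// Constants as stated in the CompressedPtr.h comments in this diff.
constexpr unsigned kNumSlabBits = 22;   // each slab holds 4 MiB
constexpr unsigned kMinAllocPower = 6;  // minimum allocation is 64 bytes
constexpr unsigned kNumAllocIdxBits = kNumSlabBits - kMinAllocPower; // 16

// Single-tier mode: all 32 bits address memory, so 32 - 16 = 16 bits of
// slab index -> 2^16 slabs * 4 MiB = 256 GiB addressable.
constexpr size_t kSingleTierMax =
    size_t{1} << (32 - kNumAllocIdxBits + kNumSlabBits);

// Multi-tier mode: the top bit holds the tier id, leaving 15 bits of slab
// index -> 2^15 slabs * 4 MiB = 128 GiB addressable per tier.
constexpr size_t kMultiTierMaxPerTier =
    size_t{1} << (31 - kNumAllocIdxBits + kNumSlabBits);

static_assert(kSingleTierMax == size_t{256} << 30, "256 GiB");
static_assert(kMultiTierMaxPerTier == size_t{128} << 30, "128 GiB");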

cachelib/allocator/CCacheAllocator.cpp

Lines changed: 4 additions & 2 deletions

@@ -36,7 +36,8 @@ CCacheAllocator::CCacheAllocator(MemoryAllocator& allocator,
       currentChunksIndex_(0) {
   auto& currentChunks = chunks_[currentChunksIndex_];
   for (auto chunk : *object.chunks()) {
-    currentChunks.push_back(allocator_.unCompress(CompressedPtr(chunk)));
+    // TODO : pass multi-tier flag when compact cache supports multi-tier config
+    currentChunks.push_back(allocator_.unCompress(CompressedPtr(chunk), false));
   }
 }

@@ -97,7 +98,8 @@ CCacheAllocator::SerializationType CCacheAllocator::saveState() {

   std::lock_guard<std::mutex> guard(resizeLock_);
   for (auto chunk : getCurrentChunks()) {
-    object.chunks()->push_back(allocator_.compress(chunk).saveState());
+    // TODO : pass multi-tier flag when compact cache supports multi-tier config
+    object.chunks()->push_back(allocator_.compress(chunk, false).saveState());
   }
   return object;
 }

cachelib/allocator/memory/CompressedPtr.h

Lines changed: 52 additions & 23 deletions

@@ -27,18 +27,20 @@ namespace cachelib {

 class SlabAllocator;

-// the following are for pointer compression for the memory allocator. We
-// compress pointers by storing the slab index and the alloc index of the
-// allocation inside the slab. With slab worth kNumSlabBits of data, if we
-// have the min allocation size as 64 bytes, that requires kNumSlabBits - 6
-// bits for storing the alloc index. This leaves the remaining (32 -
-// (kNumSlabBits - 6)) bits for the slab index. Hence we can index 256 GiB
-// of memory in slabs and index anything more than 64 byte allocations inside
-// the slab using a 32 bit representation.
-//
 // This CompressedPtr makes decompression fast by staying away from division and
-// modulo arithmetic and doing those during the compression time. We most often
-// decompress a CompressedPtr than compress a pointer while creating one.
+// modulo arithmetic and doing those during the compression time. We most often
+// decompress a CompressedPtr than compress a pointer while creating one. This
+// is used for pointer compression by the memory allocator.
+
+// We compress pointers by storing the tier index, slab index and alloc index of
+// the allocation inside the slab. With slab worth kNumSlabBits (22 bits) of
+// data, if we have the min allocation size as 64 bytes, that requires
+// kNumSlabBits - 6 = 16 bits for storing the alloc index. The tier id occupies
+// the 32nd bit only since its value cannot exceed kMaxTiers (2). This leaves
+// the remaining (32 - (kNumSlabBits - 6) - 1 bit for tier id) = 15 bits for
+// the slab index. Hence we can index 128 GiB of memory in slabs per tier and
+// index anything more than 64 byte allocations inside the slab using a 32 bit
+// representation.
 class CACHELIB_PACKED_ATTR CompressedPtr {
  public:
   using PtrType = uint32_t;

@@ -62,9 +64,9 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
     return static_cast<uint32_t>(1) << (Slab::kMinAllocPower);
   }

-  // maximum adressable memory for pointer compression to work.
+  // maximum addressable memory for pointer compression to work.
   static constexpr size_t getMaxAddressableSize() noexcept {
-    return static_cast<size_t>(1) << (kNumSlabIdxBits + Slab::kNumSlabBits);
+    return static_cast<size_t>(1) << (kNumSlabIdxBits + Slab::kNumSlabBits + 1);
   }

   // default construct to nullptr.

@@ -89,8 +91,11 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
   PtrType ptr_{kNull};

   // create a compressed pointer for a valid memory allocation.
-  CompressedPtr(uint32_t slabIdx, uint32_t allocIdx)
-      : ptr_(compress(slabIdx, allocIdx)) {}
+  CompressedPtr(uint32_t slabIdx,
+                uint32_t allocIdx,
+                bool isMultiTiered,
+                TierId tid = 0)
+      : ptr_(compress(slabIdx, allocIdx, isMultiTiered, tid)) {}

   constexpr explicit CompressedPtr(PtrType ptr) noexcept : ptr_{ptr} {}

@@ -100,31 +105,55 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
   static constexpr unsigned int kNumAllocIdxBits =
       Slab::kNumSlabBits - Slab::kMinAllocPower;

+  // Use 32nd bit position for TierId
+  static constexpr unsigned int kNumTierIdxOffset = 31;
+
   static constexpr PtrType kAllocIdxMask = ((PtrType)1 << kNumAllocIdxBits) - 1;

+  // kNumTierIdxBits most significant bits
+  static constexpr PtrType kTierIdxMask = (PtrType)1 << kNumTierIdxOffset;
+
   // Number of bits for the slab index. This will be the top 16 bits of the
   // compressed ptr.
   static constexpr unsigned int kNumSlabIdxBits =
-      NumBits<PtrType>::value - kNumAllocIdxBits;
+      kNumTierIdxOffset - kNumAllocIdxBits;

   // Compress the given slabIdx and allocIdx into a 32-bit compressed
   // pointer.
-  static PtrType compress(uint32_t slabIdx, uint32_t allocIdx) noexcept {
+  static PtrType compress(uint32_t slabIdx,
+                          uint32_t allocIdx,
+                          bool isMultiTiered,
+                          TierId tid) noexcept {
     XDCHECK_LE(allocIdx, kAllocIdxMask);
+    if (!isMultiTiered) {
+      XDCHECK_LT(slabIdx, (1u << (kNumSlabIdxBits + 1)) - 1);
+      return (slabIdx << kNumAllocIdxBits) + allocIdx;
+    }
     XDCHECK_LT(slabIdx, (1u << kNumSlabIdxBits) - 1);
     return (slabIdx << kNumAllocIdxBits) + allocIdx;
   }

   // Get the slab index of the compressed ptr
-  uint32_t getSlabIdx() const noexcept {
+  uint32_t getSlabIdx(bool isMultiTiered) const noexcept {
     XDCHECK(!isNull());
-    return static_cast<uint32_t>(ptr_ >> kNumAllocIdxBits);
+    auto noTierIdPtr = isMultiTiered ? ptr_ & ~kTierIdxMask : ptr_;
+    return static_cast<uint32_t>(noTierIdPtr >> kNumAllocIdxBits);
   }

   // Get the allocation index of the compressed ptr
-  uint32_t getAllocIdx() const noexcept {
+  uint32_t getAllocIdx(bool isMultiTiered) const noexcept {
     XDCHECK(!isNull());
-    return static_cast<uint32_t>(ptr_ & kAllocIdxMask);
+    auto noTierIdPtr = isMultiTiered ? ptr_ & ~kTierIdxMask : ptr_;
+    return static_cast<uint32_t>(noTierIdPtr & kAllocIdxMask);
+  }
+
+  uint32_t getTierId(bool isMultiTiered) const noexcept {
+    XDCHECK(!isNull());
+    return isMultiTiered ? static_cast<uint32_t>(ptr_ >> kNumTierIdxOffset) : 0;
+  }
+
+  void setTierId(TierId tid) noexcept {
+    ptr_ += static_cast<uint64_t>(tid) << kNumTierIdxOffset;
   }

   friend SlabAllocator;

@@ -137,11 +166,11 @@ class PtrCompressor {
       : allocator_(allocator) {}

   const CompressedPtr compress(const PtrType* uncompressed) const {
-    return allocator_.compress(uncompressed);
+    return allocator_.compress(uncompressed, false);
   }

   PtrType* unCompress(const CompressedPtr compressed) const {
-    return static_cast<PtrType*>(allocator_.unCompress(compressed));
+    return static_cast<PtrType*>(allocator_.unCompress(compressed, false));
   }

   bool operator==(const PtrCompressor& rhs) const noexcept {
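To make the new layout concrete, here is a minimal standalone sketch of the multi-tier bit packing described in the comment block above. It mirrors the diff's arithmetic rather than calling CompressedPtr directly, since the diff suggests the constructor and index accessors are private to SlabAllocator (note the friend declaration); names and values follow the diff.

#include <cassert>
#include <cstdint>

// Multi-tier layout per the comments above: [tier:1][slabIdx:15][allocIdx:16].
constexpr unsigned kNumAllocIdxBits = 16;  // kNumSlabBits (22) - kMinAllocPower (6)
constexpr unsigned kNumTierIdxOffset = 31; // tier id lives in the 32nd bit
constexpr uint32_t kAllocIdxMask = (uint32_t{1} << kNumAllocIdxBits) - 1;
constexpr uint32_t kTierIdxMask = uint32_t{1} << kNumTierIdxOffset;

// Equivalent of compress() followed by setTierId() in the diff (compress()
// itself leaves the tier bit clear; the tid parameter defaults to 0).
uint32_t pack(uint32_t tid, uint32_t slabIdx, uint32_t allocIdx) {
  return (tid << kNumTierIdxOffset) | (slabIdx << kNumAllocIdxBits) | allocIdx;
}

int main() {
  const uint32_t ptr = pack(/*tid=*/1, /*slabIdx=*/12345, /*allocIdx=*/678);

  // getTierId(true): the most significant bit.
  assert(ptr >> kNumTierIdxOffset == 1);

  // getSlabIdx(true) / getAllocIdx(true): strip the tier bit, then shift/mask.
  const uint32_t noTierIdPtr = ptr & ~kTierIdxMask;
  assert(noTierIdPtr >> kNumAllocIdxBits == 12345);
  assert((noTierIdPtr & kAllocIdxMask) == 678);
  return 0;
}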

cachelib/allocator/memory/MemoryAllocator.h

Lines changed: 4 additions & 4 deletions

@@ -534,8 +534,8 @@ class MemoryAllocator {
   // as the original pointer is valid.
   //
   // @throw std::invalid_argument if the ptr is invalid.
-  CompressedPtr CACHELIB_INLINE compress(const void* ptr) const {
-    return slabAllocator_.compress(ptr);
+  CompressedPtr CACHELIB_INLINE compress(const void* ptr, bool isMultiTiered) const {
+    return slabAllocator_.compress(ptr, isMultiTiered);
   }

   // retrieve the raw pointer corresponding to the compressed pointer. This is

@@ -546,8 +546,8 @@ class MemoryAllocator {
   // @return the raw pointer corresponding to this compressed pointer.
   //
   // @throw std::invalid_argument if the compressed pointer is invalid.
-  void* CACHELIB_INLINE unCompress(const CompressedPtr cPtr) const {
-    return slabAllocator_.unCompress(cPtr);
+  void* CACHELIB_INLINE unCompress(const CompressedPtr cPtr, bool isMultiTiered) const {
+    return slabAllocator_.unCompress(cPtr, isMultiTiered);
   }

   // a special implementation of pointer compression for benchmarking purposes.
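A possible call site after this change; a sketch, not code from the commit, assuming the usual CacheLib include path and namespace:

#include <cassert>

#include "cachelib/allocator/memory/MemoryAllocator.h"

using namespace facebook::cachelib;

// Round-trips a pointer through the updated public API. The flag must match
// between compress() and unCompress(); single-tier callers pass false and
// keep the full 32-bit slab addressing.
void* roundTrip(const MemoryAllocator& allocator, const void* ptr) {
  CompressedPtr cptr = allocator.compress(ptr, /* isMultiTiered */ false);
  void* raw = allocator.unCompress(cptr, /* isMultiTiered */ false);
  assert(raw == ptr);
  return raw;
}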

cachelib/allocator/memory/Slab.h

Lines changed: 2 additions & 0 deletions

@@ -50,6 +50,8 @@ namespace cachelib {
  * independantly by the SlabAllocator.
  */

+// identifier for the memory tier
+using TierId = int8_t;
 // identifier for the memory pool
 using PoolId = int8_t;
 // identifier for the allocation class

cachelib/allocator/memory/SlabAllocator.h

Lines changed: 5 additions & 5 deletions

@@ -225,7 +225,7 @@ class SlabAllocator {
   // the corresponding memory allocator. trying to inline this just increases
   // the code size and does not move the needle on the benchmarks much.
   // Calling this with invalid input in optimized build is undefined behavior.
-  CompressedPtr CACHELIB_INLINE compress(const void* ptr) const {
+  CompressedPtr CACHELIB_INLINE compress(const void* ptr, bool isMultiTiered) const {
     if (ptr == nullptr) {
       return CompressedPtr{};
     }

@@ -246,19 +246,19 @@ class SlabAllocator {
         static_cast<uint32_t>(reinterpret_cast<const uint8_t*>(ptr) -
                               reinterpret_cast<const uint8_t*>(slab)) /
         allocSize;
-    return CompressedPtr{slabIndex, allocIdx};
+    return CompressedPtr{slabIndex, allocIdx, isMultiTiered};
   }

   // uncompress the point and return the raw ptr. This function never throws
   // in optimized build and assumes that the caller is responsible for calling
   // it with a valid compressed pointer.
-  void* CACHELIB_INLINE unCompress(const CompressedPtr ptr) const {
+  void* CACHELIB_INLINE unCompress(const CompressedPtr ptr, bool isMultiTiered) const {
     if (ptr.isNull()) {
       return nullptr;
     }

-    const SlabIdx slabIndex = ptr.getSlabIdx();
-    const uint32_t allocIdx = ptr.getAllocIdx();
+    const SlabIdx slabIndex = ptr.getSlabIdx(isMultiTiered);
+    const uint32_t allocIdx = ptr.getAllocIdx(isMultiTiered);
     const Slab* slab = &slabMemoryStart_[slabIndex];

 #ifndef NDEBUG
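A worked example of the index arithmetic in compress() above, under assumed values (a 64-byte allocation class and a pointer 4096 bytes past the start of its slab); a sketch, not code from the commit:

#include <cstdint>

// compress() derives allocIdx as the byte offset within the slab divided by
// the allocation class size; unCompress() reverses it with a multiply.
constexpr uint32_t allocSize = 64;           // assumed allocation class size
constexpr uint32_t byteOffsetInSlab = 4096;  // assumed offset of ptr in its slab
constexpr uint32_t allocIdx = byteOffsetInSlab / allocSize;

static_assert(allocIdx == 64, "ptr is the 65th 64-byte allocation in the slab");

// Decompression: slab base = slabMemoryStart_ + slabIndex (each Slab is
// 4 MiB), then ptr = base + allocIdx * allocSize = base + 4096.
static_assert(allocIdx * allocSize == byteOffsetInSlab, "round-trips exactly");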

cachelib/allocator/memory/tests/MemoryAllocatorTest.cpp

Lines changed: 3 additions & 3 deletions

@@ -401,13 +401,13 @@ TEST_F(MemoryAllocatorTest, PointerCompression) {
   for (const auto& pool : poolAllocs) {
     const auto& allocs = pool.second;
     for (const auto* alloc : allocs) {
-      CompressedPtr ptr = m.compress(alloc);
+      CompressedPtr ptr = m.compress(alloc, false);
       ASSERT_FALSE(ptr.isNull());
-      ASSERT_EQ(alloc, m.unCompress(ptr));
+      ASSERT_EQ(alloc, m.unCompress(ptr, false));
     }
   }

-  ASSERT_EQ(nullptr, m.unCompress(m.compress(nullptr)));
+  ASSERT_EQ(nullptr, m.unCompress(m.compress(nullptr, false), false));
 }

 TEST_F(MemoryAllocatorTest, Restorable) {
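A hypothetical companion test, not part of this commit, sketching the multi-tier path against the same fixture state shown above: since compress() defaults the tier id to 0, decompression should still recover the original pointer when both sides pass isMultiTiered = true.

TEST_F(MemoryAllocatorTest, PointerCompressionMultiTier) {
  // identical setup to PointerCompression above; only the flag differs
  for (const auto& pool : poolAllocs) {
    const auto& allocs = pool.second;
    for (const auto* alloc : allocs) {
      // tier id defaults to 0, so only the slab-index width changes
      CompressedPtr ptr = m.compress(alloc, true);
      ASSERT_FALSE(ptr.isNull());
      ASSERT_EQ(alloc, m.unCompress(ptr, true));
    }
  }
}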

cachelib/benchmarks/PtrCompressionBench.cpp

Lines changed: 3 additions & 3 deletions

@@ -61,7 +61,7 @@ void buildAllocs(size_t poolSize) {
     void* alloc = ma->allocate(pid, size);
     XDCHECK_GE(size, CompressedPtr::getMinAllocSize());
     if (alloc != nullptr) {
-      validAllocs.push_back({alloc, ma->compress(alloc)});
+      validAllocs.push_back({alloc, ma->compress(alloc, false)});
       validAllocsAlt.push_back({alloc, ma->compressAlt(alloc)});
       numAllocations++;
     }

@@ -83,7 +83,7 @@ BENCHMARK(CompressionAlt) {

 BENCHMARK_RELATIVE(Compression) {
   for (const auto& alloc : validAllocs) {
-    CompressedPtr c = m->compress(alloc.first);
+    CompressedPtr c = m->compress(alloc.first, false);
     folly::doNotOptimizeAway(c);
   }
 }

@@ -97,7 +97,7 @@ BENCHMARK(DeCompressAlt) {

 BENCHMARK_RELATIVE(DeCompress) {
   for (const auto& alloc : validAllocs) {
-    void* ptr = m->unCompress(alloc.second);
+    void* ptr = m->unCompress(alloc.second, false);
     folly::doNotOptimizeAway(ptr);
   }
 }
