-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathlrmalloc_internal.h
More file actions
112 lines (91 loc) · 2.71 KB
/
lrmalloc_internal.h
File metadata and controls
112 lines (91 loc) · 2.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
/*
* Copyright (C) 2022 Ricardo Leite. All rights reserved.
* Licenced under the MIT licence. See COPYING file in the project root for
* details.
*/
#ifndef __LFMALLOC_INTERNAL_H
#define __LFMALLOC_INTERNAL_H
#include <atomic>
#include "lrmalloc.h"
#include "log.h"
// superblock states
// used in Anchor::state, a 2-bit bit-field, so values must fit in 2 bits
enum SuperblockState : uint8_t {
    // all blocks allocated or reserved
    SB_FULL = 0,
    // has unreserved available blocks
    SB_PARTIAL = 1,
    // all blocks are free
    SB_EMPTY = 2,
};
struct Anchor;
struct DescriptorNode;
struct Descriptor;
struct ProcHeap;
struct SizeClassData;
struct TCacheBin;
#define LG_MAX_BLOCK_NUM 31
#define MAX_BLOCK_NUM (1ul << LG_MAX_BLOCK_NUM)
// Packed per-superblock allocation state. The whole struct occupies a
// single 64-bit word (enforced by the STATIC_ASSERT below) so it can be
// read and updated atomically as one unit -- see std::atomic<Anchor> in
// Descriptor.
struct Anchor {
    // current superblock state (SB_FULL / SB_PARTIAL / SB_EMPTY)
    SuperblockState state : 2;
    // presumably the index of the first available block in the
    // superblock's free list -- confirm against the allocation path
    uint32_t avail : LG_MAX_BLOCK_NUM;
    // presumably the count of free blocks -- confirm against callers
    uint32_t count : LG_MAX_BLOCK_NUM;
} LFMALLOC_ATTR(packed);
// 2 + 31 + 31 bits == 64; packed attribute prevents any padding.
STATIC_ASSERT(sizeof(Anchor) == sizeof(uint64_t), "Invalid anchor size");
struct DescriptorNode {
public:
// ptr
Descriptor* _desc;
// aba counter
// uint64_t _counter;
public:
void Set(Descriptor* desc, uint64_t counter)
{
// desc must be cacheline aligned
ASSERT(((uint64_t)desc & CACHELINE_MASK) == 0);
// counter may be incremented but will always be stored in
// LG_CACHELINE bits
_desc = (Descriptor*)((uint64_t)desc | (counter & CACHELINE_MASK));
}
Descriptor* GetDesc() const
{
return (Descriptor*)((uint64_t)_desc & ~CACHELINE_MASK);
}
uint64_t GetCounter() const
{
return (uint64_t)((uint64_t)_desc & CACHELINE_MASK);
}
} LFMALLOC_ATTR(packed);
STATIC_ASSERT(sizeof(DescriptorNode) == sizeof(uint64_t), "Invalid descriptor node size");
// Superblock descriptor
// needs to be cache-line aligned
// descriptors are allocated and *never* freed
struct Descriptor {
    // list node pointers
    // used in free descriptor list
    std::atomic<DescriptorNode> nextFree;
    // used in partial descriptor list
    std::atomic<DescriptorNode> nextPartial;
    // anchor: the superblock's packed allocation state, a single
    // atomically-updatable 64-bit word
    std::atomic<Anchor> anchor;
    // presumably points at the superblock's memory region -- confirm
    // against the allocation code
    char* superblock;
    // owning per-sizeclass heap
    ProcHeap* heap;
    uint32_t blockSize; // block size in bytes
    uint32_t maxcount;  // presumably the number of blocks in the
                        // superblock -- confirm against callers
} LFMALLOC_CACHE_ALIGNED;
// at least one ProcHeap instance exists for each sizeclass
struct ProcHeap {
public:
    // ptr to descriptor, head of partial descriptor list
    // (DescriptorNode carries the ABA counter needed for lock-free
    // list updates)
    std::atomic<DescriptorNode> partialList;
    // size class index
    size_t scIdx;

public:
    // Index of the size class this heap serves.
    size_t GetScIdx() const { return scIdx; }
    // Looks up this heap's SizeClassData entry; defined elsewhere.
    SizeClassData* GetSizeClass() const;
} LFMALLOC_ATTR(aligned(CACHELINE));
// size of allocated block when allocating descriptors
// block is split into multiple descriptors
// 64k byte blocks
#define DESCRIPTOR_BLOCK_SZ (16 * PAGE)
#endif // __LFMALLOC_INTERNAL_H