- Quick Start
- Using Pre-built Binaries
- Building from Source
- Integration Methods
- Custom Allocator Examples
- Configuration Best Practices
- Memory Lifecycle and Deallocation
- Troubleshooting
Option A: Download pre-built binaries
- Get the latest release from GitHub Releases
- Extract the archive
Option B: Build from source
git clone https://github.com/el-dockerr/GhostMem.git
cd GhostMem
./build.sh # Linux
# or
build.bat # Windows
#include "ghostmem/GhostMemoryManager.h"
#include "ghostmem/GhostAllocator.h"
#include <vector>
// Minimal quick-start: a std::vector whose storage is provided by
// GhostAllocator. Pages are compressed when evicted and transparently
// decompressed on the next access - the container code never changes.
int main() {
// Use with STL containers
std::vector<int, GhostAllocator<int>> vec;
// Push data - compression happens automatically
for (int i = 0; i < 1000000; i++) {
vec.push_back(i);
}
// Access data - decompression happens automatically
// NOTE(review): `value` is read but otherwise unused; it exists only to
// show that indexing a possibly-evicted page is transparent.
int value = vec[500000];
return 0;
}ghostmem-windows-x64/
├── lib/
│ ├── ghostmem_shared.dll # Dynamic library
│ └── ghostmem.lib # Static library
├── include/
│ ├── GhostMemoryManager.h
│ ├── GhostAllocator.h
│ └── Version.h
└── bin/
└── ghostmem_demo.exe
YourProject/
├── include/
│ └── ghostmem/ # Copy all headers here
├── lib/
│ ├── ghostmem_shared.dll # Copy DLL here
│ └── ghostmem.lib # Copy static lib here
└── src/
└── main.cpp
Visual Studio:
- Include directories: Add `include/` to include paths
- Library directories: Add `lib/` to library paths
- Link library: Add `ghostmem.lib` to linker input
- Runtime: Copy `ghostmem_shared.dll` to output directory
CMake:
cmake_minimum_required(VERSION 3.10)
project(MyApp)
# Add include directory
include_directories(${CMAKE_SOURCE_DIR}/include)
# Add library directory
link_directories(${CMAKE_SOURCE_DIR}/lib)
# Create executable
add_executable(myapp src/main.cpp)
# Link against static library
target_link_libraries(myapp ghostmem)
# Or link against DLL (copy DLL to output dir)
# target_link_libraries(myapp ghostmem_shared)
# add_custom_command(TARGET myapp POST_BUILD
# COMMAND ${CMAKE_COMMAND} -E copy_if_different
# "${CMAKE_SOURCE_DIR}/lib/ghostmem_shared.dll"
# $<TARGET_FILE_DIR:myapp>)Makefile:
CXX = cl.exe
CXXFLAGS = /I include /EHsc /std:c++17
LDFLAGS = /link /LIBPATH:lib ghostmem.lib
myapp.exe: src/main.cpp
$(CXX) $(CXXFLAGS) src/main.cpp $(LDFLAGS) /Fe:myapp.exe
# Build
cmake --build . --config Release
# Run (DLL must be in same dir or in PATH)
myapp.exeghostmem-linux-x64/
├── lib/
│ ├── libghostmem.so # Shared library
│ └── libghostmem.a # Static library
├── include/
│ ├── GhostMemoryManager.h
│ ├── GhostAllocator.h
│ └── Version.h
└── bin/
└── ghostmem_demo
# Copy headers
sudo cp -r include/ghostmem /usr/local/include/
# Copy libraries
sudo cp lib/libghostmem.so /usr/local/lib/
sudo cp lib/libghostmem.a /usr/local/lib/
# Update library cache
sudo ldconfigCMake:
cmake_minimum_required(VERSION 3.10)
project(MyApp)
# If installed system-wide, just link
add_executable(myapp src/main.cpp)
target_link_libraries(myapp ghostmem pthread)
# Or use local installation
# include_directories(${CMAKE_SOURCE_DIR}/include)
# link_directories(${CMAKE_SOURCE_DIR}/lib)
# target_link_libraries(myapp ghostmem pthread)Makefile:
CXX = g++
CXXFLAGS = -std=c++17 -I include
LDFLAGS = -L lib -lghostmem -lpthread
myapp: src/main.cpp
$(CXX) $(CXXFLAGS) src/main.cpp $(LDFLAGS) -o myapp
# If using shared library, may need to set LD_LIBRARY_PATH
run: myapp
LD_LIBRARY_PATH=lib ./myapp
pkg-config (Advanced):
# Create ghostmem.pc file
cat > /usr/local/lib/pkgconfig/ghostmem.pc << EOF
prefix=/usr/local
exec_prefix=\${prefix}
libdir=\${exec_prefix}/lib
includedir=\${prefix}/include
Name: GhostMem
Description: Virtual RAM through transparent compression
Version: 0.9.0
Libs: -L\${libdir} -lghostmem -lpthread
Cflags: -I\${includedir}
EOF
# Use in Makefile
CXXFLAGS = $(shell pkg-config --cflags ghostmem)
LDFLAGS = $(shell pkg-config --libs ghostmem)
# Build
make
# Run with local shared library
LD_LIBRARY_PATH=lib ./myapp
# Or if installed system-wide
./myapp
# Your CMakeLists.txt
cmake_minimum_required(VERSION 3.10)
project(MyApp)
# Add GhostMem as subdirectory
add_subdirectory(external/GhostMem)
# Create your executable
add_executable(myapp src/main.cpp)
# Link against GhostMem
target_link_libraries(myapp ghostmem)
target_include_directories(myapp PRIVATE external/GhostMem/src)
Project structure:
MyApp/
├── CMakeLists.txt
├── external/
│ └── GhostMem/ # Git submodule or copied source
└── src/
└── main.cpp
Copy GhostMem source files directly into your project:
MyApp/
├── src/
│ ├── main.cpp
│ └── ghostmem/
│ ├── GhostMemoryManager.cpp
│ ├── GhostMemoryManager.h
│ ├── GhostAllocator.h
│ └── Version.h
└── 3rdparty/
├── lz4.c
└── lz4.h
Compile everything together:
g++ -std=c++17 src/main.cpp \
src/ghostmem/GhostMemoryManager.cpp \
3rdparty/lz4.c \
-I src -pthread -o myapp
#include "ghostmem/GhostAllocator.h"
#include <vector>
// Basic usage
std::vector<int, GhostAllocator<int>> ghost_vec;
ghost_vec.push_back(42);
// With reserve
std::vector<double, GhostAllocator<double>> data;
data.reserve(1000000); // Reserve space (still virtual)
for (int i = 0; i < 1000000; i++) {
data.push_back(i * 3.14);
}#include "ghostmem/GhostAllocator.h"
#include <string>
// Type alias for convenience
using GhostString = std::basic_string<
char,
std::char_traits<char>,
GhostAllocator<char>
>;
GhostString text = "This string uses ghost memory!";
text += " Compression happens automatically.";#include "ghostmem/GhostAllocator.h"
#include <map>
using GhostMap = std::map<
int,
std::string,
std::less<int>,
GhostAllocator<std::pair<const int, std::string>>
>;
GhostMap cache;
cache[1] = "one";
cache[2] = "two";#include "ghostmem/GhostAllocator.h"
#include <unordered_map>
using GhostHashMap = std::unordered_map<
std::string,
int,
std::hash<std::string>,
std::equal_to<std::string>,
GhostAllocator<std::pair<const std::string, int>>
>;
GhostHashMap lookup;
lookup["answer"] = 42;#include "ghostmem/GhostMemoryManager.h"
// Allocate raw memory
void* ptr = GhostMemoryManager::Instance().AllocateGhost(1024 * 1024); // 1MB
if (ptr == nullptr) {
// Handle allocation failure
throw std::bad_alloc();
}
// Use the memory
int* data = static_cast<int*>(ptr);
data[0] = 42;
data[1] = 100;
// Note: Deallocation not fully implemented yet
// GhostMemoryManager::Instance().DeallocateGhost(ptr);
Create your own allocator that adds features:
#include "ghostmem/GhostAllocator.h"
#include <iostream> // required: the logging hooks below use std::cout
#include <memory>
// Example: decorate GhostAllocator with allocation/deallocation logging.
// The interface is identical to GhostAllocator<T>, so it can be used as a
// drop-in replacement in any STL container.
template<typename T>
class LoggingGhostAllocator : public GhostAllocator<T> {
public:
using Base = GhostAllocator<T>;
using typename Base::pointer;
using typename Base::size_type;
// Log the request, then forward to the ghost allocator.
pointer allocate(size_type n) {
std::cout << "Allocating " << n << " objects of size "
<< sizeof(T) << std::endl;
return Base::allocate(n);
}
// Log the release, then forward to the ghost allocator.
void deallocate(pointer p, size_type n) {
std::cout << "Deallocating " << n << " objects" << std::endl;
Base::deallocate(p, n);
}
};
// Usage
std::vector<int, LoggingGhostAllocator<int>> logged_vec;
logged_vec.push_back(42); // Prints allocation message
#include "ghostmem/GhostAllocator.h"
#include <vector>
#include <numeric>
void process_large_dataset() {
// Allocate vector for 10 million integers (~40MB)
std::vector<int, GhostAllocator<int>> dataset;
dataset.reserve(10'000'000);
// Fill with data
for (int i = 0; i < 10'000'000; i++) {
dataset.push_back(i);
}
// Process in chunks (keeps memory hot)
const size_t chunk_size = 100'000;
for (size_t i = 0; i < dataset.size(); i += chunk_size) {
size_t end = std::min(i + chunk_size, dataset.size());
// Sum this chunk
int sum = std::accumulate(
dataset.begin() + i,
dataset.begin() + end,
0
);
std::cout << "Chunk " << i/chunk_size << " sum: " << sum << std::endl;
}
}#include "ghostmem/GhostAllocator.h"
#include <vector>
#include <array>
// Example: keep per-layer weights in ghost memory so only the layer
// currently doing work needs to be resident in physical RAM.
class NeuralNetworkLayer {
public:
using WeightVector = std::vector<float, GhostAllocator<float>>;
// Allocate input_size * output_size weights in ghost memory.
NeuralNetworkLayer(size_t input_size, size_t output_size)
: weights_(input_size * output_size)
{
// Initialize weights (compressed automatically when not in use)
// NOTE(review): rand() is fine for a demo; real code should use <random>.
for (size_t i = 0; i < weights_.size(); i++) {
weights_[i] = (rand() % 100) / 100.0f;
}
}
// Placeholder forward pass: touching weights_ faults any compressed
// pages back into RAM transparently; the math itself is elided.
WeightVector forward(const std::vector<float>& input) {
// Accessing weights triggers decompression if needed
WeightVector output;
// ... neural network math ...
return output;
}
private:
WeightVector weights_; // Compressed when not in use
};
// Build a small three-layer model. Layers that are not currently being
// used may have their weight pages compressed; they are decompressed on
// demand during inference.
int main() {
// Create multiple layers - only active layer stays in RAM
std::vector<NeuralNetworkLayer> model;
model.emplace_back(1024, 512); // Layer 1
model.emplace_back(512, 256); // Layer 2
model.emplace_back(256, 10); // Layer 3 (output)
// Run inference - layers decompress as needed
std::vector<float> input(1024, 0.5f);
auto result = model[0].forward(input);
// ... continue through layers ...
}#include "ghostmem/GhostAllocator.h"
#include <unordered_map>
#include <string>
// Example: byte-blob cache whose stored values (and the hash map's own
// node allocations) live in ghost memory, so cold entries are compressed
// automatically while hot entries stay in RAM.
class GhostCache {
public:
// Both the mapped char-vectors and the map's pair nodes use GhostAllocator.
using CacheMap = std::unordered_map<
std::string,
std::vector<char, GhostAllocator<char>>,
std::hash<std::string>,
std::equal_to<std::string>,
GhostAllocator<std::pair<const std::string,
std::vector<char, GhostAllocator<char>>>>
>;
// Copy `data` into a ghost-allocated vector and store it under `key`,
// replacing any existing entry.
void put(const std::string& key, const std::vector<char>& data) {
// Store in ghost memory - compressed automatically
std::vector<char, GhostAllocator<char>> ghost_data(
data.begin(), data.end()
);
cache_[key] = std::move(ghost_data);
}
// Return a plain-heap copy of the entry, or an empty vector if absent.
std::vector<char> get(const std::string& key) {
auto it = cache_.find(key);
if (it != cache_.end()) {
// Decompresses automatically on access
return std::vector<char>(it->second.begin(), it->second.end());
}
return {};
}
private:
CacheMap cache_;
};When to use disk backing:
- ✅ Extremely memory-constrained systems (< 64MB RAM)
- ✅ Large datasets that compress well (text, structured data)
- ✅ Fast storage available (SSD/NVMe)
- ✅ Acceptable I/O latency for your workload
When to use in-memory mode (default):
- ✅ Sufficient RAM available (> 128MB)
- ✅ Performance-critical applications
- ✅ Random access patterns
- ✅ Incompressible data (already compressed, encrypted, random)
Example configurations:
// Disk-backed mode for memory-constrained IoT device
GhostConfig config;
config.use_disk_backing = true;
config.disk_file_path = "/tmp/ghostmem.swap";
config.compress_before_disk = true;
config.max_memory_pages = 32; // Only 128KB RAM for pages
if (!GhostMemoryManager::Instance().Initialize(config)) {
// Handle error
}// In-memory mode for desktop application (default)
// No configuration needed, or:
GhostConfig config;
config.use_disk_backing = false;
config.max_memory_pages = 2048; // 8MB RAM for pages
GhostMemoryManager::Instance().Initialize(config);// Fast disk mode (SSD) without compression
GhostConfig config;
config.use_disk_backing = true;
config.disk_file_path = "ghostmem.swap";
config.compress_before_disk = false; // Skip compression for faster I/O
config.max_memory_pages = 128; // 512KB RAM
GhostMemoryManager::Instance().Initialize(config);Performance comparison:
| Configuration | Memory Usage | Speed | Best For |
|---|---|---|---|
| In-memory (default) | High | Fastest (µs latency) | Desktop apps, gaming, real-time |
| Disk + compression | Lowest | Slow (ms latency) | IoT, embedded, batch processing |
| Disk + no compression | Medium | Medium (sub-ms) | SSD systems, CPU-bound workloads |
You can configure the page limit either by:
Option A: Using GhostConfig (recommended)
GhostConfig config;
config.max_memory_pages = 256; // 1MB RAM
GhostMemoryManager::Instance().Initialize(config);Option B: Editing the constant (requires recompilation)
Edit src/ghostmem/GhostMemoryManager.h:
// For IoT devices (limited RAM)
const size_t MAX_PHYSICAL_PAGES = 10; // 40KB physical RAM
// For desktop applications
const size_t MAX_PHYSICAL_PAGES = 1024; // 4MB physical RAM
// For server applications
const size_t MAX_PHYSICAL_PAGES = 10000; // 40MB physical RAMGuidelines:
- Too low: Frequent compression/decompression (higher CPU, lower performance)
- Too high: More physical RAM used, less compression benefit
- Optimal: Balance based on your working set size
Formula:
Working Set Size ≈ MAX_PHYSICAL_PAGES × PAGE_SIZE
Benchmarking:
#include <chrono>
auto start = std::chrono::high_resolution_clock::now();
// ... your workload ...
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
end - start
).count();
std::cout << "Time: " << duration << "ms" << std::endl;
Test with different max_memory_pages values to find optimal setting.
✅ Good: Sequential access
std::vector<int, GhostAllocator<int>> data(1000000);
// Good - accesses pages sequentially
for (size_t i = 0; i < data.size(); i++) {
data[i] = i * 2;
}❌ Bad: Random access
// Bad - may thrash between pages
for (int i = 0; i < 1000000; i++) {
size_t random_idx = rand() % data.size();
data[random_idx]++; // Random access causes page faults
}✅ Good: Group related data
struct DataChunk {
std::array<int, 1000> values; // Fits in one page
};
std::vector<DataChunk, GhostAllocator<DataChunk>> chunks;❌ Bad: Scattered data
// Each element in different page - bad locality
std::vector<std::unique_ptr<LargeObject>,
GhostAllocator<std::unique_ptr<LargeObject>>> scattered;✅ Good: Reserve capacity upfront
std::vector<int, GhostAllocator<int>> vec;
vec.reserve(1000000); // Reserve virtual space
for (int i = 0; i < 1000000; i++) {
vec.push_back(i); // No reallocations
}❌ Bad: Grow dynamically
std::vector<int, GhostAllocator<int>> vec;
// Vector grows and reallocates multiple times
for (int i = 0; i < 1000000; i++) {
vec.push_back(i); // May cause multiple reallocations
}GhostMem memory goes through several states during its lifecycle:
1. VIRTUAL RESERVED → 2. PHYSICAL COMMITTED → 3. ACTIVE IN RAM
↓
4. EVICTED/COMPRESSED
↓
5. RESTORED (back to step 3)
↓
6. DEALLOCATED
State Descriptions:
-
Virtual Reserved (after
AllocateGhost())- Virtual address space reserved
- No physical RAM allocated
- Page marked as inaccessible
- Immediate operation (microseconds)
-
Physical Committed (on first access)
- Page fault triggers handler
- Physical RAM committed for page
- Page zeroed and made accessible
- Added to LRU tracking
-
Active in RAM
- Page in physical memory
- Direct CPU access (no overhead)
- Tracked in `active_ram_pages` list
- Subject to LRU eviction
-
Evicted/Compressed (when RAM limit reached)
- Page compressed with LZ4
- Stored in backing store (RAM or disk)
- Physical memory released
- Virtual address still valid
-
Restored (on next access)
- Page fault triggers handler
- Compressed data decompressed
- Physical RAM recommitted
- Returns to Active state
-
Deallocated (when freed)
- Reference count decremented
- When count reaches zero:
- Removed from all tracking
- Compressed data deleted
- Virtual and physical memory released
Recommended Approach: Use GhostAllocator with STL containers for automatic memory management:
#include "ghostmem/GhostAllocator.h"
#include <vector>
// RAII demo: the ghost-allocated vector releases its pages automatically
// in its destructor - no manual DeallocateGhost call is needed.
// NOTE(review): std::accumulate needs <numeric>, which this snippet's
// include list does not show - confirm it is added in real code.
void processData() {
// Allocation happens automatically
std::vector<int, GhostAllocator<int>> data;
data.reserve(1000000);
for (int i = 0; i < 1000000; i++) {
data.push_back(i);
}
// Use the data
// NOTE(review): summing 0..999'999 (~5e11) overflows a 32-bit int;
// `sum` here is illustrative only.
int sum = std::accumulate(data.begin(), data.end(), 0);
// Deallocation happens automatically when vector goes out of scope
// No manual cleanup needed!
}How It Works:
When the vector destructor runs, it calls GhostAllocator::deallocate(), which:
- Calls
GhostMemoryManager::Instance().DeallocateGhost(ptr, size) - Decrements page reference counts
- Frees pages with zero references
- Cleans up compressed data
- Releases memory back to OS
For direct memory allocation without STL containers:
#include "ghostmem/GhostMemoryManager.h"
// Manual allocation path: AllocateGhost/DeallocateGhost must be paired
// by the caller, and the SAME size must be passed to both calls.
void manualAllocation() {
auto& mgr = GhostMemoryManager::Instance();
// Allocate
size_t size = 8192;
void* ptr = mgr.AllocateGhost(size);
if (ptr) {
// Use memory
int* data = static_cast<int*>(ptr);
data[0] = 42;
data[1] = 100;
// IMPORTANT: Must deallocate manually
mgr.DeallocateGhost(ptr, size);
}
}- Always match size: Pass same
`size` to `DeallocateGhost()` as to `AllocateGhost()`
- Don't double-free: Each allocation should be freed exactly once
- Don't access after free: Accessing freed memory causes crash
- Check for nullptr:
`AllocateGhost()` returns nullptr on failure
Multiple allocations can share the same 4KB page. GhostMem uses reference counting:
// Example: Two small allocations in same page
void* ptr1 = mgr.AllocateGhost(1024); // Page refcount = 1
void* ptr2 = mgr.AllocateGhost(1024); // Page refcount = 2 (if same page)
mgr.DeallocateGhost(ptr1, 1024); // Page refcount = 1
// Page still in memory, not freed yet
mgr.DeallocateGhost(ptr2, 1024); // Page refcount = 0
// Page now fully freed, memory returned to OSVisual Representation:
Page 0x10000000 (4096 bytes):
├─ Allocation A: bytes 0-1023 (refcount = 1)
├─ Allocation B: bytes 1024-2047 (refcount = 2)
└─ Free space: bytes 2048-4095
After deallocate(A):
├─ Allocation B: bytes 1024-2047 (refcount = 1)
└─ Free space: bytes 0-1023, 2048-4095
After deallocate(B):
└─ Page fully freed → released to OS
void* ptr = mgr.AllocateGhost(4096);
int* data = static_cast<int*>(ptr);
data[0] = 42; // Page is now in RAM
mgr.DeallocateGhost(ptr, 4096);
// → Removed from active_ram_pages
// → Physical and virtual memory releasedvoid* ptr1 = mgr.AllocateGhost(4096);
void* ptr2 = mgr.AllocateGhost(4096);
// ... allocate more pages to trigger eviction ...
// ptr1's page gets evicted and compressed
mgr.DeallocateGhost(ptr1, 4096);
// → Compressed data removed from backing_store
// → No physical memory to release (already evicted)
// → Virtual memory reservation releasedvoid* ptr = mgr.AllocateGhost(12288); // 3 pages
int* data = static_cast<int*>(ptr);
data[0] = 1; // First page in RAM
data[1024] = 2; // Second page in RAM
// Third page not accessed yet (still virtual)
mgr.DeallocateGhost(ptr, 12288);
// → All 3 pages cleaned up
// → Both committed and uncommitted pages freed✅ Good Practices:
// 1. RAII with STL containers (automatic cleanup)
{
std::vector<int, GhostAllocator<int>> vec(10000);
// Use vec...
} // Automatically cleaned up
// 2. Smart pointer wrapper (if needed)
// The deleter takes a pointer to the *element* type so the alias works
// for both scalar (GhostUniquePtr<T>) and array (GhostUniquePtr<T[]>)
// forms. With a plain std::function<void(T*)>, T = int[] would produce a
// deleter taking int(*)[] - a type that neither std::unique_ptr<int[]>
// nor the int* lambda below can satisfy, so the original alias failed to
// compile for arrays.
// NOTE(review): needs <memory>, <functional>, and <type_traits>.
template<typename T>
using GhostUniquePtr = std::unique_ptr<T,
std::function<void(std::remove_extent_t<T>*)>>;
// Allocate n ints in ghost memory and wrap them so DeallocateGhost is
// called automatically - with the original size - when the pointer dies.
GhostUniquePtr<int[]> makeGhostArray(size_t n) {
auto& mgr = GhostMemoryManager::Instance();
size_t size = n * sizeof(int);
int* ptr = static_cast<int*>(mgr.AllocateGhost(size));
// NOTE(review): AllocateGhost may return nullptr; a null unique_ptr
// never invokes its deleter, so this is safe, but callers should still
// check the result before dereferencing.
return GhostUniquePtr<int[]>(ptr,
[size](int* p) {
GhostMemoryManager::Instance().DeallocateGhost(p, size);
});
}
// 3. Clear exception safety
// Manual try/catch pattern: deallocate on both the success path and the
// exception path, then rethrow. Prefer the RAII wrapper above when
// possible - it collapses both cleanup paths into one destructor.
void processWithExceptionSafety() {
auto& mgr = GhostMemoryManager::Instance();
void* ptr = mgr.AllocateGhost(4096);
try {
// Use memory...
riskyOperation(ptr);
} catch (...) {
mgr.DeallocateGhost(ptr, 4096);
throw;
}
mgr.DeallocateGhost(ptr, 4096);
}❌ Common Mistakes:
// Each function below demonstrates a bug ON PURPOSE - do NOT copy them.
// MISTAKE 1: Memory leak - forgot to deallocate
void leak() {
void* ptr = mgr.AllocateGhost(4096);
// Use ptr...
// Missing: mgr.DeallocateGhost(ptr, 4096);
}
// MISTAKE 2: Double free
void doubleFree() {
void* ptr = mgr.AllocateGhost(4096);
mgr.DeallocateGhost(ptr, 4096);
mgr.DeallocateGhost(ptr, 4096); // ERROR! Second free
}
// MISTAKE 3: Use after free
void useAfterFree() {
void* ptr = mgr.AllocateGhost(4096);
int* data = static_cast<int*>(ptr);
mgr.DeallocateGhost(ptr, 4096);
data[0] = 42; // ERROR! Accessing freed memory
}
// MISTAKE 4: Size mismatch
void sizeMismatch() {
void* ptr = mgr.AllocateGhost(8192);
// ...
mgr.DeallocateGhost(ptr, 4096); // ERROR! Wrong size
}Deallocation is fully thread-safe:
#include <thread>
#include <vector>
// Demo: DeallocateGhost may be called concurrently from many threads.
void threadSafeDeallocation() {
auto& mgr = GhostMemoryManager::Instance();
// Allocate in main thread
std::vector<void*> ptrs;
for (int i = 0; i < 10; i++) {
ptrs.push_back(mgr.AllocateGhost(4096));
}
// Deallocate in parallel threads (safe)
// Each lambda init-captures its own pointer BY VALUE, so the threads
// share no mutable state other than the manager itself.
std::vector<std::thread> threads;
for (int i = 0; i < 10; i++) {
threads.emplace_back([&mgr, ptr = ptrs[i]]() {
// Thread-safe deallocation
mgr.DeallocateGhost(ptr, 4096);
});
}
for (auto& t : threads) {
t.join();
}
}To check for memory leaks or monitor usage:
// Before operations
size_t allocations_before = /* track your allocations */;
// Perform operations
{
std::vector<int, GhostAllocator<int>> vec(1000);
// Use vec...
}
// After operations
size_t allocations_after = /* track your allocations */;
// Should be equal if no leaks
assert(allocations_before == allocations_after);Future API (planned):
// Not yet implemented - coming in future version
auto stats = mgr.GetMemoryStats();
std::cout << "Active pages: " << stats.active_pages << "\n";
std::cout << "Compressed: " << stats.compressed_bytes << "\n";
std::cout << "Allocations: " << stats.allocation_count << "\n";| Practice | Recommendation | Reason |
|---|---|---|
Use GhostAllocator |
✅ Strongly Recommended | Automatic cleanup, exception-safe |
Manual AllocateGhost |
Requires manual DeallocateGhost |
|
| Match allocation size | ✅ Required | Must pass same size to deallocate |
| Check for nullptr | ✅ Recommended | Handle allocation failures |
| Smart pointer wrappers | ✅ Good for complex code | RAII cleanup |
| Global allocations | ❌ Avoid | Hard to track lifecycle |
| Mixed allocators | ❌ Don't mix | Use one allocator per object |
Error: undefined reference to GhostMemoryManager::Instance()
Solution: Make sure you're linking against the library:
target_link_libraries(myapp ghostmem pthread) # Linux
target_link_libraries(myapp ghostmem) # WindowsError: The code execution cannot proceed because ghostmem_shared.dll was not found.
Solution:
- Copy DLL to executable directory
- Or add DLL directory to PATH
- Or use static linking instead
Error: error while loading shared libraries: libghostmem.so: cannot open shared object file
Solution:
# Option 1: Set LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/path/to/ghostmem/lib:$LD_LIBRARY_PATH
# Option 2: Install system-wide
sudo cp lib/libghostmem.so /usr/local/lib/
sudo ldconfig
# Option 3: Use rpath in CMake
set_target_properties(myapp PROPERTIES
BUILD_RPATH "/path/to/ghostmem/lib"
INSTALL_RPATH "/usr/local/lib")Possible causes:
- Accessing deallocated memory - Don't use pointers after deallocation
- Stack overflow - Reduce `MAX_PHYSICAL_PAGES` or increase stack size
- Signal handler conflict (Linux) - Check for conflicting SIGSEGV handlers
Debug:
# Linux - run with debugger
gdb ./myapp
(gdb) run
(gdb) bt # Show backtrace when it crashes
# Check system limits
ulimit -aDiagnosis:
- Too many page faults (MAX_PHYSICAL_PAGES too low)
- Poor access patterns (random access)
- Data not compressible (encrypted, already compressed)
Solution:
- Increase `MAX_PHYSICAL_PAGES`
- Improve data locality
- Profile with your workload
- Consider if GhostMem is right fit for your use case
- Start with the Getting Started Guide for quick examples
- Read the API Reference for detailed function documentation
- Check Thread Safety for multi-threading guidelines
- See examples/ for more code samples
- Review README for architecture details