nlib
heap/cachedheap/cachedheap.cpp

Allocates and frees memory by explicitly using nn::nlib::CachedHeap and nn::nlib::CentralHeap.

These classes are used implicitly when you call nmalloc and nfree, but they can also be used explicitly; this sample demonstrates that explicit usage.

By using them explicitly instead of via nmalloc, you can use multiple CentralHeap objects (one for each module) instead of having a shared CentralHeap for all processes. This approach also makes it possible to use multiple CachedHeap objects within a single thread.

/*--------------------------------------------------------------------------------*
Project: CrossRoad
Copyright (C)Nintendo All rights reserved.
These coded instructions, statements, and computer programs contain proprietary
information of Nintendo and/or its licensed developers and are protected by
national and international copyright laws. They may not be disclosed to third
parties or copied or duplicated in any form, in whole or in part, without the
prior written consent of Nintendo.
The content herein is highly confidential and should be handled accordingly.
*--------------------------------------------------------------------------------*/
using nlib_ns::heap::CentralHeap;
using nlib_ns::heap::CachedHeap;
// Backing storage for the central heap: 512 KiB, aligned to 4096 bytes.
const size_t heapmem_size = 1024 * 512;
NLIB_ALIGNAS(4096) static char heapmem[heapmem_size];
// All the threads allocate memory via a per-thread CachedHeap that is backed
// by this single shared CentralHeap.
CentralHeap g_centralheap;
// RAII guard that ties g_centralheap's lifetime to a scope: construction
// hands the static buffer to the central heap, destruction tears it down.
class HeapSetup {
public:
    HeapSetup() { g_centralheap.Init(&heapmem[0], heapmem_size, 0); }
    ~HeapSetup() { g_centralheap.Finalize(); }
};
// Cleared by any worker thread on failure; read by SampleMain.
// NOTE(review): volatile is not a synchronization primitive — this is safe
// only if all writers are joined before the value is read; an atomic flag
// would express the intent better.
volatile bool g_success = true;
// Worker thread body: repeatedly allocates and frees small blocks through a
// thread-local CachedHeap. On any failure it clears g_success and returns.
static void ThreadFunc(void* ptr) {
    NLIB_UNUSED(ptr);
    // Each thread needs its own CachedHeap, associated with the shared
    // CentralHeap. Cached free blocks let most Alloc/Free calls complete
    // without acquiring the CentralHeap mutex.
    CachedHeap heap;
    if (nlib_is_error(g_centralheap.MakeCache(&heap))) {
        g_success = false;
        return;
    }
    // Named constants instead of the duplicated magic literal 1000.
    const int kNumPointers = 1000;  // blocks held live at once
    const int kNumRounds = 1000;    // alloc/free cycles per thread
    void* p[kNumPointers];
    for (int round = 0; round < kNumRounds; ++round) {
        int allocated = 0;
        for (; allocated < kNumPointers; ++allocated) {
            // Fast path: served from the thread-local cache, no mutex needed.
            p[allocated] = heap.Alloc(8);
            if (!p[allocated]) {
                // Out of memory: record the failure and stop allocating
                // (the original kept going and then freed null pointers).
                g_success = false;
                break;
            }
        }
        for (int i = 0; i < allocated; ++i) {
            // Small freed blocks are cached in CachedHeap; the cache size is
            // bounded and excess memory is returned to CentralHeap.
            heap.Free(p[i]);
        }
        if (allocated != kNumPointers) return;
    }
    // CachedHeap's destructor returns any remaining cached memory to
    // CentralHeap.
}
// Number of worker threads and their handles (filled by SampleMain).
const int kNumThread = 10;
nlib_thread th[kNumThread];
static bool SampleMain(int, char**) {
HeapSetup obj;
{
uint64_t from, to;
from = GetTickTime();
for (int i = 0; i < kNumThread; ++i) {
e = nlib_thread_create(&th[i], NULL, ThreadFunc, NULL);
if (e != 0) {
for (int j = 0; j < i; ++j) {
}
return false;
}
}
for (int i = 0; i < kNumThread; ++i) {
}
to = GetTickTime();
nlib_printf("Small: using CachedHeap: %" PRIu64 " msec\n", to - from);
}
return g_success;
}
NLIB_MAINFUNC