This repository was archived by the owner on Oct 12, 2022. It is now read-only.

Commit b12a8db (1 parent: 31fd6c0)

GC: move recovering small pages from recover() to sweep(), optimize sweep by precalculating some common scenarios

1 file changed, 103 additions (+) and 55 deletions (-):

  • src/gc/impl/conservative/gc.d
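
Before the diff itself, a note on the core of the optimization named in the commit message: sweep() now combines each small-object page's freebits and mark bitmaps word by word, so it can skip a page with nothing to free and recover a page with no survivors without walking its individual objects. Below is a minimal, self-contained D sketch of that precalculation; the PageBits alias as a plain size_t[4], the helper name precalcSweep, and the 4 x 64-bit layout (one bit per 16 bytes of an assumed 4096-byte page) are illustrative assumptions, not the pool's actual data structures.

import std.stdio : writeln;

// Illustrative stand-in for one page's bitmap slice: 4 words x 64 bits = 256 bits,
// i.e. one bit per 16 bytes of an assumed 4096-byte page.
alias PageBits = size_t[4];

// Fills `toFree` with the dead-object bits of one page, sets the `recoverPage`
// fast-path flag, and returns the `unchanged` fast-path flag.
bool precalcSweep(const ref PageBits freebits, const ref PageBits mark,
                  out PageBits toFree, out bool recoverPage)
{
    // dead objects are allocated (freebits bit clear) and unmarked (mark bit clear)
    static foreach (w; 0 .. PageBits.length)
        toFree[w] = ~freebits[w] & ~mark[w];

    // fast path 1: nothing on this page died, so the sweep can skip it entirely
    bool unchanged = true;
    static foreach (w; 0 .. PageBits.length)
        unchanged = unchanged && toFree[w] == 0;

    // fast path 2: every allocated object died, so the whole page can be recovered
    recoverPage = true;
    static foreach (w; 0 .. PageBits.length)
        recoverPage = recoverPage && ~freebits[w] == toFree[w];

    return unchanged;
}

void main()
{
    PageBits freebits = [0, 0, size_t.max, size_t.max]; // lower half allocated, upper half free
    PageBits mark;                                      // nothing was marked live
    PageBits toFree;
    bool recoverPage;

    const unchanged = precalcSweep(freebits, mark, toFree, recoverPage);
    writeln("unchanged = ", unchanged, ", recoverPage = ", recoverPage);
    // prints: unchanged = false, recoverPage = true
}

In the real hunks below, the same three word-wise loops appear as static foreach over PageBits.length; unchanged feeds an early continue, and recoverPage selects pool.freeAllPageBits(pn) over the per-object pool.freePageBits(pn, toFree) path.
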
@@ -2171,12 +2171,13 @@ struct Gcx
         //log--;
     }
 
-    // collection step 3: free all unreferenced objects
+    // collection step 3: finalize unreferenced objects, recover full pages with no live objects
     size_t sweep() nothrow
     {
         // Free up everything not marked
         debug(COLLECT_PRINTF) printf("\tfree'ing\n");
         size_t freedLargePages;
+        size_t freedSmallPages;
         size_t freed;
         for (size_t n = 0; n < npools; n++)
         {
@@ -2244,50 +2245,96 @@ struct Gcx
             }
             else
             {
-
                 for (pn = 0; pn < pool.npages; pn++)
                 {
                     Bins bin = cast(Bins)pool.pagetable[pn];
 
                     if (bin < B_PAGE)
                     {
-                        immutable size = binsize[bin];
-                        void *p = pool.baseAddr + pn * PAGESIZE;
-                        immutable base = pn * (PAGESIZE/16);
-                        immutable bitstride = size / 16;
+                        auto freebitsdata = pool.freebits.data + pn * PageBits.length;
+                        auto markdata = pool.mark.data + pn * PageBits.length;
 
-                        bool freeBits;
+                        // the entries to free are allocated objects (freebits == false)
+                        // that are not marked (mark == false)
                         PageBits toFree;
+                        static foreach (w; 0 .. PageBits.length)
+                            toFree[w] = (~freebitsdata[w] & ~markdata[w]);
+
+                        // the page is unchanged if there is nothing to free
+                        bool unchanged = true;
+                        static foreach (w; 0 .. PageBits.length)
+                            unchanged = unchanged && (toFree[w] == 0);
+                        if (unchanged)
+                            continue;
 
-                        // ensure that there are at least <size> bytes for every address
-                        // below ptop even if unaligned
-                        void *ptop = p + PAGESIZE - size + 1;
-                        for (size_t i; p < ptop; p += size, i += bitstride)
+                        // the page can be recovered if all of the allocated objects (freebits == false)
+                        // are freed
+                        bool recoverPage = true;
+                        static foreach (w; 0 .. PageBits.length)
+                            recoverPage = recoverPage && (~freebitsdata[w] == toFree[w]);
+
+                        bool hasFinalizer = false;
+                        debug(COLLECT_PRINTF) // need output for each object
+                            hasFinalizer = true;
+                        else debug(LOGGING)
+                            hasFinalizer = true;
+                        else debug(MEMSTOMP)
+                            hasFinalizer = true;
+                        if (pool.finals.data)
                         {
-                            immutable biti = base + i;
+                            // finalizers must be called on objects that are about to be freed
+                            auto finalsdata = pool.finals.data + pn * PageBits.length;
+                            static foreach (w; 0 .. PageBits.length)
+                                hasFinalizer = hasFinalizer || (toFree[w] & finalsdata[w]) != 0;
+                        }
 
-                            if (!pool.mark.test(biti))
+                        if (hasFinalizer)
+                        {
+                            immutable size = binsize[bin];
+                            void *p = pool.baseAddr + pn * PAGESIZE;
+                            immutable base = pn * (PAGESIZE/16);
+                            immutable bitstride = size / 16;
+
+                            // ensure that there are at least <size> bytes for every address
+                            // below ptop even if unaligned
+                            void *ptop = p + PAGESIZE - size + 1;
+                            for (size_t i; p < ptop; p += size, i += bitstride)
                             {
-                                void* q = sentinel_add(p);
-                                sentinel_Invariant(q);
+                                immutable biti = base + i;
 
-                                if (pool.finals.nbits && pool.finals.test(biti))
-                                    rt_finalizeFromGC(q, sentinel_size(q, size), pool.getBits(biti));
+                                if (!pool.mark.test(biti))
+                                {
+                                    void* q = sentinel_add(p);
+                                    sentinel_Invariant(q);
 
-                                freeBits = true;
-                                toFree.set(i);
+                                    if (pool.finals.nbits && pool.finals.test(biti))
+                                        rt_finalizeFromGC(q, sentinel_size(q, size), pool.getBits(biti));
 
-                                debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
-                                leakDetector.log_free(sentinel_add(p));
+                                    assert(core.bitop.bt(toFree.ptr, i));
 
-                                debug (MEMSTOMP) memset(p, 0xF3, size);
+                                    debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
+                                    leakDetector.log_free(sentinel_add(p));
 
-                                freed += size;
+                                    debug (MEMSTOMP) memset(p, 0xF3, size);
+                                }
                             }
                         }
 
-                        if (freeBits)
+                        if (recoverPage)
+                        {
+                            pool.freeAllPageBits(pn);
+
+                            pool.pagetable[pn] = B_FREE;
+                            // add to free chain
+                            pool.binPageChain[pn] = cast(uint) pool.searchStart;
+                            pool.searchStart = pn;
+                            pool.freepages++;
+                            freedSmallPages++;
+                        }
+                        else
+                        {
                             pool.freePageBits(pn, toFree);
+                        }
                     }
                 }
             }
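
A short aside on the index arithmetic used in the hunk above: the pool bitmaps carry one bit per 16 bytes of page payload, so a page's bits start at pn * (PAGESIZE/16) (the base local) and an object of a given bin size advances the bit index by size / 16 (the bitstride local). A compile-time sanity check of those numbers, assuming the conventional 4096-byte PAGESIZE and 64-bit bitmap words (assumptions of this snippet, not spelled out in the diff):

// hypothetical constants mirroring the layout described above
enum PAGESIZE     = 4096;             // assumed page size
enum bitsPerPage  = PAGESIZE / 16;    // one bit per 16 bytes => 256 bits per page
enum wordsPerPage = bitsPerPage / 64; // 4 words of 64 bits, i.e. PageBits.length
enum bitstride64  = 64 / 16;          // a 64-byte bin touches every 4th bit

static assert(bitsPerPage == 256);
static assert(wordsPerPage == 4);
static assert(bitstride64 == 4);
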
@@ -2296,11 +2343,16 @@ struct Gcx
         assert(freedLargePages <= usedLargePages);
         usedLargePages -= freedLargePages;
         debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedLargePages, npools);
-        return freedLargePages;
+
+        assert(freedSmallPages <= usedSmallPages);
+        usedSmallPages -= freedSmallPages;
+        debug(COLLECT_PRINTF) printf("\trecovered small pages = %d\n", freedSmallPages);
+
+        return freedLargePages + freedSmallPages;
     }
 
-    // collection step 4: recover pages with no live objects, rebuild free lists
-    size_t recover() nothrow
+    // collection step 4: rebuild free lists
+    void recover() nothrow
     {
         // init tail list
         List**[B_NUMSMALL] tail = void;
@@ -2327,27 +2379,9 @@ struct Gcx
                 if (bin < B_PAGE)
                 {
                     size_t size = binsize[bin];
-                    size_t bitstride = size / 16;
                     size_t bitbase = pn * (PAGESIZE / 16);
-                    size_t bittop = bitbase + (PAGESIZE / 16) - bitstride + 1;
-                    void* p;
-
-                    biti = bitbase;
-                    for (biti = bitbase; biti < bittop; biti += bitstride)
-                    {
-                        if (!pool.freebits.test(biti))
-                            goto Lnotfree;
-                    }
-                    pool.pagetable[pn] = B_FREE;
-                    // add to free chain
-                    pool.binPageChain[pn] = cast(uint) pool.searchStart;
-                    pool.searchStart = pn;
-                    pool.freepages++;
-                    freedSmallPages++;
-                    continue;
 
-                Lnotfree:
-                    p = pool.baseAddr + pn * PAGESIZE;
+                    void* p = pool.baseAddr + pn * PAGESIZE;
                     const top = PAGESIZE - size + 1; // ensure <size> bytes available even if unaligned
                     for (u = 0; u < top; u += size)
                     {
@@ -2365,11 +2399,6 @@ struct Gcx
         // terminate tail list
         foreach (ref next; tail)
             *next = null;
-
-        assert(freedSmallPages <= usedSmallPages);
-        usedSmallPages -= freedSmallPages;
-        debug(COLLECT_PRINTF) printf("\trecovered pages = %d\n", freedSmallPages);
-        return freedSmallPages;
     }
 
     /**
@@ -2428,18 +2457,18 @@ struct Gcx
         start = stop;
 
         ConservativeGC._inFinalizer = true;
-        size_t freedLargePages=void;
+        size_t freedPages = void;
        {
            scope (failure) ConservativeGC._inFinalizer = false;
-            freedLargePages = sweep();
+            freedPages = sweep();
            ConservativeGC._inFinalizer = false;
        }
 
        stop = currTime;
        sweepTime += (stop - start);
        start = stop;
 
-        immutable freedSmallPages = recover();
+        recover();
 
        stop = currTime;
        recoverTime += (stop - start);
@@ -2451,7 +2480,7 @@ struct Gcx
 
        updateCollectThresholds();
 
-        return freedLargePages + freedSmallPages;
+        return freedPages;
    }
 
    /**
@@ -2808,6 +2837,25 @@ struct Pool
         }
     }
 
+    void freeAllPageBits(size_t pagenum) nothrow
+    {
+        assert(!isLargeObject);
+        assert(!nointerior.nbits); // only for large objects
+
+        immutable beg = pagenum * PageBits.length;
+        static foreach (i; 0 .. PageBits.length)
+        {{
+            immutable w = beg + i;
+            freebits.data[w] = ~0;
+            noscan.data[w] = 0;
+            appendable.data[w] = 0;
+            if (finals.data)
+                finals.data[w] = 0;
+            if (structFinals.data)
+                structFinals.data[w] = 0;
+        }}
+    }
+
     /**
      * Given a pointer p in the p, return the pagenum.
      */
