30 changes: 26 additions & 4 deletions LiteDB.Tests/Internals/Cache_Tests.cs
@@ -103,8 +103,8 @@ public void Cache_Extends()
pages.Add(m.NewPage());
}

// extends must be increase
m.ExtendSegments.Should().Be(3);
// there were readable pages available, so cache should reuse instead of extending
m.ExtendSegments.Should().Be(2);

// but if I release more than 10 pages, now I will re-use old pages
foreach (var p in pages.Where(x => x.ShareCounter == -1).Take(10))
@@ -117,7 +117,7 @@ public void Cache_Extends()
}

m.WritablePages.Should().Be(7);
m.FreePages.Should().Be(8);
m.FreePages.Should().Be(3);

// now, if I request for 10 pages, all pages will be reused (no segment extend)
for (var i = 0; i < 10; i++)
@@ -126,7 +126,7 @@ public void Cache_Extends()
}

// keep same extends
m.ExtendSegments.Should().Be(3);
m.ExtendSegments.Should().Be(2);

// discard all pages
PageBuffer pw;
@@ -137,6 +137,28 @@ public void Cache_Extends()
}
}

[Fact]
public void Cache_Reuses_Readable_Pages_Before_Extending()
{
var cache = new MemoryCache(new int[] { 2, 4 });

var first = cache.GetReadablePage(0, FileOrigin.Data, (pos, slice) => { });
var second = cache.GetReadablePage(Constants.PAGE_SIZE, FileOrigin.Data, (pos, slice) => { });

cache.ExtendSegments.Should().Be(1);
cache.FreePages.Should().Be(0);

first.Release();
second.Release();

var reused = cache.GetReadablePage(Constants.PAGE_SIZE * 2, FileOrigin.Data, (pos, slice) => { });

cache.ExtendSegments.Should().Be(1);
cache.FreePages.Should().Be(1);

reused.Release();
}

[Fact]
public void Cache_UniqueIDNumbering()
{
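The new assertions follow from the reuse-before-extend behaviour this PR introduces: once read pages are released (their ShareCounter back at zero), the cache recycles them instead of allocating another segment, so Cache_Extends now ends with 2 segments instead of 3 and a smaller free list. The new Cache_Reuses_Readable_Pages_Before_Extending test pins that behaviour down; the sketch below walks through the same scenario with the intermediate cache state spelled out in comments. It is only a sketch: MemoryCache is an engine-internal type, so it compiles only where LiteDB's internals are visible (such as the test assembly), and the using directives and wrapper class are assumptions, not part of the PR.

using LiteDB;
using LiteDB.Engine;

internal static class CacheReuseSketch
{
    public static void Run()
    {
        // segment sizes: the first extend allocates 2 pages, any later extend 4
        var cache = new MemoryCache(new int[] { 2, 4 });

        // two reads consume the whole first segment
        var first = cache.GetReadablePage(0, FileOrigin.Data, (pos, slice) => { });
        var second = cache.GetReadablePage(Constants.PAGE_SIZE, FileOrigin.Data, (pos, slice) => { });
        // state: ExtendSegments == 1, FreePages == 0

        // releasing them leaves both pages in the readable list with ShareCounter == 0
        first.Release();
        second.Release();

        // the next read recycles one of those released pages instead of
        // extending to the second (4-page) segment
        var reused = cache.GetReadablePage(Constants.PAGE_SIZE * 2, FileOrigin.Data, (pos, slice) => { });
        // state: ExtendSegments still 1, FreePages == 1

        reused.Release();
    }
}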
98 changes: 54 additions & 44 deletions LiteDB/Engine/Disk/MemoryCache.cs
@@ -303,64 +303,74 @@ private void Extend()
var segmentSize = _segmentSizes[Math.Min(_segmentSizes.Length - 1, _extends)];

// if this count is larger than MEMORY_SEGMENT_SIZE, re-use all this pages
if (emptyShareCounter > segmentSize)
if (emptyShareCounter > 0)
{
// get all readable pages that can return to _free (slow way)
// sort by timestamp used (set as free oldest first)
var readables = _readable
.Where(x => x.Value.ShareCounter == 0)
.OrderBy(x => x.Value.Timestamp)
.Select(x => x.Key)
.Take(segmentSize)
.ToArray();

// move pages from readable list to free list
foreach (var key in readables)
var take = Math.Min(segmentSize, emptyShareCounter);

if (take > 0)
{
var removed = _readable.TryRemove(key, out var page);
// get readable pages that can return to _free (slow way)
// sort by timestamp used (set as free oldest first)
var readables = _readable
.Where(x => x.Value.ShareCounter == 0)
.OrderBy(x => x.Value.Timestamp)
.Select(x => x.Key)
.Take(take)
.ToArray();

var reused = 0;

// move pages from readable list to free list
foreach (var key in readables)
{
var removed = _readable.TryRemove(key, out var page);

ENSURE(removed, "page should be in readable list before moving to free list");
ENSURE(removed, "page should be in readable list before moving to free list");

// if removed page was changed between make array and now, must add back to readable list
if (page.ShareCounter > 0)
{
// but wait: between last "remove" and now, another thread can added this page
if (!_readable.TryAdd(key, page))
// if removed page was changed between make array and now, must add back to readable list
if (page.ShareCounter > 0)
{
// this is a terrible situation, to avoid memory corruption I will throw expcetion for now
throw new LiteException(0, "MemoryCache: removed in-use memory page. This situation has no way to fix (yet). Throwing exception to avoid database corruption. No other thread can read/write from database now.");
// but wait: between last "remove" and now, another thread can added this page
if (!_readable.TryAdd(key, page))
{
// this is a terrible situation, to avoid memory corruption I will throw expcetion for now
throw new LiteException(0, "MemoryCache: removed in-use memory page. This situation has no way to fix (yet). Throwing exception to avoid database corruption. No other thread can read/write from database now.");
}
}
}
else
{
ENSURE(page.ShareCounter == 0, "page should not be in use by anyone");
else
{
ENSURE(page.ShareCounter == 0, "page should not be in use by anyone");

// clean controls
page.Position = long.MaxValue;
page.Origin = FileOrigin.None;
// clean controls
page.Position = long.MaxValue;
page.Origin = FileOrigin.None;

_free.Enqueue(page);
_free.Enqueue(page);
reused++;
}
}
}

LOG($"re-using cache pages (flushing {_free.Count} pages)", "CACHE");
}
else
{
// create big linear array in heap memory (LOH => 85Kb)
var buffer = new byte[PAGE_SIZE * segmentSize];
var uniqueID = this.ExtendPages + 1;

// split linear array into many array slices
for (var i = 0; i < segmentSize; i++)
{
_free.Enqueue(new PageBuffer(buffer, i * PAGE_SIZE, uniqueID++));
if (reused > 0)
{
LOG($"re-using cache pages (flushing {_free.Count} pages)", "CACHE");
return;
}
}
}

_extends++;
// create big linear array in heap memory (LOH => 85Kb)
var buffer = new byte[PAGE_SIZE * segmentSize];
var uniqueID = this.ExtendPages + 1;

LOG($"extending memory usage: (segments: {_extends})", "CACHE");
// split linear array into many array slices
for (var i = 0; i < segmentSize; i++)
{
_free.Enqueue(new PageBuffer(buffer, i * PAGE_SIZE, uniqueID++));
}

_extends++;

LOG($"extending memory usage: (segments: {_extends})", "CACHE");
}

/// <summary>
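Taken together, the rewritten Extend() now tries to recycle released readable pages on every call and only falls through to allocating a new segment when nothing could be reused, rather than requiring a full segment's worth of reusable pages up front. Below is a condensed sketch of the new control flow; the ENSURE assertions are omitted and the concurrent re-add path is simplified (the real code throws if a removed page cannot be returned to the readable list), so treat it as an outline of the logic in the diff, not the exact implementation.

// Simplified outline; field names mirror MemoryCache (_readable, _free, _extends, _segmentSizes).
private void Extend()
{
    // released readable pages (the real method computes this count earlier on)
    var emptyShareCounter = _readable.Count(x => x.Value.ShareCounter == 0);

    // size of the segment that would be allocated next
    var segmentSize = _segmentSizes[Math.Min(_segmentSizes.Length - 1, _extends)];

    if (emptyShareCounter > 0)
    {
        // recycle up to one segment's worth of the oldest released pages
        var keys = _readable
            .Where(x => x.Value.ShareCounter == 0)
            .OrderBy(x => x.Value.Timestamp)
            .Select(x => x.Key)
            .Take(Math.Min(segmentSize, emptyShareCounter))
            .ToArray();

        var reused = 0;

        foreach (var key in keys)
        {
            if (!_readable.TryRemove(key, out var page)) continue;

            if (page.ShareCounter > 0)
            {
                // grabbed by a reader in the meantime: put it back
                _readable.TryAdd(key, page);
                continue;
            }

            // clear position/origin and hand the page to the free list
            page.Position = long.MaxValue;
            page.Origin = FileOrigin.None;
            _free.Enqueue(page);
            reused++;
        }

        // something was recycled: no need to grow the cache
        if (reused > 0) return;
    }

    // nothing reusable: allocate one large buffer (LOH) and slice it into free pages
    var buffer = new byte[PAGE_SIZE * segmentSize];
    var uniqueID = this.ExtendPages + 1;

    for (var i = 0; i < segmentSize; i++)
    {
        _free.Enqueue(new PageBuffer(buffer, i * PAGE_SIZE, uniqueID++));
    }

    _extends++;
}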