Skip to content

Commit

Permalink
Support prefetch last 512KB with direct I/O in block based file reader
Browse files Browse the repository at this point in the history
Summary:
Right now, if direct I/O is enabled, prefetching the last 512KB cannot be applied, except compaction inputs or readahead is enabled for iterators. This can create a lot of I/O for HDD cases. To solve the problem, the 512KB is prefetched in block based table if direct I/O is enabled. The prefetched buffer is passed in together with random access file reader, so that we try to read from the buffer before reading from the file. This can be extended in the future to support flexible user iterator readahead too.
Closes facebook#2708

Differential Revision: D5593091

Pulled By: siying

fbshipit-source-id: ee36ff6d8af11c312a2622272b21957a7b5c81e7
  • Loading branch information
siying authored and facebook-github-bot committed Aug 11, 2017
1 parent ad77ee0 commit 666a005
Show file tree
Hide file tree
Showing 14 changed files with 333 additions and 201 deletions.
11 changes: 7 additions & 4 deletions db/db_test2.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2282,12 +2282,15 @@ TEST_F(DBTest2, RateLimitedCompactionReads) {
// chose 1MB as the upper bound on the total bytes read.
size_t rate_limited_bytes =
options.rate_limiter->GetTotalBytesThrough(Env::IO_LOW);
ASSERT_GE(
rate_limited_bytes,
static_cast<size_t>(kNumKeysPerFile * kBytesPerKey * kNumL0Files));
// Include the explict prefetch of the footer in direct I/O case.
size_t direct_io_extra = use_direct_io ? 512 * 1024 : 0;
ASSERT_GE(rate_limited_bytes,
static_cast<size_t>(kNumKeysPerFile * kBytesPerKey * kNumL0Files +
direct_io_extra));
ASSERT_LT(
rate_limited_bytes,
static_cast<size_t>(2 * kNumKeysPerFile * kBytesPerKey * kNumL0Files));
static_cast<size_t>(2 * kNumKeysPerFile * kBytesPerKey * kNumL0Files +
direct_io_extra));

Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
Expand Down
3 changes: 2 additions & 1 deletion table/adaptive_table_factory.cc
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,8 @@ Status AdaptiveTableFactory::NewTableReader(
unique_ptr<TableReader>* table,
bool prefetch_index_and_filter_in_cache) const {
Footer footer;
auto s = ReadFooterFromFile(file.get(), file_size, &footer);
auto s = ReadFooterFromFile(file.get(), nullptr /* prefetch_buffer */,
file_size, &footer);
if (!s.ok()) {
return s;
}
Expand Down
186 changes: 113 additions & 73 deletions table/block_based_table_reader.cc

Large diffs are not rendered by default.

8 changes: 5 additions & 3 deletions table/block_based_table_reader.h
Original file line number Diff line number Diff line change
Expand Up @@ -300,7 +300,7 @@ class BlockBasedTable : public TableReader {
// need to access extra meta blocks for index construction. This parameter
// helps avoid re-reading meta index block if caller already created one.
Status CreateIndexReader(
IndexReader** index_reader,
FilePrefetchBuffer* prefetch_buffer, IndexReader** index_reader,
InternalIterator* preloaded_meta_index_iter = nullptr,
const int level = -1);

Expand All @@ -309,13 +309,15 @@ class BlockBasedTable : public TableReader {
const bool no_io) const;

// Read the meta block from sst.
static Status ReadMetaBlock(Rep* rep, std::unique_ptr<Block>* meta_block,
static Status ReadMetaBlock(Rep* rep, FilePrefetchBuffer* prefetch_buffer,
std::unique_ptr<Block>* meta_block,
std::unique_ptr<InternalIterator>* iter);

Status VerifyChecksumInBlocks(InternalIterator* index_iter);

// Create the filter from the filter block.
FilterBlockReader* ReadFilter(const BlockHandle& filter_handle,
FilterBlockReader* ReadFilter(FilePrefetchBuffer* prefetch_buffer,
const BlockHandle& filter_handle,
const bool is_a_filter_partition) const;

static void SetupCacheKeyPrefix(Rep* rep, uint64_t file_size);
Expand Down
189 changes: 108 additions & 81 deletions table/format.cc
Original file line number Diff line number Diff line change
Expand Up @@ -216,8 +216,10 @@ std::string Footer::ToString() const {
return result;
}

Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size,
Footer* footer, uint64_t enforce_table_magic_number) {
Status ReadFooterFromFile(RandomAccessFileReader* file,
FilePrefetchBuffer* prefetch_buffer,
uint64_t file_size, Footer* footer,
uint64_t enforce_table_magic_number) {
if (file_size < Footer::kMinEncodedLength) {
return Status::Corruption(
"file is too short (" + ToString(file_size) + " bytes) to be an "
Expand All @@ -230,9 +232,14 @@ Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size,
(file_size > Footer::kMaxEncodedLength)
? static_cast<size_t>(file_size - Footer::kMaxEncodedLength)
: 0;
Status s = file->Read(read_offset, Footer::kMaxEncodedLength, &footer_input,
footer_space);
if (!s.ok()) return s;
Status s;
if (prefetch_buffer == nullptr ||
!prefetch_buffer->TryReadFromCache(read_offset, Footer::kMaxEncodedLength,
&footer_input)) {
s = file->Read(read_offset, Footer::kMaxEncodedLength, &footer_input,
footer_space);
if (!s.ok()) return s;
}

// Check that we actually read the whole footer from the file. It may be
// that size isn't correct.
Expand All @@ -259,6 +266,43 @@ Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size,

// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
namespace {
// Verify the checksum stored in a block's trailer against a checksum
// recomputed over the block payload plus the 1-byte compression-type tag.
// `contents` must contain at least block_size payload bytes followed by the
// trailer (type byte + 32-bit checksum). `file` and `handle` are used only
// to build descriptive error messages. Returns OK when verification is
// disabled or passes; Status::Corruption on an unknown checksum type or a
// value mismatch.
Status CheckBlockChecksum(const ReadOptions& options, const Footer& footer,
                          const Slice& contents, size_t block_size,
                          RandomAccessFileReader* file,
                          const BlockHandle& handle) {
  Status s;
  if (!options.verify_checksums) {
    // Caller opted out of verification; nothing to check.
    return s;
  }
  const char* data = contents.data();  // Pointer to where Read put the data
  PERF_TIMER_GUARD(block_checksum_time);
  // The stored checksum sits immediately after the payload and type byte;
  // the checksum itself covers payload + type byte (block_size + 1 bytes).
  uint32_t stored = DecodeFixed32(data + block_size + 1);
  uint32_t computed = 0;
  switch (footer.checksum()) {
    case kCRC32c:
      // CRC32c values are stored masked; unmask before comparing.
      stored = crc32c::Unmask(stored);
      computed = crc32c::Value(data, block_size + 1);
      break;
    case kxxHash:
      computed = XXH32(data, static_cast<int>(block_size) + 1, 0);
      break;
    default:
      s = Status::Corruption(
          "unknown checksum type " + ToString(footer.checksum()) + " in " +
          file->file_name() + " offset " + ToString(handle.offset()) +
          " size " + ToString(block_size));
  }
  if (s.ok() && computed != stored) {
    s = Status::Corruption(
        "block checksum mismatch: expected " + ToString(computed) + ", got " +
        ToString(stored) + " in " + file->file_name() + " offset " +
        ToString(handle.offset()) + " size " + ToString(block_size));
  }
  return s;
}

// Read a block and check its CRC
// contents is the result of reading.
Expand All @@ -281,53 +325,21 @@ Status ReadBlock(RandomAccessFileReader* file, const Footer& footer,
return s;
}
if (contents->size() != n + kBlockTrailerSize) {
return Status::Corruption(
"truncated block read from " + file->file_name() + " offset "
+ ToString(handle.offset()) + ", expected "
+ ToString(n + kBlockTrailerSize) + " bytes, got "
+ ToString(contents->size()));
}

// Check the crc of the type and the block contents
const char* data = contents->data(); // Pointer to where Read put the data
if (options.verify_checksums) {
PERF_TIMER_GUARD(block_checksum_time);
uint32_t value = DecodeFixed32(data + n + 1);
uint32_t actual = 0;
switch (footer.checksum()) {
case kCRC32c:
value = crc32c::Unmask(value);
actual = crc32c::Value(data, n + 1);
break;
case kxxHash:
actual = XXH32(data, static_cast<int>(n) + 1, 0);
break;
default:
s = Status::Corruption(
"unknown checksum type " + ToString(footer.checksum())
+ " in " + file->file_name() + " offset "
+ ToString(handle.offset()) + " size " + ToString(n));
}
if (s.ok() && actual != value) {
s = Status::Corruption(
"block checksum mismatch: expected " + ToString(actual)
+ ", got " + ToString(value) + " in " + file->file_name()
+ " offset " + ToString(handle.offset())
+ " size " + ToString(n));
}
if (!s.ok()) {
return s;
}
return Status::Corruption("truncated block read from " + file->file_name() +
" offset " + ToString(handle.offset()) +
", expected " + ToString(n + kBlockTrailerSize) +
" bytes, got " + ToString(contents->size()));
}
return s;
return CheckBlockChecksum(options, footer, *contents, n, file, handle);
}

} // namespace

Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer,
const ReadOptions& read_options,
Status ReadBlockContents(RandomAccessFileReader* file,
FilePrefetchBuffer* prefetch_buffer,
const Footer& footer, const ReadOptions& read_options,
const BlockHandle& handle, BlockContents* contents,
const ImmutableCFOptions &ioptions,
const ImmutableCFOptions& ioptions,
bool decompression_requested,
const Slice& compression_dict,
const PersistentCacheOptions& cache_options) {
Expand Down Expand Up @@ -357,49 +369,64 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer,
}
}

if (cache_options.persistent_cache &&
cache_options.persistent_cache->IsCompressed()) {
bool got_from_prefetch_buffer = false;
if (prefetch_buffer != nullptr &&
prefetch_buffer->TryReadFromCache(
handle.offset(),
static_cast<size_t>(handle.size()) + kBlockTrailerSize, &slice)) {
status =
CheckBlockChecksum(read_options, footer, slice,
static_cast<size_t>(handle.size()), file, handle);
if (!status.ok()) {
return status;
}
got_from_prefetch_buffer = true;
used_buf = const_cast<char*>(slice.data());
} else if (cache_options.persistent_cache &&
cache_options.persistent_cache->IsCompressed()) {
// lookup uncompressed cache mode p-cache
status = PersistentCacheHelper::LookupRawPage(
cache_options, handle, &heap_buf, n + kBlockTrailerSize);
} else {
status = Status::NotFound();
}

if (status.ok()) {
// cache hit
used_buf = heap_buf.get();
slice = Slice(heap_buf.get(), n);
} else {
if (ioptions.info_log && !status.IsNotFound()) {
assert(!status.ok());
ROCKS_LOG_INFO(ioptions.info_log,
"Error reading from persistent cache. %s",
status.ToString().c_str());
}
// cache miss read from device
if (decompression_requested &&
n + kBlockTrailerSize < DefaultStackBufferSize) {
// If we've got a small enough hunk of data, read it in to the
// trivially allocated stack buffer instead of needing a full malloc()
used_buf = &stack_buf[0];
} else {
heap_buf = std::unique_ptr<char[]>(new char[n + kBlockTrailerSize]);
if (!got_from_prefetch_buffer) {
if (status.ok()) {
// cache hit
used_buf = heap_buf.get();
}
slice = Slice(heap_buf.get(), n);
} else {
if (ioptions.info_log && !status.IsNotFound()) {
assert(!status.ok());
ROCKS_LOG_INFO(ioptions.info_log,
"Error reading from persistent cache. %s",
status.ToString().c_str());
}
// cache miss read from device
if (decompression_requested &&
n + kBlockTrailerSize < DefaultStackBufferSize) {
// If we've got a small enough hunk of data, read it in to the
// trivially allocated stack buffer instead of needing a full malloc()
used_buf = &stack_buf[0];
} else {
heap_buf = std::unique_ptr<char[]>(new char[n + kBlockTrailerSize]);
used_buf = heap_buf.get();
}

status = ReadBlock(file, footer, read_options, handle, &slice, used_buf);
if (status.ok() && read_options.fill_cache &&
cache_options.persistent_cache &&
cache_options.persistent_cache->IsCompressed()) {
// insert to raw cache
PersistentCacheHelper::InsertRawPage(cache_options, handle, used_buf,
n + kBlockTrailerSize);
status = ReadBlock(file, footer, read_options, handle, &slice, used_buf);
if (status.ok() && read_options.fill_cache &&
cache_options.persistent_cache &&
cache_options.persistent_cache->IsCompressed()) {
// insert to raw cache
PersistentCacheHelper::InsertRawPage(cache_options, handle, used_buf,
n + kBlockTrailerSize);
}
}
}

if (!status.ok()) {
return status;
if (!status.ok()) {
return status;
}
}

PERF_TIMER_GUARD(block_decompress_time);
Expand All @@ -416,14 +443,14 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer,
*contents = BlockContents(Slice(slice.data(), n), false, compression_type);
} else {
// page is uncompressed, the buffer either stack or heap provided
if (used_buf == &stack_buf[0]) {
if (got_from_prefetch_buffer || used_buf == &stack_buf[0]) {
heap_buf = std::unique_ptr<char[]>(new char[n]);
memcpy(heap_buf.get(), stack_buf, n);
memcpy(heap_buf.get(), used_buf, n);
}
*contents = BlockContents(std::move(heap_buf), n, true, compression_type);
}

if (status.ok() && read_options.fill_cache &&
if (status.ok() && !got_from_prefetch_buffer && read_options.fill_cache &&
cache_options.persistent_cache &&
!cache_options.persistent_cache->IsCompressed()) {
// insert to uncompressed cache
Expand Down
12 changes: 7 additions & 5 deletions table/format.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
#include "options/cf_options.h"
#include "port/port.h" // noexcept
#include "table/persistent_cache_options.h"
#include "util/file_reader_writer.h"

namespace rocksdb {

Expand Down Expand Up @@ -173,8 +174,9 @@ class Footer {
// Read the footer from file
// If enforce_table_magic_number != 0, ReadFooterFromFile() will return
// corruption if table_magic number is not equal to enforce_table_magic_number
Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size,
Footer* footer,
Status ReadFooterFromFile(RandomAccessFileReader* file,
FilePrefetchBuffer* prefetch_buffer,
uint64_t file_size, Footer* footer,
uint64_t enforce_table_magic_number = 0);

// 1-byte type + 32-bit crc
Expand Down Expand Up @@ -213,9 +215,9 @@ struct BlockContents {
// Read the block identified by "handle" from "file". On failure
// return non-OK. On success fill *result and return OK.
extern Status ReadBlockContents(
RandomAccessFileReader* file, const Footer& footer,
const ReadOptions& options, const BlockHandle& handle,
BlockContents* contents, const ImmutableCFOptions &ioptions,
RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
BlockContents* contents, const ImmutableCFOptions& ioptions,
bool do_uncompress = true, const Slice& compression_dict = Slice(),
const PersistentCacheOptions& cache_options = PersistentCacheOptions());

Expand Down
Loading

0 comments on commit 666a005

Please sign in to comment.