Fix hardlink may cause dead loop, also disable config gc_hardlink_rate (
JaySon-Huang authored Mar 28, 2022
1 parent 225dabe commit 759e416
Showing 4 changed files with 12 additions and 10 deletions.
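
Why the dead loop happened: PageFile::Writer::hardlinkFrom drives a "while (reader->hasNext())" loop, and LinkingMetaAdapter::linkToNewSequenceNext used to return void. On a malformed meta batch it bailed out early without advancing meta_file_offset, so hasNext() never became false and the loop spun forever. A minimal illustrative sketch of the pattern and the fix follows (simplified stand-ins, not the actual TiFlash classes):

    #include <cstddef>
    #include <stdexcept>

    // Simplified stand-in for PageFile::LinkingMetaAdapter: walks write batches
    // recorded in a meta buffer (names and structure are illustrative).
    struct MetaCursor
    {
        std::size_t offset = 0;
        std::size_t size = 0;

        bool hasNext() const { return offset < size; }

        // Hypothetical helper: returns the size of the batch at `offset`,
        // or 0 if the header is malformed.
        std::size_t parseBatchHeader(std::size_t) const { return 0; }

        // Before the fix the real method returned void and could bail out on a
        // malformed batch without touching `offset`, so hasNext() stayed true
        // forever. Returning bool lets the caller stop instead of spinning.
        bool linkNext()
        {
            std::size_t batch_bytes = parseBatchHeader(offset);
            if (batch_bytes == 0 || offset + batch_bytes > size)
                return false; // malformed batch: cannot make progress
            // ... rewrite the batch with the new sequence id / file id here ...
            offset += batch_bytes; // forward progress guarantees termination
            return true;
        }
    };

    void hardlinkFromSketch(MetaCursor & reader)
    {
        while (reader.hasNext())
        {
            if (!reader.linkNext())
                throw std::runtime_error("failed to relink meta batch"); // mirrors the new LOGICAL_ERROR path
        }
    }

With the bool return, the caller can surface an error instead of looping on the same corrupted batch.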
3 changes: 0 additions & 3 deletions dbms/src/Interpreters/Settings.h
@@ -299,21 +299,18 @@ struct Settings
M(SettingUInt64, dt_storage_pool_log_gc_min_legacy_num, 3, "Min number of legacy page files to compact") \
M(SettingUInt64, dt_storage_pool_log_gc_min_bytes, 128 * Constant::MB, "Min bytes of page data to compact") \
M(SettingFloat, dt_storage_pool_log_gc_max_valid_rate, 0.35, "Max valid rate of deciding a page file can be compact") \
-M(SettingDouble, dt_storage_pool_log_gc_force_hardlink_rate, 2, "Max valid rate of deciding a page file can do hardlink") \
\
M(SettingUInt64, dt_storage_pool_data_write_slots, 1, "Max write concurrency for each StoragePool.data.") \
M(SettingUInt64, dt_storage_pool_data_gc_min_file_num, 10, "Min number of page files to compact") \
M(SettingUInt64, dt_storage_pool_data_gc_min_legacy_num, 3, "Min number of legacy page files to compact") \
M(SettingUInt64, dt_storage_pool_data_gc_min_bytes, 128 * Constant::MB, "Min bytes of page data to compact") \
M(SettingFloat, dt_storage_pool_data_gc_max_valid_rate, 0.35, "Max valid rate of deciding a page file can be compact") \
-M(SettingDouble, dt_storage_pool_data_gc_force_hardlink_rate, 2, "Max valid rate of deciding a page file can do hardlink") \
\
M(SettingUInt64, dt_storage_pool_meta_write_slots, 2, "Max write concurrency for each StoragePool.meta.") \
M(SettingUInt64, dt_storage_pool_meta_gc_min_file_num, 10, "Min number of page files to compact") \
M(SettingUInt64, dt_storage_pool_meta_gc_min_legacy_num, 3, "Min number of legacy page files to compact") \
M(SettingUInt64, dt_storage_pool_meta_gc_min_bytes, 128 * Constant::MB, "Min bytes of page data to compact") \
M(SettingFloat, dt_storage_pool_meta_gc_max_valid_rate, 0.35, "Max valid rate of deciding a page file can be compact") \
-M(SettingDouble, dt_storage_pool_meta_gc_force_hardlink_rate, 2, "Max valid rate of deciding a page file can do hardlink") \
\
M(SettingUInt64, dt_checksum_frame_size, DBMS_DEFAULT_BUFFER_SIZE, "Frame size for delta tree stable storage") \
M(SettingChecksumAlgorithm, dt_checksum_algorithm, ChecksumAlgo::XXH3, "Checksum algorithm for delta tree stable storage") \
3 changes: 1 addition & 2 deletions dbms/src/Storages/DeltaMerge/StoragePool.cpp
@@ -42,8 +42,7 @@ PageStorage::Config extractConfig(const Settings & settings, StorageType subtype
config.gc_min_files = settings.dt_storage_pool_##NAME##_gc_min_file_num; \
config.gc_min_bytes = settings.dt_storage_pool_##NAME##_gc_min_bytes; \
config.gc_min_legacy_num = settings.dt_storage_pool_##NAME##_gc_min_legacy_num; \
-config.gc_max_valid_rate = settings.dt_storage_pool_##NAME##_gc_max_valid_rate; \
-config.gc_force_hardlink_rate = settings.dt_storage_pool_##NAME##_gc_force_hardlink_rate;
+config.gc_max_valid_rate = settings.dt_storage_pool_##NAME##_gc_max_valid_rate;

PageStorage::Config config = getConfigFromSettings(settings);

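
For context on the hunk above: extractConfig fills one PageStorage::Config from the per-pool settings via preprocessor token pasting, so a single macro body serves the log, data, and meta pools. A simplified sketch of that pattern follows (FakeSettings, FakeConfig, SET_CONFIG, and the int subtype are illustrative stand-ins; the real macro and the switch over StorageType are outside this hunk):

    #include <cstdint>

    // Illustrative stand-ins for DB::Settings and PageStorage::Config.
    struct FakeSettings
    {
        uint64_t dt_storage_pool_log_gc_min_file_num = 10;
        uint64_t dt_storage_pool_data_gc_min_file_num = 10;
        uint64_t dt_storage_pool_meta_gc_min_file_num = 10;
    };

    struct FakeConfig
    {
        uint64_t gc_min_files = 0;
    };

    // `##NAME##` pastes the pool subtype into the setting identifier, so
    // SET_CONFIG(data) reads settings.dt_storage_pool_data_gc_min_file_num.
    #define SET_CONFIG(NAME) \
        config.gc_min_files = settings.dt_storage_pool_##NAME##_gc_min_file_num;

    FakeConfig extractConfigSketch(const FakeSettings & settings, int subtype)
    {
        FakeConfig config;
        switch (subtype)
        {
        case 0: SET_CONFIG(log); break;   // log pool
        case 1: SET_CONFIG(data); break;  // data pool
        default: SET_CONFIG(meta); break; // meta pool
        }
        return config;
    }
    #undef SET_CONFIG

Dropping the gc_force_hardlink_rate assignment here matches the removal of the dt_storage_pool_*_gc_force_hardlink_rate settings above, so the value can no longer be tuned from Settings and PageStorage falls back to its built-in default.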
14 changes: 10 additions & 4 deletions dbms/src/Storages/Page/V2/PageFile.cpp
@@ -302,7 +302,7 @@ bool PageFile::LinkingMetaAdapter::hasNext() const
return meta_file_offset < meta_size;
}

-void PageFile::LinkingMetaAdapter::linkToNewSequenceNext(WriteBatch::SequenceID sid, PageEntriesEdit & edit, UInt64 file_id, UInt64 level)
+bool PageFile::LinkingMetaAdapter::linkToNewSequenceNext(WriteBatch::SequenceID sid, PageEntriesEdit & edit, UInt64 file_id, UInt64 level)
{
char * meta_data_end = meta_buffer + meta_size;
char * pos = meta_buffer + meta_file_offset;
@@ -311,7 +311,7 @@ void PageFile::LinkingMetaAdapter::linkToNewSequenceNext(WriteBatch::SequenceID
LOG_WARNING(page_file.log,
"[batch_start_pos=" << meta_file_offset << "] [meta_size=" << meta_size << "] [file=" << page_file.metaPath()
<< "] ignored.");
-return;
+return false;
}

const char * wb_start_pos = pos;
@@ -321,7 +321,7 @@ void PageFile::LinkingMetaAdapter::linkToNewSequenceNext(WriteBatch::SequenceID
LOG_WARNING(page_file.log,
"[expect_batch_bytes=" << wb_bytes << "] [meta_size=" << meta_size << "] [file=" << page_file.metaPath()
<< "] ignored.");
-return;
+return false;
}

WriteBatch::SequenceID wb_sequence = 0;
@@ -450,6 +450,8 @@ void PageFile::LinkingMetaAdapter::linkToNewSequenceNext(WriteBatch::SequenceID
ErrorCodes::LOGICAL_ERROR);

meta_file_offset = pos - meta_buffer;
+
+return true;
}

// =========================================================
@@ -735,7 +737,11 @@ void PageFile::Writer::hardlinkFrom(PageFile & linked_file, WriteBatch::Sequence
// Move to the SequenceID item
while (reader->hasNext())
{
-reader->linkToNewSequenceNext(sid, edit, page_file.getFileId(), page_file.getLevel());
+if (!reader->linkToNewSequenceNext(sid, edit, page_file.getFileId(), page_file.getLevel()))
+{
+    throw Exception(fmt::format("Failed to update [sid={}] into [file_id={}] , [file_level={}]", sid, page_file.getFileId(), page_file.getLevel()),
+                    ErrorCodes::LOGICAL_ERROR);
+}
}

char * linked_meta_data;
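
The PageFile.cpp hunks above also show the parsing-side invariant behind the fix: each call to linkToNewSequenceNext consumes one length-prefixed write batch from the meta buffer and must advance meta_file_offset past it before reporting success; when the recorded batch size (wb_bytes) runs past the end of the buffer, no safe advance exists, so the method now returns false instead of silently returning. A rough sketch of that invariant on a generic length-prefixed buffer follows (the record layout and names are illustrative, not the real PageFile meta format):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Consume one [4-byte length][payload] record from a buffer. Returns false
    // as soon as a record cannot be fully consumed, so the caller never loops
    // on corrupted input.
    bool consumeNextRecord(const char * buffer, std::size_t buffer_size, std::size_t & offset)
    {
        if (offset + sizeof(std::uint32_t) > buffer_size)
            return false; // not even a complete length field left

        std::uint32_t record_bytes = 0;
        std::memcpy(&record_bytes, buffer + offset, sizeof(record_bytes));

        if (record_bytes < sizeof(std::uint32_t) || offset + record_bytes > buffer_size)
            return false; // corrupted length: advancing would overrun or stall

        // ... process the payload at buffer + offset + sizeof(std::uint32_t) ...

        offset += record_bytes; // the invariant: success always moves the cursor forward
        return true;
    }

Either outcome (advance or report failure) keeps the caller's loop from stalling on corrupted input.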
2 changes: 1 addition & 1 deletion dbms/src/Storages/Page/V2/PageFile.h
@@ -221,7 +221,7 @@ class PageFile : public Allocator<false>

bool hasNext() const;

-void linkToNewSequenceNext(WriteBatch::SequenceID sid, PageEntriesEdit & edit, UInt64 file_id, UInt64 level);
+bool linkToNewSequenceNext(WriteBatch::SequenceID sid, PageEntriesEdit & edit, UInt64 file_id, UInt64 level);

std::pair<char *, size_t> getMetaInfo() { return {meta_buffer, meta_size}; };

