Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add missing options #209

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
109 changes: 103 additions & 6 deletions options.go
Original file line number Diff line number Diff line change
Expand Up @@ -307,6 +307,18 @@ func (opts *Options) SetAllowConcurrentMemtableWrites(allow bool) {
C.rocksdb_options_set_allow_concurrent_memtable_write(opts.c, boolToChar(allow))
}

// SetEnableWriteThreadAdaptiveYield sets enable_write_thread_adaptive_yield.
//
// If true, threads synchronizing with the write batch group leader will
// wait for up to write_thread_max_yield_usec before blocking on a mutex.
// This can substantially improve throughput for concurrent workloads,
// regardless of whether allow_concurrent_memtable_write is enabled.
//
// Default: true
func (opts *Options) SetEnableWriteThreadAdaptiveYield(allow bool) {
C.rocksdb_options_set_enable_write_thread_adaptive_yield(opts.c, boolToChar(allow))
}

// OptimizeLevelStyleCompaction optimizes the DB for leveled compaction.
//
// Default values for some parameters in ColumnFamilyOptions are not
Expand Down Expand Up @@ -428,12 +440,6 @@ func (opts *Options) SetMinLevelToCompress(value int) {
C.rocksdb_options_set_min_level_to_compress(opts.c, C.int(value))
}

// SetCompressionOptions sets different options for compression algorithms.
// Default: nil
func (opts *Options) SetCompressionOptions(value *CompressionOptions) {
C.rocksdb_options_set_compression_options(opts.c, C.int(value.WindowBits), C.int(value.Level), C.int(value.Strategy), C.int(value.MaxDictBytes))
}

// SetPrefixExtractor sets the prefix extractor.
//
// If set, use the specified function to determine the
Expand Down Expand Up @@ -739,6 +745,19 @@ func (opts *Options) SetKeepLogFileNum(value int) {
C.rocksdb_options_set_keep_log_file_num(opts.c, C.size_t(value))
}

// SetRecycleLogFileNum sets the recycle log files.
//
// If non-zero, we will reuse previously written log files for new
// logs, overwriting the old data. The value indicates how many
// such files we will keep around at any point in time for later
// use. This is more efficient because the blocks are already
// allocated and fdatasync does not need to update the inode after
// each write.
// Default: 0
func (opts *Options) SetRecycleLogFileNum(value int) {
C.rocksdb_options_set_recycle_log_file_num(opts.c, C.size_t(value))
}

// SetSoftRateLimit sets the soft rate limit.
//
// Puts are delayed 0-1 ms when any level has a compaction score that exceeds
Expand Down Expand Up @@ -853,6 +872,25 @@ func (opts *Options) SetEnablePipelinedWrite(value bool) {
C.rocksdb_options_set_enable_pipelined_write(opts.c, boolToChar(value))
}

// SetMaxSubcompactions sets the maximum number of threads that will
// concurrently perform a compaction job by breaking it into multiple,
// smaller ones that are run simultaneously.
// Default: 1 (i.e. no subcompactions)
//
// Dynamically changeable through SetDBOptions() API.
func (opts *Options) SetMaxSubcompactions(value uint) {
C.rocksdb_options_set_max_subcompactions(opts.c, C.uint32_t(value))
}

// SetMaxBackgroundJobs sets the maximum number of concurrent background jobs (compactions and flushes).
//
// Default: 2
//
// Dynamically changeable through SetDBOptions() API.
func (opts *Options) SetMaxBackgroundJobs(value int) {
C.rocksdb_options_set_max_background_jobs(opts.c, C.int(value))
}

// SetManifestPreallocationSize sets the number of bytes
// to preallocate (via fallocate) the manifest files.
//
Expand Down Expand Up @@ -969,6 +1007,31 @@ func (opts *Options) SetBytesPerSync(value uint64) {
C.rocksdb_options_set_bytes_per_sync(opts.c, C.uint64_t(value))
}

// SetWalBytesPerSync sets the bytes per sync for WAL files.
//
// Same as bytes_per_sync, but applies to WAL files
//
// Default: 0, turned off
//
// Dynamically changeable through SetDBOptions() API.
func (opts *Options) SetWalBytesPerSync(value uint64) {
C.rocksdb_options_set_wal_bytes_per_sync(opts.c, C.uint64_t(value))
}

// SetWritableFileMaxBufferSize sets the maximum buffer size that is used by WritableFileWriter.
//
// On Windows, we need to maintain an aligned buffer for writes.
// We allow the buffer to grow until it's size hits the limit in buffered
// IO and fix the buffer size when using direct IO to ensure alignment of
// write requests if the logical sector size is unusual
//
// Default: 1024 * 1024 (1 MB)
//
// Dynamically changeable through SetDBOptions() API.
func (opts *Options) SetWritableFileMaxBufferSize(value uint64) {
C.rocksdb_options_set_writable_file_max_buffer_size(opts.c, C.uint64_t(value))
}

// SetCompactionStyle sets the compaction style.
// Default: LevelCompactionStyle
func (opts *Options) SetCompactionStyle(value CompactionStyle) {
Expand Down Expand Up @@ -1031,6 +1094,15 @@ func (opts *Options) SetInplaceUpdateNumLocks(value int) {
C.rocksdb_options_set_inplace_update_num_locks(opts.c, C.size_t(value))
}

// SetReportBgIoStats enable/disable to measure IO stats in compactions and flushes.
//
// Default: false
//
// Dynamically changeable through SetOptions() API
func (opts *Options) SetReportBgIoStats(value bool) {
C.rocksdb_options_set_inplace_update_support(opts.c, boolToChar(value))
}

// SetMemtableHugePageSize sets the page size for huge page for
// arena used by the memtable.
// If <=0, it won't allocate from huge page but from malloc.
Expand Down Expand Up @@ -1096,6 +1168,20 @@ func (opts *Options) SetMemtableVectorRep() {
C.rocksdb_options_set_memtable_vector_rep(opts.c)
}

// SetMemtablePrefixBloomSizeRatio sets memtable_prefix_bloom_size_ratio.
//
// if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
// create prefix bloom for memtable with the size of
// write_buffer_size * memtable_prefix_bloom_size_ratio.
// If it is larger than 0.25, it is sanitized to 0.25.
//
// Default: 0 (disable)
//
// Dynamically changeable through SetOptions() API
func (opts *Options) SetMemtablePrefixBloomSizeRatio(value float64) {
C.rocksdb_options_set_memtable_prefix_bloom_size_ratio(opts.c, C.double(value))
}

// SetHashSkipListRep sets a hash skip list as MemTableRep.
//
// It contains a fixed array of buckets, each
Expand Down Expand Up @@ -1196,6 +1282,17 @@ func (opts *Options) SetOptimizeFiltersForHits(value bool) {
C.rocksdb_options_set_optimize_filters_for_hits(opts.c, C.int(btoi(value)))
}

// SetSkipStatsUpdateOnDbOpen sets skip_stats_update_on_db_open
// If true, then DB::Open() will not update the statistics used to optimize
// compaction decision by loading table properties from many files.
// Turning off this feature will improve DBOpen time especially in
// disk environment.
//
// Default: false
func (opts *Options) SetSkipStatsUpdateOnDbOpen(value bool) {
C.rocksdb_options_set_skip_stats_update_on_db_open(opts.c, boolToChar(value))
}

// Destroy deallocates the Options object.
func (opts *Options) Destroy() {
C.rocksdb_options_destroy(opts.c)
Expand Down
25 changes: 25 additions & 0 deletions options_block_based_table_v6.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
// +build rocksdb_v6

package gorocksdb

import "C"

// DataBlockIndexType specifies the index type that will be used for the data block.
type DataBlockIndexType byte

const (
	// KDataBlockBinarySearch is the traditional binary-search data block index.
	KDataBlockBinarySearch DataBlockIndexType = 0
	// KDataBlockBinaryAndHash adds a hash index on top of binary search.
	KDataBlockBinaryAndHash DataBlockIndexType = 1
)

// SetDataBlockIndexType sets the index type that will be used for the data block.
func (opts *BlockBasedTableOptions) SetDataBlockIndexType(value DataBlockIndexType) {
C.rocksdb_block_based_options_set_data_block_index_type(opts.c, C.int(value))
}

// SetDataBlockHashRadio sets the hash ratio (#entries/#buckets) that will be
// used for the data block hash index. It is valid only when
// data_block_hash_index_type is kDataBlockBinaryAndHash.
//
// NOTE(review): the exported name misspells "Ratio" as "Radio"; renaming
// would break callers, so only the doc comment is corrected here.
func (opts *BlockBasedTableOptions) SetDataBlockHashRadio(value float64) {
C.rocksdb_block_based_options_set_data_block_hash_ratio(opts.c, C.double(value))
}
18 changes: 10 additions & 8 deletions options_compression.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@ package gorocksdb

// CompressionOptions represents options for different compression algorithms like Zlib.
type CompressionOptions struct {
	WindowBits   int // zlib window bits
	Level        int // compression level
	Strategy     int // zlib compression strategy
	MaxDictBytes int // maximum size of the compression dictionary
	// ZstdMaxTrainBytes is the maximum number of bytes used to train the
	// zstd dictionary; 0 disables dictionary training.
	ZstdMaxTrainBytes int
}

// NewDefaultCompressionOptions creates a default CompressionOptions object.
Expand All @@ -16,9 +17,10 @@ func NewDefaultCompressionOptions() *CompressionOptions {
// NewCompressionOptions creates a CompressionOptions object.
//
// ZstdMaxTrainBytes is initialized to 0 (zstd dictionary training
// disabled); set the field directly to enable it.
func NewCompressionOptions(windowBits, level, strategy, maxDictBytes int) *CompressionOptions {
	return &CompressionOptions{
		WindowBits:        windowBits,
		Level:             level,
		Strategy:          strategy,
		MaxDictBytes:      maxDictBytes,
		ZstdMaxTrainBytes: 0,
	}
}
43 changes: 42 additions & 1 deletion options_read.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,9 @@ package gorocksdb

// #include "rocksdb/c.h"
import "C"
import "unsafe"
import (
"unsafe"
)

// ReadTier controls fetching of data during a read request.
// An application can issue a read request (via Get/Iterators) and specify
Expand Down Expand Up @@ -129,6 +131,45 @@ func (opts *ReadOptions) SetReadaheadSize(value uint64) {
C.rocksdb_readoptions_set_readahead_size(opts.c, C.size_t(value))
}

// SetTotalOrderSeek specifies the value of "total_order_seek".
// Enable a total order seek regardless of index format (e.g. hash index)
// used in the table. Some table format (e.g. plain table) may not support
// this option.
// If true when calling Get(), we also skip prefix bloom when reading from
// block based table. It provides a way to read existing data after
// changing implementation of prefix extractor.
// Default: false
func (opts *ReadOptions) SetTotalOrderSeek(value bool) {
C.rocksdb_readoptions_set_total_order_seek(opts.c, boolToChar(value))
}

// SetMaxSkippableInternalKeys specifies the value of "max_skippable_internal_keys".
// A threshold for the number of keys that can be skipped before failing an
// iterator seek as incomplete. The default value of 0 should be used to
// never fail a request as incomplete, even on skipping too many keys.
// Default: 0
func (opts *ReadOptions) SetMaxSkippableInternalKeys(value uint64) {
C.rocksdb_readoptions_set_max_skippable_internal_keys(opts.c, C.uint64_t(value))
}

// SetBackgroundPurgeOnIteratorCleanup specifies the value of "background_purge_on_iterator_cleanup".
// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
// schedule a background job in the flush job queue and delete obsolete files
// in background.
// Default: false
func (opts *ReadOptions) SetBackgroundPurgeOnIteratorCleanup(value bool) {
C.rocksdb_readoptions_set_background_purge_on_iterator_cleanup(opts.c, boolToChar(value))
}

// SetIgnoreRangeDeletions specifies the value of "ignore_range_deletions".
// If true, keys deleted using the DeleteRange() API will be visible to
// readers until they are naturally deleted during compaction. This improves
// read performance in DBs with many range deletions.
// Default: false
func (opts *ReadOptions) SetIgnoreRangeDeletions(value bool) {
C.rocksdb_readoptions_set_ignore_range_deletions(opts.c, boolToChar(value))
}

// Destroy deallocates the ReadOptions object.
func (opts *ReadOptions) Destroy() {
C.rocksdb_readoptions_destroy(opts.c)
Expand Down
Loading