Unverified Commit ad430e67 authored by Ashwin Ramesh

Add gorocksdb to govendor

parent da8738fb
Showing with 2782 additions and 0 deletions
Copyright (C) 2016 Thomas Adam
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
# gorocksdb, a Go wrapper for RocksDB
[![Build Status](https://travis-ci.org/tecbot/gorocksdb.png)](https://travis-ci.org/tecbot/gorocksdb) [![GoDoc](https://godoc.org/github.com/tecbot/gorocksdb?status.png)](http://godoc.org/github.com/tecbot/gorocksdb)
## Install
There are two ways to install gorocksdb: you can either use your own shared
RocksDB library or the embedded RocksDB version from [CockroachDB](https://github.com/cockroachdb/c-rocksdb).
To install the embedded version (it might take a while):
go get -tags=embed github.com/tecbot/gorocksdb
If you want to use a shared library, you'll need to build
[RocksDB](https://github.com/facebook/rocksdb) on your machine first.
Once RocksDB is built, you can install gorocksdb:
CGO_CFLAGS="-I/path/to/rocksdb/include" \
CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" \
go get github.com/tecbot/gorocksdb
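A minimal usage sketch (the database path and keys below are just placeholders; see the package documentation for more):

    package main

    import (
        "fmt"
        "log"

        "github.com/tecbot/gorocksdb"
    )

    func main() {
        opts := gorocksdb.NewDefaultOptions()
        opts.SetCreateIfMissing(true)
        db, err := gorocksdb.OpenDb(opts, "/tmp/gorocksdb-example")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        wo := gorocksdb.NewDefaultWriteOptions()
        ro := gorocksdb.NewDefaultReadOptions()
        if err := db.Put(wo, []byte("foo"), []byte("bar")); err != nil {
            log.Fatal(err)
        }
        value, err := db.Get(ro, []byte("foo"))
        if err != nil {
            log.Fatal(err)
        }
        defer value.Free()
        fmt.Printf("foo = %s\n", value.Data())
    }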
package gorocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import (
"errors"
"unsafe"
)
// BackupEngineInfo represents the information about the backups
// in a backup engine instance. Use this to get the state of the
// backups, such as the number of backups, their IDs, and timestamps.
type BackupEngineInfo struct {
c *C.rocksdb_backup_engine_info_t
}
// GetCount gets the number of backups available.
func (b *BackupEngineInfo) GetCount() int {
return int(C.rocksdb_backup_engine_info_count(b.c))
}
// GetTimestamp gets the timestamp at which the backup at the given index was taken.
func (b *BackupEngineInfo) GetTimestamp(index int) int64 {
return int64(C.rocksdb_backup_engine_info_timestamp(b.c, C.int(index)))
}
// GetBackupId gets an id that uniquely identifies a backup
// regardless of its position.
func (b *BackupEngineInfo) GetBackupId(index int) int64 {
return int64(C.rocksdb_backup_engine_info_backup_id(b.c, C.int(index)))
}
// GetSize gets the size of the backup at the given index, in bytes.
func (b *BackupEngineInfo) GetSize(index int) int64 {
return int64(C.rocksdb_backup_engine_info_size(b.c, C.int(index)))
}
// GetNumFiles gets the number of files in the backup at the given index.
func (b *BackupEngineInfo) GetNumFiles(index int) int32 {
return int32(C.rocksdb_backup_engine_info_number_files(b.c, C.int(index)))
}
// Destroy destroys the backup engine info instance.
func (b *BackupEngineInfo) Destroy() {
C.rocksdb_backup_engine_info_destroy(b.c)
b.c = nil
}
// RestoreOptions captures the options to be used during
// restoration of a backup.
type RestoreOptions struct {
c *C.rocksdb_restore_options_t
}
// NewRestoreOptions creates a RestoreOptions instance.
func NewRestoreOptions() *RestoreOptions {
return &RestoreOptions{
c: C.rocksdb_restore_options_create(),
}
}
// SetKeepLogFiles sets or unsets the keep_log_files option.
// If non-zero, restore won't overwrite the existing log files in wal_dir. It
// will also move all log files from the archive directory to wal_dir.
// By default, this is off (0).
func (ro *RestoreOptions) SetKeepLogFiles(v int) {
C.rocksdb_restore_options_set_keep_log_files(ro.c, C.int(v))
}
// Destroy destroys this RestoreOptions instance.
func (ro *RestoreOptions) Destroy() {
C.rocksdb_restore_options_destroy(ro.c)
}
// BackupEngine is a reusable handle to a RocksDB Backup, created by
// OpenBackupEngine.
type BackupEngine struct {
c *C.rocksdb_backup_engine_t
path string
opts *Options
}
// OpenBackupEngine opens a backup engine with specified options.
func OpenBackupEngine(opts *Options, path string) (*BackupEngine, error) {
var cErr *C.char
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
be := C.rocksdb_backup_engine_open(opts.c, cpath, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
return &BackupEngine{
c: be,
path: path,
opts: opts,
}, nil
}
// UnsafeGetBackupEngine returns the underlying c backup engine.
func (b *BackupEngine) UnsafeGetBackupEngine() unsafe.Pointer {
return unsafe.Pointer(b.c)
}
// CreateNewBackup takes a new backup from db.
func (b *BackupEngine) CreateNewBackup(db *DB) error {
var cErr *C.char
C.rocksdb_backup_engine_create_new_backup(b.c, db.c, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// GetInfo gets an object that gives information about
// the backups that have already been taken.
func (b *BackupEngine) GetInfo() *BackupEngineInfo {
return &BackupEngineInfo{
c: C.rocksdb_backup_engine_get_backup_info(b.c),
}
}
// RestoreDBFromLatestBackup restores the latest backup to dbDir. walDir
// is where the write-ahead logs are restored to and is usually the same as dbDir.
func (b *BackupEngine) RestoreDBFromLatestBackup(dbDir, walDir string, ro *RestoreOptions) error {
var cErr *C.char
cDbDir := C.CString(dbDir)
cWalDir := C.CString(walDir)
defer func() {
C.free(unsafe.Pointer(cDbDir))
C.free(unsafe.Pointer(cWalDir))
}()
C.rocksdb_backup_engine_restore_db_from_latest_backup(b.c, cDbDir, cWalDir, ro.c, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// Close closes the backup engine and cleans up state.
// The backups already taken remain on storage.
func (b *BackupEngine) Close() {
C.rocksdb_backup_engine_close(b.c)
b.c = nil
}
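// A hedged usage sketch for the backup API above (the paths are placeholders
// and db is an already opened *DB):
//
//	be, err := OpenBackupEngine(NewDefaultOptions(), "/path/to/backups")
//	if err != nil {
//		// handle error
//	}
//	defer be.Close()
//
//	// Take a backup of the open database.
//	if err := be.CreateNewBackup(db); err != nil {
//		// handle error
//	}
//
//	// Restore the most recent backup into a (possibly fresh) db directory.
//	ro := NewRestoreOptions()
//	defer ro.Destroy()
//	if err := be.RestoreDBFromLatestBackup("/path/to/db", "/path/to/db", ro); err != nil {
//		// handle error
//	}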
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// Cache is a cache used to store data read from the database in memory.
type Cache struct {
c *C.rocksdb_cache_t
}
// NewLRUCache creates a new LRU Cache object with the capacity given.
func NewLRUCache(capacity int) *Cache {
return NewNativeCache(C.rocksdb_cache_create_lru(C.size_t(capacity)))
}
// NewNativeCache creates a Cache object.
func NewNativeCache(c *C.rocksdb_cache_t) *Cache {
return &Cache{c}
}
// Destroy deallocates the Cache object.
func (c *Cache) Destroy() {
C.rocksdb_cache_destroy(c.c)
c.c = nil
}
package gorocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import "unsafe"
// ColumnFamilyHandle represents a handle to a ColumnFamily.
type ColumnFamilyHandle struct {
c *C.rocksdb_column_family_handle_t
}
// NewNativeColumnFamilyHandle creates a ColumnFamilyHandle object.
func NewNativeColumnFamilyHandle(c *C.rocksdb_column_family_handle_t) *ColumnFamilyHandle {
return &ColumnFamilyHandle{c}
}
// UnsafeGetCFHandler returns the underlying c column family handle.
func (h *ColumnFamilyHandle) UnsafeGetCFHandler() unsafe.Pointer {
return unsafe.Pointer(h.c)
}
// Destroy calls the destructor of the underlying column family handle.
func (h *ColumnFamilyHandle) Destroy() {
C.rocksdb_column_family_handle_destroy(h.c)
}
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// A CompactionFilter can be used to filter keys during compaction time.
type CompactionFilter interface {
// If the Filter function returns false, it indicates
// that the kv should be preserved, while a return value of true
// indicates that this key-value should be removed from the
// output of the compaction. The application can inspect
// the existing value of the key and make decision based on it.
//
// When the value is to be preserved, the application has the option
// to modify the existing value and pass it back through a new value.
// To retain the previous value, simply return nil.
//
// If multithreaded compaction is being used *and* a single CompactionFilter
// instance was supplied via SetCompactionFilter, then the Filter function may be
// called from different threads concurrently. The application must ensure
// that the call is thread-safe.
Filter(level int, key, val []byte) (remove bool, newVal []byte)
// The name of the compaction filter, for logging
Name() string
}
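// An illustrative, hypothetical CompactionFilter implementation (not part of
// this package) that drops every key with a "tmp/" prefix during compaction
// and preserves all other entries unchanged:
//
//	type dropTmpFilter struct{}
//
//	func (dropTmpFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
//		return bytes.HasPrefix(key, []byte("tmp/")), nil
//	}
//
//	func (dropTmpFilter) Name() string { return "drop-tmp-filter" }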
// NewNativeCompactionFilter creates a CompactionFilter object.
func NewNativeCompactionFilter(c *C.rocksdb_compactionfilter_t) CompactionFilter {
	return nativeCompactionFilter{c}
}
type nativeCompactionFilter struct {
c *C.rocksdb_compactionfilter_t
}
func (c nativeCompactionFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
return false, nil
}
func (c nativeCompactionFilter) Name() string { return "" }
// Hold references to compaction filters.
var compactionFilters []CompactionFilter
func registerCompactionFilter(filter CompactionFilter) int {
compactionFilters = append(compactionFilters, filter)
return len(compactionFilters) - 1
}
//export gorocksdb_compactionfilter_filter
func gorocksdb_compactionfilter_filter(idx int, cLevel C.int, cKey *C.char, cKeyLen C.size_t, cVal *C.char, cValLen C.size_t, cNewVal **C.char, cNewValLen *C.size_t, cValChanged *C.uchar) C.int {
key := charToByte(cKey, cKeyLen)
val := charToByte(cVal, cValLen)
remove, newVal := compactionFilters[idx].Filter(int(cLevel), key, val)
if remove {
return C.int(1)
} else if newVal != nil {
*cNewVal = byteToChar(newVal)
*cNewValLen = C.size_t(len(newVal))
*cValChanged = C.uchar(1)
}
return C.int(0)
}
//export gorocksdb_compactionfilter_name
func gorocksdb_compactionfilter_name(idx int) *C.char {
return stringToChar(compactionFilters[idx].Name())
}
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// A Comparator object provides a total order across slices that are
// used as keys in an sstable or a database.
type Comparator interface {
// Three-way comparison. Returns value:
// < 0 iff "a" < "b",
// == 0 iff "a" == "b",
// > 0 iff "a" > "b"
Compare(a, b []byte) int
// The name of the comparator.
Name() string
}
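// An illustrative, hypothetical Comparator implementation (not part of this
// package) that orders keys in reverse bytewise order:
//
//	type reverseBytewiseComparator struct{}
//
//	func (reverseBytewiseComparator) Compare(a, b []byte) int { return -bytes.Compare(a, b) }
//	func (reverseBytewiseComparator) Name() string            { return "reverse-bytewise" }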
// NewNativeComparator creates a Comparator object.
func NewNativeComparator(c *C.rocksdb_comparator_t) Comparator {
return nativeComparator{c}
}
type nativeComparator struct {
c *C.rocksdb_comparator_t
}
func (c nativeComparator) Compare(a, b []byte) int { return 0 }
func (c nativeComparator) Name() string { return "" }
// Hold references to comparators.
var comperators []Comparator
func registerComperator(cmp Comparator) int {
comperators = append(comperators, cmp)
return len(comperators) - 1
}
//export gorocksdb_comparator_compare
func gorocksdb_comparator_compare(idx int, cKeyA *C.char, cKeyALen C.size_t, cKeyB *C.char, cKeyBLen C.size_t) C.int {
keyA := charToByte(cKeyA, cKeyALen)
keyB := charToByte(cKeyB, cKeyBLen)
return C.int(comperators[idx].Compare(keyA, keyB))
}
//export gorocksdb_comparator_name
func gorocksdb_comparator_name(idx int) *C.char {
return stringToChar(comperators[idx].Name())
}
package gorocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import (
"errors"
"unsafe"
)
// Range is a range of keys in the database. GetApproximateSizes calls with it
// count keys beginning at Start and ending right before Limit.
type Range struct {
Start []byte
Limit []byte
}
// DB is a reusable handle to a RocksDB database on disk, created by Open.
type DB struct {
c *C.rocksdb_t
name string
opts *Options
}
// OpenDb opens a database with the specified options.
func OpenDb(opts *Options, name string) (*DB, error) {
var (
cErr *C.char
cName = C.CString(name)
)
defer C.free(unsafe.Pointer(cName))
db := C.rocksdb_open(opts.c, cName, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
return &DB{
name: name,
c: db,
opts: opts,
}, nil
}
// OpenDbForReadOnly opens a database with the specified options for readonly usage.
func OpenDbForReadOnly(opts *Options, name string, errorIfLogFileExist bool) (*DB, error) {
var (
cErr *C.char
cName = C.CString(name)
)
defer C.free(unsafe.Pointer(cName))
db := C.rocksdb_open_for_read_only(opts.c, cName, boolToChar(errorIfLogFileExist), &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
return &DB{
name: name,
c: db,
opts: opts,
}, nil
}
// OpenDbColumnFamilies opens a database with the specified column families.
func OpenDbColumnFamilies(
opts *Options,
name string,
cfNames []string,
cfOpts []*Options,
) (*DB, []*ColumnFamilyHandle, error) {
numColumnFamilies := len(cfNames)
if numColumnFamilies != len(cfOpts) {
return nil, nil, errors.New("must provide the same number of column family names and options")
}
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
cNames := make([]*C.char, numColumnFamilies)
for i, s := range cfNames {
cNames[i] = C.CString(s)
}
defer func() {
for _, s := range cNames {
C.free(unsafe.Pointer(s))
}
}()
cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
for i, o := range cfOpts {
cOpts[i] = o.c
}
cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)
var cErr *C.char
db := C.rocksdb_open_column_families(
opts.c,
cName,
C.int(numColumnFamilies),
&cNames[0],
&cOpts[0],
&cHandles[0],
&cErr,
)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, nil, errors.New(C.GoString(cErr))
}
cfHandles := make([]*ColumnFamilyHandle, numColumnFamilies)
for i, c := range cHandles {
cfHandles[i] = NewNativeColumnFamilyHandle(c)
}
return &DB{
name: name,
c: db,
opts: opts,
}, cfHandles, nil
}
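// A hedged usage sketch for OpenDbColumnFamilies (names and path are
// placeholders; the extra column family must already exist, e.g. created
// earlier via CreateColumnFamily):
//
//	opts := NewDefaultOptions()
//	opts.SetCreateIfMissing(true)
//	db, cfs, err := OpenDbColumnFamilies(
//		opts,
//		"/path/to/db",
//		[]string{"default", "metadata"},
//		[]*Options{NewDefaultOptions(), NewDefaultOptions()},
//	)
//	if err != nil {
//		// handle error
//	}
//	// cfs[0] is the handle for "default", cfs[1] for "metadata".
//	err = db.PutCF(NewDefaultWriteOptions(), cfs[1], []byte("key"), []byte("value"))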
// OpenDbForReadOnlyColumnFamilies opens a database with the specified column
// families in read only mode.
func OpenDbForReadOnlyColumnFamilies(
opts *Options,
name string,
cfNames []string,
cfOpts []*Options,
errorIfLogFileExist bool,
) (*DB, []*ColumnFamilyHandle, error) {
numColumnFamilies := len(cfNames)
if numColumnFamilies != len(cfOpts) {
return nil, nil, errors.New("must provide the same number of column family names and options")
}
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
cNames := make([]*C.char, numColumnFamilies)
for i, s := range cfNames {
cNames[i] = C.CString(s)
}
defer func() {
for _, s := range cNames {
C.free(unsafe.Pointer(s))
}
}()
cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
for i, o := range cfOpts {
cOpts[i] = o.c
}
cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)
var cErr *C.char
db := C.rocksdb_open_for_read_only_column_families(
opts.c,
cName,
C.int(numColumnFamilies),
&cNames[0],
&cOpts[0],
&cHandles[0],
boolToChar(errorIfLogFileExist),
&cErr,
)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, nil, errors.New(C.GoString(cErr))
}
cfHandles := make([]*ColumnFamilyHandle, numColumnFamilies)
for i, c := range cHandles {
cfHandles[i] = NewNativeColumnFamilyHandle(c)
}
return &DB{
name: name,
c: db,
opts: opts,
}, cfHandles, nil
}
// ListColumnFamilies lists the names of the column families in the DB.
func ListColumnFamilies(opts *Options, name string) ([]string, error) {
var (
cErr *C.char
cLen C.size_t
cName = C.CString(name)
)
defer C.free(unsafe.Pointer(cName))
cNames := C.rocksdb_list_column_families(opts.c, cName, &cLen, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
namesLen := int(cLen)
names := make([]string, namesLen)
cNamesArr := (*[1 << 30]*C.char)(unsafe.Pointer(cNames))[:namesLen:namesLen]
for i, n := range cNamesArr {
names[i] = C.GoString(n)
}
C.rocksdb_list_column_families_destroy(cNames, cLen)
return names, nil
}
// UnsafeGetDB returns the underlying c rocksdb instance.
func (db *DB) UnsafeGetDB() unsafe.Pointer {
return unsafe.Pointer(db.c)
}
// Name returns the name of the database.
func (db *DB) Name() string {
return db.name
}
// Get returns the data associated with the key from the database.
func (db *DB) Get(opts *ReadOptions, key []byte) (*Slice, error) {
var (
cErr *C.char
cValLen C.size_t
cKey = byteToChar(key)
)
cValue := C.rocksdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
return NewSlice(cValue, cValLen), nil
}
// GetBytes is like Get but returns a copy of the data.
func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) {
var (
cErr *C.char
cValLen C.size_t
cKey = byteToChar(key)
)
cValue := C.rocksdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
if cValue == nil {
return nil, nil
}
defer C.free(unsafe.Pointer(cValue))
return C.GoBytes(unsafe.Pointer(cValue), C.int(cValLen)), nil
}
// GetCF returns the data associated with the key from the database and column family.
func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Slice, error) {
var (
cErr *C.char
cValLen C.size_t
cKey = byteToChar(key)
)
cValue := C.rocksdb_get_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), &cValLen, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
return NewSlice(cValue, cValLen), nil
}
// Put writes data associated with a key to the database.
func (db *DB) Put(opts *WriteOptions, key, value []byte) error {
var (
cErr *C.char
cKey = byteToChar(key)
cValue = byteToChar(value)
)
C.rocksdb_put(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// PutCF writes data associated with a key to the database and column family.
func (db *DB) PutCF(opts *WriteOptions, cf *ColumnFamilyHandle, key, value []byte) error {
var (
cErr *C.char
cKey = byteToChar(key)
cValue = byteToChar(value)
)
C.rocksdb_put_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// Delete removes the data associated with the key from the database.
func (db *DB) Delete(opts *WriteOptions, key []byte) error {
var (
cErr *C.char
cKey = byteToChar(key)
)
C.rocksdb_delete(db.c, opts.c, cKey, C.size_t(len(key)), &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// DeleteCF removes the data associated with the key from the database and column family.
func (db *DB) DeleteCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte) error {
var (
cErr *C.char
cKey = byteToChar(key)
)
C.rocksdb_delete_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// Merge merges the data associated with the key with the actual data in the database.
func (db *DB) Merge(opts *WriteOptions, key []byte, value []byte) error {
var (
cErr *C.char
cKey = byteToChar(key)
cValue = byteToChar(value)
)
C.rocksdb_merge(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// MergeCF merges the data associated with the key with the actual data in the
// database and column family.
func (db *DB) MergeCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte, value []byte) error {
var (
cErr *C.char
cKey = byteToChar(key)
cValue = byteToChar(value)
)
C.rocksdb_merge_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// Write writes a WriteBatch to the database
func (db *DB) Write(opts *WriteOptions, batch *WriteBatch) error {
var cErr *C.char
C.rocksdb_write(db.c, opts.c, batch.c, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// NewIterator returns an Iterator over the database that uses the
// ReadOptions given.
func (db *DB) NewIterator(opts *ReadOptions) *Iterator {
cIter := C.rocksdb_create_iterator(db.c, opts.c)
return NewNativeIterator(unsafe.Pointer(cIter))
}
// NewIteratorCF returns an Iterator over the database and column family
// that uses the ReadOptions given.
func (db *DB) NewIteratorCF(opts *ReadOptions, cf *ColumnFamilyHandle) *Iterator {
cIter := C.rocksdb_create_iterator_cf(db.c, opts.c, cf.c)
return NewNativeIterator(unsafe.Pointer(cIter))
}
// NewSnapshot creates a new snapshot of the database.
func (db *DB) NewSnapshot() *Snapshot {
cSnap := C.rocksdb_create_snapshot(db.c)
return NewNativeSnapshot(cSnap, db.c)
}
// GetProperty returns the value of a database property.
func (db *DB) GetProperty(propName string) string {
cprop := C.CString(propName)
defer C.free(unsafe.Pointer(cprop))
cValue := C.rocksdb_property_value(db.c, cprop)
defer C.free(unsafe.Pointer(cValue))
return C.GoString(cValue)
}
// GetPropertyCF returns the value of a database property.
func (db *DB) GetPropertyCF(propName string, cf *ColumnFamilyHandle) string {
cProp := C.CString(propName)
defer C.free(unsafe.Pointer(cProp))
cValue := C.rocksdb_property_value_cf(db.c, cf.c, cProp)
defer C.free(unsafe.Pointer(cValue))
return C.GoString(cValue)
}
// CreateColumnFamily creates a new column family.
func (db *DB) CreateColumnFamily(opts *Options, name string) (*ColumnFamilyHandle, error) {
var (
cErr *C.char
cName = C.CString(name)
)
defer C.free(unsafe.Pointer(cName))
cHandle := C.rocksdb_create_column_family(db.c, opts.c, cName, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
return NewNativeColumnFamilyHandle(cHandle), nil
}
// DropColumnFamily drops a column family.
func (db *DB) DropColumnFamily(c *ColumnFamilyHandle) error {
var cErr *C.char
C.rocksdb_drop_column_family(db.c, c.c, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// GetApproximateSizes returns the approximate number of bytes of file system
// space used by one or more key ranges.
//
// The keys counted will begin at Range.Start and end on the key before
// Range.Limit.
func (db *DB) GetApproximateSizes(ranges []Range) []uint64 {
sizes := make([]uint64, len(ranges))
if len(ranges) == 0 {
return sizes
}
cStarts := make([]*C.char, len(ranges))
cLimits := make([]*C.char, len(ranges))
cStartLens := make([]C.size_t, len(ranges))
cLimitLens := make([]C.size_t, len(ranges))
for i, r := range ranges {
cStarts[i] = byteToChar(r.Start)
cStartLens[i] = C.size_t(len(r.Start))
cLimits[i] = byteToChar(r.Limit)
cLimitLens[i] = C.size_t(len(r.Limit))
}
C.rocksdb_approximate_sizes(
db.c,
C.int(len(ranges)),
&cStarts[0],
&cStartLens[0],
&cLimits[0],
&cLimitLens[0],
(*C.uint64_t)(&sizes[0]))
return sizes
}
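// A brief usage sketch (the keys below are placeholders):
//
//	sizes := db.GetApproximateSizes([]Range{
//		{Start: []byte("a"), Limit: []byte("m")},
//		{Start: []byte("m"), Limit: []byte("z")},
//	})
//	// sizes[0] and sizes[1] hold the approximate on-disk bytes for each range.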
// GetApproximateSizesCF returns the approximate number of bytes of file system
// space used by one or more key ranges in the column family.
//
// The keys counted will begin at Range.Start and end on the key before
// Range.Limit.
func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []uint64 {
sizes := make([]uint64, len(ranges))
if len(ranges) == 0 {
return sizes
}
cStarts := make([]*C.char, len(ranges))
cLimits := make([]*C.char, len(ranges))
cStartLens := make([]C.size_t, len(ranges))
cLimitLens := make([]C.size_t, len(ranges))
for i, r := range ranges {
cStarts[i] = byteToChar(r.Start)
cStartLens[i] = C.size_t(len(r.Start))
cLimits[i] = byteToChar(r.Limit)
cLimitLens[i] = C.size_t(len(r.Limit))
}
C.rocksdb_approximate_sizes_cf(
db.c,
cf.c,
C.int(len(ranges)),
&cStarts[0],
&cStartLens[0],
&cLimits[0],
&cLimitLens[0],
(*C.uint64_t)(&sizes[0]))
return sizes
}
// LiveFileMetadata is the metadata associated with each SST file.
type LiveFileMetadata struct {
Name string
Level int
Size int64
SmallestKey []byte
LargestKey []byte
}
// GetLiveFilesMetaData returns a list of all table files with their
// level, start key and end key.
func (db *DB) GetLiveFilesMetaData() []LiveFileMetadata {
lf := C.rocksdb_livefiles(db.c)
defer C.rocksdb_livefiles_destroy(lf)
count := C.rocksdb_livefiles_count(lf)
liveFiles := make([]LiveFileMetadata, int(count))
for i := C.int(0); i < count; i++ {
var liveFile LiveFileMetadata
liveFile.Name = C.GoString(C.rocksdb_livefiles_name(lf, i))
liveFile.Level = int(C.rocksdb_livefiles_level(lf, i))
liveFile.Size = int64(C.rocksdb_livefiles_size(lf, i))
var cSize C.size_t
key := C.rocksdb_livefiles_smallestkey(lf, i, &cSize)
liveFile.SmallestKey = C.GoBytes(unsafe.Pointer(key), C.int(cSize))
key = C.rocksdb_livefiles_largestkey(lf, i, &cSize)
liveFile.LargestKey = C.GoBytes(unsafe.Pointer(key), C.int(cSize))
liveFiles[int(i)] = liveFile
}
return liveFiles
}
// CompactRange runs a manual compaction on the Range of keys given. This is
// not likely to be needed for typical usage.
func (db *DB) CompactRange(r Range) {
cStart := byteToChar(r.Start)
cLimit := byteToChar(r.Limit)
C.rocksdb_compact_range(db.c, cStart, C.size_t(len(r.Start)), cLimit, C.size_t(len(r.Limit)))
}
// CompactRangeCF runs a manual compaction on the Range of keys given on the
// given column family. This is not likely to be needed for typical usage.
func (db *DB) CompactRangeCF(cf *ColumnFamilyHandle, r Range) {
cStart := byteToChar(r.Start)
cLimit := byteToChar(r.Limit)
C.rocksdb_compact_range_cf(db.c, cf.c, cStart, C.size_t(len(r.Start)), cLimit, C.size_t(len(r.Limit)))
}
// Flush triggers a manual flush for the database.
func (db *DB) Flush(opts *FlushOptions) error {
var cErr *C.char
C.rocksdb_flush(db.c, opts.c, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// DisableFileDeletions disables file deletions and should be used when backing up the database.
func (db *DB) DisableFileDeletions() error {
var cErr *C.char
C.rocksdb_disable_file_deletions(db.c, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// EnableFileDeletions enables file deletions for the database.
func (db *DB) EnableFileDeletions(force bool) error {
var cErr *C.char
C.rocksdb_enable_file_deletions(db.c, boolToChar(force), &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// DeleteFile deletes the named file from the db directory and updates the internal state to
// reflect that. Supports deletion of sst and log files only. 'name' must be a
// path relative to the db directory, e.g. 000001.sst, /archive/000003.log.
func (db *DB) DeleteFile(name string) {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
C.rocksdb_delete_file(db.c, cName)
}
// Close closes the database.
func (db *DB) Close() {
C.rocksdb_close(db.c)
}
// DestroyDb removes a database entirely, removing everything from the
// filesystem.
func DestroyDb(name string, opts *Options) error {
var (
cErr *C.char
cName = C.CString(name)
)
defer C.free(unsafe.Pointer(cName))
C.rocksdb_destroy_db(opts.c, cName, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// RepairDb repairs a database.
func RepairDb(name string, opts *Options) error {
var (
cErr *C.char
cName = C.CString(name)
)
defer C.free(unsafe.Pointer(cName))
C.rocksdb_repair_db(opts.c, cName, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
/*
Package gorocksdb provides the ability to create and access RocksDB databases.
gorocksdb.OpenDb opens and creates databases.
opts := gorocksdb.NewDefaultOptions()
opts.SetBlockCache(gorocksdb.NewLRUCache(3<<30))
opts.SetCreateIfMissing(true)
db, err := gorocksdb.OpenDb(opts, "/path/to/db")
The DB struct returned by OpenDb provides DB.Get, DB.Put, DB.Merge and DB.Delete to modify
and query the database.
ro := gorocksdb.NewDefaultReadOptions()
wo := gorocksdb.NewDefaultWriteOptions()
// if ro and wo are not used again, be sure to Close them.
err = db.Put(wo, []byte("foo"), []byte("bar"))
...
value, err := db.Get(ro, []byte("foo"))
defer value.Free()
...
err = db.Delete(wo, []byte("foo"))
For bulk reads, use an Iterator. If you want to avoid disturbing your live
traffic while doing the bulk read, be sure to call SetFillCache(false) on the
ReadOptions you use when creating the Iterator.
ro := gorocksdb.NewDefaultReadOptions()
ro.SetFillCache(false)
it := db.NewIterator(ro)
defer it.Close()
it.Seek([]byte("foo"))
for ; it.Valid(); it.Next() {
key := it.Key()
value := it.Value()
fmt.Printf("Key: %v Value: %v\n", key.Data(), value.Data())
key.Free()
value.Free()
}
if err := it.Err(); err != nil {
...
}
Batched, atomic writes can be performed with a WriteBatch and
DB.Write.
wb := gorocksdb.NewWriteBatch()
// defer wb.Close or use wb.Clear and reuse.
wb.Delete([]byte("foo"))
wb.Put([]byte("foo"), []byte("bar"))
wb.Put([]byte("bar"), []byte("foo"))
err := db.Write(wo, wb)
If your working dataset does not fit in memory, you'll want to add a bloom
filter to your database. NewBloomFilter and Options.SetFilterPolicy are what
you want. NewBloomFilter takes the number of bits in the filter to use per key in
your database.
filter := gorocksdb.NewBloomFilter(10)
opts.SetFilterPolicy(filter)
db, err := gorocksdb.OpenDb(opts, "/path/to/db")
If you're using a custom comparator in your code, be aware you may have to
make your own filter policy object.
This documentation is not a complete discussion of RocksDB. Please read the
RocksDB documentation <http://rocksdb.org/> for information on its
operation. You'll find lots of goodies there.
*/
package gorocksdb
// +build !embed
package gorocksdb
// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy
import "C"
// +build embed
package gorocksdb
// #cgo CXXFLAGS: -std=c++11
// #cgo CPPFLAGS: -I${SRCDIR}/../../cockroachdb/c-lz4/internal/lib
// #cgo CPPFLAGS: -I${SRCDIR}/../../cockroachdb/c-rocksdb/internal/include
// #cgo CPPFLAGS: -I${SRCDIR}/../../cockroachdb/c-snappy/internal
// #cgo LDFLAGS: -lstdc++
// #cgo darwin LDFLAGS: -Wl,-undefined -Wl,dynamic_lookup
// #cgo !darwin LDFLAGS: -Wl,-unresolved-symbols=ignore-all -lrt
import "C"
import (
_ "github.com/cockroachdb/c-lz4"
_ "github.com/cockroachdb/c-rocksdb"
_ "github.com/cockroachdb/c-snappy"
)
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// Env is a system call environment used by a database.
type Env struct {
c *C.rocksdb_env_t
}
// NewDefaultEnv creates a default environment.
func NewDefaultEnv() *Env {
return NewNativeEnv(C.rocksdb_create_default_env())
}
// NewNativeEnv creates an Env object.
func NewNativeEnv(c *C.rocksdb_env_t) *Env {
return &Env{c}
}
// SetBackgroundThreads sets the number of background worker threads
// of a specific thread pool for this environment.
// 'LOW' is the default pool.
// Default: 1
func (env *Env) SetBackgroundThreads(n int) {
C.rocksdb_env_set_background_threads(env.c, C.int(n))
}
// SetHighPriorityBackgroundThreads sets the size of the high priority
// thread pool that can be used to prevent compactions from stalling
// memtable flushes.
func (env *Env) SetHighPriorityBackgroundThreads(n int) {
C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n))
}
// Destroy deallocates the Env object.
func (env *Env) Destroy() {
C.rocksdb_env_destroy(env.c)
env.c = nil
}
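// A hedged tuning sketch for the Env API above (how the resulting Env is
// attached to the database Options is not shown in this diff and is assumed
// to happen elsewhere):
//
//	env := NewDefaultEnv()
//	env.SetBackgroundThreads(4)             // 'LOW' pool, used e.g. for compactions
//	env.SetHighPriorityBackgroundThreads(2) // 'HIGH' pool, helps keep flushes from stalling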
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// FilterPolicy is a factory type that allows the RocksDB database to create a
// filter, such as a bloom filter, which will be used to reduce reads.
type FilterPolicy interface {
// keys contains a list of keys (potentially with duplicates)
// that are ordered according to the user supplied comparator.
CreateFilter(keys [][]byte) []byte
// "filter" contains the data appended by a preceding call to
// CreateFilter(). This method must return true if
// the key was in the list of keys passed to CreateFilter().
// This method may return true or false if the key was not on the
// list, but it should aim to return false with a high probability.
KeyMayMatch(key []byte, filter []byte) bool
// Return the name of this policy.
Name() string
}
// NewNativeFilterPolicy creates a FilterPolicy object.
func NewNativeFilterPolicy(c *C.rocksdb_filterpolicy_t) FilterPolicy {
return nativeFilterPolicy{c}
}
type nativeFilterPolicy struct {
c *C.rocksdb_filterpolicy_t
}
func (fp nativeFilterPolicy) CreateFilter(keys [][]byte) []byte { return nil }
func (fp nativeFilterPolicy) KeyMayMatch(key []byte, filter []byte) bool { return false }
func (fp nativeFilterPolicy) Name() string { return "" }
// NewBloomFilter returns a new filter policy that uses a bloom filter with approximately
// the specified number of bits per key. A good value for bits_per_key
// is 10, which yields a filter with ~1% false positive rate.
//
// Note: if you are using a custom comparator that ignores some parts
// of the keys being compared, you must not use NewBloomFilterPolicy()
// and must provide your own FilterPolicy that also ignores the
// corresponding parts of the keys. For example, if the comparator
// ignores trailing spaces, it would be incorrect to use a
// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
// trailing spaces in keys.
func NewBloomFilter(bitsPerKey int) FilterPolicy {
return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey)))
}
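// A hedged usage sketch: attaching a bloom filter to block-based table options
// (SetFilterPolicy on BlockBasedTableOptions is defined elsewhere in this
// package):
//
//	bbto := NewDefaultBlockBasedTableOptions()
//	bbto.SetFilterPolicy(NewBloomFilter(10)) // ~10 bits per key, roughly 1% false positives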
// Hold references to filter policies.
var filterPolicies []FilterPolicy
func registerFilterPolicy(fp FilterPolicy) int {
filterPolicies = append(filterPolicies, fp)
return len(filterPolicies) - 1
}
//export gorocksdb_filterpolicy_create_filter
func gorocksdb_filterpolicy_create_filter(idx int, cKeys **C.char, cKeysLen *C.size_t, cNumKeys C.int, cDstLen *C.size_t) *C.char {
rawKeys := charSlice(cKeys, cNumKeys)
keysLen := sizeSlice(cKeysLen, cNumKeys)
keys := make([][]byte, int(cNumKeys))
for i, len := range keysLen {
keys[i] = charToByte(rawKeys[i], len)
}
dst := filterPolicies[idx].CreateFilter(keys)
*cDstLen = C.size_t(len(dst))
return cByteSlice(dst)
}
//export gorocksdb_filterpolicy_key_may_match
func gorocksdb_filterpolicy_key_may_match(idx int, cKey *C.char, cKeyLen C.size_t, cFilter *C.char, cFilterLen C.size_t) C.uchar {
key := charToByte(cKey, cKeyLen)
filter := charToByte(cFilter, cFilterLen)
return boolToChar(filterPolicies[idx].KeyMayMatch(key, filter))
}
//export gorocksdb_filterpolicy_name
func gorocksdb_filterpolicy_name(idx int) *C.char {
return stringToChar(filterPolicies[idx].Name())
}
#include "gorocksdb.h"
#include "_cgo_export.h"
/* Base */
void gorocksdb_destruct_handler(void* state) { }
/* Comparator */
rocksdb_comparator_t* gorocksdb_comparator_create(uintptr_t idx) {
return rocksdb_comparator_create(
(void*)idx,
gorocksdb_destruct_handler,
(int (*)(void*, const char*, size_t, const char*, size_t))(gorocksdb_comparator_compare),
(const char *(*)(void*))(gorocksdb_comparator_name));
}
/* CompactionFilter */
rocksdb_compactionfilter_t* gorocksdb_compactionfilter_create(uintptr_t idx) {
return rocksdb_compactionfilter_create(
(void*)idx,
gorocksdb_destruct_handler,
(unsigned char (*)(void*, int, const char*, size_t, const char*, size_t, char**, size_t*, unsigned char*))(gorocksdb_compactionfilter_filter),
(const char *(*)(void*))(gorocksdb_compactionfilter_name));
}
/* Filter Policy */
rocksdb_filterpolicy_t* gorocksdb_filterpolicy_create(uintptr_t idx) {
return rocksdb_filterpolicy_create(
(void*)idx,
gorocksdb_destruct_handler,
(char* (*)(void*, const char* const*, const size_t*, int, size_t*))(gorocksdb_filterpolicy_create_filter),
(unsigned char (*)(void*, const char*, size_t, const char*, size_t))(gorocksdb_filterpolicy_key_may_match),
gorocksdb_filterpolicy_delete_filter,
(const char *(*)(void*))(gorocksdb_filterpolicy_name));
}
void gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s) { }
/* Merge Operator */
rocksdb_mergeoperator_t* gorocksdb_mergeoperator_create(uintptr_t idx) {
return rocksdb_mergeoperator_create(
(void*)idx,
gorocksdb_destruct_handler,
(char* (*)(void*, const char*, size_t, const char*, size_t, const char* const*, const size_t*, int, unsigned char*, size_t*))(gorocksdb_mergeoperator_full_merge),
(char* (*)(void*, const char*, size_t, const char* const*, const size_t*, int, unsigned char*, size_t*))(gorocksdb_mergeoperator_partial_merge_multi),
gorocksdb_mergeoperator_delete_value,
(const char* (*)(void*))(gorocksdb_mergeoperator_name));
}
void gorocksdb_mergeoperator_delete_value(void* id, const char* v, size_t s) { }
/* Slice Transform */
rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx) {
return rocksdb_slicetransform_create(
(void*)idx,
gorocksdb_destruct_handler,
(char* (*)(void*, const char*, size_t, size_t*))(gorocksdb_slicetransform_transform),
(unsigned char (*)(void*, const char*, size_t))(gorocksdb_slicetransform_in_domain),
(unsigned char (*)(void*, const char*, size_t))(gorocksdb_slicetransform_in_range),
(const char* (*)(void*))(gorocksdb_slicetransform_name));
}
#include <stdlib.h>
#include "rocksdb/c.h"
// This API provides convenient C wrapper functions for rocksdb client.
/* Base */
extern void gorocksdb_destruct_handler(void* state);
/* CompactionFilter */
extern rocksdb_compactionfilter_t* gorocksdb_compactionfilter_create(uintptr_t idx);
/* Comparator */
extern rocksdb_comparator_t* gorocksdb_comparator_create(uintptr_t idx);
/* Filter Policy */
extern rocksdb_filterpolicy_t* gorocksdb_filterpolicy_create(uintptr_t idx);
extern void gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s);
/* Merge Operator */
extern rocksdb_mergeoperator_t* gorocksdb_mergeoperator_create(uintptr_t idx);
extern void gorocksdb_mergeoperator_delete_value(void* state, const char* v, size_t s);
/* Slice Transform */
extern rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx);
package gorocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import (
"bytes"
"errors"
"unsafe"
)
// Iterator provides a way to seek to specific keys and iterate through
// the keyspace from that point, as well as access the values of those keys.
//
// For example:
//
// it := db.NewIterator(readOpts)
// defer it.Close()
//
// it.Seek([]byte("foo"))
// for ; it.Valid(); it.Next() {
// fmt.Printf("Key: %v Value: %v\n", it.Key().Data(), it.Value().Data())
// }
//
// if err := it.Err(); err != nil {
// return err
// }
//
type Iterator struct {
c *C.rocksdb_iterator_t
}
// NewNativeIterator creates an Iterator object.
func NewNativeIterator(c unsafe.Pointer) *Iterator {
return &Iterator{(*C.rocksdb_iterator_t)(c)}
}
// Valid returns false only when an Iterator has iterated past either the
// first or the last key in the database.
func (iter *Iterator) Valid() bool {
return C.rocksdb_iter_valid(iter.c) != 0
}
// ValidForPrefix returns false only when an Iterator has iterated past the
// first or the last key in the database or the specified prefix.
func (iter *Iterator) ValidForPrefix(prefix []byte) bool {
return C.rocksdb_iter_valid(iter.c) != 0 && bytes.HasPrefix(iter.Key().Data(), prefix)
}
// Key returns the key the iterator currently holds.
func (iter *Iterator) Key() *Slice {
var cLen C.size_t
cKey := C.rocksdb_iter_key(iter.c, &cLen)
if cKey == nil {
return nil
}
return &Slice{cKey, cLen, true}
}
// Value returns the value in the database the iterator currently holds.
func (iter *Iterator) Value() *Slice {
var cLen C.size_t
cVal := C.rocksdb_iter_value(iter.c, &cLen)
if cVal == nil {
return nil
}
return &Slice{cVal, cLen, true}
}
// Next moves the iterator to the next sequential key in the database.
func (iter *Iterator) Next() {
C.rocksdb_iter_next(iter.c)
}
// Prev moves the iterator to the previous sequential key in the database.
func (iter *Iterator) Prev() {
C.rocksdb_iter_prev(iter.c)
}
// SeekToFirst moves the iterator to the first key in the database.
func (iter *Iterator) SeekToFirst() {
C.rocksdb_iter_seek_to_first(iter.c)
}
// SeekToLast moves the iterator to the last key in the database.
func (iter *Iterator) SeekToLast() {
C.rocksdb_iter_seek_to_last(iter.c)
}
// Seek moves the iterator to the position greater than or equal to the key.
func (iter *Iterator) Seek(key []byte) {
cKey := byteToChar(key)
C.rocksdb_iter_seek(iter.c, cKey, C.size_t(len(key)))
}
// Err returns nil if no errors happened during iteration, or the actual
// error otherwise.
func (iter *Iterator) Err() error {
var cErr *C.char
C.rocksdb_iter_get_error(iter.c, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// Close closes the iterator.
func (iter *Iterator) Close() {
C.rocksdb_iter_destroy(iter.c)
iter.c = nil
}
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// A MergeOperator specifies the SEMANTICS of a merge, which only the
// client knows. It could be numeric addition, list append, string
// concatenation, edit data structure, ... , anything.
// The library, on the other hand, is concerned with the exercise of this
// interface, at the right time (during get, iteration, compaction...)
//
// Please read the RocksDB documentation <http://rocksdb.org/> for
// more details and example implementations.
type MergeOperator interface {
// Gives the client a way to express the read -> modify -> write semantics
// key: The key that's associated with this merge operation.
// Client could multiplex the merge operator based on it
// if the key space is partitioned and different subspaces
// refer to different types of data which have different
// merge operation semantics.
// existingValue: null indicates that the key does not exist before this op.
// operands: the sequence of merge operations to apply, front() first.
//
// Return true on success.
//
// All values passed in will be client-specific values. So if this method
// returns false, it is because client specified bad data or there was
// internal corruption. This will be treated as an error by the library.
FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool)
// This function performs merge(left_op, right_op)
// when both the operands are themselves merge operation types
// that you would have passed to a db.Merge() call in the same order
// (i.e.: db.Merge(key,left_op), followed by db.Merge(key,right_op)).
//
// PartialMerge should combine them into a single merge operation.
// The return value should be constructed such that a call to
// db.Merge(key, new_value) would yield the same result as a call
// to db.Merge(key, left_op) followed by db.Merge(key, right_op).
//
// If it is impossible or infeasible to combine the two operations, return false.
// The library will internally keep track of the operations, and apply them in the
// correct order once a base-value (a Put/Delete/End-of-Database) is seen.
PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool)
// The name of the MergeOperator.
Name() string
}
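// An illustrative, hypothetical MergeOperator implementation (not part of this
// package) that treats values as comma-separated lists and appends operands:
//
//	type appendMerger struct{}
//
//	func (appendMerger) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) {
//		result := append([]byte(nil), existingValue...)
//		for _, op := range operands {
//			if len(result) > 0 {
//				result = append(result, ',')
//			}
//			result = append(result, op...)
//		}
//		return result, true
//	}
//
//	func (appendMerger) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) {
//		merged := append([]byte(nil), leftOperand...)
//		merged = append(merged, ',')
//		return append(merged, rightOperand...), true
//	}
//
//	func (appendMerger) Name() string { return "append-merger" }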
// NewNativeMergeOperator creates a MergeOperator object.
func NewNativeMergeOperator(c *C.rocksdb_mergeoperator_t) MergeOperator {
return nativeMergeOperator{c}
}
type nativeMergeOperator struct {
c *C.rocksdb_mergeoperator_t
}
func (mo nativeMergeOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) {
return nil, false
}
func (mo nativeMergeOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) {
return nil, false
}
func (mo nativeMergeOperator) Name() string { return "" }
// Hold references to merge operators.
var mergeOperators []MergeOperator
func registerMergeOperator(merger MergeOperator) int {
mergeOperators = append(mergeOperators, merger)
return len(mergeOperators) - 1
}
//export gorocksdb_mergeoperator_full_merge
func gorocksdb_mergeoperator_full_merge(idx int, cKey *C.char, cKeyLen C.size_t, cExistingValue *C.char, cExistingValueLen C.size_t, cOperands **C.char, cOperandsLen *C.size_t, cNumOperands C.int, cSuccess *C.uchar, cNewValueLen *C.size_t) *C.char {
key := charToByte(cKey, cKeyLen)
rawOperands := charSlice(cOperands, cNumOperands)
operandsLen := sizeSlice(cOperandsLen, cNumOperands)
existingValue := charToByte(cExistingValue, cExistingValueLen)
operands := make([][]byte, int(cNumOperands))
for i, len := range operandsLen {
operands[i] = charToByte(rawOperands[i], len)
}
newValue, success := mergeOperators[idx].FullMerge(key, existingValue, operands)
newValueLen := len(newValue)
*cNewValueLen = C.size_t(newValueLen)
*cSuccess = boolToChar(success)
return cByteSlice(newValue)
}
//export gorocksdb_mergeoperator_partial_merge_multi
func gorocksdb_mergeoperator_partial_merge_multi(idx int, cKey *C.char, cKeyLen C.size_t, cOperands **C.char, cOperandsLen *C.size_t, cNumOperands C.int, cSuccess *C.uchar, cNewValueLen *C.size_t) *C.char {
key := charToByte(cKey, cKeyLen)
rawOperands := charSlice(cOperands, cNumOperands)
operandsLen := sizeSlice(cOperandsLen, cNumOperands)
operands := make([][]byte, int(cNumOperands))
for i, len := range operandsLen {
operands[i] = charToByte(rawOperands[i], len)
}
var newValue []byte
success := true
merger := mergeOperators[idx]
leftOperand := operands[0]
for i := 1; i < int(cNumOperands); i++ {
newValue, success = merger.PartialMerge(key, leftOperand, operands[i])
if !success {
break
}
leftOperand = newValue
}
newValueLen := len(newValue)
*cNewValueLen = C.size_t(newValueLen)
*cSuccess = boolToChar(success)
return cByteSlice(newValue)
}
//export gorocksdb_mergeoperator_name
func gorocksdb_mergeoperator_name(idx int) *C.char {
return stringToChar(mergeOperators[idx].Name())
}
This diff is collapsed.
package gorocksdb
// #include "rocksdb/c.h"
// #include "gorocksdb.h"
import "C"
// BlockBasedTableOptions represents block-based table options.
type BlockBasedTableOptions struct {
c *C.rocksdb_block_based_table_options_t
// Hold references for GC.
cache *Cache
compCache *Cache
// We keep these so we can free their memory in Destroy.
cFp *C.rocksdb_filterpolicy_t
}
// NewDefaultBlockBasedTableOptions creates a default BlockBasedTableOptions object.
func NewDefaultBlockBasedTableOptions() *BlockBasedTableOptions {
return NewNativeBlockBasedTableOptions(C.rocksdb_block_based_options_create())
}
// NewNativeBlockBasedTableOptions creates a BlockBasedTableOptions object.
func NewNativeBlockBasedTableOptions(c *C.rocksdb_block_based_table_options_t) *BlockBasedTableOptions {
return &BlockBasedTableOptions{c: c}
}
// Destroy deallocates the BlockBasedTableOptions object.
func (opts *BlockBasedTableOptions) Destroy() {
C.rocksdb_block_based_options_destroy(opts.c)
opts.c = nil
opts.cache = nil
opts.compCache = nil
}
// SetBlockSize sets the approximate size of user data packed per block.
// Note that the block size specified here corresponds to uncompressed data.
// The actual size of the unit read from disk may be smaller if
// compression is enabled. This parameter can be changed dynamically.
// Default: 4K
func (opts *BlockBasedTableOptions) SetBlockSize(blockSize int) {
C.rocksdb_block_based_options_set_block_size(opts.c, C.size_t(blockSize))
}
// SetBlockSizeDeviation sets the block size deviation.
// This is used to close a block before it reaches the configured
// 'block_size'. If the percentage of free space in the current block is less
// than this specified number and adding a new record to the block will
// exceed the configured block size, then this block will be closed and the
// new record will be written to the next block.
// Default: 10
func (opts *BlockBasedTableOptions) SetBlockSizeDeviation(blockSizeDeviation int) {
C.rocksdb_block_based_options_set_block_size_deviation(opts.c, C.int(blockSizeDeviation))
}
// SetBlockRestartInterval sets the number of keys between
// restart points for delta encoding of keys.
// This parameter can be changed dynamically. Most clients should
// leave this parameter alone.
// Default: 16
func (opts *BlockBasedTableOptions) SetBlockRestartInterval(blockRestartInterval int) {
C.rocksdb_block_based_options_set_block_restart_interval(opts.c, C.int(blockRestartInterval))
}
// SetFilterPolicy sets the filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
// Default: nil
func (opts *BlockBasedTableOptions) SetFilterPolicy(fp FilterPolicy) {
if nfp, ok := fp.(nativeFilterPolicy); ok {
opts.cFp = nfp.c
} else {
idx := registerFilterPolicy(fp)
opts.cFp = C.gorocksdb_filterpolicy_create(C.uintptr_t(idx))
}
C.rocksdb_block_based_options_set_filter_policy(opts.c, opts.cFp)
}
// SetNoBlockCache specifies whether the block cache should be used or not.
// Default: false
func (opts *BlockBasedTableOptions) SetNoBlockCache(value bool) {
C.rocksdb_block_based_options_set_no_block_cache(opts.c, boolToChar(value))
}
// SetBlockCache sets the control over blocks (user data is stored in a set of blocks, and
// a block is the unit of reading from disk).
//
// If set, use the specified cache for blocks.
// If nil, rocksdb will automatically create and use an 8MB internal cache.
// Default: nil
func (opts *BlockBasedTableOptions) SetBlockCache(cache *Cache) {
opts.cache = cache
C.rocksdb_block_based_options_set_block_cache(opts.c, cache.c)
}
// SetBlockCacheCompressed sets the cache for compressed blocks.
// If nil, rocksdb will not use a compressed block cache.
// Default: nil
func (opts *BlockBasedTableOptions) SetBlockCacheCompressed(cache *Cache) {
opts.compCache = cache
C.rocksdb_block_based_options_set_block_cache_compressed(opts.c, cache.c)
}
// SetWholeKeyFiltering specifies whether whole keys (not just prefixes)
// should be placed in the filter.
// This must generally be true for gets to be efficient.
// Default: true
func (opts *BlockBasedTableOptions) SetWholeKeyFiltering(value bool) {
C.rocksdb_block_based_options_set_whole_key_filtering(opts.c, boolToChar(value))
}
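// A hedged configuration sketch for the options above (how the resulting
// BlockBasedTableOptions value is attached to the database Options is not
// shown in this diff and is assumed to happen elsewhere):
//
//	bbto := NewDefaultBlockBasedTableOptions()
//	bbto.SetBlockSize(16 * 1024)               // 16 KiB data blocks
//	bbto.SetBlockCache(NewLRUCache(256 << 20)) // 256 MiB block cache
//	bbto.SetFilterPolicy(NewBloomFilter(10))   // bloom filter, ~10 bits per key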
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// UniversalCompactionStopStyle describes an algorithm used to make a
// compaction request stop picking new files into a single compaction run.
type UniversalCompactionStopStyle uint
// Compaction stop style types.
const (
CompactionStopStyleSimilarSize = UniversalCompactionStopStyle(C.rocksdb_similar_size_compaction_stop_style)
CompactionStopStyleTotalSize = UniversalCompactionStopStyle(C.rocksdb_total_size_compaction_stop_style)
)
// FIFOCompactionOptions represent all of the available options for
// FIFO compaction.
type FIFOCompactionOptions struct {
c *C.rocksdb_fifo_compaction_options_t
}
// NewDefaultFIFOCompactionOptions creates a default FIFOCompactionOptions object.
func NewDefaultFIFOCompactionOptions() *FIFOCompactionOptions {
return NewNativeFIFOCompactionOptions(C.rocksdb_fifo_compaction_options_create())
}
// NewNativeFIFOCompactionOptions creates a native FIFOCompactionOptions object.
func NewNativeFIFOCompactionOptions(c *C.rocksdb_fifo_compaction_options_t) *FIFOCompactionOptions {
return &FIFOCompactionOptions{c}
}
// SetMaxTableFilesSize sets the max table file size.
// Once the total sum of table files reaches this, we will delete the oldest
// table file.
// Default: 1GB
func (opts *FIFOCompactionOptions) SetMaxTableFilesSize(value uint64) {
C.rocksdb_fifo_compaction_options_set_max_table_files_size(opts.c, C.uint64_t(value))
}
// Destroy deallocates the FIFOCompactionOptions object.
func (opts *FIFOCompactionOptions) Destroy() {
C.rocksdb_fifo_compaction_options_destroy(opts.c)
}
// UniversalCompactionOptions represent all of the available options for
// universal compaction.
type UniversalCompactionOptions struct {
c *C.rocksdb_universal_compaction_options_t
}
// NewDefaultUniversalCompactionOptions creates a default UniversalCompactionOptions
// object.
func NewDefaultUniversalCompactionOptions() *UniversalCompactionOptions {
return NewNativeUniversalCompactionOptions(C.rocksdb_universal_compaction_options_create())
}
// NewNativeUniversalCompactionOptions creates a UniversalCompactionOptions
// object.
func NewNativeUniversalCompactionOptions(c *C.rocksdb_universal_compaction_options_t) *UniversalCompactionOptions {
return &UniversalCompactionOptions{c}
}
// SetSizeRatio sets the percentage flexibility while comparing file size.
// If the candidate file(s) size is 1% smaller than the next file's size,
// then include the next file into this candidate set.
// Default: 1
func (opts *UniversalCompactionOptions) SetSizeRatio(value uint) {
C.rocksdb_universal_compaction_options_set_size_ratio(opts.c, C.int(value))
}
// SetMinMergeWidth sets the minimum number of files in a single compaction run.
// Default: 2
func (opts *UniversalCompactionOptions) SetMinMergeWidth(value uint) {
C.rocksdb_universal_compaction_options_set_min_merge_width(opts.c, C.int(value))
}
// SetMaxMergeWidth sets the maximum number of files in a single compaction run.
// Default: UINT_MAX
func (opts *UniversalCompactionOptions) SetMaxMergeWidth(value uint) {
C.rocksdb_universal_compaction_options_set_max_merge_width(opts.c, C.int(value))
}
// SetMaxSizeAmplificationPercent sets the size amplification.
// It is defined as the amount (in percentage) of
// additional storage needed to store a single byte of data in the database.
// For example, a size amplification of 2% means that a database that
// contains 100 bytes of user-data may occupy up to 102 bytes of
// physical storage. By this definition, a fully compacted database has
// a size amplification of 0%. Rocksdb uses the following heuristic
// to calculate size amplification: it assumes that all files excluding
// the earliest file contribute to the size amplification.
// Default: 200, which means that a 100 byte database could require up to
// 300 bytes of storage.
func (opts *UniversalCompactionOptions) SetMaxSizeAmplificationPercent(value uint) {
C.rocksdb_universal_compaction_options_set_max_size_amplification_percent(opts.c, C.int(value))
}
// SetCompressionSizePercent sets the percentage of compression size.
//
// If this option is set to be -1, all the output files
// will follow the compression type specified.
//
// If this option is not negative, we will try to make sure compressed
// size is just above this value. In normal cases, at least this percentage
// of data will be compressed.
// When we are compacting to a new file, here is the criterion for whether
// it needs to be compressed: assume the list of files sorted
// by generation time is
// A1...An B1...Bm C1...Ct
// where A1 is the newest and Ct is the oldest, and we are going to compact
// B1...Bm, we calculate the total size of all the files as total_size, as
// well as the total size of C1...Ct as total_C, the compaction output file
// will be compressed iff
// total_C / total_size < this percentage
// Default: -1
func (opts *UniversalCompactionOptions) SetCompressionSizePercent(value int) {
C.rocksdb_universal_compaction_options_set_compression_size_percent(opts.c, C.int(value))
}
// SetStopStyle sets the algorithm used to stop picking files into a single compaction run.
// Default: CompactionStopStyleTotalSize
func (opts *UniversalCompactionOptions) SetStopStyle(value UniversalCompactionStopStyle) {
C.rocksdb_universal_compaction_options_set_stop_style(opts.c, C.int(value))
}
// Destroy deallocates the UniversalCompactionOptions object.
func (opts *UniversalCompactionOptions) Destroy() {
C.rocksdb_universal_compaction_options_destroy(opts.c)
opts.c = nil
}