diff --git a/vendor/github.com/tecbot/gorocksdb/LICENSE b/vendor/github.com/tecbot/gorocksdb/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..dcd9e57fea3286770057b434fca6c627ca6ba108
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2016 Thomas Adam
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/tecbot/gorocksdb/README.md b/vendor/github.com/tecbot/gorocksdb/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f308bc6c8427ca36c0c61ddb7638eebc6d368a28
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/README.md
@@ -0,0 +1,20 @@
+# gorocksdb, a Go wrapper for RocksDB
+
+[![Build Status](https://travis-ci.org/tecbot/gorocksdb.png)](https://travis-ci.org/tecbot/gorocksdb) [![GoDoc](https://godoc.org/github.com/tecbot/gorocksdb?status.png)](http://godoc.org/github.com/tecbot/gorocksdb)
+
+## Install
+
+There are two ways to install gorocksdb.
+You can either link against your own shared library or use the embedded RocksDB version from [CockroachDB](https://github.com/cockroachdb/c-rocksdb).
+
+To install the embedded version (it might take a while):
+
+    go get -tags=embed github.com/tecbot/gorocksdb
+
+If you want to use the shared library instead, you'll need to build
+[RocksDB](https://github.com/facebook/rocksdb) on your machine first.
+Once RocksDB is built, you can install gorocksdb:
+
+    CGO_CFLAGS="-I/path/to/rocksdb/include" \
+    CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" \
+      go get github.com/tecbot/gorocksdb
\ No newline at end of file
diff --git a/vendor/github.com/tecbot/gorocksdb/backup.go b/vendor/github.com/tecbot/gorocksdb/backup.go
new file mode 100644
index 0000000000000000000000000000000000000000..a6673ff83f5fc8b9aebcce245974d4a6410a6baf
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/backup.go
@@ -0,0 +1,152 @@
+package gorocksdb
+
+// #include <stdlib.h>
+// #include "rocksdb/c.h"
+import "C"
+import (
+	"errors"
+	"unsafe"
+)
+
+// BackupEngineInfo represents the information about the backups
+// in a backup engine instance. Use this to inspect the state of the
+// backups, such as their count, ids, and timestamps.
+type BackupEngineInfo struct {
+	c *C.rocksdb_backup_engine_info_t
+}
+
+// GetCount gets the number of backups available.
+func (b *BackupEngineInfo) GetCount() int {
+	return int(C.rocksdb_backup_engine_info_count(b.c))
+}
+
+// GetTimestamp gets the timestamp at which the backup at the given index was taken.
+func (b *BackupEngineInfo) GetTimestamp(index int) int64 {
+	return int64(C.rocksdb_backup_engine_info_timestamp(b.c, C.int(index)))
+}
+
+// GetBackupId gets an id that uniquely identifies a backup
+// regardless of its position.
+func (b *BackupEngineInfo) GetBackupId(index int) int64 {
+	return int64(C.rocksdb_backup_engine_info_backup_id(b.c, C.int(index)))
+}
+
+// GetSize gets the size of the backup at the given index in bytes.
+func (b *BackupEngineInfo) GetSize(index int) int64 {
+	return int64(C.rocksdb_backup_engine_info_size(b.c, C.int(index)))
+}
+
+// GetNumFiles gets the number of files in the backup at the given index.
+func (b *BackupEngineInfo) GetNumFiles(index int) int32 {
+	return int32(C.rocksdb_backup_engine_info_number_files(b.c, C.int(index)))
+}
+
+// Destroy destroys the backup engine info instance.
+func (b *BackupEngineInfo) Destroy() {
+	C.rocksdb_backup_engine_info_destroy(b.c)
+	b.c = nil
+}
+
+// RestoreOptions captures the options to be used during
+// restoration of a backup.
+type RestoreOptions struct {
+	c *C.rocksdb_restore_options_t
+}
+
+// NewRestoreOptions creates a RestoreOptions instance.
+func NewRestoreOptions() *RestoreOptions {
+	return &RestoreOptions{
+		c: C.rocksdb_restore_options_create(),
+	}
+}
+
+// SetKeepLogFiles is used to set or unset the keep_log_files option.
+// If non-zero, restore won't overwrite the existing log files in wal_dir. It
+// will also move all log files from the archive directory to wal_dir.
+// By default, this is 0 (false).
+func (ro *RestoreOptions) SetKeepLogFiles(v int) {
+	C.rocksdb_restore_options_set_keep_log_files(ro.c, C.int(v))
+}
+
+// Destroy destroys this RestoreOptions instance.
+func (ro *RestoreOptions) Destroy() {
+	C.rocksdb_restore_options_destroy(ro.c)
+}
+
+// BackupEngine is a reusable handle to a RocksDB Backup, created by
+// OpenBackupEngine.
+type BackupEngine struct {
+	c    *C.rocksdb_backup_engine_t
+	path string
+	opts *Options
+}
+
+// OpenBackupEngine opens a backup engine with specified options.
+func OpenBackupEngine(opts *Options, path string) (*BackupEngine, error) {
+	var cErr *C.char
+	cpath := C.CString(path)
+	defer C.free(unsafe.Pointer(cpath))
+
+	be := C.rocksdb_backup_engine_open(opts.c, cpath, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	return &BackupEngine{
+		c:    be,
+		path: path,
+		opts: opts,
+	}, nil
+}
+
+// UnsafeGetBackupEngine returns the underlying c backup engine.
+func (b *BackupEngine) UnsafeGetBackupEngine() unsafe.Pointer {
+	return unsafe.Pointer(b.c)
+}
+
+// CreateNewBackup takes a new backup from db.
+func (b *BackupEngine) CreateNewBackup(db *DB) error {
+	var cErr *C.char
+
+	C.rocksdb_backup_engine_create_new_backup(b.c, db.c, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+
+	return nil
+}
+
+// GetInfo gets an object that gives information about
+// the backups that have already been taken.
+func (b *BackupEngine) GetInfo() *BackupEngineInfo {
+	return &BackupEngineInfo{
+		c: C.rocksdb_backup_engine_get_backup_info(b.c),
+	}
+}
+
+// RestoreDBFromLatestBackup restores the latest backup to dbDir. walDir
+// is where the write ahead logs are restored to and usually the same as dbDir.
+func (b *BackupEngine) RestoreDBFromLatestBackup(dbDir, walDir string, ro *RestoreOptions) error {
+	var cErr *C.char
+	cDbDir := C.CString(dbDir)
+	cWalDir := C.CString(walDir)
+	defer func() {
+		C.free(unsafe.Pointer(cDbDir))
+		C.free(unsafe.Pointer(cWalDir))
+	}()
+
+	C.rocksdb_backup_engine_restore_db_from_latest_backup(b.c, cDbDir, cWalDir, ro.c, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// Close closes the backup engine and cleans up state.
+// The backups already taken remain on storage.
+func (b *BackupEngine) Close() {
+	C.rocksdb_backup_engine_close(b.c)
+	b.c = nil
+}
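
Taken together, the calls above cover the whole backup round trip. The sketch below is illustrative only (the helper name, placeholder paths, and error handling are not part of the vendored code); it assumes a *DB opened elsewhere with OpenDb:

    // backupAndRestore takes a fresh backup of db into backupDir, inspects the
    // engine state, and then restores the latest backup into dbDir (using the
    // same directory for the WAL). Illustrative sketch only.
    func backupAndRestore(db *gorocksdb.DB, backupDir, dbDir string) error {
        be, err := gorocksdb.OpenBackupEngine(gorocksdb.NewDefaultOptions(), backupDir)
        if err != nil {
            return err
        }
        defer be.Close()

        if err := be.CreateNewBackup(db); err != nil {
            return err
        }

        info := be.GetInfo()
        count := info.GetCount() // number of backups now stored in backupDir
        info.Destroy()
        _ = count

        ro := gorocksdb.NewRestoreOptions()
        defer ro.Destroy()
        return be.RestoreDBFromLatestBackup(dbDir, dbDir, ro)
    }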
diff --git a/vendor/github.com/tecbot/gorocksdb/cache.go b/vendor/github.com/tecbot/gorocksdb/cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..cd9041736a5798ba3d52cf424062a7f97d2c0c45
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/cache.go
@@ -0,0 +1,25 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// Cache is a cache used to store data read from the database in memory.
+type Cache struct {
+	c *C.rocksdb_cache_t
+}
+
+// NewLRUCache creates a new LRU Cache object with the capacity given.
+func NewLRUCache(capacity int) *Cache {
+	return NewNativeCache(C.rocksdb_cache_create_lru(C.size_t(capacity)))
+}
+
+// NewNativeCache creates a Cache object.
+func NewNativeCache(c *C.rocksdb_cache_t) *Cache {
+	return &Cache{c}
+}
+
+// Destroy deallocates the Cache object.
+func (c *Cache) Destroy() {
+	C.rocksdb_cache_destroy(c.c)
+	c.c = nil
+}
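
As a point of reference, the cache is normally created once, attached to the options used to open the database, and destroyed only after the database is closed. A minimal sketch; SetBlockCache is the options setter used in doc.go further down and is assumed to live in the options code outside this hunk:

    // newCachedOptions returns Options that use a 128 MB LRU block cache.
    // The returned cache must outlive the DB opened with these options.
    func newCachedOptions() (*gorocksdb.Options, *gorocksdb.Cache) {
        cache := gorocksdb.NewLRUCache(128 << 20)
        opts := gorocksdb.NewDefaultOptions()
        opts.SetCreateIfMissing(true)
        opts.SetBlockCache(cache)
        return opts, cache
    }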
diff --git a/vendor/github.com/tecbot/gorocksdb/cf_handle.go b/vendor/github.com/tecbot/gorocksdb/cf_handle.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe8106c8d2559cc6e6d7478637a63b88294f23a1
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/cf_handle.go
@@ -0,0 +1,26 @@
+package gorocksdb
+
+// #include <stdlib.h>
+// #include "rocksdb/c.h"
+import "C"
+import "unsafe"
+
+// ColumnFamilyHandle represents a handle to a ColumnFamily.
+type ColumnFamilyHandle struct {
+	c *C.rocksdb_column_family_handle_t
+}
+
+// NewNativeColumnFamilyHandle creates a ColumnFamilyHandle object.
+func NewNativeColumnFamilyHandle(c *C.rocksdb_column_family_handle_t) *ColumnFamilyHandle {
+	return &ColumnFamilyHandle{c}
+}
+
+// UnsafeGetCFHandler returns the underlying c column family handle.
+func (h *ColumnFamilyHandle) UnsafeGetCFHandler() unsafe.Pointer {
+	return unsafe.Pointer(h.c)
+}
+
+// Destroy calls the destructor of the underlying column family handle.
+func (h *ColumnFamilyHandle) Destroy() {
+	C.rocksdb_column_family_handle_destroy(h.c)
+}
diff --git a/vendor/github.com/tecbot/gorocksdb/compaction_filter.go b/vendor/github.com/tecbot/gorocksdb/compaction_filter.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce23db5c806a2c298f467a04013e3c6394bb3f30
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/compaction_filter.go
@@ -0,0 +1,69 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// A CompactionFilter can be used to filter keys during compaction time.
+type CompactionFilter interface {
+	// If the Filter function returns false, it indicates
+	// that the kv should be preserved, while a return value of true
+	// indicates that this key-value should be removed from the
+	// output of the compaction. The application can inspect
+	// the existing value of the key and make a decision based on it.
+	//
+	// When the value is to be preserved, the application has the option
+	// to modify the existing value and pass it back through a new value.
+	// To retain the previous value, simply return nil.
+	//
+	// If multithreaded compaction is being used *and* a single CompactionFilter
+	// instance was supplied via SetCompactionFilter, the Filter function may be
+	// called from different threads concurrently. The application must ensure
+	// that the call is thread-safe.
+	Filter(level int, key, val []byte) (remove bool, newVal []byte)
+
+	// The name of the compaction filter, for logging
+	Name() string
+}
+
+// NewNativeCompactionFilter creates a CompactionFilter object.
+func NewNativeCompactionFilter(c *C.rocksdb_compactionfilter_t) CompactionFilter {
+	return nativeCompactionFilter{c}
+}
+
+type nativeCompactionFilter struct {
+	c *C.rocksdb_compactionfilter_t
+}
+
+func (c nativeCompactionFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
+	return false, nil
+}
+func (c nativeCompactionFilter) Name() string { return "" }
+
+// Hold references to compaction filters.
+var compactionFilters []CompactionFilter
+
+func registerCompactionFilter(filter CompactionFilter) int {
+	compactionFilters = append(compactionFilters, filter)
+	return len(compactionFilters) - 1
+}
+
+//export gorocksdb_compactionfilter_filter
+func gorocksdb_compactionfilter_filter(idx int, cLevel C.int, cKey *C.char, cKeyLen C.size_t, cVal *C.char, cValLen C.size_t, cNewVal **C.char, cNewValLen *C.size_t, cValChanged *C.uchar) C.int {
+	key := charToByte(cKey, cKeyLen)
+	val := charToByte(cVal, cValLen)
+
+	remove, newVal := compactionFilters[idx].Filter(int(cLevel), key, val)
+	if remove {
+		return C.int(1)
+	} else if newVal != nil {
+		*cNewVal = byteToChar(newVal)
+		*cNewValLen = C.size_t(len(newVal))
+		*cValChanged = C.uchar(1)
+	}
+	return C.int(0)
+}
+
+//export gorocksdb_compactionfilter_name
+func gorocksdb_compactionfilter_name(idx int) *C.char {
+	return stringToChar(compactionFilters[idx].Name())
+}
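
For orientation, a user-defined filter only needs to satisfy the two-method interface above and can then be handed to Options.SetCompactionFilter (defined in options.go later in this diff). The type, key prefix, and name below are illustrative, not part of the vendored code; the sketch also assumes the standard library bytes package is imported:

    // tmpPrefixFilter drops every key with a "tmp/" prefix during compaction
    // and leaves all other entries untouched.
    type tmpPrefixFilter struct{}

    func (tmpPrefixFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
        if bytes.HasPrefix(key, []byte("tmp/")) {
            return true, nil // remove this key-value from the compaction output
        }
        return false, nil // keep the existing value unchanged
    }

    func (tmpPrefixFilter) Name() string { return "example.TmpPrefixFilter" }

    // Wiring it up:
    //   opts := gorocksdb.NewDefaultOptions()
    //   opts.SetCompactionFilter(tmpPrefixFilter{})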
diff --git a/vendor/github.com/tecbot/gorocksdb/comparator.go b/vendor/github.com/tecbot/gorocksdb/comparator.go
new file mode 100644
index 0000000000000000000000000000000000000000..82491a62ab5ec1a850f6e92c21478c631de40880
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/comparator.go
@@ -0,0 +1,49 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// A Comparator object provides a total order across slices that are
+// used as keys in an sstable or a database.
+type Comparator interface {
+	// Three-way comparison. Returns value:
+	//   < 0 iff "a" < "b",
+	//   == 0 iff "a" == "b",
+	//   > 0 iff "a" > "b"
+	Compare(a, b []byte) int
+
+	// The name of the comparator.
+	Name() string
+}
+
+// NewNativeComparator creates a Comparator object.
+func NewNativeComparator(c *C.rocksdb_comparator_t) Comparator {
+	return nativeComparator{c}
+}
+
+type nativeComparator struct {
+	c *C.rocksdb_comparator_t
+}
+
+func (c nativeComparator) Compare(a, b []byte) int { return 0 }
+func (c nativeComparator) Name() string            { return "" }
+
+// Hold references to comparators.
+var comperators []Comparator
+
+func registerComperator(cmp Comparator) int {
+	comperators = append(comperators, cmp)
+	return len(comperators) - 1
+}
+
+//export gorocksdb_comparator_compare
+func gorocksdb_comparator_compare(idx int, cKeyA *C.char, cKeyALen C.size_t, cKeyB *C.char, cKeyBLen C.size_t) C.int {
+	keyA := charToByte(cKeyA, cKeyALen)
+	keyB := charToByte(cKeyB, cKeyBLen)
+	return C.int(comperators[idx].Compare(keyA, keyB))
+}
+
+//export gorocksdb_comparator_name
+func gorocksdb_comparator_name(idx int) *C.char {
+	return stringToChar(comperators[idx].Name())
+}
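
Similarly, a custom key ordering is just a type implementing Compare and Name, registered via Options.SetComparator (shown in options.go below). A minimal sketch that inverts the default lexicographic order; the type and name are illustrative, and bytes is from the standard library:

    // reverseComparator orders keys in descending lexicographic order.
    // The name must stay stable across reopenings of the same database.
    type reverseComparator struct{}

    func (reverseComparator) Compare(a, b []byte) int { return -bytes.Compare(a, b) }
    func (reverseComparator) Name() string            { return "example.ReverseComparator" }

    // Wiring it up:
    //   opts.SetComparator(reverseComparator{})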
diff --git a/vendor/github.com/tecbot/gorocksdb/db.go b/vendor/github.com/tecbot/gorocksdb/db.go
new file mode 100644
index 0000000000000000000000000000000000000000..10c7cb1a6bb3eaa730878ac0eed9214bb2436e1b
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/db.go
@@ -0,0 +1,625 @@
+package gorocksdb
+
+// #include <stdlib.h>
+// #include "rocksdb/c.h"
+import "C"
+import (
+	"errors"
+	"unsafe"
+)
+
+// Range is a range of keys in the database. GetApproximateSizes calls with it
+// begin at the key Start and end right before the key Limit.
+type Range struct {
+	Start []byte
+	Limit []byte
+}
+
+// DB is a reusable handle to a RocksDB database on disk, created by Open.
+type DB struct {
+	c    *C.rocksdb_t
+	name string
+	opts *Options
+}
+
+// OpenDb opens a database with the specified options.
+func OpenDb(opts *Options, name string) (*DB, error) {
+	var (
+		cErr  *C.char
+		cName = C.CString(name)
+	)
+	defer C.free(unsafe.Pointer(cName))
+	db := C.rocksdb_open(opts.c, cName, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	return &DB{
+		name: name,
+		c:    db,
+		opts: opts,
+	}, nil
+}
+
+// OpenDbForReadOnly opens a database with the specified options for readonly usage.
+func OpenDbForReadOnly(opts *Options, name string, errorIfLogFileExist bool) (*DB, error) {
+	var (
+		cErr  *C.char
+		cName = C.CString(name)
+	)
+	defer C.free(unsafe.Pointer(cName))
+	db := C.rocksdb_open_for_read_only(opts.c, cName, boolToChar(errorIfLogFileExist), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	return &DB{
+		name: name,
+		c:    db,
+		opts: opts,
+	}, nil
+}
+
+// OpenDbColumnFamilies opens a database with the specified column families.
+func OpenDbColumnFamilies(
+	opts *Options,
+	name string,
+	cfNames []string,
+	cfOpts []*Options,
+) (*DB, []*ColumnFamilyHandle, error) {
+	numColumnFamilies := len(cfNames)
+	if numColumnFamilies != len(cfOpts) {
+		return nil, nil, errors.New("must provide the same number of column family names and options")
+	}
+
+	cName := C.CString(name)
+	defer C.free(unsafe.Pointer(cName))
+
+	cNames := make([]*C.char, numColumnFamilies)
+	for i, s := range cfNames {
+		cNames[i] = C.CString(s)
+	}
+	defer func() {
+		for _, s := range cNames {
+			C.free(unsafe.Pointer(s))
+		}
+	}()
+
+	cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
+	for i, o := range cfOpts {
+		cOpts[i] = o.c
+	}
+
+	cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)
+
+	var cErr *C.char
+	db := C.rocksdb_open_column_families(
+		opts.c,
+		cName,
+		C.int(numColumnFamilies),
+		&cNames[0],
+		&cOpts[0],
+		&cHandles[0],
+		&cErr,
+	)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, nil, errors.New(C.GoString(cErr))
+	}
+
+	cfHandles := make([]*ColumnFamilyHandle, numColumnFamilies)
+	for i, c := range cHandles {
+		cfHandles[i] = NewNativeColumnFamilyHandle(c)
+	}
+
+	return &DB{
+		name: name,
+		c:    db,
+		opts: opts,
+	}, cfHandles, nil
+}
+
+// OpenDbForReadOnlyColumnFamilies opens a database with the specified column
+// families in read only mode.
+func OpenDbForReadOnlyColumnFamilies(
+	opts *Options,
+	name string,
+	cfNames []string,
+	cfOpts []*Options,
+	errorIfLogFileExist bool,
+) (*DB, []*ColumnFamilyHandle, error) {
+	numColumnFamilies := len(cfNames)
+	if numColumnFamilies != len(cfOpts) {
+		return nil, nil, errors.New("must provide the same number of column family names and options")
+	}
+
+	cName := C.CString(name)
+	defer C.free(unsafe.Pointer(cName))
+
+	cNames := make([]*C.char, numColumnFamilies)
+	for i, s := range cfNames {
+		cNames[i] = C.CString(s)
+	}
+	defer func() {
+		for _, s := range cNames {
+			C.free(unsafe.Pointer(s))
+		}
+	}()
+
+	cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
+	for i, o := range cfOpts {
+		cOpts[i] = o.c
+	}
+
+	cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)
+
+	var cErr *C.char
+	db := C.rocksdb_open_for_read_only_column_families(
+		opts.c,
+		cName,
+		C.int(numColumnFamilies),
+		&cNames[0],
+		&cOpts[0],
+		&cHandles[0],
+		boolToChar(errorIfLogFileExist),
+		&cErr,
+	)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, nil, errors.New(C.GoString(cErr))
+	}
+
+	cfHandles := make([]*ColumnFamilyHandle, numColumnFamilies)
+	for i, c := range cHandles {
+		cfHandles[i] = NewNativeColumnFamilyHandle(c)
+	}
+
+	return &DB{
+		name: name,
+		c:    db,
+		opts: opts,
+	}, cfHandles, nil
+}
+
+// ListColumnFamilies lists the names of the column families in the DB.
+func ListColumnFamilies(opts *Options, name string) ([]string, error) {
+	var (
+		cErr  *C.char
+		cLen  C.size_t
+		cName = C.CString(name)
+	)
+	defer C.free(unsafe.Pointer(cName))
+	cNames := C.rocksdb_list_column_families(opts.c, cName, &cLen, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	namesLen := int(cLen)
+	names := make([]string, namesLen)
+	cNamesArr := (*[1 << 30]*C.char)(unsafe.Pointer(cNames))[:namesLen:namesLen]
+	for i, n := range cNamesArr {
+		names[i] = C.GoString(n)
+	}
+	C.rocksdb_list_column_families_destroy(cNames, cLen)
+	return names, nil
+}
+
+// UnsafeGetDB returns the underlying c rocksdb instance.
+func (db *DB) UnsafeGetDB() unsafe.Pointer {
+	return unsafe.Pointer(db.c)
+}
+
+// Name returns the name of the database.
+func (db *DB) Name() string {
+	return db.name
+}
+
+// Get returns the data associated with the key from the database.
+func (db *DB) Get(opts *ReadOptions, key []byte) (*Slice, error) {
+	var (
+		cErr    *C.char
+		cValLen C.size_t
+		cKey    = byteToChar(key)
+	)
+	cValue := C.rocksdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	return NewSlice(cValue, cValLen), nil
+}
+
+// GetBytes is like Get but returns a copy of the data.
+func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) {
+	var (
+		cErr    *C.char
+		cValLen C.size_t
+		cKey    = byteToChar(key)
+	)
+	cValue := C.rocksdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	if cValue == nil {
+		return nil, nil
+	}
+	defer C.free(unsafe.Pointer(cValue))
+	return C.GoBytes(unsafe.Pointer(cValue), C.int(cValLen)), nil
+}
+
+// GetCF returns the data associated with the key from the database and column family.
+func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Slice, error) {
+	var (
+		cErr    *C.char
+		cValLen C.size_t
+		cKey    = byteToChar(key)
+	)
+	cValue := C.rocksdb_get_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), &cValLen, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	return NewSlice(cValue, cValLen), nil
+}
+
+// Put writes data associated with a key to the database.
+func (db *DB) Put(opts *WriteOptions, key, value []byte) error {
+	var (
+		cErr   *C.char
+		cKey   = byteToChar(key)
+		cValue = byteToChar(value)
+	)
+	C.rocksdb_put(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// PutCF writes data associated with a key to the database and column family.
+func (db *DB) PutCF(opts *WriteOptions, cf *ColumnFamilyHandle, key, value []byte) error {
+	var (
+		cErr   *C.char
+		cKey   = byteToChar(key)
+		cValue = byteToChar(value)
+	)
+	C.rocksdb_put_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// Delete removes the data associated with the key from the database.
+func (db *DB) Delete(opts *WriteOptions, key []byte) error {
+	var (
+		cErr *C.char
+		cKey = byteToChar(key)
+	)
+	C.rocksdb_delete(db.c, opts.c, cKey, C.size_t(len(key)), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// DeleteCF removes the data associated with the key from the database and column family.
+func (db *DB) DeleteCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte) error {
+	var (
+		cErr *C.char
+		cKey = byteToChar(key)
+	)
+	C.rocksdb_delete_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// Merge merges the data associated with the key with the actual data in the database.
+func (db *DB) Merge(opts *WriteOptions, key []byte, value []byte) error {
+	var (
+		cErr   *C.char
+		cKey   = byteToChar(key)
+		cValue = byteToChar(value)
+	)
+	C.rocksdb_merge(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// MergeCF merges the data associated with the key with the actual data in the
+// database and column family.
+func (db *DB) MergeCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte, value []byte) error {
+	var (
+		cErr   *C.char
+		cKey   = byteToChar(key)
+		cValue = byteToChar(value)
+	)
+	C.rocksdb_merge_cf(db.c, opts.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// Write writes a WriteBatch to the database.
+func (db *DB) Write(opts *WriteOptions, batch *WriteBatch) error {
+	var cErr *C.char
+	C.rocksdb_write(db.c, opts.c, batch.c, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// NewIterator returns an Iterator over the database that uses the
+// ReadOptions given.
+func (db *DB) NewIterator(opts *ReadOptions) *Iterator {
+	cIter := C.rocksdb_create_iterator(db.c, opts.c)
+	return NewNativeIterator(unsafe.Pointer(cIter))
+}
+
+// NewIteratorCF returns an Iterator over the database and column family
+// that uses the ReadOptions given.
+func (db *DB) NewIteratorCF(opts *ReadOptions, cf *ColumnFamilyHandle) *Iterator {
+	cIter := C.rocksdb_create_iterator_cf(db.c, opts.c, cf.c)
+	return NewNativeIterator(unsafe.Pointer(cIter))
+}
+
+// NewSnapshot creates a new snapshot of the database.
+func (db *DB) NewSnapshot() *Snapshot {
+	cSnap := C.rocksdb_create_snapshot(db.c)
+	return NewNativeSnapshot(cSnap, db.c)
+}
+
+// GetProperty returns the value of a database property.
+func (db *DB) GetProperty(propName string) string {
+	cprop := C.CString(propName)
+	defer C.free(unsafe.Pointer(cprop))
+	cValue := C.rocksdb_property_value(db.c, cprop)
+	defer C.free(unsafe.Pointer(cValue))
+	return C.GoString(cValue)
+}
+
+// GetPropertyCF returns the value of a database property.
+func (db *DB) GetPropertyCF(propName string, cf *ColumnFamilyHandle) string {
+	cProp := C.CString(propName)
+	defer C.free(unsafe.Pointer(cProp))
+	cValue := C.rocksdb_property_value_cf(db.c, cf.c, cProp)
+	defer C.free(unsafe.Pointer(cValue))
+	return C.GoString(cValue)
+}
+
+// CreateColumnFamily creates a new column family.
+func (db *DB) CreateColumnFamily(opts *Options, name string) (*ColumnFamilyHandle, error) {
+	var (
+		cErr  *C.char
+		cName = C.CString(name)
+	)
+	defer C.free(unsafe.Pointer(cName))
+	cHandle := C.rocksdb_create_column_family(db.c, opts.c, cName, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return nil, errors.New(C.GoString(cErr))
+	}
+	return NewNativeColumnFamilyHandle(cHandle), nil
+}
+
+// DropColumnFamily drops a column family.
+func (db *DB) DropColumnFamily(c *ColumnFamilyHandle) error {
+	var cErr *C.char
+	C.rocksdb_drop_column_family(db.c, c.c, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// GetApproximateSizes returns the approximate number of bytes of file system
+// space used by one or more key ranges.
+//
+// The keys counted will begin at Range.Start and end on the key before
+// Range.Limit.
+func (db *DB) GetApproximateSizes(ranges []Range) []uint64 {
+	sizes := make([]uint64, len(ranges))
+	if len(ranges) == 0 {
+		return sizes
+	}
+
+	cStarts := make([]*C.char, len(ranges))
+	cLimits := make([]*C.char, len(ranges))
+	cStartLens := make([]C.size_t, len(ranges))
+	cLimitLens := make([]C.size_t, len(ranges))
+	for i, r := range ranges {
+		cStarts[i] = byteToChar(r.Start)
+		cStartLens[i] = C.size_t(len(r.Start))
+		cLimits[i] = byteToChar(r.Limit)
+		cLimitLens[i] = C.size_t(len(r.Limit))
+	}
+
+	C.rocksdb_approximate_sizes(
+		db.c,
+		C.int(len(ranges)),
+		&cStarts[0],
+		&cStartLens[0],
+		&cLimits[0],
+		&cLimitLens[0],
+		(*C.uint64_t)(&sizes[0]))
+
+	return sizes
+}
+
+// GetApproximateSizesCF returns the approximate number of bytes of file system
+// space used by one or more key ranges in the column family.
+//
+// The keys counted will begin at Range.Start and end on the key before
+// Range.Limit.
+func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []uint64 {
+	sizes := make([]uint64, len(ranges))
+	if len(ranges) == 0 {
+		return sizes
+	}
+
+	cStarts := make([]*C.char, len(ranges))
+	cLimits := make([]*C.char, len(ranges))
+	cStartLens := make([]C.size_t, len(ranges))
+	cLimitLens := make([]C.size_t, len(ranges))
+	for i, r := range ranges {
+		cStarts[i] = byteToChar(r.Start)
+		cStartLens[i] = C.size_t(len(r.Start))
+		cLimits[i] = byteToChar(r.Limit)
+		cLimitLens[i] = C.size_t(len(r.Limit))
+	}
+
+	C.rocksdb_approximate_sizes_cf(
+		db.c,
+		cf.c,
+		C.int(len(ranges)),
+		&cStarts[0],
+		&cStartLens[0],
+		&cLimits[0],
+		&cLimitLens[0],
+		(*C.uint64_t)(&sizes[0]))
+
+	return sizes
+}
+
+// LiveFileMetadata is metadata associated with each SST file.
+type LiveFileMetadata struct {
+	Name        string
+	Level       int
+	Size        int64
+	SmallestKey []byte
+	LargestKey  []byte
+}
+
+// GetLiveFilesMetaData returns a list of all table files with their
+// level, start key and end key.
+func (db *DB) GetLiveFilesMetaData() []LiveFileMetadata {
+	lf := C.rocksdb_livefiles(db.c)
+	defer C.rocksdb_livefiles_destroy(lf)
+
+	count := C.rocksdb_livefiles_count(lf)
+	liveFiles := make([]LiveFileMetadata, int(count))
+	for i := C.int(0); i < count; i++ {
+		var liveFile LiveFileMetadata
+		liveFile.Name = C.GoString(C.rocksdb_livefiles_name(lf, i))
+		liveFile.Level = int(C.rocksdb_livefiles_level(lf, i))
+		liveFile.Size = int64(C.rocksdb_livefiles_size(lf, i))
+
+		var cSize C.size_t
+		key := C.rocksdb_livefiles_smallestkey(lf, i, &cSize)
+		liveFile.SmallestKey = C.GoBytes(unsafe.Pointer(key), C.int(cSize))
+
+		key = C.rocksdb_livefiles_largestkey(lf, i, &cSize)
+		liveFile.LargestKey = C.GoBytes(unsafe.Pointer(key), C.int(cSize))
+		liveFiles[int(i)] = liveFile
+	}
+	return liveFiles
+}
+
+// CompactRange runs a manual compaction on the Range of keys given. This is
+// not likely to be needed for typical usage.
+func (db *DB) CompactRange(r Range) {
+	cStart := byteToChar(r.Start)
+	cLimit := byteToChar(r.Limit)
+	C.rocksdb_compact_range(db.c, cStart, C.size_t(len(r.Start)), cLimit, C.size_t(len(r.Limit)))
+}
+
+// CompactRangeCF runs a manual compaction on the Range of keys given on the
+// given column family. This is not likely to be needed for typical usage.
+func (db *DB) CompactRangeCF(cf *ColumnFamilyHandle, r Range) {
+	cStart := byteToChar(r.Start)
+	cLimit := byteToChar(r.Limit)
+	C.rocksdb_compact_range_cf(db.c, cf.c, cStart, C.size_t(len(r.Start)), cLimit, C.size_t(len(r.Limit)))
+}
+
+// Flush triggers a manual flush for the database.
+func (db *DB) Flush(opts *FlushOptions) error {
+	var cErr *C.char
+	C.rocksdb_flush(db.c, opts.c, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// DisableFileDeletions disables file deletions and should be used when backing up the database.
+func (db *DB) DisableFileDeletions() error {
+	var cErr *C.char
+	C.rocksdb_disable_file_deletions(db.c, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// EnableFileDeletions enables file deletions for the database.
+func (db *DB) EnableFileDeletions(force bool) error {
+	var cErr *C.char
+	C.rocksdb_enable_file_deletions(db.c, boolToChar(force), &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// DeleteFile deletes the file name from the db directory and updates the internal state to
+// reflect that. Supports deletion of sst and log files only. 'name' must be a
+// path relative to the db directory, e.g. 000001.sst, /archive/000003.log.
+func (db *DB) DeleteFile(name string) {
+	cName := C.CString(name)
+	defer C.free(unsafe.Pointer(cName))
+	C.rocksdb_delete_file(db.c, cName)
+}
+
+// Close closes the database.
+func (db *DB) Close() {
+	C.rocksdb_close(db.c)
+}
+
+// DestroyDb removes a database entirely, removing everything from the
+// filesystem.
+func DestroyDb(name string, opts *Options) error {
+	var (
+		cErr  *C.char
+		cName = C.CString(name)
+	)
+	defer C.free(unsafe.Pointer(cName))
+	C.rocksdb_destroy_db(opts.c, cName, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// RepairDb repairs a database.
+func RepairDb(name string, opts *Options) error {
+	var (
+		cErr  *C.char
+		cName = C.CString(name)
+	)
+	defer C.free(unsafe.Pointer(cName))
+	C.rocksdb_repair_db(opts.c, cName, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
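
To illustrate how the column-family calls above fit together, here is a sketch that opens a database, creates a second column family next to "default", and round-trips a key through it. The function name, path, and keys are illustrative; NewDefaultWriteOptions and NewDefaultReadOptions come from the read/write options files that are not part of this hunk (they are used the same way in doc.go below):

    // columnFamilyRoundTrip creates a "meta" column family and reads back a
    // value written to it. Illustrative sketch only.
    func columnFamilyRoundTrip(path string) error {
        opts := gorocksdb.NewDefaultOptions()
        opts.SetCreateIfMissing(true)
        db, err := gorocksdb.OpenDb(opts, path)
        if err != nil {
            return err
        }
        defer db.Close()

        cf, err := db.CreateColumnFamily(gorocksdb.NewDefaultOptions(), "meta")
        if err != nil {
            return err
        }
        defer cf.Destroy()

        wo := gorocksdb.NewDefaultWriteOptions()
        ro := gorocksdb.NewDefaultReadOptions()

        if err := db.PutCF(wo, cf, []byte("schema"), []byte("v1")); err != nil {
            return err
        }
        val, err := db.GetCF(ro, cf, []byte("schema"))
        if err != nil {
            return err
        }
        defer val.Free()
        _ = val.Data() // "v1"
        return nil
    }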
diff --git a/vendor/github.com/tecbot/gorocksdb/doc.go b/vendor/github.com/tecbot/gorocksdb/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..156e256a72b1868284b8da2d34e86320d3338c49
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/doc.go
@@ -0,0 +1,70 @@
+/*
+Package gorocksdb provides the ability to create and access RocksDB databases.
+
+gorocksdb.OpenDb opens and creates databases.
+
+	opts := gorocksdb.NewDefaultOptions()
+	opts.SetBlockCache(gorocksdb.NewLRUCache(3<<30))
+	opts.SetCreateIfMissing(true)
+	db, err := gorocksdb.OpenDb(opts, "/path/to/db")
+
+The DB struct returned by OpenDb provides DB.Get, DB.Put, DB.Merge and DB.Delete to modify
+and query the database.
+
+	ro := gorocksdb.NewDefaultReadOptions()
+	wo := gorocksdb.NewDefaultWriteOptions()
+	// if ro and wo are not used again, be sure to Close them.
+	err = db.Put(wo, []byte("foo"), []byte("bar"))
+	...
+	value, err := db.Get(ro, []byte("foo"))
+	defer value.Free()
+	...
+	err = db.Delete(wo, []byte("foo"))
+
+For bulk reads, use an Iterator. If you want to avoid disturbing your live
+traffic while doing the bulk read, be sure to call SetFillCache(false) on the
+ReadOptions you use when creating the Iterator.
+
+	ro := gorocksdb.NewDefaultReadOptions()
+	ro.SetFillCache(false)
+	it := db.NewIterator(ro)
+	defer it.Close()
+	it.Seek([]byte("foo"))
+	for ; it.Valid(); it.Next() {
+		key := it.Key()
+		value := it.Value()
+		fmt.Printf("Key: %v Value: %v\n", key.Data(), value.Data())
+		key.Free()
+		value.Free()
+	}
+	if err := it.Err(); err != nil {
+		...
+	}
+
+Batched, atomic writes can be performed with a WriteBatch and
+DB.Write.
+
+	wb := gorocksdb.NewWriteBatch()
+	// defer wb.Close or use wb.Clear and reuse.
+	wb.Delete([]byte("foo"))
+	wb.Put([]byte("foo"), []byte("bar"))
+	wb.Put([]byte("bar"), []byte("foo"))
+	err := db.Write(wo, wb)
+
+If your working dataset does not fit in memory, you'll want to add a bloom
+filter to your database. NewBloomFilter and Options.SetFilterPolicy are what
+you want. NewBloomFilter's argument is the number of bits in the filter to use
+per key in your database.
+
+	filter := gorocksdb.NewBloomFilter(10)
+	opts.SetFilterPolicy(filter)
+	db, err := gorocksdb.OpenDb(opts, "/path/to/db")
+
+If you're using a custom comparator in your code, be aware you may have to
+make your own filter policy object.
+
+This documentation is not a complete discussion of RocksDB. Please read the
+RocksDB documentation <http://rocksdb.org/> for information on its
+operation. You'll find lots of goodies there.
+*/
+package gorocksdb
diff --git a/vendor/github.com/tecbot/gorocksdb/dynflag.go b/vendor/github.com/tecbot/gorocksdb/dynflag.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b8a3ba498cc43595d7c74ab69b1ed4293df1e8f
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/dynflag.go
@@ -0,0 +1,6 @@
+// +build !embed
+
+package gorocksdb
+
+// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy
+import "C"
diff --git a/vendor/github.com/tecbot/gorocksdb/embedflag.go b/vendor/github.com/tecbot/gorocksdb/embedflag.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f218fb7a4b2ee4f5f9e963160665385a1f3ad8c
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/embedflag.go
@@ -0,0 +1,18 @@
+// +build embed
+
+package gorocksdb
+
+// #cgo CXXFLAGS: -std=c++11
+// #cgo CPPFLAGS: -I${SRCDIR}/../../cockroachdb/c-lz4/internal/lib
+// #cgo CPPFLAGS: -I${SRCDIR}/../../cockroachdb/c-rocksdb/internal/include
+// #cgo CPPFLAGS: -I${SRCDIR}/../../cockroachdb/c-snappy/internal
+// #cgo LDFLAGS: -lstdc++
+// #cgo darwin LDFLAGS: -Wl,-undefined -Wl,dynamic_lookup
+// #cgo !darwin LDFLAGS: -Wl,-unresolved-symbols=ignore-all -lrt
+import "C"
+
+import (
+	_ "github.com/cockroachdb/c-lz4"
+	_ "github.com/cockroachdb/c-rocksdb"
+	_ "github.com/cockroachdb/c-snappy"
+)
diff --git a/vendor/github.com/tecbot/gorocksdb/env.go b/vendor/github.com/tecbot/gorocksdb/env.go
new file mode 100644
index 0000000000000000000000000000000000000000..386335bc67d07001ba9033b56587b8da0ce66636
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/env.go
@@ -0,0 +1,40 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// Env is a system call environment used by a database.
+type Env struct {
+	c *C.rocksdb_env_t
+}
+
+// NewDefaultEnv creates a default environment.
+func NewDefaultEnv() *Env {
+	return NewNativeEnv(C.rocksdb_create_default_env())
+}
+
+// NewNativeEnv creates an Environment object.
+func NewNativeEnv(c *C.rocksdb_env_t) *Env {
+	return &Env{c}
+}
+
+// SetBackgroundThreads sets the number of background worker threads
+// of a specific thread pool for this environment.
+// 'LOW' is the default pool.
+// Default: 1
+func (env *Env) SetBackgroundThreads(n int) {
+	C.rocksdb_env_set_background_threads(env.c, C.int(n))
+}
+
+// SetHighPriorityBackgroundThreads sets the size of the high priority
+// thread pool that can be used to prevent compactions from stalling
+// memtable flushes.
+func (env *Env) SetHighPriorityBackgroundThreads(n int) {
+	C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n))
+}
+
+// Destroy deallocates the Env object.
+func (env *Env) Destroy() {
+	C.rocksdb_env_destroy(env.c)
+	env.c = nil
+}
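
As a small example, the two setters exposed here tune the background thread pools; attaching the resulting Env to the database options (via an Options setter that lives outside this hunk) is assumed:

    // tunedEnv builds an Env with a larger low-priority pool for compactions
    // and a small high-priority pool for memtable flushes.
    func tunedEnv() *gorocksdb.Env {
        env := gorocksdb.NewDefaultEnv()
        env.SetBackgroundThreads(4)
        env.SetHighPriorityBackgroundThreads(2)
        return env
    }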
diff --git a/vendor/github.com/tecbot/gorocksdb/filter_policy.go b/vendor/github.com/tecbot/gorocksdb/filter_policy.go
new file mode 100644
index 0000000000000000000000000000000000000000..62415d474c61aa28c85a1179cfb4ae6c5fa2d717
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/filter_policy.go
@@ -0,0 +1,84 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// FilterPolicy is a factory type that allows the RocksDB database to create a
+// filter, such as a bloom filter, which will be used to reduce reads.
+type FilterPolicy interface {
+	// keys contains a list of keys (potentially with duplicates)
+	// that are ordered according to the user supplied comparator.
+	CreateFilter(keys [][]byte) []byte
+
+	// "filter" contains the data appended by a preceding call to
+	// CreateFilter(). This method must return true if
+	// the key was in the list of keys passed to CreateFilter().
+	// This method may return true or false if the key was not on the
+	// list, but it should aim to return false with a high probability.
+	KeyMayMatch(key []byte, filter []byte) bool
+
+	// Return the name of this policy.
+	Name() string
+}
+
+// NewNativeFilterPolicy creates a FilterPolicy object.
+func NewNativeFilterPolicy(c *C.rocksdb_filterpolicy_t) FilterPolicy {
+	return nativeFilterPolicy{c}
+}
+
+type nativeFilterPolicy struct {
+	c *C.rocksdb_filterpolicy_t
+}
+
+func (fp nativeFilterPolicy) CreateFilter(keys [][]byte) []byte          { return nil }
+func (fp nativeFilterPolicy) KeyMayMatch(key []byte, filter []byte) bool { return false }
+func (fp nativeFilterPolicy) Name() string                               { return "" }
+
+// NewBloomFilter returns a new filter policy that uses a bloom filter with approximately
+// the specified number of bits per key.  A good value for bits_per_key
+// is 10, which yields a filter with ~1% false positive rate.
+//
+// Note: if you are using a custom comparator that ignores some parts
+// of the keys being compared, you must not use NewBloomFilterPolicy()
+// and must provide your own FilterPolicy that also ignores the
+// corresponding parts of the keys.  For example, if the comparator
+// ignores trailing spaces, it would be incorrect to use a
+// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
+// trailing spaces in keys.
+func NewBloomFilter(bitsPerKey int) FilterPolicy {
+	return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey)))
+}
+
+// Hold references to filter policies.
+var filterPolicies []FilterPolicy
+
+func registerFilterPolicy(fp FilterPolicy) int {
+	filterPolicies = append(filterPolicies, fp)
+	return len(filterPolicies) - 1
+}
+
+//export gorocksdb_filterpolicy_create_filter
+func gorocksdb_filterpolicy_create_filter(idx int, cKeys **C.char, cKeysLen *C.size_t, cNumKeys C.int, cDstLen *C.size_t) *C.char {
+	rawKeys := charSlice(cKeys, cNumKeys)
+	keysLen := sizeSlice(cKeysLen, cNumKeys)
+	keys := make([][]byte, int(cNumKeys))
+	for i, len := range keysLen {
+		keys[i] = charToByte(rawKeys[i], len)
+	}
+
+	dst := filterPolicies[idx].CreateFilter(keys)
+	*cDstLen = C.size_t(len(dst))
+	return cByteSlice(dst)
+}
+
+//export gorocksdb_filterpolicy_key_may_match
+func gorocksdb_filterpolicy_key_may_match(idx int, cKey *C.char, cKeyLen C.size_t, cFilter *C.char, cFilterLen C.size_t) C.uchar {
+	key := charToByte(cKey, cKeyLen)
+	filter := charToByte(cFilter, cFilterLen)
+	return boolToChar(filterPolicies[idx].KeyMayMatch(key, filter))
+}
+
+//export gorocksdb_filterpolicy_name
+func gorocksdb_filterpolicy_name(idx int) *C.char {
+	return stringToChar(filterPolicies[idx].Name())
+}
diff --git a/vendor/github.com/tecbot/gorocksdb/gorocksdb.c b/vendor/github.com/tecbot/gorocksdb/gorocksdb.c
new file mode 100644
index 0000000000000000000000000000000000000000..c8258376c5ba74c300f7ef17b6578f872d0c90ef
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/gorocksdb.c
@@ -0,0 +1,66 @@
+#include "gorocksdb.h"
+#include "_cgo_export.h"
+
+/* Base */
+
+void gorocksdb_destruct_handler(void* state) { }
+
+/* Comparator */
+
+rocksdb_comparator_t* gorocksdb_comparator_create(uintptr_t idx) {
+    return rocksdb_comparator_create(
+        (void*)idx,
+        gorocksdb_destruct_handler,
+        (int (*)(void*, const char*, size_t, const char*, size_t))(gorocksdb_comparator_compare),
+        (const char *(*)(void*))(gorocksdb_comparator_name));
+}
+
+/* CompactionFilter */
+
+rocksdb_compactionfilter_t* gorocksdb_compactionfilter_create(uintptr_t idx) {
+    return rocksdb_compactionfilter_create(
+        (void*)idx,
+        gorocksdb_destruct_handler,
+        (unsigned char (*)(void*, int, const char*, size_t, const char*, size_t, char**, size_t*, unsigned char*))(gorocksdb_compactionfilter_filter),
+        (const char *(*)(void*))(gorocksdb_compactionfilter_name));
+}
+
+/* Filter Policy */
+
+rocksdb_filterpolicy_t* gorocksdb_filterpolicy_create(uintptr_t idx) {
+    return rocksdb_filterpolicy_create(
+        (void*)idx,
+        gorocksdb_destruct_handler,
+        (char* (*)(void*, const char* const*, const size_t*, int, size_t*))(gorocksdb_filterpolicy_create_filter),
+        (unsigned char (*)(void*, const char*, size_t, const char*, size_t))(gorocksdb_filterpolicy_key_may_match),
+        gorocksdb_filterpolicy_delete_filter,
+        (const char *(*)(void*))(gorocksdb_filterpolicy_name));
+}
+
+void gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s) { }
+
+/* Merge Operator */
+
+rocksdb_mergeoperator_t* gorocksdb_mergeoperator_create(uintptr_t idx) {
+    return rocksdb_mergeoperator_create(
+        (void*)idx,
+        gorocksdb_destruct_handler,
+        (char* (*)(void*, const char*, size_t, const char*, size_t, const char* const*, const size_t*, int, unsigned char*, size_t*))(gorocksdb_mergeoperator_full_merge),
+        (char* (*)(void*, const char*, size_t, const char* const*, const size_t*, int, unsigned char*, size_t*))(gorocksdb_mergeoperator_partial_merge_multi),
+        gorocksdb_mergeoperator_delete_value,
+        (const char* (*)(void*))(gorocksdb_mergeoperator_name));
+}
+
+void gorocksdb_mergeoperator_delete_value(void* id, const char* v, size_t s) { }
+
+/* Slice Transform */
+
+rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx) {
+    return rocksdb_slicetransform_create(
+    	(void*)idx,
+    	gorocksdb_destruct_handler,
+    	(char* (*)(void*, const char*, size_t, size_t*))(gorocksdb_slicetransform_transform),
+    	(unsigned char (*)(void*, const char*, size_t))(gorocksdb_slicetransform_in_domain),
+    	(unsigned char (*)(void*, const char*, size_t))(gorocksdb_slicetransform_in_range),
+    	(const char* (*)(void*))(gorocksdb_slicetransform_name));
+}
diff --git a/vendor/github.com/tecbot/gorocksdb/gorocksdb.h b/vendor/github.com/tecbot/gorocksdb/gorocksdb.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a9968f08f82d529b4f7142aeffd643c8fdb000b
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/gorocksdb.h
@@ -0,0 +1,30 @@
+#include <stdlib.h>
+#include "rocksdb/c.h"
+
+// This API provides convenient C wrapper functions for rocksdb client.
+
+/* Base */
+
+extern void gorocksdb_destruct_handler(void* state);
+
+/* CompactionFilter */
+
+extern rocksdb_compactionfilter_t* gorocksdb_compactionfilter_create(uintptr_t idx);
+
+/* Comparator */
+
+extern rocksdb_comparator_t* gorocksdb_comparator_create(uintptr_t idx);
+
+/* Filter Policy */
+
+extern rocksdb_filterpolicy_t* gorocksdb_filterpolicy_create(uintptr_t idx);
+extern void gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s);
+
+/* Merge Operator */
+
+extern rocksdb_mergeoperator_t* gorocksdb_mergeoperator_create(uintptr_t idx);
+extern void gorocksdb_mergeoperator_delete_value(void* state, const char* v, size_t s);
+
+/* Slice Transform */
+
+extern rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx);
diff --git a/vendor/github.com/tecbot/gorocksdb/iterator.go b/vendor/github.com/tecbot/gorocksdb/iterator.go
new file mode 100644
index 0000000000000000000000000000000000000000..41609f304e965534bfd7af4c850c6cb25b7a1fbc
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/iterator.go
@@ -0,0 +1,112 @@
+package gorocksdb
+
+// #include <stdlib.h>
+// #include "rocksdb/c.h"
+import "C"
+import (
+	"bytes"
+	"errors"
+	"unsafe"
+)
+
+// Iterator provides a way to seek to specific keys and iterate through
+// the keyspace from that point, as well as access the values of those keys.
+//
+// For example:
+//
+//      it := db.NewIterator(readOpts)
+//      defer it.Close()
+//
+//      it.Seek([]byte("foo"))
+//		for ; it.Valid(); it.Next() {
+//          fmt.Printf("Key: %v Value: %v\n", it.Key().Data(), it.Value().Data())
+// 		}
+//
+//      if err := it.Err(); err != nil {
+//          return err
+//      }
+//
+type Iterator struct {
+	c *C.rocksdb_iterator_t
+}
+
+// NewNativeIterator creates a Iterator object.
+func NewNativeIterator(c unsafe.Pointer) *Iterator {
+	return &Iterator{(*C.rocksdb_iterator_t)(c)}
+}
+
+// Valid returns false only when an Iterator has iterated past either the
+// first or the last key in the database.
+func (iter *Iterator) Valid() bool {
+	return C.rocksdb_iter_valid(iter.c) != 0
+}
+
+// ValidForPrefix returns false only when an Iterator has iterated past the
+// first or the last key in the database or the specified prefix.
+func (iter *Iterator) ValidForPrefix(prefix []byte) bool {
+	return C.rocksdb_iter_valid(iter.c) != 0 && bytes.HasPrefix(iter.Key().Data(), prefix)
+}
+
+// Key returns the key the iterator currently holds.
+func (iter *Iterator) Key() *Slice {
+	var cLen C.size_t
+	cKey := C.rocksdb_iter_key(iter.c, &cLen)
+	if cKey == nil {
+		return nil
+	}
+	return &Slice{cKey, cLen, true}
+}
+
+// Value returns the value in the database the iterator currently holds.
+func (iter *Iterator) Value() *Slice {
+	var cLen C.size_t
+	cVal := C.rocksdb_iter_value(iter.c, &cLen)
+	if cVal == nil {
+		return nil
+	}
+	return &Slice{cVal, cLen, true}
+}
+
+// Next moves the iterator to the next sequential key in the database.
+func (iter *Iterator) Next() {
+	C.rocksdb_iter_next(iter.c)
+}
+
+// Prev moves the iterator to the previous sequential key in the database.
+func (iter *Iterator) Prev() {
+	C.rocksdb_iter_prev(iter.c)
+}
+
+// SeekToFirst moves the iterator to the first key in the database.
+func (iter *Iterator) SeekToFirst() {
+	C.rocksdb_iter_seek_to_first(iter.c)
+}
+
+// SeekToLast moves the iterator to the last key in the database.
+func (iter *Iterator) SeekToLast() {
+	C.rocksdb_iter_seek_to_last(iter.c)
+}
+
+// Seek moves the iterator to the position greater than or equal to the key.
+func (iter *Iterator) Seek(key []byte) {
+	cKey := byteToChar(key)
+	C.rocksdb_iter_seek(iter.c, cKey, C.size_t(len(key)))
+}
+
+// Err returns nil if no errors happened during iteration, or the actual
+// error otherwise.
+func (iter *Iterator) Err() error {
+	var cErr *C.char
+	C.rocksdb_iter_get_error(iter.c, &cErr)
+	if cErr != nil {
+		defer C.free(unsafe.Pointer(cErr))
+		return errors.New(C.GoString(cErr))
+	}
+	return nil
+}
+
+// Close closes the iterator.
+func (iter *Iterator) Close() {
+	C.rocksdb_iter_destroy(iter.c)
+	iter.c = nil
+}
diff --git a/vendor/github.com/tecbot/gorocksdb/merge_operator.go b/vendor/github.com/tecbot/gorocksdb/merge_operator.go
new file mode 100644
index 0000000000000000000000000000000000000000..379d5e88e9a4474b54bb9c6d1cfbad7e22e12701
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/merge_operator.go
@@ -0,0 +1,128 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// A MergeOperator specifies the SEMANTICS of a merge, which only
+// the client knows. It could be numeric addition, list append, string
+// concatenation, edit data structure, ... , anything.
+// The library, on the other hand, is concerned with the exercise of this
+// interface, at the right time (during get, iteration, compaction...)
+//
+// Please read the RocksDB documentation <http://rocksdb.org/> for
+// more details and example implementations.
+type MergeOperator interface {
+	// Gives the client a way to express the read -> modify -> write semantics
+	// key:           The key that's associated with this merge operation.
+	//                Client could multiplex the merge operator based on it
+	//                if the key space is partitioned and different subspaces
+	//                refer to different types of data which have different
+	//                merge operation semantics.
+	// existingValue: null indicates that the key does not exist before this op.
+	// operands:      the sequence of merge operations to apply, front() first.
+	//
+	// Return true on success.
+	//
+	// All values passed in will be client-specific values. So if this method
+	// returns false, it is because client specified bad data or there was
+	// internal corruption. This will be treated as an error by the library.
+	FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool)
+
+	// This function performs merge(left_op, right_op)
+	// when both the operands are themselves merge operation types
+	// that you would have passed to a db.Merge() call in the same order
+	// (i.e.: db.Merge(key,left_op), followed by db.Merge(key,right_op)).
+	//
+	// PartialMerge should combine them into a single merge operation.
+	// The return value should be constructed such that a call to
+	// db.Merge(key, new_value) would yield the same result as a call
+	// to db.Merge(key, left_op) followed by db.Merge(key, right_op).
+	//
+	// If it is impossible or infeasible to combine the two operations, return false.
+	// The library will internally keep track of the operations, and apply them in the
+	// correct order once a base-value (a Put/Delete/End-of-Database) is seen.
+	PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool)
+
+	// The name of the MergeOperator.
+	Name() string
+}
+
+// NewNativeMergeOperator creates a MergeOperator object.
+func NewNativeMergeOperator(c *C.rocksdb_mergeoperator_t) MergeOperator {
+	return nativeMergeOperator{c}
+}
+
+type nativeMergeOperator struct {
+	c *C.rocksdb_mergeoperator_t
+}
+
+func (mo nativeMergeOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) {
+	return nil, false
+}
+func (mo nativeMergeOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) {
+	return nil, false
+}
+func (mo nativeMergeOperator) Name() string { return "" }
+
+// Hold references to merge operators.
+var mergeOperators []MergeOperator
+
+func registerMergeOperator(merger MergeOperator) int {
+	mergeOperators = append(mergeOperators, merger)
+	return len(mergeOperators) - 1
+}
+
+//export gorocksdb_mergeoperator_full_merge
+func gorocksdb_mergeoperator_full_merge(idx int, cKey *C.char, cKeyLen C.size_t, cExistingValue *C.char, cExistingValueLen C.size_t, cOperands **C.char, cOperandsLen *C.size_t, cNumOperands C.int, cSuccess *C.uchar, cNewValueLen *C.size_t) *C.char {
+	key := charToByte(cKey, cKeyLen)
+	rawOperands := charSlice(cOperands, cNumOperands)
+	operandsLen := sizeSlice(cOperandsLen, cNumOperands)
+	existingValue := charToByte(cExistingValue, cExistingValueLen)
+	operands := make([][]byte, int(cNumOperands))
+	for i, len := range operandsLen {
+		operands[i] = charToByte(rawOperands[i], len)
+	}
+
+	newValue, success := mergeOperators[idx].FullMerge(key, existingValue, operands)
+	newValueLen := len(newValue)
+
+	*cNewValueLen = C.size_t(newValueLen)
+	*cSuccess = boolToChar(success)
+
+	return cByteSlice(newValue)
+}
+
+//export gorocksdb_mergeoperator_partial_merge_multi
+func gorocksdb_mergeoperator_partial_merge_multi(idx int, cKey *C.char, cKeyLen C.size_t, cOperands **C.char, cOperandsLen *C.size_t, cNumOperands C.int, cSuccess *C.uchar, cNewValueLen *C.size_t) *C.char {
+	key := charToByte(cKey, cKeyLen)
+	rawOperands := charSlice(cOperands, cNumOperands)
+	operandsLen := sizeSlice(cOperandsLen, cNumOperands)
+	operands := make([][]byte, int(cNumOperands))
+	for i, len := range operandsLen {
+		operands[i] = charToByte(rawOperands[i], len)
+	}
+
+	var newValue []byte
+	success := true
+
+	merger := mergeOperators[idx]
+	leftOperand := operands[0]
+	for i := 1; i < int(cNumOperands); i++ {
+		newValue, success = merger.PartialMerge(key, leftOperand, operands[i])
+		if !success {
+			break
+		}
+		leftOperand = newValue
+	}
+
+	newValueLen := len(newValue)
+	*cNewValueLen = C.size_t(newValueLen)
+	*cSuccess = boolToChar(success)
+
+	return cByteSlice(newValue)
+}
+
+//export gorocksdb_mergeoperator_name
+func gorocksdb_mergeoperator_name(idx int) *C.char {
+	return stringToChar(mergeOperators[idx].Name())
+}
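
To make the interface above concrete, here is an illustrative operator that treats values as byte strings and concatenates operands with a comma separator; it would be registered with Options.SetMergeOperator (shown in options.go below) and driven by DB.Merge. All names and the separator choice are examples, not part of the vendored code:

    // appendOperator concatenates merge operands onto the existing value,
    // separated by commas.
    type appendOperator struct{}

    func (appendOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) {
        result := append([]byte{}, existingValue...)
        for _, op := range operands {
            if len(result) > 0 {
                result = append(result, ',')
            }
            result = append(result, op...)
        }
        return result, true
    }

    func (appendOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) {
        merged := append(append([]byte{}, leftOperand...), ',')
        return append(merged, rightOperand...), true
    }

    func (appendOperator) Name() string { return "example.AppendOperator" }

    // Wiring it up:
    //   opts.SetMergeOperator(appendOperator{})
    //   ...
    //   err := db.Merge(wo, []byte("tags"), []byte("red"))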
diff --git a/vendor/github.com/tecbot/gorocksdb/options.go b/vendor/github.com/tecbot/gorocksdb/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..83d02564ba73c241e49431dd37976ef8149465a8
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/options.go
@@ -0,0 +1,1002 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+// #include "gorocksdb.h"
+import "C"
+import "unsafe"
+
+// CompressionType specifies the block compression.
+// DB contents are stored in a set of blocks, each of which holds a
+// sequence of key,value pairs. Each block may be compressed before
+// being stored in a file. The following enum describes which
+// compression method (if any) is used to compress a block.
+type CompressionType uint
+
+// Compression types.
+const (
+	NoCompression     = CompressionType(C.rocksdb_no_compression)
+	SnappyCompression = CompressionType(C.rocksdb_snappy_compression)
+	ZLibCompression   = CompressionType(C.rocksdb_zlib_compression)
+	Bz2Compression    = CompressionType(C.rocksdb_bz2_compression)
+)
+
+// CompactionStyle specifies the compaction style.
+type CompactionStyle uint
+
+// Compaction styles.
+const (
+	LevelCompactionStyle     = CompactionStyle(C.rocksdb_level_compaction)
+	UniversalCompactionStyle = CompactionStyle(C.rocksdb_universal_compaction)
+	FIFOCompactionStyle      = CompactionStyle(C.rocksdb_fifo_compaction)
+)
+
+// CompactionAccessPattern specifies the access pattern in compaction.
+type CompactionAccessPattern uint
+
+// Access patterns for compaction.
+const (
+	NoneCompactionAccessPattern       = CompactionAccessPattern(0)
+	NormalCompactionAccessPattern     = CompactionAccessPattern(1)
+	SequentialCompactionAccessPattern = CompactionAccessPattern(2)
+	WillneedCompactionAccessPattern   = CompactionAccessPattern(3)
+)
+
+// InfoLogLevel describes the log level.
+type InfoLogLevel uint
+
+// Log levels.
+const (
+	DebugInfoLogLevel = InfoLogLevel(0)
+	InfoInfoLogLevel  = InfoLogLevel(1)
+	WarnInfoLogLevel  = InfoLogLevel(2)
+	ErrorInfoLogLevel = InfoLogLevel(3)
+	FatalInfoLogLevel = InfoLogLevel(4)
+)
+
+// Options represents all of the available options when opening a database with OpenDb.
+type Options struct {
+	c *C.rocksdb_options_t
+
+	// Hold references for GC.
+	env  *Env
+	bbto *BlockBasedTableOptions
+
+	// We keep these so we can free their memory in Destroy.
+	ccmp *C.rocksdb_comparator_t
+	cmo  *C.rocksdb_mergeoperator_t
+	cst  *C.rocksdb_slicetransform_t
+	ccf  *C.rocksdb_compactionfilter_t
+}
+
+// NewDefaultOptions creates the default Options.
+func NewDefaultOptions() *Options {
+	return NewNativeOptions(C.rocksdb_options_create())
+}
+
+// NewNativeOptions creates a Options object.
+func NewNativeOptions(c *C.rocksdb_options_t) *Options {
+	return &Options{c: c}
+}
+
+// -------------------
+// Parameters that affect behavior
+
+// SetCompactionFilter sets the specified compaction filter
+// which will be applied on compactions.
+// Default: nil
+func (opts *Options) SetCompactionFilter(value CompactionFilter) {
+	if nc, ok := value.(nativeCompactionFilter); ok {
+		opts.ccf = nc.c
+	} else {
+		idx := registerCompactionFilter(value)
+		opts.ccf = C.gorocksdb_compactionfilter_create(C.uintptr_t(idx))
+	}
+	C.rocksdb_options_set_compaction_filter(opts.c, opts.ccf)
+}
+
+// SetComparator sets the comparator which defines the order of keys in the table.
+// Default: a comparator that uses lexicographic byte-wise ordering
+func (opts *Options) SetComparator(value Comparator) {
+	if nc, ok := value.(nativeComparator); ok {
+		opts.ccmp = nc.c
+	} else {
+		idx := registerComperator(value)
+		opts.ccmp = C.gorocksdb_comparator_create(C.uintptr_t(idx))
+	}
+	C.rocksdb_options_set_comparator(opts.c, opts.ccmp)
+}
+
+// SetMergeOperator sets the merge operator which will be called
+// if merge operations are used.
+// Default: nil
+func (opts *Options) SetMergeOperator(value MergeOperator) {
+	if nmo, ok := value.(nativeMergeOperator); ok {
+		opts.cmo = nmo.c
+	} else {
+		idx := registerMergeOperator(value)
+		opts.cmo = C.gorocksdb_mergeoperator_create(C.uintptr_t(idx))
+	}
+	C.rocksdb_options_set_merge_operator(opts.c, opts.cmo)
+}
+
+// A single CompactionFilter instance to call into during compaction.
+// Allows an application to modify/delete a key-value during background
+// compaction.
+//
+// If the client requires a new compaction filter to be used for different
+// compaction runs, it can specify compaction_filter_factory instead of this
+// option. The client should specify only one of the two.
+// compaction_filter takes precedence over compaction_filter_factory if
+// client specifies both.
+//
+// If multithreaded compaction is being used, the supplied CompactionFilter
+// instance may be used from different threads concurrently and so should be
+// thread-safe.
+//
+// Default: nil
+// TODO: implement in C
+//func (opts *Options) SetCompactionFilter(value *CompactionFilter) {
+//	C.rocksdb_options_set_compaction_filter(opts.c, value.filter)
+//}
+
+// This is a factory that provides compaction filter objects which allow
+// an application to modify/delete a key-value during background compaction.
+//
+// A new filter will be created on each compaction run.  If multithreaded
+// compaction is being used, each created CompactionFilter will only be used
+// from a single thread and so does not need to be thread-safe.
+//
+// Default: a factory that doesn't provide any object
+// std::shared_ptr<CompactionFilterFactory> compaction_filter_factory;
+// TODO: implement in C and Go
+
+// Version TWO of the compaction_filter_factory
+// It supports rolling compaction
+//
+// Default: a factory that doesn't provide any object
+// std::shared_ptr<CompactionFilterFactoryV2> compaction_filter_factory_v2;
+// TODO: implement in C and Go
+
+// SetCreateIfMissing specifies whether the database
+// should be created if it is missing.
+// Default: false
+func (opts *Options) SetCreateIfMissing(value bool) {
+	C.rocksdb_options_set_create_if_missing(opts.c, boolToChar(value))
+}
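+
+// A minimal usage sketch; gorocksdb.OpenDb is assumed to be defined in db.go
+// (not shown in this file) and the path is an arbitrary example value.
+//
+//	opts := gorocksdb.NewDefaultOptions()
+//	defer opts.Destroy()
+//	opts.SetCreateIfMissing(true)
+//	db, err := gorocksdb.OpenDb(opts, "/tmp/example-db")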
+
+// SetErrorIfExists specifies whether an error should be raised
+// if the database already exists.
+// Default: false
+func (opts *Options) SetErrorIfExists(value bool) {
+	C.rocksdb_options_set_error_if_exists(opts.c, boolToChar(value))
+}
+
+// SetParanoidChecks enable/disable paranoid checks.
+//
+// If true, the implementation will do aggressive checking of the
+// data it is processing and will stop early if it detects any
+// errors. This may have unforeseen ramifications: for example, a
+// corruption of one DB entry may cause a large number of entries to
+// become unreadable or for the entire DB to become unopenable.
+// If any of the  writes to the database fails (Put, Delete, Merge, Write),
+// the database will switch to read-only mode and fail all other
+// Write operations.
+// Default: false
+func (opts *Options) SetParanoidChecks(value bool) {
+	C.rocksdb_options_set_paranoid_checks(opts.c, boolToChar(value))
+}
+
+// SetEnv sets the specified object to interact with the environment,
+// e.g. to read/write files, schedule background work, etc.
+// Default: DefaultEnv
+func (opts *Options) SetEnv(value *Env) {
+	opts.env = value
+
+	C.rocksdb_options_set_env(opts.c, value.c)
+}
+
+// SetInfoLogLevel sets the info log level.
+// Default: InfoInfoLogLevel
+func (opts *Options) SetInfoLogLevel(value InfoLogLevel) {
+	C.rocksdb_options_set_info_log_level(opts.c, C.int(value))
+}
+
+// IncreaseParallelism sets the parallelism.
+//
+// By default, RocksDB uses only one background thread for flush and
+// compaction. Calling this function will set it up such that a total of
+// `total_threads` is used. Good value for `total_threads` is the number of
+// cores. You almost definitely want to call this function if your system is
+// bottlenecked by RocksDB.
+func (opts *Options) IncreaseParallelism(total_threads int) {
+	C.rocksdb_options_increase_parallelism(opts.c, C.int(total_threads))
+}
+
+// OptimizeForPointLookup optimizes the DB for point lookups.
+//
+// Use this if you don't need to keep the data sorted, i.e. you'll never use
+// an iterator, only Put() and Get() API calls
+func (opts *Options) OptimizeForPointLookup(block_cache_size_mb uint64) {
+	C.rocksdb_options_optimize_for_point_lookup(opts.c, C.uint64_t(block_cache_size_mb))
+}
+
+// OptimizeLevelStyleCompaction optimizes the DB for level style compaction.
+//
+// Default values for some parameters in ColumnFamilyOptions are not
+// optimized for heavy workloads and big datasets, which means you might
+// observe write stalls under some conditions. As a starting point for tuning
+// RocksDB options, use the following two functions:
+// * OptimizeLevelStyleCompaction -- optimizes level style compaction
+// * OptimizeUniversalStyleCompaction -- optimizes universal style compaction
+// Universal style compaction is focused on reducing Write Amplification
+// Factor for big data sets, but increases Space Amplification. You can learn
+// more about the different styles here:
+// https://github.com/facebook/rocksdb/wiki/Rocksdb-Architecture-Guide
+// Make sure to also call IncreaseParallelism(), which will provide the
+// biggest performance gains.
+// Note: we might use more memory than memtable_memory_budget during high
+// write rate period
+func (opts *Options) OptimizeLevelStyleCompaction(memtable_memory_budget uint64) {
+	C.rocksdb_options_optimize_level_style_compaction(opts.c, C.uint64_t(memtable_memory_budget))
+}
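+
+// An illustrative sketch of the combination recommended above for write-heavy
+// workloads; the 512MB memtable budget is an arbitrary example value and
+// runtime.NumCPU requires importing "runtime".
+//
+//	opts := gorocksdb.NewDefaultOptions()
+//	opts.IncreaseParallelism(runtime.NumCPU())
+//	opts.OptimizeLevelStyleCompaction(512 << 20)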
+
+// OptimizeUniversalStyleCompaction optimizes the DB for universal compaction.
+// See note on OptimizeLevelStyleCompaction.
+func (opts *Options) OptimizeUniversalStyleCompaction(memtable_memory_budget uint64) {
+	C.rocksdb_options_optimize_universal_style_compaction(opts.c, C.uint64_t(memtable_memory_budget))
+}
+
+// SetWriteBufferSize sets the amount of data to build up in memory
+// (backed by an unsorted log on disk) before converting to a sorted on-disk file.
+//
+// Larger values increase performance, especially during bulk loads.
+// Up to max_write_buffer_number write buffers may be held in memory
+// at the same time,
+// so you may wish to adjust this parameter to control memory usage.
+// Also, a larger write buffer will result in a longer recovery time
+// the next time the database is opened.
+// Default: 4MB
+func (opts *Options) SetWriteBufferSize(value int) {
+	C.rocksdb_options_set_write_buffer_size(opts.c, C.size_t(value))
+}
+
+// SetMaxWriteBufferNumber sets the maximum number of write buffers
+// that are built up in memory.
+//
+// The default is 2, so that when 1 write buffer is being flushed to
+// storage, new writes can continue to the other write buffer.
+// Default: 2
+func (opts *Options) SetMaxWriteBufferNumber(value int) {
+	C.rocksdb_options_set_max_write_buffer_number(opts.c, C.int(value))
+}
+
+// SetMinWriteBufferNumberToMerge sets the minimum number of write buffers
+// that will be merged together before writing to storage.
+//
+// If set to 1, then all write buffers are flushed to L0 as individual files
+// and this increases read amplification because a get request has to check
+// in all of these files. Also, an in-memory merge may result in writing less
+// data to storage if there are duplicate records in each of these
+// individual write buffers.
+// Default: 1
+func (opts *Options) SetMinWriteBufferNumberToMerge(value int) {
+	C.rocksdb_options_set_min_write_buffer_number_to_merge(opts.c, C.int(value))
+}
+
+// SetMaxOpenFiles sets the number of open files that can be used by the DB.
+//
+// You may need to increase this if your database has a large working set
+// (budget one open file per 2MB of working set).
+// Default: 1000
+func (opts *Options) SetMaxOpenFiles(value int) {
+	C.rocksdb_options_set_max_open_files(opts.c, C.int(value))
+}
+
+// SetCompression sets the compression algorithm.
+// Default: SnappyCompression, which gives lightweight but fast
+// compression.
+func (opts *Options) SetCompression(value CompressionType) {
+	C.rocksdb_options_set_compression(opts.c, C.int(value))
+}
+
+// SetCompressionPerLevel sets different compression algorithm per level.
+//
+// Different levels can have different compression policies. There
+// are cases where most lower levels would like to use a quick compression
+// algorithm while the higher levels (which have more data) use
+// compression algorithms that have better compression but could
+// be slower. This array should have an entry for
+// each level of the database. This array overrides the
+// value specified in the previous field 'compression'.
+func (opts *Options) SetCompressionPerLevel(value []CompressionType) {
+	cLevels := make([]C.int, len(value))
+	for i, v := range value {
+		cLevels[i] = C.int(v)
+	}
+
+	C.rocksdb_options_set_compression_per_level(opts.c, &cLevels[0], C.size_t(len(value)))
+}
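+
+// An illustrative per-level policy: keep the small, hot levels uncompressed and
+// compress the colder, larger levels more aggressively. The level count and
+// compression choices are example values only.
+//
+//	opts.SetNumLevels(4)
+//	opts.SetCompressionPerLevel([]gorocksdb.CompressionType{
+//		gorocksdb.NoCompression,     // L0
+//		gorocksdb.NoCompression,     // L1
+//		gorocksdb.SnappyCompression, // L2
+//		gorocksdb.ZLibCompression,   // L3
+//	})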
+
+// SetMinLevelToCompress sets the start level to use compression.
+func (opts *Options) SetMinLevelToCompress(value int) {
+	C.rocksdb_options_set_min_level_to_compress(opts.c, C.int(value))
+}
+
+// SetCompressionOptions sets different options for compression algorithms.
+// Default: nil
+func (opts *Options) SetCompressionOptions(value *CompressionOptions) {
+	C.rocksdb_options_set_compression_options(opts.c, C.int(value.WindowBits), C.int(value.Level), C.int(value.Strategy))
+}
+
+// SetPrefixExtractor sets the prefix extractor.
+//
+// If set, use the specified function to determine the
+// prefixes for keys. These prefixes will be placed in the filter.
+// Depending on the workload, this can reduce the read-IOP
+// cost of scans when a prefix is passed via ReadOptions to
+// db.NewIterator().
+// Default: nil
+func (opts *Options) SetPrefixExtractor(value SliceTransform) {
+	if nst, ok := value.(nativeSliceTransform); ok {
+		opts.cst = nst.c
+	} else {
+		idx := registerSliceTransform(value)
+		opts.cst = C.gorocksdb_slicetransform_create(C.uintptr_t(idx))
+	}
+	C.rocksdb_options_set_prefix_extractor(opts.c, opts.cst)
+}
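+
+// A short sketch: NewFixedPrefixTransform (defined in slice_transform.go, later
+// in this change) is the usual choice when keys share a fixed-length prefix.
+//
+//	opts.SetPrefixExtractor(gorocksdb.NewFixedPrefixTransform(8))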
+
+// SetNumLevels sets the number of levels for this database.
+// Default: 7
+func (opts *Options) SetNumLevels(value int) {
+	C.rocksdb_options_set_num_levels(opts.c, C.int(value))
+}
+
+// SetLevel0FileNumCompactionTrigger sets the number of files
+// to trigger level-0 compaction.
+//
+// A value <0 means that level-0 compaction will not be
+// triggered by number of files at all.
+// Default: 4
+func (opts *Options) SetLevel0FileNumCompactionTrigger(value int) {
+	C.rocksdb_options_set_level0_file_num_compaction_trigger(opts.c, C.int(value))
+}
+
+// SetLevel0SlowdownWritesTrigger sets the soft limit on number of level-0 files.
+//
+// We start slowing down writes at this point.
+// A value <0 means that no writing slow down will be triggered by
+// number of files in level-0.
+// Default: 8
+func (opts *Options) SetLevel0SlowdownWritesTrigger(value int) {
+	C.rocksdb_options_set_level0_slowdown_writes_trigger(opts.c, C.int(value))
+}
+
+// SetLevel0StopWritesTrigger sets the maximum number of level-0 files.
+// We stop writes at this point.
+// Default: 12
+func (opts *Options) SetLevel0StopWritesTrigger(value int) {
+	C.rocksdb_options_set_level0_stop_writes_trigger(opts.c, C.int(value))
+}
+
+// SetMaxMemCompactionLevel sets the maximum level
+// to which a new compacted memtable is pushed if it does not create overlap.
+//
+// We try to push to level 2 to avoid the
+// relatively expensive level 0=>1 compactions and to avoid some
+// expensive manifest file operations. We do not push all the way to
+// the largest level since that can generate a lot of wasted disk
+// space if the same key space is being repeatedly overwritten.
+// Default: 2
+func (opts *Options) SetMaxMemCompactionLevel(value int) {
+	C.rocksdb_options_set_max_mem_compaction_level(opts.c, C.int(value))
+}
+
+// SetTargetFileSizeBase sets the target file size for compaction.
+//
+// Target file size is per-file size for level-1.
+// Target file size for level L can be calculated by
+// target_file_size_base * (target_file_size_multiplier ^ (L-1))
+//
+// For example, if target_file_size_base is 2MB and
+// target_file_size_multiplier is 10, then each file on level-1 will
+// be 2MB, and each file on level 2 will be 20MB,
+// and each file on level-3 will be 200MB.
+// Default: 2MB
+func (opts *Options) SetTargetFileSizeBase(value uint64) {
+	C.rocksdb_options_set_target_file_size_base(opts.c, C.uint64_t(value))
+}
+
+// SetTargetFileSizeMultiplier sets the target file size multiplier for compaction.
+// Default: 1
+func (opts *Options) SetTargetFileSizeMultiplier(value int) {
+	C.rocksdb_options_set_target_file_size_multiplier(opts.c, C.int(value))
+}
+
+// SetMaxBytesForLevelBase sets the maximum total data size for a level.
+//
+// It is the max total for level-1.
+// Maximum number of bytes for level L can be calculated as
+// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
+//
+// For example, if max_bytes_for_level_base is 20MB, and if
+// max_bytes_for_level_multiplier is 10, total data size for level-1
+// will be 20MB, total file size for level-2 will be 200MB,
+// and total file size for level-3 will be 2GB.
+// Default: 10MB
+func (opts *Options) SetMaxBytesForLevelBase(value uint64) {
+	C.rocksdb_options_set_max_bytes_for_level_base(opts.c, C.uint64_t(value))
+}
+
+// SetMaxBytesForLevelMultiplier sets the max Bytes for level multiplier.
+// Default: 10
+func (opts *Options) SetMaxBytesForLevelMultiplier(value int) {
+	C.rocksdb_options_set_max_bytes_for_level_multiplier(opts.c, C.int(value))
+}
+
+// SetMaxBytesForLevelMultiplierAdditional sets different max-size multipliers
+// for different levels.
+//
+// These are multiplied by max_bytes_for_level_multiplier to arrive
+// at the max-size of each level.
+// Default: 1 for each level
+func (opts *Options) SetMaxBytesForLevelMultiplierAdditional(value []int) {
+	cLevels := make([]C.int, len(value))
+	for i, v := range value {
+		cLevels[i] = C.int(v)
+	}
+
+	C.rocksdb_options_set_max_bytes_for_level_multiplier_additional(opts.c, &cLevels[0], C.size_t(len(value)))
+}
+
+// SetExpandedCompactionFactor sets the maximum number of bytes
+// in all compacted files.
+//
+// We avoid expanding the lower level file set of a compaction
+// if it would make the total compaction cover more than
+// (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
+// Default: 25
+func (opts *Options) SetExpandedCompactionFactor(value int) {
+	C.rocksdb_options_set_expanded_compaction_factor(opts.c, C.int(value))
+}
+
+// SetSourceCompactionFactor sets the maximum number of bytes
+// in all source files to be compacted in a single compaction run.
+//
+// We avoid picking too many files in the
+// source level so that the total source bytes for compaction
+// do not exceed
+// (source_compaction_factor * targetFileSizeLevel()) many bytes.
+// Default: 1
+func (opts *Options) SetSourceCompactionFactor(value int) {
+	C.rocksdb_options_set_source_compaction_factor(opts.c, C.int(value))
+}
+
+// SetMaxGrandparentOverlapFactor sets the maximum bytes
+// of overlaps in grandparent (i.e., level+2) before we
+// stop building a single file in a level->level+1 compaction.
+// Default: 10
+func (opts *Options) SetMaxGrandparentOverlapFactor(value int) {
+	C.rocksdb_options_set_max_grandparent_overlap_factor(opts.c, C.int(value))
+}
+
+// SetDisableDataSync enable/disable data sync.
+//
+// If true, then the contents of data files are not synced
+// to stable storage. Their contents remain in the OS buffers till the
+// OS decides to flush them. This option is good for bulk-loading
+// of data. Once the bulk-loading is complete, please issue a
+// sync to the OS to flush all dirty buffers to stable storage.
+// Default: false
+func (opts *Options) SetDisableDataSync(value bool) {
+	C.rocksdb_options_set_disable_data_sync(opts.c, C.int(btoi(value)))
+}
+
+// SetUseFsync enable/disable fsync.
+//
+// If true, then every store to stable storage will issue a fsync.
+// If false, then every store to stable storage will issue a fdatasync.
+// This parameter should be set to true while storing data to
+// filesystem like ext3 that can lose files after a reboot.
+// Default: false
+func (opts *Options) SetUseFsync(value bool) {
+	C.rocksdb_options_set_use_fsync(opts.c, C.int(btoi(value)))
+}
+
+// SetDbLogDir specifies the absolute info LOG dir.
+//
+// If it is empty, the log files will be in the same dir as data.
+// If it is non empty, the log files will be in the specified dir,
+// and the db data dir's absolute path will be used as the log file
+// name's prefix.
+// Default: empty
+func (opts *Options) SetDbLogDir(value string) {
+	cvalue := C.CString(value)
+	defer C.free(unsafe.Pointer(cvalue))
+	C.rocksdb_options_set_db_log_dir(opts.c, cvalue)
+}
+
+// SetWalDir specifies the absolute dir path for write-ahead logs (WAL).
+//
+// If it is empty, the log files will be in the same dir as data.
+// If it is non empty, the log files will be in the specified dir,
+// When destroying the db, all log files and the dir itself are deleted.
+// Default: empty
+func (opts *Options) SetWalDir(value string) {
+	cvalue := C.CString(value)
+	defer C.free(unsafe.Pointer(cvalue))
+	C.rocksdb_options_set_wal_dir(opts.c, cvalue)
+}
+
+// SetDeleteObsoleteFilesPeriodMicros sets the periodicity
+// when obsolete files get deleted.
+//
+// The files that get out of scope by compaction
+// process will still get automatically deleted on every compaction,
+// regardless of this setting.
+// Default: 6 hours
+func (opts *Options) SetDeleteObsoleteFilesPeriodMicros(value uint64) {
+	C.rocksdb_options_set_delete_obsolete_files_period_micros(opts.c, C.uint64_t(value))
+}
+
+// SetMaxBackgroundCompactions sets the maximum number of
+// concurrent background jobs, submitted to
+// the default LOW priority thread pool
+// Default: 1
+func (opts *Options) SetMaxBackgroundCompactions(value int) {
+	C.rocksdb_options_set_max_background_compactions(opts.c, C.int(value))
+}
+
+// SetMaxBackgroundFlushes sets the maximum number of
+// concurrent background memtable flush jobs, submitted to
+// the HIGH priority thread pool.
+//
+// By default, all background jobs (major compaction and memtable flush) go
+// to the LOW priority pool. If this option is set to a positive number,
+// memtable flush jobs will be submitted to the HIGH priority pool.
+// It is important when the same Env is shared by multiple db instances.
+// Without a separate pool, long running major compaction jobs could
+// potentially block memtable flush jobs of other db instances, leading to
+// unnecessary Put stalls.
+// Default: 0
+func (opts *Options) SetMaxBackgroundFlushes(value int) {
+	C.rocksdb_options_set_max_background_flushes(opts.c, C.int(value))
+}
+
+// SetMaxLogFileSize sets the maximal size of the info log file.
+//
+// If the log file is larger than `max_log_file_size`, a new info log
+// file will be created.
+// If max_log_file_size == 0, all logs will be written to one log file.
+// Default: 0
+func (opts *Options) SetMaxLogFileSize(value int) {
+	C.rocksdb_options_set_max_log_file_size(opts.c, C.size_t(value))
+}
+
+// SetLogFileTimeToRoll sets the time for the info log file to roll (in seconds).
+//
+// If specified with non-zero value, log file will be rolled
+// if it has been active longer than `log_file_time_to_roll`.
+// Default: 0 (disabled)
+func (opts *Options) SetLogFileTimeToRoll(value int) {
+	C.rocksdb_options_set_log_file_time_to_roll(opts.c, C.size_t(value))
+}
+
+// SetKeepLogFileNum sets the maximal info log files to be kept.
+// Default: 1000
+func (opts *Options) SetKeepLogFileNum(value int) {
+	C.rocksdb_options_set_keep_log_file_num(opts.c, C.size_t(value))
+}
+
+// SetSoftRateLimit sets the soft rate limit.
+//
+// Puts are delayed 0-1 ms when any level has a compaction score that exceeds
+// soft_rate_limit. This is ignored when == 0.0.
+// CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not
+// hold, RocksDB will set soft_rate_limit = hard_rate_limit
+// Default: 0.0 (disabled)
+func (opts *Options) SetSoftRateLimit(value float64) {
+	C.rocksdb_options_set_soft_rate_limit(opts.c, C.double(value))
+}
+
+// SetHardRateLimit sets the hard rate limit.
+//
+// Puts are delayed 1ms at a time when any level has a compaction score that
+// exceeds hard_rate_limit. This is ignored when <= 1.0.
+// Default: 0.0 (disabled)
+func (opts *Options) SetHardRateLimit(value float64) {
+	C.rocksdb_options_set_hard_rate_limit(opts.c, C.double(value))
+}
+
+// SetRateLimitDelayMaxMilliseconds sets the max time
+// a put will be stalled when hard_rate_limit is enforced.
+// If 0, then there is no limit.
+// Default: 1000
+func (opts *Options) SetRateLimitDelayMaxMilliseconds(value uint) {
+	C.rocksdb_options_set_rate_limit_delay_max_milliseconds(opts.c, C.uint(value))
+}
+
+// SetMaxManifestFileSize sets the maximal manifest file size until it is rolled over.
+// The older manifest file will be deleted.
+// Default: MAX_INT so that roll-over does not take place.
+func (opts *Options) SetMaxManifestFileSize(value uint64) {
+	C.rocksdb_options_set_max_manifest_file_size(opts.c, C.size_t(value))
+}
+
+// SetTableCacheNumshardbits sets the number of shards used for table cache.
+// Default: 4
+func (opts *Options) SetTableCacheNumshardbits(value int) {
+	C.rocksdb_options_set_table_cache_numshardbits(opts.c, C.int(value))
+}
+
+// SetTableCacheRemoveScanCountLimit sets the count limit during a scan.
+//
+// During data eviction of table's LRU cache, it would be inefficient
+// to strictly follow LRU because this piece of memory will not really
+// be released unless its refcount falls to zero. Instead, make two
+// passes: the first pass will release items with refcount = 1,
+// and if not enough space is released after scanning the number of
+// elements specified by this parameter, we will remove items in LRU order.
+// Default: 16
+func (opts *Options) SetTableCacheRemoveScanCountLimit(value int) {
+	C.rocksdb_options_set_table_cache_remove_scan_count_limit(opts.c, C.int(value))
+}
+
+// SetArenaBlockSize sets the size of one block in arena memory allocation.
+//
+// If <= 0, a proper value is automatically calculated (usually 1/10 of
+// writer_buffer_size).
+// Default: 0
+func (opts *Options) SetArenaBlockSize(value int) {
+	C.rocksdb_options_set_arena_block_size(opts.c, C.size_t(value))
+}
+
+// SetDisableAutoCompactions enable/disable automatic compactions.
+//
+// Manual compactions can still be issued on this database.
+// Default: false
+func (opts *Options) SetDisableAutoCompactions(value bool) {
+	C.rocksdb_options_set_disable_auto_compactions(opts.c, C.int(btoi(value)))
+}
+
+// SetWALTtlSeconds sets the WAL ttl in seconds.
+//
+// The following two options affect how archived logs will be deleted.
+// 1. If both set to 0, logs will be deleted asap and will not get into
+//    the archive.
+// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
+//    WAL files will be checked every 10 min and if total size is greater
+//    than wal_size_limit_mb, they will be deleted starting with the
+//    earliest until size_limit is met. All empty files will be deleted.
+// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
+//    WAL files will be checked every wal_ttl_seconds / 2 and those that
+//    are older than wal_ttl_seconds will be deleted.
+// 4. If both are not 0, WAL files will be checked every 10 min and both
+//    checks will be performed with ttl being first.
+// Default: 0
+func (opts *Options) SetWALTtlSeconds(value uint64) {
+	C.rocksdb_options_set_WAL_ttl_seconds(opts.c, C.uint64_t(value))
+}
+
+// SetWalSizeLimitMb sets the WAL size limit in MB.
+//
+// If the total size of WAL files is greater than wal_size_limit_mb,
+// they will be deleted starting with the earliest until size_limit is met.
+// Default: 0
+func (opts *Options) SetWalSizeLimitMb(value uint64) {
+	C.rocksdb_options_set_WAL_size_limit_MB(opts.c, C.uint64_t(value))
+}
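+
+// An illustrative combination of the two WAL archival knobs described above:
+// keep archived logs for one hour, but never more than 1GB of them.
+//
+//	opts.SetWALTtlSeconds(3600)
+//	opts.SetWalSizeLimitMb(1024)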
+
+// SetManifestPreallocationSize sets the number of bytes
+// to preallocate (via fallocate) the manifest files.
+//
+// Default is 4mb, which is reasonable to reduce random IO
+// as well as prevent overallocation for mounts that preallocate
+// large amounts of data (such as xfs's allocsize option).
+// Default: 4mb
+func (opts *Options) SetManifestPreallocationSize(value int) {
+	C.rocksdb_options_set_manifest_preallocation_size(opts.c, C.size_t(value))
+}
+
+// SetPurgeRedundantKvsWhileFlush enable/disable purging of
+// duplicate/deleted keys when a memtable is flushed to storage.
+// Default: true
+func (opts *Options) SetPurgeRedundantKvsWhileFlush(value bool) {
+	C.rocksdb_options_set_purge_redundant_kvs_while_flush(opts.c, boolToChar(value))
+}
+
+// SetAllowOsBuffer enable/disable os buffer.
+//
+// Data being read from file storage may be buffered in the OS
+// Default: true
+func (opts *Options) SetAllowOsBuffer(value bool) {
+	C.rocksdb_options_set_allow_os_buffer(opts.c, boolToChar(value))
+}
+
+// SetAllowMmapReads enable/disable mmap reads for reading sst tables.
+// Default: false
+func (opts *Options) SetAllowMmapReads(value bool) {
+	C.rocksdb_options_set_allow_mmap_reads(opts.c, boolToChar(value))
+}
+
+// SetAllowMmapWrites enable/disable mmap writes for writing sst tables.
+// Default: true
+func (opts *Options) SetAllowMmapWrites(value bool) {
+	C.rocksdb_options_set_allow_mmap_writes(opts.c, boolToChar(value))
+}
+
+// SetIsFdCloseOnExec enable/disable child process inheriting open files.
+// Default: true
+func (opts *Options) SetIsFdCloseOnExec(value bool) {
+	C.rocksdb_options_set_is_fd_close_on_exec(opts.c, boolToChar(value))
+}
+
+// SetSkipLogErrorOnRecovery enable/disable skipping of
+// log corruption error on recovery (If client is ok with
+// losing most recent changes)
+// Default: false
+func (opts *Options) SetSkipLogErrorOnRecovery(value bool) {
+	C.rocksdb_options_set_skip_log_error_on_recovery(opts.c, boolToChar(value))
+}
+
+// SetStatsDumpPeriodSec sets the stats dump period in seconds.
+//
+// If not zero, dump stats to LOG every stats_dump_period_sec
+// Default: 3600 (1 hour)
+func (opts *Options) SetStatsDumpPeriodSec(value uint) {
+	C.rocksdb_options_set_stats_dump_period_sec(opts.c, C.uint(value))
+}
+
+// SetAdviseRandomOnOpen specifies whether we will hint the underlying
+// file system that the file access pattern is random, when a sst file is opened.
+// Default: true
+func (opts *Options) SetAdviseRandomOnOpen(value bool) {
+	C.rocksdb_options_set_advise_random_on_open(opts.c, boolToChar(value))
+}
+
+// SetAccessHintOnCompactionStart specifies the file access pattern
+// once a compaction is started.
+//
+// It will be applied to all input files of a compaction.
+// Default: NormalCompactionAccessPattern
+func (opts *Options) SetAccessHintOnCompactionStart(value CompactionAccessPattern) {
+	C.rocksdb_options_set_access_hint_on_compaction_start(opts.c, C.int(value))
+}
+
+// SetUseAdaptiveMutex enable/disable adaptive mutex, which spins
+// in the user space before resorting to kernel.
+//
+// This could reduce context switch when the mutex is not
+// heavily contended. However, if the mutex is hot, we could end up
+// wasting spin time.
+// Default: false
+func (opts *Options) SetUseAdaptiveMutex(value bool) {
+	C.rocksdb_options_set_use_adaptive_mutex(opts.c, boolToChar(value))
+}
+
+// SetBytesPerSync sets the bytes per sync.
+//
+// Allows OS to incrementally sync files to disk while they are being
+// written, asynchronously, in the background.
+// Issue one request for every bytes_per_sync written.
+// Default: 0 (disabled)
+func (opts *Options) SetBytesPerSync(value uint64) {
+	C.rocksdb_options_set_bytes_per_sync(opts.c, C.uint64_t(value))
+}
+
+// SetCompactionStyle sets the compaction style.
+// Default: LevelCompactionStyle
+func (opts *Options) SetCompactionStyle(value CompactionStyle) {
+	C.rocksdb_options_set_compaction_style(opts.c, C.int(value))
+}
+
+// SetUniversalCompactionOptions sets the options needed
+// to support Universal Style compactions.
+// Default: nil
+func (opts *Options) SetUniversalCompactionOptions(value *UniversalCompactionOptions) {
+	C.rocksdb_options_set_universal_compaction_options(opts.c, value.c)
+}
+
+// SetFIFOCompactionOptions sets the options for FIFO compaction style.
+// Default: nil
+func (opts *Options) SetFIFOCompactionOptions(value *FIFOCompactionOptions) {
+	C.rocksdb_options_set_fifo_compaction_options(opts.c, value.c)
+}
+
+// SetVerifyChecksumsInCompaction enable/disable checksum verification.
+//
+// If true, compaction will verify checksum on every read that happens
+// as part of compaction
+// Default: true
+func (opts *Options) SetVerifyChecksumsInCompaction(value bool) {
+	C.rocksdb_options_set_verify_checksums_in_compaction(opts.c, boolToChar(value))
+}
+
+// SetFilterDeletes enable/disable filtering of deleted keys.
+//
+// Use KeyMayExist API to filter deletes when this is true.
+// If KeyMayExist returns false, i.e. the key definitely does not exist, then
+// the delete is a noop. KeyMayExist only incurs in-memory look up.
+// This optimization avoids writing the delete to storage when appropriate.
+// Default: false
+func (opts *Options) SetFilterDeletes(value bool) {
+	C.rocksdb_options_set_filter_deletes(opts.c, boolToChar(value))
+}
+
+// SetMaxSequentialSkipInIterations sets the limit on how many keys with the same
+// user-key an iterator->Next() sequentially skips over before a reseek is issued.
+//
+// This number specifies the number of keys (with the same userkey)
+// that will be sequentially skipped before a reseek is issued.
+// Default: 8
+func (opts *Options) SetMaxSequentialSkipInIterations(value uint64) {
+	C.rocksdb_options_set_max_sequential_skip_in_iterations(opts.c, C.uint64_t(value))
+}
+
+// SetInplaceUpdateSupport enable/disable thread-safe inplace updates.
+//
+// An update is done in-place only if
+// * the key exists in the current memtable
+// * sizeof(new_value) <= sizeof(old_value)
+// * old_value for that key is a put i.e. kTypeValue
+// Default: false.
+func (opts *Options) SetInplaceUpdateSupport(value bool) {
+	C.rocksdb_options_set_inplace_update_support(opts.c, boolToChar(value))
+}
+
+// SetInplaceUpdateNumLocks sets the number of locks used for inplace update.
+// Default: 10000, if inplace_update_support = true, else 0.
+func (opts *Options) SetInplaceUpdateNumLocks(value int) {
+	C.rocksdb_options_set_inplace_update_num_locks(opts.c, C.size_t(value))
+}
+
+// SetMemtablePrefixBloomBits sets the bloom bits for prefix extractor.
+//
+// If prefix_extractor is set and bloom_bits is not 0, create prefix bloom
+// for memtable.
+// Default: 0
+func (opts *Options) SetMemtablePrefixBloomBits(value uint32) {
+	C.rocksdb_options_set_memtable_prefix_bloom_bits(opts.c, C.uint32_t(value))
+}
+
+// SetMemtablePrefixBloomProbes sets the number of hash probes per key.
+// Default: 6
+func (opts *Options) SetMemtablePrefixBloomProbes(value uint32) {
+	C.rocksdb_options_set_memtable_prefix_bloom_probes(opts.c, C.uint32_t(value))
+}
+
+// SetBloomLocality sets the bloom locality.
+//
+// Control locality of bloom filter probes to improve cache miss rate.
+// This option only applies to memtable prefix bloom and plaintable
+// prefix bloom. It essentially limits the max number of cache lines each
+// bloom filter check can touch.
+// This optimization is turned off when set to 0. The number should never
+// be greater than number of probes. This option can boost performance
+// for in-memory workload but should use with care since it can cause
+// higher false positive rate.
+// Default: 0
+func (opts *Options) SetBloomLocality(value uint32) {
+	C.rocksdb_options_set_bloom_locality(opts.c, C.uint32_t(value))
+}
+
+// SetMaxSuccessiveMerges sets the maximum number of
+// successive merge operations on a key in the memtable.
+//
+// When a merge operation is added to the memtable and the maximum number of
+// successive merges is reached, the value of the key will be calculated and
+// inserted into the memtable instead of the merge operation. This will
+// ensure that there are never more than max_successive_merges merge
+// operations in the memtable.
+// Default: 0 (disabled)
+func (opts *Options) SetMaxSuccessiveMerges(value int) {
+	C.rocksdb_options_set_max_successive_merges(opts.c, C.size_t(value))
+}
+
+// SetMinPartialMergeOperands sets the number of partial merge operands
+// to accumulate before partial merge will be performed.
+//
+// Partial merge will not be called if the list of values to merge
+// is less than min_partial_merge_operands.
+// If min_partial_merge_operands < 2, then it will be treated as 2.
+// Default: 2
+func (opts *Options) SetMinPartialMergeOperands(value uint32) {
+	C.rocksdb_options_set_min_partial_merge_operands(opts.c, C.uint32_t(value))
+}
+
+// EnableStatistics enables statistics.
+func (opts *Options) EnableStatistics() {
+	C.rocksdb_options_enable_statistics(opts.c)
+}
+
+// PrepareForBulkLoad prepares the DB for bulk loading.
+//
+// All data will be in level 0 without any automatic compaction.
+// It's recommended to manually call CompactRange(NULL, NULL) before reading
+// from the database, because otherwise the read can be very slow.
+func (opts *Options) PrepareForBulkLoad() {
+	C.rocksdb_options_prepare_for_bulk_load(opts.c)
+}
+
+// SetMemtableVectorRep sets a MemTableRep which is backed by a vector.
+//
+// On iteration, the vector is sorted. This is useful for workloads where
+// iteration is very rare and writes are generally not issued after reads begin.
+func (opts *Options) SetMemtableVectorRep() {
+	C.rocksdb_options_set_memtable_vector_rep(opts.c)
+}
+
+// SetHashSkipListRep sets a hash skip list as MemTableRep.
+//
+// It contains a fixed array of buckets, each
+// pointing to a skiplist (null if the bucket is empty).
+//
+// bucketCount:             number of fixed array buckets
+// skiplistHeight:          the max height of the skiplist
+// skiplistBranchingFactor: probabilistic size ratio between adjacent
+//                          link lists in the skiplist
+func (opts *Options) SetHashSkipListRep(bucketCount int, skiplistHeight, skiplistBranchingFactor int32) {
+	C.rocksdb_options_set_hash_skip_list_rep(opts.c, C.size_t(bucketCount), C.int32_t(skiplistHeight), C.int32_t(skiplistBranchingFactor))
+}
+
+// SetHashLinkListRep sets a hashed linked list as MemTableRep.
+//
+// It contains a fixed array of buckets, each pointing to a sorted single
+// linked list (null if the bucket is empty).
+//
+// bucketCount: number of fixed array buckets
+func (opts *Options) SetHashLinkListRep(bucketCount int) {
+	C.rocksdb_options_set_hash_link_list_rep(opts.c, C.size_t(bucketCount))
+}
+
+// SetPlainTableFactory sets a plain table factory with prefix-only seek.
+//
+// For this factory, you need to set prefix_extractor properly to make it
+// work. Look-up will start with a prefix hash lookup for the key prefix. Inside the
+// hash bucket found, a binary search is executed for hash conflicts. Finally,
+// a linear search is used.
+//
+// keyLen: 			plain table has optimization for fixed-size keys,
+// 					which can be specified via keyLen.
+// bloomBitsPerKey: the number of bits used for bloom filter per prefix. You
+//                  may disable it by passing a zero.
+// hashTableRatio:  the desired utilization of the hash table used for prefix
+//                  hashing. hashTableRatio = number of prefixes / #buckets
+//                  in the hash table
+// indexSparseness: inside each prefix, need to build one index record for how
+//                  many keys for binary search inside each hash bucket.
+func (opts *Options) SetPlainTableFactory(keyLen uint32, bloomBitsPerKey int, hashTableRatio float64, indexSparseness int) {
+	C.rocksdb_options_set_plain_table_factory(opts.c, C.uint32_t(keyLen), C.int(bloomBitsPerKey), C.double(hashTableRatio), C.size_t(indexSparseness))
+}
+
+// SetCreateIfMissingColumnFamilies specifies whether the column families
+// should be created if they are missing.
+func (opts *Options) SetCreateIfMissingColumnFamilies(value bool) {
+	C.rocksdb_options_set_create_missing_column_families(opts.c, boolToChar(value))
+}
+
+// SetBlockBasedTableFactory sets the block based table factory.
+func (opts *Options) SetBlockBasedTableFactory(value *BlockBasedTableOptions) {
+	opts.bbto = value
+	C.rocksdb_options_set_block_based_table_factory(opts.c, value.c)
+}
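+
+// A typical block-based table setup, sketched under the assumption that
+// NewLRUCache and NewBloomFilter exist elsewhere in the package (they are not
+// defined in this file); the sizes are example values.
+//
+//	bbto := gorocksdb.NewDefaultBlockBasedTableOptions()
+//	bbto.SetBlockCache(gorocksdb.NewLRUCache(256 << 20))
+//	bbto.SetFilterPolicy(gorocksdb.NewBloomFilter(10))
+//	opts.SetBlockBasedTableFactory(bbto)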
+
+// Destroy deallocates the Options object.
+func (opts *Options) Destroy() {
+	C.rocksdb_options_destroy(opts.c)
+	if opts.ccmp != nil {
+		C.rocksdb_comparator_destroy(opts.ccmp)
+	}
+	if opts.cmo != nil {
+		C.rocksdb_mergeoperator_destroy(opts.cmo)
+	}
+	if opts.cst != nil {
+		C.rocksdb_slicetransform_destroy(opts.cst)
+	}
+	if opts.ccf != nil {
+		C.rocksdb_compactionfilter_destroy(opts.ccf)
+	}
+	opts.c = nil
+	opts.env = nil
+	opts.bbto = nil
+}
diff --git a/vendor/github.com/tecbot/gorocksdb/options_block_based_table.go b/vendor/github.com/tecbot/gorocksdb/options_block_based_table.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6cc47f80d0b06bbe54abd658fdb59be611feaf7
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/options_block_based_table.go
@@ -0,0 +1,111 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+// #include "gorocksdb.h"
+import "C"
+
+// BlockBasedTableOptions represents block-based table options.
+type BlockBasedTableOptions struct {
+	c *C.rocksdb_block_based_table_options_t
+
+	// Hold references for GC.
+	cache     *Cache
+	compCache *Cache
+
+	// We keep these so we can free their memory in Destroy.
+	cFp *C.rocksdb_filterpolicy_t
+}
+
+// NewDefaultBlockBasedTableOptions creates a default BlockBasedTableOptions object.
+func NewDefaultBlockBasedTableOptions() *BlockBasedTableOptions {
+	return NewNativeBlockBasedTableOptions(C.rocksdb_block_based_options_create())
+}
+
+// NewNativeBlockBasedTableOptions creates a BlockBasedTableOptions object.
+func NewNativeBlockBasedTableOptions(c *C.rocksdb_block_based_table_options_t) *BlockBasedTableOptions {
+	return &BlockBasedTableOptions{c: c}
+}
+
+// Destroy deallocates the BlockBasedTableOptions object.
+func (opts *BlockBasedTableOptions) Destroy() {
+	C.rocksdb_block_based_options_destroy(opts.c)
+	opts.c = nil
+	opts.cache = nil
+	opts.compCache = nil
+}
+
+// SetBlockSize sets the approximate size of user data packed per block.
+// Note that the block size specified here corresponds to uncompressed data.
+// The actual size of the unit read from disk may be smaller if
+// compression is enabled. This parameter can be changed dynamically.
+// Default: 4K
+func (opts *BlockBasedTableOptions) SetBlockSize(blockSize int) {
+	C.rocksdb_block_based_options_set_block_size(opts.c, C.size_t(blockSize))
+}
+
+// SetBlockSizeDeviation sets the block size deviation.
+// This is used to close a block before it reaches the configured
+// 'block_size'. If the percentage of free space in the current block is less
+// than this specified number and adding a new record to the block will
+// exceed the configured block size, then this block will be closed and the
+// new record will be written to the next block.
+// Default: 10
+func (opts *BlockBasedTableOptions) SetBlockSizeDeviation(blockSizeDeviation int) {
+	C.rocksdb_block_based_options_set_block_size_deviation(opts.c, C.int(blockSizeDeviation))
+}
+
+// SetBlockRestartInterval sets the number of keys between
+// restart points for delta encoding of keys.
+// This parameter can be changed dynamically. Most clients should
+// leave this parameter alone.
+// Default: 16
+func (opts *BlockBasedTableOptions) SetBlockRestartInterval(blockRestartInterval int) {
+	C.rocksdb_block_based_options_set_block_restart_interval(opts.c, C.int(blockRestartInterval))
+}
+
+// SetFilterPolicy sets the filter policy to reduce disk reads.
+// Many applications will benefit from passing the result of
+// NewBloomFilterPolicy() here.
+// Default: nil
+func (opts *BlockBasedTableOptions) SetFilterPolicy(fp FilterPolicy) {
+	if nfp, ok := fp.(nativeFilterPolicy); ok {
+		opts.cFp = nfp.c
+	} else {
+		idx := registerFilterPolicy(fp)
+		opts.cFp = C.gorocksdb_filterpolicy_create(C.uintptr_t(idx))
+	}
+	C.rocksdb_block_based_options_set_filter_policy(opts.c, opts.cFp)
+}
+
+// SetNoBlockCache specify whether block cache should be used or not.
+// Default: false
+func (opts *BlockBasedTableOptions) SetNoBlockCache(value bool) {
+	C.rocksdb_block_based_options_set_no_block_cache(opts.c, boolToChar(value))
+}
+
+// SetBlockCache sets the control over blocks (user data is stored in a set of blocks, and
+// a block is the unit of reading from disk).
+//
+// If set, use the specified cache for blocks.
+// If nil, rocksdb will automatically create and use an 8MB internal cache.
+// Default: nil
+func (opts *BlockBasedTableOptions) SetBlockCache(cache *Cache) {
+	opts.cache = cache
+	C.rocksdb_block_based_options_set_block_cache(opts.c, cache.c)
+}
+
+// SetBlockCacheCompressed sets the cache for compressed blocks.
+// If nil, rocksdb will not use a compressed block cache.
+// Default: nil
+func (opts *BlockBasedTableOptions) SetBlockCacheCompressed(cache *Cache) {
+	opts.compCache = cache
+	C.rocksdb_block_based_options_set_block_cache_compressed(opts.c, cache.c)
+}
+
+// SetWholeKeyFiltering specify if whole keys in the filter (not just prefixes)
+// should be placed.
+// This must generally be true for gets to be efficient.
+// Default: true
+func (opts *BlockBasedTableOptions) SetWholeKeyFiltering(value bool) {
+	C.rocksdb_block_based_options_set_whole_key_filtering(opts.c, boolToChar(value))
+}
diff --git a/vendor/github.com/tecbot/gorocksdb/options_compaction.go b/vendor/github.com/tecbot/gorocksdb/options_compaction.go
new file mode 100644
index 0000000000000000000000000000000000000000..a7db213674a2b08883647df5f5a862d9277ec4f1
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/options_compaction.go
@@ -0,0 +1,130 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// UniversalCompactionStopStyle describes an algorithm used to make a
+// compaction request stop picking new files into a single compaction run.
+type UniversalCompactionStopStyle uint
+
+// Compaction stop style types.
+const (
+	CompactionStopStyleSimilarSize = UniversalCompactionStopStyle(C.rocksdb_similar_size_compaction_stop_style)
+	CompactionStopStyleTotalSize   = UniversalCompactionStopStyle(C.rocksdb_total_size_compaction_stop_style)
+)
+
+// FIFOCompactionOptions represent all of the available options for
+// FIFO compaction.
+type FIFOCompactionOptions struct {
+	c *C.rocksdb_fifo_compaction_options_t
+}
+
+// NewDefaultFIFOCompactionOptions creates a default FIFOCompactionOptions object.
+func NewDefaultFIFOCompactionOptions() *FIFOCompactionOptions {
+	return NewNativeFIFOCompactionOptions(C.rocksdb_fifo_compaction_options_create())
+}
+
+// NewNativeFIFOCompactionOptions creates a native FIFOCompactionOptions object.
+func NewNativeFIFOCompactionOptions(c *C.rocksdb_fifo_compaction_options_t) *FIFOCompactionOptions {
+	return &FIFOCompactionOptions{c}
+}
+
+// SetMaxTableFilesSize sets the max table file size.
+// Once the total sum of table files reaches this, we will delete the oldest
+// table file
+// Default: 1GB
+func (opts *FIFOCompactionOptions) SetMaxTableFilesSize(value uint64) {
+	C.rocksdb_fifo_compaction_options_set_max_table_files_size(opts.c, C.uint64_t(value))
+}
+
+// Destroy deallocates the FIFOCompactionOptions object.
+func (opts *FIFOCompactionOptions) Destroy() {
+	C.rocksdb_fifo_compaction_options_destroy(opts.c)
+}
+
+// UniversalCompactionOptions represent all of the available options for
+// universal compaction.
+type UniversalCompactionOptions struct {
+	c *C.rocksdb_universal_compaction_options_t
+}
+
+// NewDefaultUniversalCompactionOptions creates a default UniversalCompactionOptions
+// object.
+func NewDefaultUniversalCompactionOptions() *UniversalCompactionOptions {
+	return NewNativeUniversalCompactionOptions(C.rocksdb_universal_compaction_options_create())
+}
+
+// NewNativeUniversalCompactionOptions creates a UniversalCompactionOptions
+// object.
+func NewNativeUniversalCompactionOptions(c *C.rocksdb_universal_compaction_options_t) *UniversalCompactionOptions {
+	return &UniversalCompactionOptions{c}
+}
+
+// SetSizeRatio sets the percentage flexibility while comparing file size.
+// If the candidate file(s) size is 1% smaller than the next file's size,
+// then include next file into this candidate set.
+// Default: 1
+func (opts *UniversalCompactionOptions) SetSizeRatio(value uint) {
+	C.rocksdb_universal_compaction_options_set_size_ratio(opts.c, C.int(value))
+}
+
+// SetMinMergeWidth sets the minimum number of files in a single compaction run.
+// Default: 2
+func (opts *UniversalCompactionOptions) SetMinMergeWidth(value uint) {
+	C.rocksdb_universal_compaction_options_set_min_merge_width(opts.c, C.int(value))
+}
+
+// SetMaxMergeWidth sets the maximum number of files in a single compaction run.
+// Default: UINT_MAX
+func (opts *UniversalCompactionOptions) SetMaxMergeWidth(value uint) {
+	C.rocksdb_universal_compaction_options_set_max_merge_width(opts.c, C.int(value))
+}
+
+// SetMaxSizeAmplificationPercent sets the size amplification.
+// It is defined as the amount (in percentage) of
+// additional storage needed to store a single byte of data in the database.
+// For example, a size amplification of 2% means that a database that
+// contains 100 bytes of user-data may occupy up to 102 bytes of
+// physical storage. By this definition, a fully compacted database has
+// a size amplification of 0%. Rocksdb uses the following heuristic
+// to calculate size amplification: it assumes that all files excluding
+// the earliest file contribute to the size amplification.
+// Default: 200, which means that a 100 byte database could require up to
+// 300 bytes of storage.
+func (opts *UniversalCompactionOptions) SetMaxSizeAmplificationPercent(value uint) {
+	C.rocksdb_universal_compaction_options_set_max_size_amplification_percent(opts.c, C.int(value))
+}
+
+// SetCompressionSizePercent sets the percentage of compression size.
+//
+// If this option is set to be -1, all the output files
+// will follow compression type specified.
+//
+// If this option is not negative, we will try to make sure compressed
+// size is just above this value. In normal cases, at least this percentage
+// of data will be compressed.
+// When we are compacting to a new file, here is the criteria whether
+// it needs to be compressed: assuming here are the list of files sorted
+// by generation time:
+//    A1...An B1...Bm C1...Ct
+// where A1 is the newest and Ct is the oldest, and we are going to compact
+// B1...Bm, we calculate the total size of all the files as total_size, as
+// well as  the total size of C1...Ct as total_C, the compaction output file
+// will be compressed iff
+//   total_C / total_size < this percentage
+// Default: -1
+func (opts *UniversalCompactionOptions) SetCompressionSizePercent(value int) {
+	C.rocksdb_universal_compaction_options_set_compression_size_percent(opts.c, C.int(value))
+}
+
+// SetStopStyle sets the algorithm used to stop picking files into a single compaction run.
+// Default: CompactionStopStyleTotalSize
+func (opts *UniversalCompactionOptions) SetStopStyle(value UniversalCompactionStopStyle) {
+	C.rocksdb_universal_compaction_options_set_stop_style(opts.c, C.int(value))
+}
+
+// Destroy deallocates the UniversalCompactionOptions object.
+func (opts *UniversalCompactionOptions) Destroy() {
+	C.rocksdb_universal_compaction_options_destroy(opts.c)
+	opts.c = nil
+}
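+
+// A sketch of switching a database to universal compaction with these options,
+// using SetCompactionStyle and SetUniversalCompactionOptions from options.go;
+// the ratio and amplification values are illustrative only.
+//
+//	uco := gorocksdb.NewDefaultUniversalCompactionOptions()
+//	defer uco.Destroy()
+//	uco.SetSizeRatio(5)
+//	uco.SetMaxSizeAmplificationPercent(150)
+//	opts.SetCompactionStyle(gorocksdb.UniversalCompactionStyle)
+//	opts.SetUniversalCompactionOptions(uco)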
diff --git a/vendor/github.com/tecbot/gorocksdb/options_compression.go b/vendor/github.com/tecbot/gorocksdb/options_compression.go
new file mode 100644
index 0000000000000000000000000000000000000000..df7d2100e005ca475053008d8cb32d05668c4d5d
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/options_compression.go
@@ -0,0 +1,22 @@
+package gorocksdb
+
+// CompressionOptions represents options for different compression algorithms like Zlib.
+type CompressionOptions struct {
+	WindowBits int
+	Level      int
+	Strategy   int
+}
+
+// NewDefaultCompressionOptions creates a default CompressionOptions object.
+func NewDefaultCompressionOptions() *CompressionOptions {
+	return NewCompressionOptions(-14, -1, 0)
+}
+
+// NewCompressionOptions creates a CompressionOptions object.
+func NewCompressionOptions(windowBits, level, strategy int) *CompressionOptions {
+	return &CompressionOptions{
+		WindowBits: windowBits,
+		Level:      level,
+		Strategy:   strategy,
+	}
+}
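+
+// A sketch of pairing CompressionOptions with Options.SetCompressionOptions from
+// options.go; the zlib level of 6 is an illustrative value.
+//
+//	co := gorocksdb.NewCompressionOptions(-14, 6, 0)
+//	opts.SetCompressionOptions(co)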
diff --git a/vendor/github.com/tecbot/gorocksdb/options_flush.go b/vendor/github.com/tecbot/gorocksdb/options_flush.go
new file mode 100644
index 0000000000000000000000000000000000000000..518236a5d96a2994000aff903578daecf1801442
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/options_flush.go
@@ -0,0 +1,32 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// FlushOptions represent all of the available options when manual flushing the
+// database.
+type FlushOptions struct {
+	c *C.rocksdb_flushoptions_t
+}
+
+// NewDefaultFlushOptions creates a default FlushOptions object.
+func NewDefaultFlushOptions() *FlushOptions {
+	return NewNativeFlushOptions(C.rocksdb_flushoptions_create())
+}
+
+// NewNativeFlushOptions creates a FlushOptions object.
+func NewNativeFlushOptions(c *C.rocksdb_flushoptions_t) *FlushOptions {
+	return &FlushOptions{c}
+}
+
+// SetWait specify if the flush will wait until the flush is done.
+// Default: true
+func (opts *FlushOptions) SetWait(value bool) {
+	C.rocksdb_flushoptions_set_wait(opts.c, boolToChar(value))
+}
+
+// Destroy deallocates the FlushOptions object.
+func (opts *FlushOptions) Destroy() {
+	C.rocksdb_flushoptions_destroy(opts.c)
+	opts.c = nil
+}
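+
+// A minimal usage sketch, assuming a DB.Flush method defined elsewhere in the
+// package (not in this file):
+//
+//	fo := gorocksdb.NewDefaultFlushOptions()
+//	defer fo.Destroy()
+//	fo.SetWait(true)
+//	err := db.Flush(fo)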
diff --git a/vendor/github.com/tecbot/gorocksdb/options_read.go b/vendor/github.com/tecbot/gorocksdb/options_read.go
new file mode 100644
index 0000000000000000000000000000000000000000..8c028ae2ebed9b2194fcf6940273a6872e6633eb
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/options_read.go
@@ -0,0 +1,89 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+import "unsafe"
+
+// ReadTier controls fetching of data during a read request.
+// An application can issue a read request (via Get/Iterators) and specify
+// if that read should process data that ALREADY resides on a specified cache
+// level. For example, if an application specifies BlockCacheTier then the
+// Get call will process data that is already processed in the memtable or
+// the block cache. It will not page in data from the OS cache or data that
+// resides in storage.
+type ReadTier uint
+
+const (
+	// ReadAllTier reads data in memtable, block cache, OS cache or storage.
+	ReadAllTier = ReadTier(0)
+	// BlockCacheTier reads data in memtable or block cache.
+	BlockCacheTier = ReadTier(1)
+)
+
+// ReadOptions represent all of the available options when reading from a
+// database.
+type ReadOptions struct {
+	c *C.rocksdb_readoptions_t
+}
+
+// NewDefaultReadOptions creates a default ReadOptions object.
+func NewDefaultReadOptions() *ReadOptions {
+	return NewNativeReadOptions(C.rocksdb_readoptions_create())
+}
+
+// NewNativeReadOptions creates a ReadOptions object.
+func NewNativeReadOptions(c *C.rocksdb_readoptions_t) *ReadOptions {
+	return &ReadOptions{c}
+}
+
+// UnsafeGetReadOptions returns the underlying c read options object.
+func (opts *ReadOptions) UnsafeGetReadOptions() unsafe.Pointer {
+	return unsafe.Pointer(opts.c)
+}
+
+// SetVerifyChecksums specify if all data read from underlying storage will be
+// verified against corresponding checksums.
+// Default: false
+func (opts *ReadOptions) SetVerifyChecksums(value bool) {
+	C.rocksdb_readoptions_set_verify_checksums(opts.c, boolToChar(value))
+}
+
+// SetFillCache specify whether the "data block"/"index block"/"filter block"
+// read for this iteration should be cached in memory.
+// Callers may wish to set this field to false for bulk scans.
+// Default: true
+func (opts *ReadOptions) SetFillCache(value bool) {
+	C.rocksdb_readoptions_set_fill_cache(opts.c, boolToChar(value))
+}
+
+// SetSnapshot sets the snapshot which should be used for the read.
+// The snapshot must belong to the DB that is being read and must
+// not have been released.
+// Default: nil
+func (opts *ReadOptions) SetSnapshot(snap *Snapshot) {
+	C.rocksdb_readoptions_set_snapshot(opts.c, snap.c)
+}
+
+// SetReadTier specify if this read request should process data that ALREADY
+// resides on a particular cache. If the required data is not
+// found at the specified cache, then Status::Incomplete is returned.
+// Default: ReadAllTier
+func (opts *ReadOptions) SetReadTier(value ReadTier) {
+	C.rocksdb_readoptions_set_read_tier(opts.c, C.int(value))
+}
+
+// SetTailing specify whether to create a tailing iterator.
+// A special iterator that has a view of the complete database
+// (i.e. it can also be used to read newly added data) and
+// is optimized for sequential reads. It will return records
+// that were inserted into the database after the creation of the iterator.
+// Default: false
+func (opts *ReadOptions) SetTailing(value bool) {
+	C.rocksdb_readoptions_set_tailing(opts.c, boolToChar(value))
+}
+
+// Destroy deallocates the ReadOptions object.
+func (opts *ReadOptions) Destroy() {
+	C.rocksdb_readoptions_destroy(opts.c)
+	opts.c = nil
+}
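+
+// A typical bulk-scan setup, assuming a DB.NewIterator method defined elsewhere
+// in the package (not in this file):
+//
+//	ro := gorocksdb.NewDefaultReadOptions()
+//	defer ro.Destroy()
+//	ro.SetFillCache(false) // avoid polluting the block cache during a full scan
+//	it := db.NewIterator(ro)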
diff --git a/vendor/github.com/tecbot/gorocksdb/options_write.go b/vendor/github.com/tecbot/gorocksdb/options_write.go
new file mode 100644
index 0000000000000000000000000000000000000000..01cd9c9ab02b3c0ad793c6bae10a437e88ec6f96
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/options_write.go
@@ -0,0 +1,42 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// WriteOptions represent all of the available options when writing to a
+// database.
+type WriteOptions struct {
+	c *C.rocksdb_writeoptions_t
+}
+
+// NewDefaultWriteOptions creates a default WriteOptions object.
+func NewDefaultWriteOptions() *WriteOptions {
+	return NewNativeWriteOptions(C.rocksdb_writeoptions_create())
+}
+
+// NewNativeWriteOptions creates a WriteOptions object.
+func NewNativeWriteOptions(c *C.rocksdb_writeoptions_t) *WriteOptions {
+	return &WriteOptions{c}
+}
+
+// SetSync sets the sync mode. If true, the write will be flushed
+// from the operating system buffer cache before the write is considered complete.
+// If this flag is true, writes will be slower.
+// Default: false
+func (opts *WriteOptions) SetSync(value bool) {
+	C.rocksdb_writeoptions_set_sync(opts.c, boolToChar(value))
+}
+
+// DisableWAL sets whether WAL should be active or not.
+// If true, writes will not first go to the write ahead log,
+// and the write may get lost after a crash.
+// Default: false
+func (opts *WriteOptions) DisableWAL(value bool) {
+	C.rocksdb_writeoptions_disable_WAL(opts.c, C.int(btoi(value)))
+}
+
+// Destroy deallocates the WriteOptions object.
+func (opts *WriteOptions) Destroy() {
+	C.rocksdb_writeoptions_destroy(opts.c)
+	opts.c = nil
+}
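+
+// A minimal usage sketch, assuming a DB.Put method defined elsewhere in the
+// package (not in this file):
+//
+//	wo := gorocksdb.NewDefaultWriteOptions()
+//	defer wo.Destroy()
+//	wo.SetSync(true) // durable, but slower
+//	err := db.Put(wo, []byte("key"), []byte("value"))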
diff --git a/vendor/github.com/tecbot/gorocksdb/slice.go b/vendor/github.com/tecbot/gorocksdb/slice.go
new file mode 100644
index 0000000000000000000000000000000000000000..acc69b414c20a401d2f2ee012c9f8447e0f6aed3
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/slice.go
@@ -0,0 +1,35 @@
+package gorocksdb
+
+// #include <stdlib.h>
+import "C"
+import "unsafe"
+
+// Slice is used as a wrapper for non-copy values
+type Slice struct {
+	data  *C.char
+	size  C.size_t
+	freed bool
+}
+
+// NewSlice returns a slice with the given data.
+func NewSlice(data *C.char, size C.size_t) *Slice {
+	return &Slice{data, size, false}
+}
+
+// Data returns the data of the slice.
+func (s *Slice) Data() []byte {
+	return charToByte(s.data, s.size)
+}
+
+// Size returns the size of the data.
+func (s *Slice) Size() int {
+	return int(s.size)
+}
+
+// Free frees the slice data.
+func (s *Slice) Free() {
+	if !s.freed {
+		C.free(unsafe.Pointer(s.data))
+		s.freed = true
+	}
+}
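
A sketch of the intended lifetime handling, assuming the package's Get API (db.Get returns a *Slice) and an already-open *gorocksdb.DB: Data() aliases C-allocated memory, so callers copy before Free if they keep the bytes around.

    // readCopy fetches a value and returns a Go-owned copy of it, or nil if
    // the key does not exist.
    func readCopy(db *gorocksdb.DB, ro *gorocksdb.ReadOptions, key []byte) ([]byte, error) {
    	s, err := db.Get(ro, key)
    	if err != nil {
    		return nil, err
    	}
    	defer s.Free() // releases the C allocation; s.Data() is invalid afterwards
    	if s.Data() == nil {
    		return nil, nil // key not found
    	}
    	return append([]byte(nil), s.Data()...), nil // copy out of C memory
    }
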
diff --git a/vendor/github.com/tecbot/gorocksdb/slice_transform.go b/vendor/github.com/tecbot/gorocksdb/slice_transform.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd42bc63bb880931e154b7f899dd974210dc61f1
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/slice_transform.go
@@ -0,0 +1,73 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// A SliceTransform can be used as a prefix extractor.
+type SliceTransform interface {
+	// Transform a src in domain to a dst in the range.
+	Transform(src []byte) []byte
+
+	// Determine whether src is a valid input to which this transform can be applied.
+	InDomain(src []byte) bool
+
+	// Determine whether dst=Transform(src) for some src.
+	InRange(src []byte) bool
+
+	// Return the name of this transformation.
+	Name() string
+}
+
+// NewFixedPrefixTransform creates a new fixed prefix transform.
+func NewFixedPrefixTransform(prefixLen int) SliceTransform {
+	return NewNativeSliceTransform(C.rocksdb_slicetransform_create_fixed_prefix(C.size_t(prefixLen)))
+}
+
+// NewNativeSliceTransform creates a SliceTransform object.
+func NewNativeSliceTransform(c *C.rocksdb_slicetransform_t) SliceTransform {
+	return nativeSliceTransform{c}
+}
+
+type nativeSliceTransform struct {
+	c *C.rocksdb_slicetransform_t
+}
+
+func (st nativeSliceTransform) Transform(src []byte) []byte { return nil }
+func (st nativeSliceTransform) InDomain(src []byte) bool    { return false }
+func (st nativeSliceTransform) InRange(src []byte) bool     { return false }
+func (st nativeSliceTransform) Name() string                { return "" }
+
+// Hold references to slice transforms.
+var sliceTransforms []SliceTransform
+
+func registerSliceTransform(st SliceTransform) int {
+	sliceTransforms = append(sliceTransforms, st)
+	return len(sliceTransforms) - 1
+}
+
+//export gorocksdb_slicetransform_transform
+func gorocksdb_slicetransform_transform(idx int, cKey *C.char, cKeyLen C.size_t, cDstLen *C.size_t) *C.char {
+	key := charToByte(cKey, cKeyLen)
+	dst := sliceTransforms[idx].Transform(key)
+	*cDstLen = C.size_t(len(dst))
+	return cByteSlice(dst)
+}
+
+//export gorocksdb_slicetransform_in_domain
+func gorocksdb_slicetransform_in_domain(idx int, cKey *C.char, cKeyLen C.size_t) C.uchar {
+	key := charToByte(cKey, cKeyLen)
+	inDomain := sliceTransforms[idx].InDomain(key)
+	return boolToChar(inDomain)
+}
+
+//export gorocksdb_slicetransform_in_range
+func gorocksdb_slicetransform_in_range(idx int, cKey *C.char, cKeyLen C.size_t) C.uchar {
+	key := charToByte(cKey, cKeyLen)
+	inRange := sliceTransforms[idx].InRange(key)
+	return boolToChar(inRange)
+}
+
+//export gorocksdb_slicetransform_name
+func gorocksdb_slicetransform_name(idx int) *C.char {
+	return stringToChar(sliceTransforms[idx].Name())
+}
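
A sketch of a Go-side implementation of the interface above; delimiterTransform and its ':' convention are illustrative only. It would be handed to the package's Options.SetPrefixExtractor, which is what ends up driving the exported callbacks, and it needs the standard library's bytes package:

    // delimiterTransform treats everything before the first ':' as the prefix.
    type delimiterTransform struct{}

    func (delimiterTransform) Transform(src []byte) []byte {
    	if i := bytes.IndexByte(src, ':'); i >= 0 {
    		return src[:i]
    	}
    	return src
    }
    func (delimiterTransform) InDomain(src []byte) bool { return bytes.IndexByte(src, ':') >= 0 }
    func (delimiterTransform) InRange(dst []byte) bool  { return bytes.IndexByte(dst, ':') < 0 }
    func (delimiterTransform) Name() string             { return "example.delimiter-prefix" }
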
diff --git a/vendor/github.com/tecbot/gorocksdb/snapshot.go b/vendor/github.com/tecbot/gorocksdb/snapshot.go
new file mode 100644
index 0000000000000000000000000000000000000000..bb678fe70337846a2a442f829cda5f1735d41519
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/snapshot.go
@@ -0,0 +1,21 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+// Snapshot provides a consistent, point-in-time view of the DB for read operations.
+type Snapshot struct {
+	c   *C.rocksdb_snapshot_t
+	cDb *C.rocksdb_t
+}
+
+// NewNativeSnapshot creates a Snapshot object.
+func NewNativeSnapshot(c *C.rocksdb_snapshot_t, cDb *C.rocksdb_t) *Snapshot {
+	return &Snapshot{c, cDb}
+}
+
+// Release removes the snapshot from the database's list of snapshots.
+func (s *Snapshot) Release() {
+	C.rocksdb_release_snapshot(s.cDb, s.c)
+	s.c, s.cDb = nil, nil
+}
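
A usage sketch combining Snapshot with the ReadOptions.SetSnapshot setter above, assuming an already-open *gorocksdb.DB; both reads observe the same point-in-time state:

    // consistentPair reads two keys from one snapshot. The returned slices
    // must be freed by the caller.
    func consistentPair(db *gorocksdb.DB, k1, k2 []byte) (*gorocksdb.Slice, *gorocksdb.Slice, error) {
    	snap := db.NewSnapshot()
    	defer snap.Release()

    	ro := gorocksdb.NewDefaultReadOptions()
    	defer ro.Destroy()
    	ro.SetSnapshot(snap)

    	v1, err := db.Get(ro, k1)
    	if err != nil {
    		return nil, nil, err
    	}
    	v2, err := db.Get(ro, k2)
    	if err != nil {
    		v1.Free()
    		return nil, nil, err
    	}
    	return v1, v2, nil
    }
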
diff --git a/vendor/github.com/tecbot/gorocksdb/util.go b/vendor/github.com/tecbot/gorocksdb/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c37330655b5fe0e3a4448a90fc33fee53c31000
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/util.go
@@ -0,0 +1,74 @@
+package gorocksdb
+
+import "C"
+import (
+	"reflect"
+	"unsafe"
+)
+
+// btoi converts a bool value to int.
+func btoi(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// boolToChar converts a bool value to C.uchar.
+func boolToChar(b bool) C.uchar {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// charToByte converts a *C.char to a byte slice.
+// The returned slice aliases the C memory, so it is only valid for as long
+// as that memory is.
+func charToByte(data *C.char, len C.size_t) []byte {
+	var value []byte
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
+	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
+	return value
+}
+
+// byteToChar returns *C.char from byte slice.
+func byteToChar(b []byte) *C.char {
+	var c *C.char
+	if len(b) > 0 {
+		c = (*C.char)(unsafe.Pointer(&b[0]))
+	}
+	return c
+}
+
+// cByteSlice copies a Go []byte to a C string.
+// The C string is allocated in the C heap using malloc.
+func cByteSlice(b []byte) *C.char {
+	var c *C.char
+	if len(b) > 0 {
+		cData := C.malloc(C.size_t(len(b)))
+		copy((*[1 << 24]byte)(cData)[0:len(b)], b)
+		c = (*C.char)(cData)
+	}
+	return c
+}
+
+// stringToChar returns *C.char from string.
+func stringToChar(s string) *C.char {
+	ptrStr := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	return (*C.char)(unsafe.Pointer(ptrStr.Data))
+}
+
+// charSlice converts a C array of *char to a []*C.char.
+func charSlice(data **C.char, len C.int) []*C.char {
+	var value []*C.char
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
+	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
+	return value
+}
+
+// sizeSlice converts a C array of size_t to a []C.size_t.
+func sizeSlice(data *C.size_t, len C.int) []C.size_t {
+	var value []C.size_t
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
+	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
+	return value
+}
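
A small sketch of the contract these helpers imply; copyOut is hypothetical and would live next to them inside the package, since charToByte is unexported:

    // copyOut turns a zero-copy view of C memory into a Go-owned []byte so the
    // C buffer can be freed afterwards.
    func copyOut(data *C.char, size C.size_t) []byte {
    	view := charToByte(data, size)      // aliases the C allocation
    	return append([]byte(nil), view...) // detach before the C side frees it
    }
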
diff --git a/vendor/github.com/tecbot/gorocksdb/write_batch.go b/vendor/github.com/tecbot/gorocksdb/write_batch.go
new file mode 100644
index 0000000000000000000000000000000000000000..a4a5b8ba225c5e88414c1f47cc328754223f3461
--- /dev/null
+++ b/vendor/github.com/tecbot/gorocksdb/write_batch.go
@@ -0,0 +1,189 @@
+package gorocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+import "io"
+
+// WriteBatch is a batching of Puts, Merges and Deletes.
+type WriteBatch struct {
+	c *C.rocksdb_writebatch_t
+}
+
+// NewWriteBatch creates a WriteBatch object.
+func NewWriteBatch() *WriteBatch {
+	return NewNativeWriteBatch(C.rocksdb_writebatch_create())
+}
+
+// NewNativeWriteBatch creates a WriteBatch object.
+func NewNativeWriteBatch(c *C.rocksdb_writebatch_t) *WriteBatch {
+	return &WriteBatch{c}
+}
+
+// WriteBatchFrom creates a write batch from a serialized WriteBatch.
+func WriteBatchFrom(data []byte) *WriteBatch {
+	return NewNativeWriteBatch(C.rocksdb_writebatch_create_from(byteToChar(data), C.size_t(len(data))))
+}
+
+// Put queues a key-value pair.
+func (wb *WriteBatch) Put(key, value []byte) {
+	cKey := byteToChar(key)
+	cValue := byteToChar(value)
+	C.rocksdb_writebatch_put(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
+}
+
+// PutCF queues a key-value pair in a column family.
+func (wb *WriteBatch) PutCF(cf *ColumnFamilyHandle, key, value []byte) {
+	cKey := byteToChar(key)
+	cValue := byteToChar(value)
+	C.rocksdb_writebatch_put_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
+}
+
+// Merge queues a merge of "value" with the existing value of "key".
+func (wb *WriteBatch) Merge(key, value []byte) {
+	cKey := byteToChar(key)
+	cValue := byteToChar(value)
+	C.rocksdb_writebatch_merge(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
+}
+
+// MergeCF queues a merge of "value" with the existing value of "key" in a
+// column family.
+func (wb *WriteBatch) MergeCF(cf *ColumnFamilyHandle, key, value []byte) {
+	cKey := byteToChar(key)
+	cValue := byteToChar(value)
+	C.rocksdb_writebatch_merge_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
+}
+
+// Delete queues a deletion of the data at key.
+func (wb *WriteBatch) Delete(key []byte) {
+	cKey := byteToChar(key)
+	C.rocksdb_writebatch_delete(wb.c, cKey, C.size_t(len(key)))
+}
+
+// DeleteCF queues a deletion of the data at key in a column family.
+func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) {
+	cKey := byteToChar(key)
+	C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key)))
+}
+
+// Data returns the serialized version of this batch.
+func (wb *WriteBatch) Data() []byte {
+	var cSize C.size_t
+	cValue := C.rocksdb_writebatch_data(wb.c, &cSize)
+	return charToByte(cValue, cSize)
+}
+
+// Count returns the number of updates in the batch.
+func (wb *WriteBatch) Count() int {
+	return int(C.rocksdb_writebatch_count(wb.c))
+}
+
+// NewIterator returns an iterator to iterate over the records in the batch.
+func (wb *WriteBatch) NewIterator() *WriteBatchIterator {
+	data := wb.Data()
+	// the serialized batch starts with an 8-byte sequence number followed by
+	// a 4-byte record count; the records begin at offset 12
+	if len(data) < 8+4 {
+		return &WriteBatchIterator{}
+	}
+	return &WriteBatchIterator{data: data[12:]}
+}
+
+// Clear removes all the enqueued Puts and Deletes.
+func (wb *WriteBatch) Clear() {
+	C.rocksdb_writebatch_clear(wb.c)
+}
+
+// Destroy deallocates the WriteBatch object.
+func (wb *WriteBatch) Destroy() {
+	C.rocksdb_writebatch_destroy(wb.c)
+	wb.c = nil
+}
+
+// WriteBatchRecordType describes the type of a batch record.
+type WriteBatchRecordType byte
+
+// Types of batch records.
+const (
+	WriteBatchRecordTypeDeletion WriteBatchRecordType = 0x0
+	WriteBatchRecordTypeValue    WriteBatchRecordType = 0x1
+	WriteBatchRecordTypeMerge    WriteBatchRecordType = 0x2
+	WriteBatchRecordTypeLogData  WriteBatchRecordType = 0x3
+)
+
+// WriteBatchRecord represents a record inside a WriteBatch.
+type WriteBatchRecord struct {
+	Key   []byte
+	Value []byte
+	Type  WriteBatchRecordType
+}
+
+// WriteBatchIterator represents an iterator to iterate over the records in a batch.
+type WriteBatchIterator struct {
+	data   []byte
+	record WriteBatchRecord
+	err    error
+}
+
+// Next advances the iterator to the next record.
+// It returns false if no further record exists.
+func (iter *WriteBatchIterator) Next() bool {
+	if iter.err != nil || len(iter.data) == 0 {
+		return false
+	}
+	// reset the current record
+	iter.record.Key = nil
+	iter.record.Value = nil
+
+	// parse the record type
+	recordType := WriteBatchRecordType(iter.data[0])
+	iter.record.Type = recordType
+	iter.data = iter.data[1:]
+
+	// parse the key
+	x, n := iter.decodeVarint(iter.data)
+	if n == 0 {
+		iter.err = io.ErrShortBuffer
+		return false
+	}
+	k := n + int(x)
+	iter.record.Key = iter.data[n:k]
+	iter.data = iter.data[k:]
+
+	// parse the data
+	if recordType == WriteBatchRecordTypeValue || recordType == WriteBatchRecordTypeMerge {
+		x, n := iter.decodeVarint(iter.data)
+		if n == 0 {
+			iter.err = io.ErrShortBuffer
+			return false
+		}
+		k := n + int(x)
+		iter.record.Value = iter.data[n:k]
+		iter.data = iter.data[k:]
+	}
+	return true
+}
+
+// Record returns the current record.
+func (iter *WriteBatchIterator) Record() *WriteBatchRecord {
+	return &iter.record
+}
+
+// Error returns the error if the iteration failed.
+func (iter *WriteBatchIterator) Error() error {
+	return iter.err
+}
+
+func (iter *WriteBatchIterator) decodeVarint(buf []byte) (x uint64, n int) {
+	// x, n already 0
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
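
A usage sketch tying the pieces together, assuming an already-open *gorocksdb.DB and default write options; db.Write applies the batch atomically and the iterator above decodes its serialized form:

    // batchRoundTrip queues a put and a delete, applies them in one atomic
    // write, and then walks the batch's records.
    func batchRoundTrip(db *gorocksdb.DB, wo *gorocksdb.WriteOptions) error {
    	wb := gorocksdb.NewWriteBatch()
    	defer wb.Destroy()
    	wb.Put([]byte("k1"), []byte("v1"))
    	wb.Delete([]byte("k2"))

    	if err := db.Write(wo, wb); err != nil {
    		return err
    	}

    	it := wb.NewIterator()
    	for it.Next() {
    		rec := it.Record()
    		_ = rec.Type // WriteBatchRecordTypeValue for the put, ...Deletion for the delete
    	}
    	return it.Error()
    }
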
diff --git a/vendor/vendor.json b/vendor/vendor.json
index fde588845babbdccdeb753bf5baec398759b15f8..b7373bef01987c4d3798e3743422869bd7e5db8b 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -26,6 +26,12 @@
 			"revision": "b4db88808f5c1f8f36d2f67952c8b7b3022e28ea",
 			"revisionTime": "2015-10-08T07:23:26Z"
 		},
+		{
+			"checksumSHA1": "W1EGygayPbG7X+UK13VHKl0XOy8=",
+			"path": "github.com/tecbot/gorocksdb",
+			"revision": "59ab8def01399fb7ded1c3bff5e3e4cbd14b6348",
+			"revisionTime": "2016-03-10T21:12:00Z"
+		},
 		{
 			"checksumSHA1": "7LaLud3qlAQZ+xftxLTbYg3cGpo=",
 			"path": "github.com/willf/bitset",