Unverified Commit ad430e67 authored by Ashwin Ramesh

Add gorocksdb to govendor

parent da8738fb
package gorocksdb

// CompressionOptions represents options for different compression algorithms like Zlib.
type CompressionOptions struct {
	WindowBits int
	Level      int
	Strategy   int
}

// NewDefaultCompressionOptions creates a default CompressionOptions object.
func NewDefaultCompressionOptions() *CompressionOptions {
	return NewCompressionOptions(-14, -1, 0)
}

// NewCompressionOptions creates a CompressionOptions object.
func NewCompressionOptions(windowBits, level, strategy int) *CompressionOptions {
	return &CompressionOptions{
		WindowBits: windowBits,
		Level:      level,
		Strategy:   strategy,
	}
}
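
// Editor's sketch (not part of the vendored file): constructing non-default
// compression options. How a CompressionOptions value gets applied to the main
// Options object depends on the rest of this package, so this only shows
// construction; the chosen level is illustrative.
func exampleZlibCompressionOptions() *CompressionOptions {
	// Keep the default window bits (-14) and strategy (0), but raise the
	// zlib level from the library default (-1) to 6 for tighter compression.
	return NewCompressionOptions(-14, 6, 0)
}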
package gorocksdb

// #include "rocksdb/c.h"
import "C"

// FlushOptions represent all of the available options when manually flushing
// the database.
type FlushOptions struct {
	c *C.rocksdb_flushoptions_t
}

// NewDefaultFlushOptions creates a default FlushOptions object.
func NewDefaultFlushOptions() *FlushOptions {
	return NewNativeFlushOptions(C.rocksdb_flushoptions_create())
}

// NewNativeFlushOptions creates a FlushOptions object from a native C handle.
func NewNativeFlushOptions(c *C.rocksdb_flushoptions_t) *FlushOptions {
	return &FlushOptions{c}
}

// SetWait specifies whether the flush call will block until the flush is done.
// Default: true
func (opts *FlushOptions) SetWait(value bool) {
	C.rocksdb_flushoptions_set_wait(opts.c, boolToChar(value))
}

// Destroy deallocates the FlushOptions object.
func (opts *FlushOptions) Destroy() {
	C.rocksdb_flushoptions_destroy(opts.c)
	opts.c = nil
}
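
// Editor's sketch (not part of the vendored file): a minimal manual flush,
// assuming this package's DB type exposes Flush(*FlushOptions) error as
// upstream gorocksdb does.
func exampleManualFlush(db *DB) error {
	fo := NewDefaultFlushOptions()
	defer fo.Destroy()
	fo.SetWait(true) // block until the memtable has been written out
	return db.Flush(fo)
}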
package gorocksdb

// #include "rocksdb/c.h"
import "C"
import "unsafe"

// ReadTier controls fetching of data during a read request.
// An application can issue a read request (via Get/Iterators) and specify
// if that read should process data that ALREADY resides on a specified cache
// level. For example, if an application specifies BlockCacheTier then the
// Get call will process data that is already processed in the memtable or
// the block cache. It will not page in data from the OS cache or data that
// resides in storage.
type ReadTier uint

const (
	// ReadAllTier reads data in memtable, block cache, OS cache or storage.
	ReadAllTier = ReadTier(0)
	// BlockCacheTier reads data in memtable or block cache.
	BlockCacheTier = ReadTier(1)
)

// ReadOptions represent all of the available options when reading from a
// database.
type ReadOptions struct {
	c *C.rocksdb_readoptions_t
}

// NewDefaultReadOptions creates a default ReadOptions object.
func NewDefaultReadOptions() *ReadOptions {
	return NewNativeReadOptions(C.rocksdb_readoptions_create())
}

// NewNativeReadOptions creates a ReadOptions object from a native C handle.
func NewNativeReadOptions(c *C.rocksdb_readoptions_t) *ReadOptions {
	return &ReadOptions{c}
}

// UnsafeGetReadOptions returns the underlying C read options object.
func (opts *ReadOptions) UnsafeGetReadOptions() unsafe.Pointer {
	return unsafe.Pointer(opts.c)
}

// SetVerifyChecksums specifies whether all data read from underlying storage
// will be verified against corresponding checksums.
// Default: false
func (opts *ReadOptions) SetVerifyChecksums(value bool) {
	C.rocksdb_readoptions_set_verify_checksums(opts.c, boolToChar(value))
}

// SetFillCache specifies whether the "data block"/"index block"/"filter block"
// read for this iteration should be cached in memory.
// Callers may wish to set this field to false for bulk scans.
// Default: true
func (opts *ReadOptions) SetFillCache(value bool) {
	C.rocksdb_readoptions_set_fill_cache(opts.c, boolToChar(value))
}

// SetSnapshot sets the snapshot which should be used for the read.
// The snapshot must belong to the DB that is being read and must
// not have been released.
// Default: nil
func (opts *ReadOptions) SetSnapshot(snap *Snapshot) {
	C.rocksdb_readoptions_set_snapshot(opts.c, snap.c)
}

// SetReadTier specifies whether this read request should process data that
// ALREADY resides on a particular cache. If the required data is not found
// at the specified cache, then Status::Incomplete is returned.
// Default: ReadAllTier
func (opts *ReadOptions) SetReadTier(value ReadTier) {
	C.rocksdb_readoptions_set_read_tier(opts.c, C.int(value))
}

// SetTailing specifies whether to create a tailing iterator: a special
// iterator that has a view of the complete database (i.e. it can also be
// used to read newly added data) and is optimized for sequential reads.
// It will return records that were inserted into the database after the
// creation of the iterator.
// Default: false
func (opts *ReadOptions) SetTailing(value bool) {
	C.rocksdb_readoptions_set_tailing(opts.c, boolToChar(value))
}

// Destroy deallocates the ReadOptions object.
func (opts *ReadOptions) Destroy() {
	C.rocksdb_readoptions_destroy(opts.c)
	opts.c = nil
}
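
// Editor's sketch (not part of the vendored file): ReadOptions tuned for a
// bulk scan, assuming DB.NewIterator(*ReadOptions) *Iterator and the usual
// Iterator methods from upstream gorocksdb.
func exampleBulkScan(db *DB) {
	ro := NewDefaultReadOptions()
	defer ro.Destroy()
	// Do not pollute the block cache with blocks that are read only once.
	ro.SetFillCache(false)
	it := db.NewIterator(ro)
	defer it.Close()
	for it.SeekToFirst(); it.Valid(); it.Next() {
		k := it.Key()
		// ... process k.Data() ...
		k.Free()
	}
}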
package gorocksdb

// #include "rocksdb/c.h"
import "C"

// WriteOptions represent all of the available options when writing to a
// database.
type WriteOptions struct {
	c *C.rocksdb_writeoptions_t
}

// NewDefaultWriteOptions creates a default WriteOptions object.
func NewDefaultWriteOptions() *WriteOptions {
	return NewNativeWriteOptions(C.rocksdb_writeoptions_create())
}

// NewNativeWriteOptions creates a WriteOptions object from a native C handle.
func NewNativeWriteOptions(c *C.rocksdb_writeoptions_t) *WriteOptions {
	return &WriteOptions{c}
}

// SetSync sets the sync mode. If true, the write will be flushed
// from the operating system buffer cache before the write is considered complete.
// If this flag is true, writes will be slower.
// Default: false
func (opts *WriteOptions) SetSync(value bool) {
	C.rocksdb_writeoptions_set_sync(opts.c, boolToChar(value))
}

// DisableWAL sets whether WAL should be active or not.
// If true, writes will not first go to the write ahead log,
// and the write may be lost after a crash.
// Default: false
func (opts *WriteOptions) DisableWAL(value bool) {
	C.rocksdb_writeoptions_disable_WAL(opts.c, C.int(btoi(value)))
}

// Destroy deallocates the WriteOptions object.
func (opts *WriteOptions) Destroy() {
	C.rocksdb_writeoptions_destroy(opts.c)
	opts.c = nil
}
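
// Editor's sketch (not part of the vendored file): a fully durable write,
// assuming DB.Put(*WriteOptions, key, value []byte) error as in upstream
// gorocksdb. Syncing trades write throughput for crash safety.
func exampleDurablePut(db *DB, key, value []byte) error {
	wo := NewDefaultWriteOptions()
	defer wo.Destroy()
	wo.SetSync(true) // flush the write from OS buffers before acknowledging
	return db.Put(wo, key, value)
}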
package gorocksdb

// #include <stdlib.h>
import "C"
import "unsafe"

// Slice is used as a wrapper for non-copy values.
type Slice struct {
	data  *C.char
	size  C.size_t
	freed bool
}

// NewSlice returns a slice with the given data.
func NewSlice(data *C.char, size C.size_t) *Slice {
	return &Slice{data, size, false}
}

// Data returns the data of the slice. The returned bytes alias the
// underlying C buffer and are only valid until Free is called.
func (s *Slice) Data() []byte {
	return charToByte(s.data, s.size)
}

// Size returns the size of the data.
func (s *Slice) Size() int {
	return int(s.size)
}

// Free frees the slice data.
func (s *Slice) Free() {
	if !s.freed {
		C.free(unsafe.Pointer(s.data))
		s.freed = true
	}
}
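
// Editor's sketch (not part of the vendored file): the intended Slice
// lifecycle, assuming DB.Get(*ReadOptions, key []byte) (*Slice, error) as in
// upstream gorocksdb. Data() aliases C memory, so the bytes must be copied
// if they need to outlive Free.
func exampleCopyingGet(db *DB, key []byte) ([]byte, error) {
	ro := NewDefaultReadOptions()
	defer ro.Destroy()
	s, err := db.Get(ro, key)
	if err != nil {
		return nil, err
	}
	defer s.Free()
	return append([]byte(nil), s.Data()...), nil // detach from the C buffer
}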
package gorocksdb

// #include "rocksdb/c.h"
import "C"

// A SliceTransform can be used as a prefix extractor.
type SliceTransform interface {
	// Transform transforms a src in domain to a dst in the range.
	Transform(src []byte) []byte
	// InDomain determines whether the given src is within the domain of the transform.
	InDomain(src []byte) bool
	// InRange determines whether dst=Transform(src) for some src.
	InRange(src []byte) bool
	// Name returns the name of this transformation.
	Name() string
}

// NewFixedPrefixTransform creates a new fixed prefix transform.
func NewFixedPrefixTransform(prefixLen int) SliceTransform {
	return NewNativeSliceTransform(C.rocksdb_slicetransform_create_fixed_prefix(C.size_t(prefixLen)))
}

// NewNativeSliceTransform creates a SliceTransform object from a native C handle.
func NewNativeSliceTransform(c *C.rocksdb_slicetransform_t) SliceTransform {
	return nativeSliceTransform{c}
}

type nativeSliceTransform struct {
	c *C.rocksdb_slicetransform_t
}

func (st nativeSliceTransform) Transform(src []byte) []byte { return nil }
func (st nativeSliceTransform) InDomain(src []byte) bool    { return false }
func (st nativeSliceTransform) InRange(src []byte) bool     { return false }
func (st nativeSliceTransform) Name() string                { return "" }

// Hold references to slice transforms.
var sliceTransforms []SliceTransform

func registerSliceTransform(st SliceTransform) int {
	sliceTransforms = append(sliceTransforms, st)
	return len(sliceTransforms) - 1
}

//export gorocksdb_slicetransform_transform
func gorocksdb_slicetransform_transform(idx int, cKey *C.char, cKeyLen C.size_t, cDstLen *C.size_t) *C.char {
	key := charToByte(cKey, cKeyLen)
	dst := sliceTransforms[idx].Transform(key)
	*cDstLen = C.size_t(len(dst))
	return cByteSlice(dst)
}

//export gorocksdb_slicetransform_in_domain
func gorocksdb_slicetransform_in_domain(idx int, cKey *C.char, cKeyLen C.size_t) C.uchar {
	key := charToByte(cKey, cKeyLen)
	inDomain := sliceTransforms[idx].InDomain(key)
	return boolToChar(inDomain)
}

//export gorocksdb_slicetransform_in_range
func gorocksdb_slicetransform_in_range(idx int, cKey *C.char, cKeyLen C.size_t) C.uchar {
	key := charToByte(cKey, cKeyLen)
	inRange := sliceTransforms[idx].InRange(key)
	return boolToChar(inRange)
}

//export gorocksdb_slicetransform_name
func gorocksdb_slicetransform_name(idx int) *C.char {
	return stringToChar(sliceTransforms[idx].Name())
}
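
// Editor's sketch (not part of the vendored file): wiring a fixed-prefix
// extractor into the database options, assuming this package's Options type
// exposes SetPrefixExtractor(SliceTransform) as upstream gorocksdb does.
func examplePrefixOptions() *Options {
	opts := NewDefaultOptions()
	// Treat the first 3 bytes of every key as its prefix, enabling
	// prefix-aware bloom filters and prefix iteration.
	opts.SetPrefixExtractor(NewFixedPrefixTransform(3))
	return opts
}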
package gorocksdb

// #include "rocksdb/c.h"
import "C"

// Snapshot provides a consistent view of read operations in a DB.
type Snapshot struct {
	c   *C.rocksdb_snapshot_t
	cDb *C.rocksdb_t
}

// NewNativeSnapshot creates a Snapshot object from native C handles.
func NewNativeSnapshot(c *C.rocksdb_snapshot_t, cDb *C.rocksdb_t) *Snapshot {
	return &Snapshot{c, cDb}
}

// Release removes the snapshot from the database's list of snapshots.
func (s *Snapshot) Release() {
	C.rocksdb_release_snapshot(s.cDb, s.c)
	s.c, s.cDb = nil, nil
}
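
// Editor's sketch (not part of the vendored file): reading through a
// snapshot, assuming DB.NewSnapshot() *Snapshot and DB.Get as in upstream
// gorocksdb. Every read made with the same snapshot sees one consistent
// view of the database.
func exampleSnapshotGet(db *DB, key []byte) (*Slice, error) {
	snap := db.NewSnapshot()
	defer snap.Release()
	ro := NewDefaultReadOptions()
	defer ro.Destroy()
	ro.SetSnapshot(snap)
	return db.Get(ro, key)
}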
package gorocksdb

import "C"
import (
	"reflect"
	"unsafe"
)

// btoi converts a bool value to int.
func btoi(b bool) int {
	if b {
		return 1
	}
	return 0
}

// boolToChar converts a bool value to C.uchar.
func boolToChar(b bool) C.uchar {
	if b {
		return 1
	}
	return 0
}

// charToByte converts a *C.char to a byte slice. The returned slice aliases
// the C buffer rather than copying it.
func charToByte(data *C.char, len C.size_t) []byte {
	var value []byte
	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
	return value
}

// byteToChar returns *C.char from byte slice.
func byteToChar(b []byte) *C.char {
	var c *C.char
	if len(b) > 0 {
		c = (*C.char)(unsafe.Pointer(&b[0]))
	}
	return c
}

// cByteSlice copies a Go []byte to a C string.
// The C string is allocated in the C heap using malloc.
func cByteSlice(b []byte) *C.char {
	var c *C.char
	if len(b) > 0 {
		cData := C.malloc(C.size_t(len(b)))
		copy((*[1 << 24]byte)(cData)[0:len(b)], b)
		c = (*C.char)(cData)
	}
	return c
}

// stringToChar returns *C.char from string.
func stringToChar(s string) *C.char {
	ptrStr := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return (*C.char)(unsafe.Pointer(ptrStr.Data))
}

// charSlice converts a C array of *char to a []*C.char.
func charSlice(data **C.char, len C.int) []*C.char {
	var value []*C.char
	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
	return value
}

// sizeSlice converts a C array of size_t to a []C.size_t.
func sizeSlice(data *C.size_t, len C.int) []C.size_t {
	var value []C.size_t
	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
	return value
}
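
// Editor's sketch (not part of the vendored file): a hypothetical helper
// showing the copy-before-free pattern that charToByte requires, since the
// slice it returns aliases the C buffer instead of copying it.
func exampleDetachFromC(data *C.char, size C.size_t) []byte {
	view := charToByte(data, size)      // zero-copy view of C memory
	return append([]byte(nil), view...) // safe after the C buffer is freed
}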
package gorocksdb

// #include "rocksdb/c.h"
import "C"
import "io"

// WriteBatch is a batching of Puts, Merges and Deletes.
type WriteBatch struct {
	c *C.rocksdb_writebatch_t
}

// NewWriteBatch creates a WriteBatch object.
func NewWriteBatch() *WriteBatch {
	return NewNativeWriteBatch(C.rocksdb_writebatch_create())
}

// NewNativeWriteBatch creates a WriteBatch object from a native C handle.
func NewNativeWriteBatch(c *C.rocksdb_writebatch_t) *WriteBatch {
	return &WriteBatch{c}
}

// WriteBatchFrom creates a write batch from a serialized WriteBatch.
func WriteBatchFrom(data []byte) *WriteBatch {
	return NewNativeWriteBatch(C.rocksdb_writebatch_create_from(byteToChar(data), C.size_t(len(data))))
}
// Put queues a key-value pair.
func (wb *WriteBatch) Put(key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rocksdb_writebatch_put(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// PutCF queues a key-value pair in a column family.
func (wb *WriteBatch) PutCF(cf *ColumnFamilyHandle, key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rocksdb_writebatch_put_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// Merge queues a merge of "value" with the existing value of "key".
func (wb *WriteBatch) Merge(key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rocksdb_writebatch_merge(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// MergeCF queues a merge of "value" with the existing value of "key" in a
// column family.
func (wb *WriteBatch) MergeCF(cf *ColumnFamilyHandle, key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rocksdb_writebatch_merge_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// Delete queues a deletion of the data at key.
func (wb *WriteBatch) Delete(key []byte) {
	cKey := byteToChar(key)
	C.rocksdb_writebatch_delete(wb.c, cKey, C.size_t(len(key)))
}

// DeleteCF queues a deletion of the data at key in a column family.
func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) {
	cKey := byteToChar(key)
	C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key)))
}

// Data returns the serialized version of this batch.
func (wb *WriteBatch) Data() []byte {
	var cSize C.size_t
	cValue := C.rocksdb_writebatch_data(wb.c, &cSize)
	return charToByte(cValue, cSize)
}

// Count returns the number of updates in the batch.
func (wb *WriteBatch) Count() int {
	return int(C.rocksdb_writebatch_count(wb.c))
}
// NewIterator returns an iterator to iterate over the records in the batch.
func (wb *WriteBatch) NewIterator() *WriteBatchIterator {
	data := wb.Data()
	// Skip the 12-byte batch header: an 8-byte sequence number followed by
	// a 4-byte record count.
	if len(data) < 8+4 {
		return &WriteBatchIterator{}
	}
	return &WriteBatchIterator{data: data[12:]}
}

// Clear removes all the enqueued Puts and Deletes.
func (wb *WriteBatch) Clear() {
	C.rocksdb_writebatch_clear(wb.c)
}

// Destroy deallocates the WriteBatch object.
func (wb *WriteBatch) Destroy() {
	C.rocksdb_writebatch_destroy(wb.c)
	wb.c = nil
}
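
// Editor's sketch (not part of the vendored file): applying a batch
// atomically, assuming DB.Write(*WriteOptions, *WriteBatch) error as in
// upstream gorocksdb. Either every update in the batch becomes visible or
// none does.
func exampleAtomicBatch(db *DB) error {
	wb := NewWriteBatch()
	defer wb.Destroy()
	wb.Put([]byte("key1"), []byte("value1"))
	wb.Delete([]byte("key2"))
	wo := NewDefaultWriteOptions()
	defer wo.Destroy()
	return db.Write(wo, wb)
}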
// WriteBatchRecordType describes the type of a batch record.
type WriteBatchRecordType byte

// Types of batch records.
const (
	WriteBatchRecordTypeDeletion WriteBatchRecordType = 0x0
	WriteBatchRecordTypeValue    WriteBatchRecordType = 0x1
	WriteBatchRecordTypeMerge    WriteBatchRecordType = 0x2
	WriteBatchRecordTypeLogData  WriteBatchRecordType = 0x3
)

// WriteBatchRecord represents a record inside a WriteBatch.
type WriteBatchRecord struct {
	Key   []byte
	Value []byte
	Type  WriteBatchRecordType
}

// WriteBatchIterator represents an iterator to iterate over the records in a
// WriteBatch.
type WriteBatchIterator struct {
	data   []byte
	record WriteBatchRecord
	err    error
}
// Next advances the iterator to the next record.
// Returns false if no further record exists or an error occurred.
func (iter *WriteBatchIterator) Next() bool {
	if iter.err != nil || len(iter.data) == 0 {
		return false
	}
	// reset the current record
	iter.record.Key = nil
	iter.record.Value = nil
	// parse the record type
	recordType := WriteBatchRecordType(iter.data[0])
	iter.record.Type = recordType
	iter.data = iter.data[1:]
	// parse the key
	x, n := iter.decodeVarint(iter.data)
	if n == 0 {
		iter.err = io.ErrShortBuffer
		return false
	}
	k := n + int(x)
	iter.record.Key = iter.data[n:k]
	iter.data = iter.data[k:]
	// parse the value
	if recordType == WriteBatchRecordTypeValue || recordType == WriteBatchRecordTypeMerge {
		x, n := iter.decodeVarint(iter.data)
		if n == 0 {
			iter.err = io.ErrShortBuffer
			return false
		}
		k := n + int(x)
		iter.record.Value = iter.data[n:k]
		iter.data = iter.data[k:]
	}
	return true
}

// Record returns the current record.
func (iter *WriteBatchIterator) Record() *WriteBatchRecord {
	return &iter.record
}

// Error returns the error, if any, that occurred during iteration.
func (iter *WriteBatchIterator) Error() error {
	return iter.err
}
// decodeVarint decodes a varint-encoded unsigned integer from buf, returning
// the value and the number of bytes consumed (n == 0 signals an error).
func (iter *WriteBatchIterator) decodeVarint(buf []byte) (x uint64, n int) {
	// x, n already 0
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift
		if (b & 0x80) == 0 {
			return x, n
		}
	}
	// The number is too large to represent in a 64-bit value.
	return 0, 0
}
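
// Editor's sketch (not part of the vendored file): walking the records of a
// batch with the iterator defined above; purely illustrative.
func exampleDumpBatch(wb *WriteBatch) error {
	iter := wb.NewIterator()
	for iter.Next() {
		rec := iter.Record()
		switch rec.Type {
		case WriteBatchRecordTypeValue:
			// rec.Key and rec.Value hold a queued Put.
		case WriteBatchRecordTypeDeletion:
			// rec.Key holds a queued Delete.
		}
	}
	return iter.Error()
}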
@@ -26,6 +26,12 @@
 			"revision": "b4db88808f5c1f8f36d2f67952c8b7b3022e28ea",
 			"revisionTime": "2015-10-08T07:23:26Z"
 		},
+		{
+			"checksumSHA1": "W1EGygayPbG7X+UK13VHKl0XOy8=",
+			"path": "github.com/tecbot/gorocksdb",
+			"revision": "59ab8def01399fb7ded1c3bff5e3e4cbd14b6348",
+			"revisionTime": "2016-03-10T21:12:00Z"
+		},
 		{
 			"checksumSHA1": "7LaLud3qlAQZ+xftxLTbYg3cGpo=",
 			"path": "github.com/willf/bitset",
...