diff --git a/store/rocksdb/LICENSE b/store/rocksdb/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c7c73befc3b514b8a07a1c524b1c0eebeb1e8c9d
--- /dev/null
+++ b/store/rocksdb/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2012 Jeffrey M Hodges
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/store/rocksdb/README b/store/rocksdb/README
new file mode 100644
index 0000000000000000000000000000000000000000..3c7b319b9c350d9ea65e1ee4d163e71d03d16bc8
--- /dev/null
+++ b/store/rocksdb/README
@@ -0,0 +1 @@
+Imported from [https://github.com/DanielMorsing/rocksdb].
diff --git a/store/rocksdb/batch.go b/store/rocksdb/batch.go
new file mode 100644
index 0000000000000000000000000000000000000000..348a0f096a86837b8a658bb0a532b1f5d3d449d4
--- /dev/null
+++ b/store/rocksdb/batch.go
@@ -0,0 +1,66 @@
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include "rocksdb/c.h"
+import "C"
+
+import (
+	"unsafe"
+)
+
+// WriteBatch is a batching of Puts, and Deletes to be written atomically to a
+// database. A WriteBatch is written when passed to DB.Write.
+//
+// To prevent memory leaks, call Close when the program no longer needs the
+// WriteBatch object.
+type WriteBatch struct {
+	wbatch *C.rocksdb_writebatch_t
+}
+
+// NewWriteBatch creates a fully allocated WriteBatch.
+func NewWriteBatch() *WriteBatch {
+	wb := C.rocksdb_writebatch_create()
+	return &WriteBatch{wb}
+}
+
+// Close releases the underlying memory of a WriteBatch.
+func (w *WriteBatch) Close() {
+	C.rocksdb_writebatch_destroy(w.wbatch)
+}
+
+// Put places a key-value pair into the WriteBatch for writing later.
+//
+// Both the key and value byte slices may be reused as WriteBatch takes a copy
+// of them before returning.
+//
+func (w *WriteBatch) Put(key, value []byte) {
+	// rocksdb_writebatch_put, and _delete call memcpy() (by way of
+	// Memtable::Add) when called, so we do not need to worry about these
+	// []byte being reclaimed by GC.
+	var k, v *C.char
+	if len(key) != 0 {
+		k = (*C.char)(unsafe.Pointer(&key[0]))
+	}
+	if len(value) != 0 {
+		v = (*C.char)(unsafe.Pointer(&value[0]))
+	}
+
+	lenk := len(key)
+	lenv := len(value)
+
+	C.rocksdb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv))
+}
+
+// Delete queues a deletion of the data at key to be deleted later.
+//
+// The key byte slice may be reused safely. Delete takes a copy of
+// them before returning.
+func (w *WriteBatch) Delete(key []byte) {
+	C.rocksdb_writebatch_delete(w.wbatch,
+		(*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
+}
+
+// Clear removes all the enqueued Put and Deletes in the WriteBatch.
+func (w *WriteBatch) Clear() {
+	C.rocksdb_writebatch_clear(w.wbatch)
+}
diff --git a/store/rocksdb/cache.go b/store/rocksdb/cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..b4f103014f1e7d937894d3d2d8ea351b693da1cd
--- /dev/null
+++ b/store/rocksdb/cache.go
@@ -0,0 +1,32 @@
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include <stdint.h>
+// #include "rocksdb/c.h"
+import "C"
+
+// Cache is a cache used to store data read from the database in memory.
+//
+// Typically, NewLRUCache is all you will need, but advanced users may
+// implement their own *C.rocksdb_cache_t and create a Cache.
+//
+// To prevent memory leaks, a Cache must have Close called on it when it is
+// no longer needed by the program. Note: if the process is shutting down,
+// this may not be necessary and could be avoided to shorten shutdown time.
+type Cache struct {
+	Cache *C.rocksdb_cache_t
+}
+
+// NewLRUCache creates a new Cache object with the capacity given.
+//
+// To prevent memory leaks, Close should be called on the Cache when the
+// program no longer needs it. Note: if the process is shutting down, this may
+// not be necessary and could be avoided to shorten shutdown time.
+func NewLRUCache(capacity int) *Cache {
+	return &Cache{C.rocksdb_cache_create_lru(C.size_t(capacity))}
+}
+
+// Close deallocates the underlying memory of the Cache object.
+func (c *Cache) Close() {
+	C.rocksdb_cache_destroy(c.Cache)
+}
diff --git a/store/rocksdb/comparator.go b/store/rocksdb/comparator.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a2d185336ee08e0855f85bd6468bf4359983fec
--- /dev/null
+++ b/store/rocksdb/comparator.go
@@ -0,0 +1,13 @@
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include "rocksdb/c.h"
+import "C"
+
+// DestroyComparator deallocates a *C.rocksdb_comparator_t.
+//
+// This is provided as a convenience to advanced users that have implemented
+// their own comparators in C in their own code.
+func DestroyComparator(cmp *C.rocksdb_comparator_t) {
+	C.rocksdb_comparator_destroy(cmp)
+}
diff --git a/store/rocksdb/conv.go b/store/rocksdb/conv.go
new file mode 100644
index 0000000000000000000000000000000000000000..902e7264201bbcb1e9ea84d39c62f7d000afbe58
--- /dev/null
+++ b/store/rocksdb/conv.go
@@ -0,0 +1,19 @@
+package rocksdb
+
+// #include "rocksdb/c.h"
+import "C"
+
+func boolToUchar(b bool) C.uchar {
+	uc := C.uchar(0)
+	if b {
+		uc = C.uchar(1)
+	}
+	return uc
+}
+
+func ucharToBool(uc C.uchar) bool {
+	if uc == C.uchar(0) {
+		return false
+	}
+	return true
+}
diff --git a/store/rocksdb/db.go b/store/rocksdb/db.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b6a4f5bef8ae7c79e3560c02b764f65ff32a03c
--- /dev/null
+++ b/store/rocksdb/db.go
@@ -0,0 +1,320 @@
+package rocksdb
+
+/*
+#cgo LDFLAGS: -lrocksdb
+#include <stdlib.h>
+#include "rocksdb/c.h"
+
+// This function exists only to clean up lack-of-const warnings when
+// rocksdb_approximate_sizes is called from Go-land.
+void rocksdb_rocksdb_approximate_sizes(
+    rocksdb_t* db,
+    int num_ranges,
+    char** range_start_key, const size_t* range_start_key_len,
+    char** range_limit_key, const size_t* range_limit_key_len,
+    uint64_t* sizes) {
+  rocksdb_approximate_sizes(db,
+                            num_ranges,
+                            (const char* const*)range_start_key,
+                            range_start_key_len,
+                            (const char* const*)range_limit_key,
+                            range_limit_key_len,
+                            sizes);
+}
+*/
+import "C"
+
+import (
+	"unsafe"
+)
+
+type DatabaseError string
+
+func (e DatabaseError) Error() string {
+	return string(e)
+}
+
+// DB is a reusable handle to a LevelDB database on disk, created by Open.
+//
+// To avoid memory and file descriptor leaks, call Close when the process no
+// longer needs the handle. Calls to any DB method made after Close will
+// panic.
+//
+// The DB instance may be shared between goroutines. The usual data race
+// conditions will occur if the same key is written to from more than one, of
+// course.
+type DB struct {
+	Ldb *C.rocksdb_t
+}
+
+// Range is a range of keys in the database. GetApproximateSizes calls with it
+// begin at the key Start and end right before the key Limit.
+type Range struct {
+	Start []byte
+	Limit []byte
+}
+
+// Snapshot provides a consistent view of read operations in a DB. It is set
+// on to a ReadOptions and passed in. It is only created by DB.NewSnapshot.
+//
+// To prevent memory leaks and resource strain in the database, the snapshot
+// returned must be released with DB.ReleaseSnapshot method on the DB that
+// created it.
+type Snapshot struct {
+	snap *C.rocksdb_snapshot_t
+}
+
+// Open opens a database.
+//
+// Creating a new database is done by calling SetCreateIfMissing(true) on the
+// Options passed to Open.
+//
+// It is usually wise to set a Cache object on the Options with SetCache to
+// keep recently used data from that database in memory.
+func Open(dbname string, o *Options) (*DB, error) {
+	var errStr *C.char
+	ldbname := C.CString(dbname)
+	defer C.free(unsafe.Pointer(ldbname))
+
+	rocksdb := C.rocksdb_open(o.Opt, ldbname, &errStr)
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return nil, DatabaseError(gs)
+	}
+	return &DB{rocksdb}, nil
+}
+
+// DestroyDatabase removes a database entirely, removing everything from the
+// filesystem.
+func DestroyDatabase(dbname string, o *Options) error {
+	var errStr *C.char
+	ldbname := C.CString(dbname)
+	defer C.free(unsafe.Pointer(ldbname))
+
+	C.rocksdb_destroy_db(o.Opt, ldbname, &errStr)
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return DatabaseError(gs)
+	}
+	return nil
+}
+
+// RepairDatabase attempts to repair a database.
+//
+// If the database is unrepairable, an error is returned.
+func RepairDatabase(dbname string, o *Options) error {
+	var errStr *C.char
+	ldbname := C.CString(dbname)
+	defer C.free(unsafe.Pointer(ldbname))
+
+	C.rocksdb_repair_db(o.Opt, ldbname, &errStr)
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return DatabaseError(gs)
+	}
+	return nil
+}
+
+// Put writes data associated with a key to the database.
+//
+// If a nil []byte is passed in as value, it will be returned by Get as a
+// zero-length slice.
+//
+// The key and value byte slices may be reused safely. Put takes a copy of
+// them before returning.
+func (db *DB) Put(wo *WriteOptions, key, value []byte) error {
+	var errStr *C.char
+	// rocksdb_put, _get, and _delete call memcpy() (by way of Memtable::Add)
+	// when called, so we do not need to worry about these []byte being
+	// reclaimed by GC.
+	var k, v *C.char
+	if len(key) != 0 {
+		k = (*C.char)(unsafe.Pointer(&key[0]))
+	}
+	if len(value) != 0 {
+		v = (*C.char)(unsafe.Pointer(&value[0]))
+	}
+
+	lenk := len(key)
+	lenv := len(value)
+	C.rocksdb_put(
+		db.Ldb, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)
+
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return DatabaseError(gs)
+	}
+	return nil
+}
+
+// Get returns the data associated with the key from the database.
+//
+// If the key does not exist in the database, a nil []byte is returned. If the
+// key does exist, but the data is zero-length in the database, a zero-length
+// []byte will be returned.
+//
+// The key byte slice may be reused safely. Get takes a copy of
+// them before returning.
+func (db *DB) Get(ro *ReadOptions, key []byte) ([]byte, error) {
+	var errStr *C.char
+	var vallen C.size_t
+	var k *C.char
+	if len(key) != 0 {
+		k = (*C.char)(unsafe.Pointer(&key[0]))
+	}
+
+	value := C.rocksdb_get(
+		db.Ldb, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
+
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return nil, DatabaseError(gs)
+	}
+
+	if value == nil {
+		return nil, nil
+	}
+
+	defer C.free(unsafe.Pointer(value))
+	return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
+}
+
+// Delete removes the data associated with the key from the database.
+//
+// The key byte slice may be reused safely. Delete takes a copy of
+// them before returning.
+func (db *DB) Delete(wo *WriteOptions, key []byte) error {
+	var errStr *C.char
+	var k *C.char
+	if len(key) != 0 {
+		k = (*C.char)(unsafe.Pointer(&key[0]))
+	}
+
+	C.rocksdb_delete(
+		db.Ldb, wo.Opt, k, C.size_t(len(key)), &errStr)
+
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return DatabaseError(gs)
+	}
+	return nil
+}
+
+// Write atomically writes a WriteBatch to disk.
+func (db *DB) Write(wo *WriteOptions, w *WriteBatch) error {
+	var errStr *C.char
+	C.rocksdb_write(db.Ldb, wo.Opt, w.wbatch, &errStr)
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return DatabaseError(gs)
+	}
+	return nil
+}
+
+// NewIterator returns an Iterator over the database that uses the
+// ReadOptions given.
+//
+// Often, this is used for large, offline bulk reads while serving live
+// traffic. In that case, it may be wise to disable caching so that the data
+// processed by the returned Iterator does not displace the already cached
+// data. This can be done by calling SetFillCache(false) on the ReadOptions
+// before passing it here.
+//
+// Similarly, ReadOptions.SetSnapshot is also useful.
+func (db *DB) NewIterator(ro *ReadOptions) *Iterator {
+	it := C.rocksdb_create_iterator(db.Ldb, ro.Opt)
+	return &Iterator{Iter: it}
+}
+
+// GetApproximateSizes returns the approximate number of bytes of file system
+// space used by one or more key ranges.
+//
+// The keys counted will begin at Range.Start and end on the key before
+// Range.Limit.
+func (db *DB) GetApproximateSizes(ranges []Range) []uint64 {
+	starts := make([]*C.char, len(ranges))
+	limits := make([]*C.char, len(ranges))
+	startLens := make([]C.size_t, len(ranges))
+	limitLens := make([]C.size_t, len(ranges))
+	for i, r := range ranges {
+		starts[i] = C.CString(string(r.Start))
+		startLens[i] = C.size_t(len(r.Start))
+		limits[i] = C.CString(string(r.Limit))
+		limitLens[i] = C.size_t(len(r.Limit))
+	}
+	sizes := make([]uint64, len(ranges))
+	numranges := C.int(len(ranges))
+	startsPtr := &starts[0]
+	limitsPtr := &limits[0]
+	startLensPtr := &startLens[0]
+	limitLensPtr := &limitLens[0]
+	sizesPtr := (*C.uint64_t)(&sizes[0])
+	C.rocksdb_rocksdb_approximate_sizes(
+		db.Ldb, numranges, startsPtr, startLensPtr,
+		limitsPtr, limitLensPtr, sizesPtr)
+	for i := range ranges {
+		C.free(unsafe.Pointer(starts[i]))
+		C.free(unsafe.Pointer(limits[i]))
+	}
+	return sizes
+}
+
+// PropertyValue returns the value of a database property.
+//
+// Examples of properties include "rocksdb.stats", "rocksdb.sstables",
+// and "rocksdb.num-files-at-level0".
+func (db *DB) PropertyValue(propName string) string {
+	cname := C.CString(propName)
+	value := C.GoString(C.rocksdb_property_value(db.Ldb, cname))
+	C.free(unsafe.Pointer(cname))
+	return value
+}
+
+// NewSnapshot creates a new snapshot of the database.
+//
+// The snapshot, when used in a ReadOptions, provides a consistent view of
+// state of the database at the time the snapshot was created.
+//
+// To prevent memory leaks and resource strain in the database, the snapshot
+// returned must be released with DB.ReleaseSnapshot method on the DB that
+// created it.
+//
+// See the LevelDB documentation for details.
+func (db *DB) NewSnapshot() *Snapshot {
+	return &Snapshot{C.rocksdb_create_snapshot(db.Ldb)}
+}
+
+// ReleaseSnapshot removes the snapshot from the database's list of snapshots,
+// and deallocates it.
+func (db *DB) ReleaseSnapshot(snap *Snapshot) {
+	C.rocksdb_release_snapshot(db.Ldb, snap.snap)
+}
+
+// CompactRange runs a manual compaction on the Range of keys given. This is
+// not likely to be needed for typical usage.
+func (db *DB) CompactRange(r Range) {
+	var start, limit *C.char
+	if len(r.Start) != 0 {
+		start = (*C.char)(unsafe.Pointer(&r.Start[0]))
+	}
+	if len(r.Limit) != 0 {
+		limit = (*C.char)(unsafe.Pointer(&r.Limit[0]))
+	}
+	C.rocksdb_compact_range(
+		db.Ldb, start, C.size_t(len(r.Start)), limit, C.size_t(len(r.Limit)))
+}
+
+// Close closes the database, rendering it unusable for I/O, by deallocating
+// the underlying handle.
+//
+// Any attempts to use the DB after Close is called will panic.
+func (db *DB) Close() {
+	C.rocksdb_close(db.Ldb)
+}
diff --git a/store/rocksdb/doc.go b/store/rocksdb/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..011fb70a18762753e23c1b4388821a28202901b4
--- /dev/null
+++ b/store/rocksdb/doc.go
@@ -0,0 +1,74 @@
+/*
+
+Package rocksdb is a fork of the levigo package with the identifiers changed to target rocksdb and
+the package name changed to rocksdb.
+
+This was accomplished by running a sed script over the source code.
+Many thanks to Jeff Hodges for creating levigo without which this package would not exist.
+
+Original package documentation follows.
+
+Package rocksdb provides the ability to create and access LevelDB databases.
+
+rocksdb.Open opens and creates databases.
+
+	opts := rocksdb.NewOptions()
+	opts.SetCache(rocksdb.NewLRUCache(3<<30))
+	opts.SetCreateIfMissing(true)
+	db, err := rocksdb.Open("/path/to/db", opts)
+
+The DB struct returned by Open provides DB.Get, DB.Put and DB.Delete to modify
+and query the database.
+
+	ro := rocksdb.NewReadOptions()
+	wo := rocksdb.NewWriteOptions()
+	// if ro and wo are not used again, be sure to Close them.
+	data, err := db.Get(ro, []byte("key"))
+	...
+	err = db.Put(wo, []byte("anotherkey"), data)
+	...
+	err = db.Delete(wo, []byte("key"))
+
+For bulk reads, use an Iterator. If you want to avoid disturbing your live
+traffic while doing the bulk read, be sure to call SetFillCache(false) on the
+ReadOptions you use when creating the Iterator.
+
+	ro := rocksdb.NewReadOptions()
+	ro.SetFillCache(false)
+	it := db.NewIterator(ro)
+	defer it.Close()
+	it.Seek(mykey)
+	for ; it.Valid(); it.Next() {
+		munge(it.Key(), it.Value())
+	}
+	if err := it.GetError(); err != nil {
+		...
+	}
+
+Batched, atomic writes can be performed with a WriteBatch and
+DB.Write.
+
+	wb := rocksdb.NewWriteBatch()
+	// defer wb.Close or use wb.Clear and reuse.
+	wb.Delete([]byte("removed"))
+	wb.Put([]byte("added"), []byte("data"))
+	wb.Put([]byte("anotheradded"), []byte("more"))
+	err := db.Write(wo, wb)
+
+If your working dataset does not fit in memory, you'll want to add a bloom
+filter to your database. NewBloomFilter and Options.SetFilterPolicy is what
+you want. NewBloomFilter takes the number of bits in the filter to use per key in
+your database.
+
+	filter := rocksdb.NewBloomFilter(10)
+	opts.SetFilterPolicy(filter)
+	db, err := rocksdb.Open("/path/to/db", opts)
+
+If you're using a custom comparator in your code, be aware you may have to
+make your own filter policy object.
+
+This documentation is not a complete discussion of LevelDB. Please read the
+LevelDB documentation <http://code.google.com/p/leveldb> for information on
+its operation. You'll find lots of goodies there.
+*/
+package rocksdb
diff --git a/store/rocksdb/env.go b/store/rocksdb/env.go
new file mode 100644
index 0000000000000000000000000000000000000000..acb2912326b2a2fae674c11f9d6febf071e305a7
--- /dev/null
+++ b/store/rocksdb/env.go
@@ -0,0 +1,29 @@
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include "rocksdb/c.h"
+import "C"
+
+// Env is a system call environment used by a database.
+//
+// Typically, NewDefaultEnv is all you need. Advanced users may create their
+// own Env with a *C.rocksdb_env_t of their own creation.
+//
+// To prevent memory leaks, an Env must have Close called on it when it is
+// no longer needed by the program.
+type Env struct {
+	Env *C.rocksdb_env_t
+}
+
+// NewDefaultEnv creates a default environment for use in an Options.
+//
+// To prevent memory leaks, the Env returned should be deallocated with
+// Close.
+func NewDefaultEnv() *Env {
+	return &Env{C.rocksdb_create_default_env()}
+}
+
+// Close deallocates the Env, freeing the underlying struct.
+func (env *Env) Close() {
+	C.rocksdb_env_destroy(env.Env)
+}
diff --git a/store/rocksdb/examples/comparator_example.go b/store/rocksdb/examples/comparator_example.go
new file mode 100644
index 0000000000000000000000000000000000000000..5ad68f6098a410385d849c8e83f2fcaef501af42
--- /dev/null
+++ b/store/rocksdb/examples/comparator_example.go
@@ -0,0 +1,46 @@
+package main
+
+/*
+#cgo LDFLAGS: -lrocksdb
+#include <string.h>
+#include <rocksdb/c.h>
+
+static void CmpDestroy(void* arg) { }
+
+static int CmpCompare(void* arg, const char* a, size_t alen,
+                      const char* b, size_t blen) {
+  int n = (alen < blen) ? alen : blen;
+  int r = memcmp(a, b, n);
+  if (r == 0) {
+    if (alen < blen) r = -1;
+    else if (alen > blen) r = +1;
+  }
+  return r;
+}
+
+static const char* CmpName(void* arg) {
+  return "foo";
+}
+
+static rocksdb_comparator_t* CmpFooNew() {
+  return rocksdb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
+}
+
+*/
+import "C"
+
+type Comparator struct {
+	Comparator *C.rocksdb_comparator_t
+}
+
+func NewFooComparator() *Comparator {
+	return &Comparator{C.CmpFooNew()}
+}
+
+func (cmp *Comparator) Close() {
+	C.rocksdb_comparator_destroy(cmp.Comparator)
+}
+
+func main() {
+	NewFooComparator().Close()
+}
diff --git a/store/rocksdb/filterpolicy.go b/store/rocksdb/filterpolicy.go
new file mode 100644
index 0000000000000000000000000000000000000000..2a6ce73d69e6ef9f810044c35c10a94626ffc0bf
--- /dev/null
+++ b/store/rocksdb/filterpolicy.go
@@ -0,0 +1,32 @@
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include <stdlib.h>
+// #include "rocksdb/c.h"
+import "C"
+
+// FilterPolicy is a factory type that allows the LevelDB database to create a
+// filter, such as a bloom filter, that is stored in the sstables and used by
+// DB.Get to reduce reads.
+//
+// An instance of this struct may be supplied to Options when opening a
+// DB. Typical usage is to call NewBloomFilter to get an instance.
+//
+// To prevent memory leaks, a FilterPolicy must have Close called on it when
+// it is no longer needed by the program.
+type FilterPolicy struct {
+	Policy *C.rocksdb_filterpolicy_t
+}
+
+// NewBloomFilter creates a filter policy that will create a bloom filter when
+// necessary with the given number of bits per key.
+//
+// See the FilterPolicy documentation for more.
+func NewBloomFilter(bitsPerKey int) *FilterPolicy {
+	policy := C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))
+	return &FilterPolicy{policy}
+}
+
+func (fp *FilterPolicy) Close() {
+	C.rocksdb_filterpolicy_destroy(fp.Policy)
+}
diff --git a/store/rocksdb/iterator.go b/store/rocksdb/iterator.go
new file mode 100644
index 0000000000000000000000000000000000000000..259fa78bd818013e40f9f1dc8089f9eb377b8f55
--- /dev/null
+++ b/store/rocksdb/iterator.go
@@ -0,0 +1,150 @@
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include <stdlib.h>
+// #include "rocksdb/c.h"
+import "C"
+
+import (
+	"unsafe"
+)
+
+type IteratorError string
+
+func (e IteratorError) Error() string {
+	return string(e)
+}
+
+// Iterator is a read-only iterator through a LevelDB database. It provides a
+// way to seek to specific keys and iterate through the keyspace from that
+// point, as well as access the values of those keys.
+//
+// Care must be taken when using an Iterator. If the method Valid returns
+// false, calls to Key, Value, Next, and Prev will result in panics. However,
+// Seek, SeekToFirst, SeekToLast, GetError, Valid, and Close will still be
+// safe to call.
+//
+// GetError will only return an error in the event of a LevelDB error. It will
+// return a nil on iterators that are simply invalid. Given that behavior,
+// GetError is not a replacement for a Valid.
+//
+// A typical use looks like:
+//
+// 	db, err := rocksdb.Open(...)
+//
+// 	it := db.NewIterator(readOpts)
+// 	defer it.Close()
+// 	it.Seek(mykey)
+// 	for it = it; it.Valid(); it.Next() {
+// 		useKeyAndValue(it.Key(), it.Value())
+// 	}
+// 	if err := it.GetError(); err != nil {
+// 		...
+// 	}
+//
+// To prevent memory leaks, an Iterator must have Close called on it when it
+// is no longer needed by the program.
+type Iterator struct {
+	Iter *C.rocksdb_iterator_t
+}
+
+// Valid returns false only when an Iterator has iterated past either the
+// first or the last key in the database.
+func (it *Iterator) Valid() bool {
+	return ucharToBool(C.rocksdb_iter_valid(it.Iter))
+}
+
+// Key returns a copy the key in the database the iterator currently holds.
+//
+// If Valid returns false, this method will panic.
+func (it *Iterator) Key() []byte {
+	var klen C.size_t
+	kdata := C.rocksdb_iter_key(it.Iter, &klen)
+	if kdata == nil {
+		return nil
+	}
+	// Unlike DB.Get, the key, kdata, returned is not meant to be freed by the
+	// client. It's a direct reference to data managed by the iterator_t
+	// instead of a copy.  So, we must not free it here but simply copy it
+	// with GoBytes.
+	return C.GoBytes(unsafe.Pointer(kdata), C.int(klen))
+}
+
+// Value returns a copy of the value in the database the iterator currently
+// holds.
+//
+// If Valid returns false, this method will panic.
+func (it *Iterator) Value() []byte {
+	var vlen C.size_t
+	vdata := C.rocksdb_iter_value(it.Iter, &vlen)
+	if vdata == nil {
+		return nil
+	}
+	// Unlike DB.Get, the value, vdata, returned is not meant to be freed by
+	// the client. It's a direct reference to data managed by the iterator_t
+	// instead of a copy. So, we must not free it here but simply copy it with
+	// GoBytes.
+	return C.GoBytes(unsafe.Pointer(vdata), C.int(vlen))
+}
+
+// Next moves the iterator to the next sequential key in the database, as
+// defined by the Comparator in the ReadOptions used to create this Iterator.
+//
+// If Valid returns false, this method will panic.
+func (it *Iterator) Next() {
+	C.rocksdb_iter_next(it.Iter)
+}
+
+// Prev moves the iterator to the previous sequential key in the database, as
+// defined by the Comparator in the ReadOptions used to create this Iterator.
+//
+// If Valid returns false, this method will panic.
+func (it *Iterator) Prev() {
+	C.rocksdb_iter_prev(it.Iter)
+}
+
+// SeekToFirst moves the iterator to the first key in the database, as defined
+// by the Comparator in the ReadOptions used to create this Iterator.
+//
+// This method is safe to call when Valid returns false.
+func (it *Iterator) SeekToFirst() {
+	C.rocksdb_iter_seek_to_first(it.Iter)
+}
+
+// SeekToLast moves the iterator to the last key in the database, as defined
+// by the Comparator in the ReadOptions used to create this Iterator.
+//
+// This method is safe to call when Valid returns false.
+func (it *Iterator) SeekToLast() {
+	C.rocksdb_iter_seek_to_last(it.Iter)
+}
+
+// Seek moves the iterator the position of the key given or, if the key
+// doesn't exist, the next key that does exist in the database. If the key
+// doesn't exist, and there is no next key, the Iterator becomes invalid.
+//
+// This method is safe to call when Valid returns false.
+func (it *Iterator) Seek(key []byte) {
+	C.rocksdb_iter_seek(it.Iter, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
+}
+
+// GetError returns an IteratorError from LevelDB if it had one during
+// iteration.
+//
+// This method is safe to call when Valid returns false.
+func (it *Iterator) GetError() error {
+	var errStr *C.char
+	C.rocksdb_iter_get_error(it.Iter, &errStr)
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return IteratorError(gs)
+	}
+	return nil
+}
+
+// Close deallocates the given Iterator, freeing the underlying C struct.
+func (it *Iterator) Close() {
+	C.rocksdb_iter_destroy(it.Iter)
+	it.Iter = nil
+}
diff --git a/store/rocksdb/options.go b/store/rocksdb/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..a5b07fc42efeae3341ffc643ca8161124ae97f84
--- /dev/null
+++ b/store/rocksdb/options.go
@@ -0,0 +1,237 @@
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include "rocksdb/c.h"
+import "C"
+
+// CompressionOpt is a value for Options.SetCompression.
+type CompressionOpt int
+
+// Known compression arguments for Options.SetCompression.
+const (
+	NoCompression     = CompressionOpt(0)
+	SnappyCompression = CompressionOpt(1)
+)
+
+// Options represent all of the available options when opening a database with
+// Open. Options should be created with NewOptions.
+//
+// It is usually wise to call SetCache with a cache object. Otherwise, all
+// data will be read off disk.
+//
+// To prevent memory leaks, Close must be called on an Options when the
+// program no longer needs it.
+type Options struct {
+	Opt *C.rocksdb_options_t
+}
+
+// ReadOptions represent all of the available options when reading from a
+// database.
+//
+// To prevent memory leaks, Close must be called on a ReadOptions when the
+// program no longer needs it.
+type ReadOptions struct {
+	Opt *C.rocksdb_readoptions_t
+}
+
+// WriteOptions represent all of the available options when writing to a
+// database.
+//
+// To prevent memory leaks, Close must be called on a WriteOptions when the
+// program no longer needs it.
+type WriteOptions struct {
+	Opt *C.rocksdb_writeoptions_t
+}
+
+// NewOptions allocates a new Options object.
+func NewOptions() *Options {
+	opt := C.rocksdb_options_create()
+	return &Options{opt}
+}
+
+// NewReadOptions allocates a new ReadOptions object.
+func NewReadOptions() *ReadOptions {
+	opt := C.rocksdb_readoptions_create()
+	return &ReadOptions{opt}
+}
+
+// NewWriteOptions allocates a new WriteOptions object.
+func NewWriteOptions() *WriteOptions {
+	opt := C.rocksdb_writeoptions_create()
+	return &WriteOptions{opt}
+}
+
+// Close deallocates the Options, freeing its underlying C struct.
+func (o *Options) Close() {
+	C.rocksdb_options_destroy(o.Opt)
+}
+
+// SetComparator sets the comparator to be used for all read and write
+// operations.
+//
+// The comparator that created a database must be the same one (technically,
+// one with the same name string) that is used to perform read and write
+// operations.
+//
+// The default comparator is usually sufficient.
+func (o *Options) SetComparator(cmp *C.rocksdb_comparator_t) {
+	C.rocksdb_options_set_comparator(o.Opt, cmp)
+}
+
+// SetErrorIfExists, if passed true, will cause the opening of a database that
+// already exists to throw an error.
+func (o *Options) SetErrorIfExists(error_if_exists bool) {
+	eie := boolToUchar(error_if_exists)
+	C.rocksdb_options_set_error_if_exists(o.Opt, eie)
+}
+
+// SetCache places a cache object in the database when a database is opened.
+//
+// This is usually wise to use. See also ReadOptions.SetFillCache.
+/*
+func (o *Options) SetCache(cache *Cache) {
+	C.rocksdb_options_set_cache(o.Opt, cache.Cache)
+}
+*/
+
+// SetEnv sets the Env object for the new database handle.
+func (o *Options) SetEnv(env *Env) {
+	C.rocksdb_options_set_env(o.Opt, env.Env)
+}
+
+// SetInfoLog sets a *C.rocksdb_logger_t object as the informational logger
+// for the database.
+func (o *Options) SetInfoLog(log *C.rocksdb_logger_t) {
+	C.rocksdb_options_set_info_log(o.Opt, log)
+}
+
+// SetWriteBufferSize sets the number of bytes the database will build up in
+// memory (backed by an unsorted log on disk) before converting to a sorted
+// on-disk file.
+func (o *Options) SetWriteBufferSize(s int) {
+	C.rocksdb_options_set_write_buffer_size(o.Opt, C.size_t(s))
+}
+
+// SetParanoidChecks, when called with true, will cause the database to do
+// aggressive checking of the data it is processing and will stop early if it
+// detects errors.
+//
+// See the LevelDB documentation docs for details.
+func (o *Options) SetParanoidChecks(pc bool) {
+	C.rocksdb_options_set_paranoid_checks(o.Opt, boolToUchar(pc))
+}
+
+// SetMaxOpenFiles sets the number of files than can be used at once by the
+// database.
+//
+// See the LevelDB documentation for details.
+func (o *Options) SetMaxOpenFiles(n int) {
+	C.rocksdb_options_set_max_open_files(o.Opt, C.int(n))
+}
+
+// SetBlockSize sets the approximate size of user data packed per block.
+//
+// The default is roughly 4096 uncompressed bytes. A better setting depends on
+// your use case. See the LevelDB documentation for details.
+/*
+func (o *Options) SetBlockSize(s int) {
+	C.rocksdb_options_set_block_size(o.Opt, C.size_t(s))
+}
+*/
+
+// SetBlockRestartInterval is the number of keys between restarts points for
+// delta encoding keys.
+//
+// Most clients should leave this parameter alone. See the LevelDB
+// documentation for details.
+/*
+func (o *Options) SetBlockRestartInterval(n int) {
+	C.rocksdb_options_set_block_restart_interval(o.Opt, C.int(n))
+}
+*/
+
+// SetCompression sets whether to compress blocks using the specified
+// compression algorithm.
+//
+// The default value is SnappyCompression and it is fast enough that it is
+// unlikely you want to turn it off. The other option is NoCompression.
+//
+// If the LevelDB library was built without Snappy compression enabled, the
+// SnappyCompression setting will be ignored.
+func (o *Options) SetCompression(t CompressionOpt) {
+	C.rocksdb_options_set_compression(o.Opt, C.int(t))
+}
+
+// SetCreateIfMissing causes Open to create a new database on disk if it does
+// not already exist.
+func (o *Options) SetCreateIfMissing(b bool) {
+	C.rocksdb_options_set_create_if_missing(o.Opt, boolToUchar(b))
+}
+
+// SetFilterPolicy causes Open to create a new database that will use filters
+// created from the filter policy passed in.
+/*
+func (o *Options) SetFilterPolicy(fp *FilterPolicy) {
+	var policy *C.rocksdb_filterpolicy_t
+	if fp != nil {
+		policy = fp.Policy
+	}
+	C.rocksdb_options_set_filter_policy(o.Opt, policy)
+}
+*/
+
+// Close deallocates the ReadOptions, freeing its underlying C struct.
+func (ro *ReadOptions) Close() {
+	C.rocksdb_readoptions_destroy(ro.Opt)
+}
+
+// SetVerifyChecksums controls whether all data read with this ReadOptions
+// will be verified against corresponding checksums.
+//
+// It defaults to false. See the LevelDB documentation for details.
+func (ro *ReadOptions) SetVerifyChecksums(b bool) {
+	C.rocksdb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
+}
+
+// SetFillCache controls whether reads performed with this ReadOptions will
+// fill the Cache of the server. It defaults to true.
+//
+// It is useful to turn this off on ReadOptions for DB.NewIterator (and DB.Get)
+// calls used in offline threads to prevent bulk scans from flushing out live
+// user data in the cache.
+//
+// See also Options.SetCache
+func (ro *ReadOptions) SetFillCache(b bool) {
+	C.rocksdb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
+}
+
+// SetSnapshot causes reads to be provided as they were when the passed in
+// Snapshot was created by DB.NewSnapshot. This is useful for getting
+// consistent reads during a bulk operation.
+//
+// See the LevelDB documentation for details.
+func (ro *ReadOptions) SetSnapshot(snap *Snapshot) {
+	var s *C.rocksdb_snapshot_t
+	if snap != nil {
+		s = snap.snap
+	}
+	C.rocksdb_readoptions_set_snapshot(ro.Opt, s)
+}
+
+// Close deallocates the WriteOptions, freeing its underlying C struct.
+func (wo *WriteOptions) Close() {
+	C.rocksdb_writeoptions_destroy(wo.Opt)
+}
+
+// SetSync controls whether each write performed with this WriteOptions will
+// be flushed from the operating system buffer cache before the write is
+// considered complete.
+//
+// If called with true, this will signficantly slow down writes. If called
+// with false, and the host machine crashes, some recent writes may be
+// lost. The default is false.
+//
+// See the LevelDB documentation for details.
+func (wo *WriteOptions) SetSync(b bool) {
+	C.rocksdb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
+}
diff --git a/store/rocksdb/rocksdb_test.go b/store/rocksdb/rocksdb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..420f1dc78dd0041d546ed0359333c2cc0830cf13
--- /dev/null
+++ b/store/rocksdb/rocksdb_test.go
@@ -0,0 +1,355 @@
+package rocksdb
+
+import (
+	"bytes"
+	"fmt"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+)
+
+// init seeds math/rand so each test run picks different temporary
+// database names. Seeding with the full UnixNano timestamp fixes the
+// original's use of time.Now().Nanosecond(), whose value wraps every
+// second and so could repeat across runs.
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+// This testcase is a port of rocksdb's c_test.c. It walks the whole
+// binding in one sequence: option setup, open/create semantics, Put/Get,
+// WriteBatch, iteration, approximate sizes, properties, snapshots,
+// database repair, and bloom-filter setup.
+func TestC(t *testing.T) {
+	dbname := tempDir(t)
+	defer deleteDBDirectory(t, dbname)
+	env := NewDefaultEnv()
+	// NOTE(review): cache is created but the SetCache call below is
+	// commented out, so the cache is only exercised by Close at the end.
+	cache := NewLRUCache(1 << 20)
+
+	options := NewOptions()
+	// options.SetComparator(cmp)
+	options.SetErrorIfExists(true)
+	// options.SetCache(cache)
+	options.SetEnv(env)
+	options.SetInfoLog(nil)
+	options.SetWriteBufferSize(1 << 20)
+	options.SetParanoidChecks(true)
+	options.SetMaxOpenFiles(10)
+	// options.SetBlockSize(1024)
+	// options.SetBlockRestartInterval(8)
+	options.SetCompression(NoCompression)
+
+	roptions := NewReadOptions()
+	roptions.SetVerifyChecksums(true)
+	roptions.SetFillCache(false)
+
+	woptions := NewWriteOptions()
+	woptions.SetSync(true)
+
+	// Best-effort cleanup of leftovers from a previous run.
+	_ = DestroyDatabase(dbname, options)
+
+	// CreateIfMissing is not yet set, so opening a missing db must fail.
+	// NOTE(review): if this Open unexpectedly succeeds, the db handle is
+	// leaked by the reassignment below — confirm whether it needs a Close.
+	db, err := Open(dbname, options)
+	if err == nil {
+		t.Errorf("Open on missing db should have failed")
+	}
+
+	options.SetCreateIfMissing(true)
+	db, err = Open(dbname, options)
+	if err != nil {
+		t.Fatalf("Open failed: %v", err)
+	}
+
+	// Simple Put/Get round trip.
+	putKey := []byte("foo")
+	putValue := []byte("hello")
+	err = db.Put(woptions, putKey, putValue)
+	if err != nil {
+		t.Errorf("Put failed: %v", err)
+	}
+
+	CheckGet(t, "after Put", db, roptions, putKey, putValue)
+
+	// Atomic WriteBatch: Clear drops the first Put of "foo"; the Delete
+	// of "bar" then cancels its Put within the same batch, so only "box"
+	// lands alongside the pre-existing "foo".
+	wb := NewWriteBatch()
+	wb.Put([]byte("foo"), []byte("a"))
+	wb.Clear()
+	wb.Put([]byte("bar"), []byte("b"))
+	wb.Put([]byte("box"), []byte("c"))
+	wb.Delete([]byte("bar"))
+	err = db.Write(woptions, wb)
+	if err != nil {
+		t.Errorf("Write batch failed: %v", err)
+	}
+	CheckGet(t, "after WriteBatch", db, roptions, []byte("foo"), []byte("hello"))
+	CheckGet(t, "after WriteBatch", db, roptions, []byte("bar"), nil)
+	CheckGet(t, "after WriteBatch", db, roptions, []byte("box"), []byte("c"))
+	// TODO: WriteBatch iteration isn't easy. Suffers same problems as
+	// Comparator.
+	// wbiter := &TestWBIter{t: t}
+	// wb.Iterate(wbiter)
+	// if wbiter.pos != 3 {
+	// 	t.Errorf("After Iterate, on the wrong pos: %d", wbiter.pos)
+	// }
+	wb.Close()
+
+	// Iterator: forward, backward, and seek behavior over the two
+	// remaining keys ("box" and "foo").
+	iter := db.NewIterator(roptions)
+	if iter.Valid() {
+		t.Errorf("Read iterator should not be valid, yet")
+	}
+	iter.SeekToFirst()
+	if !iter.Valid() {
+		t.Errorf("Read iterator should be valid after seeking to first record")
+	}
+	CheckIter(t, iter, []byte("box"), []byte("c"))
+	iter.Next()
+	CheckIter(t, iter, []byte("foo"), []byte("hello"))
+	iter.Prev()
+	CheckIter(t, iter, []byte("box"), []byte("c"))
+	iter.Prev()
+	if iter.Valid() {
+		t.Errorf("Read iterator should not be valid after go back past the first record")
+	}
+	iter.SeekToLast()
+	CheckIter(t, iter, []byte("foo"), []byte("hello"))
+	iter.Seek([]byte("b"))
+	CheckIter(t, iter, []byte("box"), []byte("c"))
+	if iter.GetError() != nil {
+		t.Errorf("Read iterator has an error we didn't expect: %v", iter.GetError())
+	}
+	iter.Close()
+
+	// approximate sizes
+	n := 20000
+	woptions.SetSync(false)
+	for i := 0; i < n; i++ {
+		keybuf := []byte(fmt.Sprintf("k%020d", i))
+		valbuf := []byte(fmt.Sprintf("v%020d", i))
+		err := db.Put(woptions, keybuf, valbuf)
+		if err != nil {
+			t.Errorf("Put error in approximate size test: %v", err)
+		}
+	}
+
+	ranges := []Range{
+		{[]byte("a"), []byte("k00000000000000010000")},
+		{[]byte("k00000000000000010000"), []byte("z")},
+	}
+	sizes := db.GetApproximateSizes(ranges)
+	if len(sizes) == 2 {
+		if sizes[0] <= 0 {
+			t.Errorf("First size range was %d", sizes[0])
+		}
+		if sizes[1] <= 0 {
+			t.Errorf("Second size range was %d", sizes[1])
+		}
+	} else {
+		t.Errorf("Expected 2 approx. sizes back, got %d", len(sizes))
+	}
+
+	// property
+	prop := db.PropertyValue("nosuchprop")
+	if prop != "" {
+		t.Errorf("property nosuchprop should not have a value")
+	}
+	prop = db.PropertyValue("rocksdb.stats")
+	if prop == "" {
+		t.Errorf("property rocksdb.stats should have a value")
+	}
+
+	// snapshot: a read through the snapshot still sees "foo" even after
+	// it has been deleted from the live database.
+	snap := db.NewSnapshot()
+	err = db.Delete(woptions, []byte("foo"))
+	if err != nil {
+		t.Errorf("Delete during snapshot test errored: %v", err)
+	}
+	roptions.SetSnapshot(snap)
+	CheckGet(t, "from snapshot", db, roptions, []byte("foo"), []byte("hello"))
+	roptions.SetSnapshot(nil)
+	CheckGet(t, "from snapshot", db, roptions, []byte("foo"), nil)
+	db.ReleaseSnapshot(snap)
+
+	// repair: the repaired database must retain the surviving keys.
+	db.Close()
+	options.SetCreateIfMissing(false)
+	options.SetErrorIfExists(false)
+	err = RepairDatabase(dbname, options)
+	if err != nil {
+		t.Errorf("Repairing db failed: %v", err)
+	}
+	db, err = Open(dbname, options)
+	if err != nil {
+		t.Errorf("Unable to open repaired db: %v", err)
+	}
+	CheckGet(t, "repair", db, roptions, []byte("foo"), nil)
+	CheckGet(t, "repair", db, roptions, []byte("bar"), nil)
+	CheckGet(t, "repair", db, roptions, []byte("box"), []byte("c"))
+	options.SetCreateIfMissing(true)
+	options.SetErrorIfExists(true)
+
+	// filter
+	// NOTE(review): SetFilterPolicy is commented out, so the bloom filter
+	// is created and closed but never actually attached to the db.
+	policy := NewBloomFilter(10)
+	db.Close()
+	DestroyDatabase(dbname, options)
+	// options.SetFilterPolicy(policy)
+	db, err = Open(dbname, options)
+	if err != nil {
+		t.Fatalf("Unable to recreate db for filter tests: %v", err)
+	}
+	err = db.Put(woptions, []byte("foo"), []byte("foovalue"))
+	if err != nil {
+		t.Errorf("Unable to put 'foo' with filter: %v", err)
+	}
+	err = db.Put(woptions, []byte("bar"), []byte("barvalue"))
+	if err != nil {
+		t.Errorf("Unable to put 'bar' with filter: %v", err)
+	}
+	db.CompactRange(Range{nil, nil})
+	CheckGet(t, "filter", db, roptions, []byte("foo"), []byte("foovalue"))
+	CheckGet(t, "filter", db, roptions, []byte("bar"), []byte("barvalue"))
+	// options.SetFilterPolicy(nil)
+	policy.Close()
+
+	// cleanup
+	db.Close()
+	options.Close()
+	roptions.Close()
+	woptions.Close()
+	cache.Close()
+	// DestroyComparator(cmp)
+	env.Close()
+}
+
+// TestNilSlicesInDb verifies nil/empty-slice handling at the API
+// boundary: a nil key and an empty key address the same record, a nil
+// value reads back as []byte{} (while a missing key reads back as nil),
+// and Delete accepts both nil and empty keys.
+//
+// Fixes over the original: the db, options, ro, and wo handles are now
+// closed (they previously leaked), and previously ignored Put/Get
+// errors are checked.
+func TestNilSlicesInDb(t *testing.T) {
+	dbname := tempDir(t)
+	defer deleteDBDirectory(t, dbname)
+	options := NewOptions()
+	defer options.Close()
+	options.SetErrorIfExists(true)
+	options.SetCreateIfMissing(true)
+	ro := NewReadOptions()
+	defer ro.Close()
+	_ = DestroyDatabase(dbname, options)
+	db, err := Open(dbname, options)
+	if err != nil {
+		t.Fatalf("Database could not be opened: %v", err)
+	}
+	defer db.Close()
+	val, err := db.Get(ro, []byte("missing"))
+	if err != nil {
+		t.Errorf("Get failed: %v", err)
+	}
+	if val != nil {
+		t.Errorf("A key not in the db should return nil, not %v", val)
+	}
+	wo := NewWriteOptions()
+	defer wo.Close()
+	if err := db.Put(wo, nil, []byte("love")); err != nil {
+		t.Errorf("nil key Put errored: %v", err)
+	}
+	val, err = db.Get(ro, nil)
+	if err != nil {
+		t.Errorf("nil key Get errored: %v", err)
+	}
+	if !bytes.Equal([]byte("love"), val) {
+		t.Errorf("Get should see the nil key: %v", val)
+	}
+	val, err = db.Get(ro, []byte{})
+	if err != nil {
+		t.Errorf("empty slice key Get errored: %v", err)
+	}
+	if !bytes.Equal([]byte("love"), val) {
+		t.Errorf("Get shouldn't distinguish between nil key and empty slice key: %v", val)
+	}
+
+	err = db.Put(wo, []byte("nilvalue"), nil)
+	if err != nil {
+		t.Errorf("nil value Put errored: %v", err)
+	}
+	// Compare with the []byte("missing") case. We expect Get to return a
+	// []byte{} here, but expect a nil returned there.
+	CheckGet(t, "nil value Put", db, ro, []byte("nilvalue"), []byte{})
+
+	err = db.Put(wo, []byte("emptyvalue"), []byte{})
+	if err != nil {
+		t.Errorf("empty value Put errored: %v", err)
+	}
+	CheckGet(t, "empty value Put", db, ro, []byte("emptyvalue"), []byte{})
+
+	err = db.Delete(wo, nil)
+	if err != nil {
+		t.Errorf("nil key Delete errored: %v", err)
+	}
+	err = db.Delete(wo, []byte{})
+	if err != nil {
+		t.Errorf("empty slice key Delete errored: %v", err)
+	}
+}
+
+// TestIterationValidityLimits checks Iterator validity transitions: a
+// fresh iterator is invalid, Seek makes it valid, Key returns a copy
+// rather than a live view, and stepping past the last record
+// invalidates the iterator without reporting an error.
+//
+// Fixes over the original: the options, ro, and wo handles are now
+// closed, and previously ignored Put errors are checked.
+func TestIterationValidityLimits(t *testing.T) {
+	dbname := tempDir(t)
+	defer deleteDBDirectory(t, dbname)
+	options := NewOptions()
+	defer options.Close()
+	options.SetErrorIfExists(true)
+	options.SetCreateIfMissing(true)
+	ro := NewReadOptions()
+	defer ro.Close()
+	wo := NewWriteOptions()
+	defer wo.Close()
+	_ = DestroyDatabase(dbname, options)
+	db, err := Open(dbname, options)
+	if err != nil {
+		t.Fatalf("Database could not be opened: %v", err)
+	}
+	defer db.Close()
+	if err := db.Put(wo, []byte("bat"), []byte("somedata")); err != nil {
+		t.Fatalf("Put of \"bat\" failed: %v", err)
+	}
+	if err := db.Put(wo, []byte("done"), []byte("somedata")); err != nil {
+		t.Fatalf("Put of \"done\" failed: %v", err)
+	}
+	it := db.NewIterator(ro)
+	defer it.Close()
+	if it.Valid() {
+		t.Errorf("new Iterator was valid")
+	}
+	it.Seek([]byte("bat"))
+	if !it.Valid() {
+		t.Errorf("Seek to %#v failed.", []byte("bat"))
+	}
+	if !bytes.Equal([]byte("bat"), it.Key()) {
+		t.Errorf("did not seek to []byte(\"bat\")")
+	}
+	// Key must return a copy: advancing the iterator must not mutate a
+	// previously returned key slice.
+	key := it.Key()
+	it.Next()
+	if bytes.Equal(key, it.Key()) {
+		t.Errorf("key should be a copy of last key")
+	}
+	it.Next()
+	if it.Valid() {
+		t.Errorf("iterating off the db should result in an invalid iterator")
+	}
+	err = it.GetError()
+	if err != nil {
+		t.Errorf("should not have seen an error on an invalid iterator")
+	}
+	it.Seek([]byte("bat"))
+	if !it.Valid() {
+		t.Errorf("Iterator should be valid again")
+	}
+}
+
+// CheckGet fetches key from db and reports a test error if the stored
+// value differs from expected. An expected value of nil asserts the key
+// is absent. The where string labels the failure site.
+//
+// Fix over the original: t.Helper() marks this as a helper so failures
+// are attributed to the calling line.
+func CheckGet(t *testing.T, where string, db *DB, roptions *ReadOptions, key, expected []byte) {
+	t.Helper()
+	getValue, err := db.Get(roptions, key)
+	if err != nil {
+		t.Errorf("%s, Get failed: %v", where, err)
+	}
+	if !bytes.Equal(getValue, expected) {
+		t.Errorf("%s, expected Get value %v, got %v", where, expected, getValue)
+	}
+}
+
+// WBIterCheckEqual reports a test error when given differs from
+// expected, labeling the failure with its location (where), the field
+// being compared (which), and the batch position (pos).
+//
+// Fix over the original: t.Helper() marks this as a helper so failures
+// are attributed to the calling line.
+func WBIterCheckEqual(t *testing.T, where string, which string, pos int, expected, given []byte) {
+	t.Helper()
+	if !bytes.Equal(expected, given) {
+		t.Errorf("%s at pos %d, %s expected: %v, got: %v", where, pos, which, expected, given)
+	}
+}
+
+// CheckIter reports a test error if the iterator's current key or value
+// differs from the expected key and value.
+//
+// Fix over the original: t.Helper() marks this as a helper so failures
+// are attributed to the calling line.
+func CheckIter(t *testing.T, it *Iterator, key, value []byte) {
+	t.Helper()
+	if !bytes.Equal(key, it.Key()) {
+		t.Errorf("Iterator: expected key %v, got %v", key, it.Key())
+	}
+	if !bytes.Equal(value, it.Value()) {
+		t.Errorf("Iterator: expected value %v, got %v", value, it.Value())
+	}
+}
+
+// deleteDBDirectory removes the database directory at dirPath,
+// reporting a test error on failure.
+//
+// Fixes over the original: t.Helper() attributes failures to the
+// caller, and the underlying error is now included in the message.
+func deleteDBDirectory(t *testing.T, dirPath string) {
+	t.Helper()
+	if err := os.RemoveAll(dirPath); err != nil {
+		t.Errorf("Unable to remove database directory %s: %v", dirPath, err)
+	}
+}
+
+// tempDir returns a fresh, randomly named path under the system temp
+// directory for a throwaway test database. Any leftover directory from
+// a previous run with the same name is removed before the path is
+// returned; the directory itself is not created.
+func tempDir(t *testing.T) string {
+	name := fmt.Sprintf("rocksdb-test-%d", rand.Int())
+	dir := filepath.Join(os.TempDir(), name)
+	deleteDBDirectory(t, dir)
+	return dir
+}