NO-ISSUE Adapt rocksdb 6.13.3 (#20)
linxGnu authored Oct 17, 2020
1 parent 1fc50de commit 03d18d1
Showing 67 changed files with 998 additions and 466 deletions.
13 changes: 0 additions & 13 deletions .travis.yml

This file was deleted.

5 changes: 3 additions & 2 deletions README.md
@@ -1,8 +1,9 @@
# grocksdb, a Go wrapper for RocksDB

[![Build Status](https://travis-ci.org/linxGnu/grocksdb.svg?branch=master)](https://travis-ci.org/linxGnu/grocksdb)
[![](https://github.com/linxGnu/grocksdb/workflows/Build/badge.svg)]()
[![Go Report Card](https://goreportcard.com/badge/github.com/linxGnu/grocksdb)](https://goreportcard.com/report/github.com/linxGnu/grocksdb)
[![godoc](https://img.shields.io/badge/docs-GoDoc-green.svg)](https://godoc.org/github.com/linxGnu/grocksdb)
[![Coverage Status](https://coveralls.io/repos/github/linxGnu/grocksdb/badge.svg?branch=master)](https://coveralls.io/github/linxGnu/grocksdb?branch=master)
[![godoc](https://img.shields.io/badge/docs-GoDoc-green.svg)](https://godoc.org/github.com/linxGnu/grocksdb))

This is a `Fork` from [tecbot/gorocksdb](https://github.com/tecbot/gorocksdb). I respect the author work and community contribution.
The `LICENSE` still remains as upstream.
2 changes: 1 addition & 1 deletion build.sh
@@ -28,7 +28,7 @@ cd $BUILD_PATH && wget https://github.com/facebook/zstd/releases/download/v1.4.5
$CMAKE_REQUIRED_PARAMS -DZSTD_ZLIB_SUPPORT=ON -DZSTD_LZMA_SUPPORT=OFF -DCMAKE_BUILD_TYPE=Release .. && make -j16 install && \
cd $BUILD_PATH && rm -rf *

cd $BUILD_PATH && wget https://github.com/facebook/rocksdb/archive/v6.12.7.tar.gz && tar xzf v6.12.7.tar.gz && cd rocksdb-6.12.7/ && \
cd $BUILD_PATH && wget https://github.com/facebook/rocksdb/archive/v6.13.3.tar.gz && tar xzf v6.13.3.tar.gz && cd rocksdb-6.13.3/ && \
mkdir -p build_place && cd build_place && cmake -DCMAKE_BUILD_TYPE=Release $CMAKE_REQUIRED_PARAMS -DCMAKE_PREFIX_PATH=$INSTALL_PREFIX -DWITH_TESTS=OFF -DWITH_GFLAGS=OFF \
-DWITH_BENCHMARK_TOOLS=OFF -DWITH_TOOLS=OFF -DWITH_MD_LIBRARY=OFF -DWITH_RUNTIME_DEBUG=OFF -DROCKSDB_BUILD_SHARED=OFF -DWITH_SNAPPY=ON -DWITH_LZ4=ON -DWITH_ZLIB=ON \
-DWITH_ZSTD=ON -DWITH_BZ2=OFF -WITH_GFLAGS=OFF .. && make -j16 install/strip && \
8 changes: 4 additions & 4 deletions cache.go
@@ -33,10 +33,10 @@ func (c *Cache) SetCapacity(value uint64) {
C.rocksdb_cache_set_capacity(c.c, C.size_t(value))
}

// // GetCapacity returns capacity of the cache.
// func (c *Cache) GetCapacity() uint64 {
// return uint64(C.rocksdb_cache_get_capacity(c.c))
// }
// GetCapacity returns capacity of the cache.
func (c *Cache) GetCapacity() uint64 {
return uint64(C.rocksdb_cache_get_capacity(c.c))
}

// Destroy deallocates the Cache object.
func (c *Cache) Destroy() {
22 changes: 11 additions & 11 deletions cache_test.go
@@ -1,16 +1,16 @@
package grocksdb

// import (
// "testing"
import (
"testing"

// "github.com/stretchr/testify/require"
// )
"github.com/stretchr/testify/require"
)

// func TestCache(t *testing.T) {
// cache := NewLRUCache(19)
// defer cache.Destroy()
func TestCache(t *testing.T) {
cache := NewLRUCache(19)
defer cache.Destroy()

// require.EqualValues(t, 19, cache.GetCapacity())
// cache.SetCapacity(128)
// require.EqualValues(t, 128, cache.GetCapacity())
// }
require.EqualValues(t, 19, cache.GetCapacity())
cache.SetCapacity(128)
require.EqualValues(t, 128, cache.GetCapacity())
}
198 changes: 99 additions & 99 deletions db.go
@@ -174,76 +174,76 @@ func OpenDbColumnFamilies(
return
}

// // OpenDbColumnFamiliesWithTTL opens a database with the specified column families along with their ttls.
// //
// // BEHAVIOUR:
// // TTL is accepted in seconds
// // (int32_t)Timestamp(creation) is suffixed to values in Put internally
// // Expired TTL values deleted in compaction only:(Timestamp+ttl<time_now)
// // Get/Iterator may return expired entries(compaction not run on them yet)
// // Different TTL may be used during different Opens
// // Example: Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2
// // Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t>=5
// // read_only=true opens in the usual read-only mode. Compactions will not be
// // triggered(neither manual nor automatic), so no expired entries removed
// //
// // CONSTRAINTS:
// // Not specifying/passing or non-positive TTL behaves like TTL = infinity
// func OpenDbColumnFamiliesWithTTL(
// opts *Options,
// name string,
// cfNames []string,
// cfOpts []*Options,
// ttls []C.int,
// ) (db *DB, cfHandles []*ColumnFamilyHandle, err error) {
// numColumnFamilies := len(cfNames)
// if numColumnFamilies != len(cfOpts) {
// err = ErrColumnFamilyMustMatch
// return
// }

// cName := C.CString(name)
// cNames := make([]*C.char, numColumnFamilies)
// for i, s := range cfNames {
// cNames[i] = C.CString(s)
// }

// cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
// for i, o := range cfOpts {
// cOpts[i] = o.c
// }

// cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)

// var cErr *C.char
// _db := C.rocksdb_open_column_families_with_ttl(
// opts.c,
// cName,
// C.int(numColumnFamilies),
// &cNames[0],
// &cOpts[0],
// &cHandles[0],
// &ttls[0],
// &cErr,
// )
// if err = fromCError(cErr); err == nil {
// db = &DB{
// name: name,
// c: _db,
// opts: opts,
// }
// cfHandles = make([]*ColumnFamilyHandle, numColumnFamilies)
// for i, c := range cHandles {
// cfHandles[i] = NewNativeColumnFamilyHandle(c)
// }
// }

// C.free(unsafe.Pointer(cName))
// for _, s := range cNames {
// C.free(unsafe.Pointer(s))
// }
// return
// }
// OpenDbColumnFamiliesWithTTL opens a database with the specified column families along with their ttls.
//
// BEHAVIOUR:
// TTL is accepted in seconds
// (int32_t)Timestamp(creation) is suffixed to values in Put internally
// Expired TTL values deleted in compaction only:(Timestamp+ttl<time_now)
// Get/Iterator may return expired entries(compaction not run on them yet)
// Different TTL may be used during different Opens
// Example: Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2
// Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t>=5
// read_only=true opens in the usual read-only mode. Compactions will not be
// triggered(neither manual nor automatic), so no expired entries removed
//
// CONSTRAINTS:
// Not specifying/passing or non-positive TTL behaves like TTL = infinity
func OpenDbColumnFamiliesWithTTL(
opts *Options,
name string,
cfNames []string,
cfOpts []*Options,
ttls []C.int,
) (db *DB, cfHandles []*ColumnFamilyHandle, err error) {
numColumnFamilies := len(cfNames)
if numColumnFamilies != len(cfOpts) {
err = ErrColumnFamilyMustMatch
return
}

cName := C.CString(name)
cNames := make([]*C.char, numColumnFamilies)
for i, s := range cfNames {
cNames[i] = C.CString(s)
}

cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
for i, o := range cfOpts {
cOpts[i] = o.c
}

cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)

var cErr *C.char
_db := C.rocksdb_open_column_families_with_ttl(
opts.c,
cName,
C.int(numColumnFamilies),
&cNames[0],
&cOpts[0],
&cHandles[0],
&ttls[0],
&cErr,
)
if err = fromCError(cErr); err == nil {
db = &DB{
name: name,
c: _db,
opts: opts,
}
cfHandles = make([]*ColumnFamilyHandle, numColumnFamilies)
for i, c := range cHandles {
cfHandles[i] = NewNativeColumnFamilyHandle(c)
}
}

C.free(unsafe.Pointer(cName))
for _, s := range cNames {
C.free(unsafe.Pointer(s))
}
return
}
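
For context, a rough usage sketch of the newly enabled TTL open (not part of this commit). It assumes plain integer TTL values for readability; as committed, the ttls parameter is typed as []C.int, so callers outside the package would need to adapt that argument accordingly. The path and column family names below are illustrative only.

// Hypothetical sketch: open "default" (no expiry) and "events" (entries
// expire roughly 1 hour after insertion). TTL is enforced only during
// compaction, so expired entries may still be returned by Get/Iterator
// until a compaction has run over them.
opts := grocksdb.NewDefaultOptions()
opts.SetCreateIfMissing(true)
opts.SetCreateIfMissingColumnFamilies(true)

db, cfHandles, err := grocksdb.OpenDbColumnFamiliesWithTTL(
	opts,
	"/tmp/ttl-example-db", // illustrative path
	[]string{"default", "events"},
	[]*grocksdb.Options{opts, opts},
	[]int{0, 3600}, // hypothetical plain-int form; the parameter is []C.int in this revision
)
if err != nil {
	panic(err)
}
defer db.Close()
for _, h := range cfHandles {
	defer h.Destroy() // handles are destroyed before the deferred db.Close()
}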

// OpenDbForReadOnlyColumnFamilies opens a database with the specified column
// families in read only mode.
@@ -886,35 +886,35 @@ func (db *DB) CreateColumnFamily(opts *Options, name string) (handle *ColumnFami
return
}

// // CreateColumnFamilyWithTTL create a new column family along with its ttl.
// //
// // BEHAVIOUR:
// // TTL is accepted in seconds
// // (int32_t)Timestamp(creation) is suffixed to values in Put internally
// // Expired TTL values deleted in compaction only:(Timestamp+ttl<time_now)
// // Get/Iterator may return expired entries(compaction not run on them yet)
// // Different TTL may be used during different Opens
// // Example: Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2
// // Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t>=5
// // read_only=true opens in the usual read-only mode. Compactions will not be
// // triggered(neither manual nor automatic), so no expired entries removed
// //
// // CONSTRAINTS:
// // Not specifying/passing or non-positive TTL behaves like TTL = infinity
// func (db *DB) CreateColumnFamilyWithTTL(opts *Options, name string, ttl C.int) (handle *ColumnFamilyHandle, err error) {
// var (
// cErr *C.char
// cName = C.CString(name)
// )

// cHandle := C.rocksdb_create_column_family_with_ttl(db.c, opts.c, cName, ttl, &cErr)
// if err = fromCError(cErr); err == nil {
// handle = NewNativeColumnFamilyHandle(cHandle)
// }

// C.free(unsafe.Pointer(cName))
// return
// }
// CreateColumnFamilyWithTTL create a new column family along with its ttl.
//
// BEHAVIOUR:
// TTL is accepted in seconds
// (int32_t)Timestamp(creation) is suffixed to values in Put internally
// Expired TTL values deleted in compaction only:(Timestamp+ttl<time_now)
// Get/Iterator may return expired entries(compaction not run on them yet)
// Different TTL may be used during different Opens
// Example: Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2
// Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t>=5
// read_only=true opens in the usual read-only mode. Compactions will not be
// triggered(neither manual nor automatic), so no expired entries removed
//
// CONSTRAINTS:
// Not specifying/passing or non-positive TTL behaves like TTL = infinity
func (db *DB) CreateColumnFamilyWithTTL(opts *Options, name string, ttl C.int) (handle *ColumnFamilyHandle, err error) {
var (
cErr *C.char
cName = C.CString(name)
)

cHandle := C.rocksdb_create_column_family_with_ttl(db.c, opts.c, cName, ttl, &cErr)
if err = fromCError(cErr); err == nil {
handle = NewNativeColumnFamilyHandle(cHandle)
}

C.free(unsafe.Pointer(cName))
return
}
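
Similarly, a brief hypothetical sketch (again not part of the commit) of adding a column family with its own TTL to a database opened with TTL support; the ttl argument is typed as C.int in this revision and is shown below as a plain integer number of seconds.

// Hypothetical sketch: add a "sessions" column family whose entries expire
// roughly 30 minutes after insertion (removed at compaction time).
cfOpts := grocksdb.NewDefaultOptions()
handle, err := db.CreateColumnFamilyWithTTL(cfOpts, "sessions", 1800) // 1800s; C.int in this revision
if err != nil {
	panic(err)
}
defer handle.Destroy()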

// DropColumnFamily drops a column family.
func (db *DB) DropColumnFamily(c *ColumnFamilyHandle) (err error) {
47 changes: 46 additions & 1 deletion dist/darwin_amd64/include/rocksdb/advanced_options.h
@@ -10,14 +10,14 @@

#include <memory>

#include "rocksdb/compression_type.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/universal_compaction.h"

namespace ROCKSDB_NAMESPACE {

class Slice;
class SliceTransform;
enum CompressionType : unsigned char;
class TablePropertiesCollectorFactory;
class TableFactory;
struct Options;
@@ -717,6 +717,51 @@ struct AdvancedColumnFamilyOptions {
// data is left uncompressed (unless compression is also requested).
uint64_t sample_for_compression = 0;

// UNDER CONSTRUCTION -- DO NOT USE
// When set, large values (blobs) are written to separate blob files, and
// only pointers to them are stored in SST files. This can reduce write
// amplification for large-value use cases at the cost of introducing a level
// of indirection for reads. See also the options min_blob_size,
// blob_file_size, and blob_compression_type below.
//
// Default: false
//
// Dynamically changeable through the SetOptions() API
bool enable_blob_files = false;

// UNDER CONSTRUCTION -- DO NOT USE
// The size of the smallest value to be stored separately in a blob file.
// Values which have an uncompressed size smaller than this threshold are
// stored alongside the keys in SST files in the usual fashion. A value of
// zero for this option means that all values are stored in blob files. Note
// that enable_blob_files has to be set in order for this option to have any
// effect.
//
// Default: 0
//
// Dynamically changeable through the SetOptions() API
uint64_t min_blob_size = 0;

// UNDER CONSTRUCTION -- DO NOT USE
// The size limit for blob files. When writing blob files, a new file is
// opened once this limit is reached. Note that enable_blob_files has to be
// set in order for this option to have any effect.
//
// Default: 256 MB
//
// Dynamically changeable through the SetOptions() API
uint64_t blob_file_size = 1ULL << 28;

// UNDER CONSTRUCTION -- DO NOT USE
// The compression algorithm to use for large values stored in blob files.
// Note that enable_blob_files has to be set in order for this option to have
// any effect.
//
// Default: no compression
//
// Dynamically changeable through the SetOptions() API
CompressionType blob_compression_type = kNoCompression;

// Create ColumnFamilyOptions with default values for all fields
AdvancedColumnFamilyOptions();
// Create ColumnFamilyOptions from Options