Mirror of https://github.com/0glabs/0g-chain.git (synced 2024-11-10)
Rocksdb Metrics (#1692)
* Rocksdb Metrics
* Add rocksdb namespace for options
* Adding help to the metrics
* CR's fixes
* CR's fixes
* CR's fixes
This commit is contained in:
Parent: 495444586a
Commit: d91bd688e7
cmd/kava/opendb/metrics.go (new file, 166 lines)
@@ -0,0 +1,166 @@
//go:build rocksdb
// +build rocksdb

package opendb

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

// rocksdbMetrics will be initialized in registerMetrics() if the
// rocksdb.enable-metrics option is set to true
var rocksdbMetrics *Metrics

// Metrics contains all rocksdb metrics which will be reported to prometheus
type Metrics struct {
	// Keys
	NumberKeysWritten metrics.Gauge
	NumberKeysRead    metrics.Gauge
	NumberKeysUpdated metrics.Gauge
	EstimateNumKeys   metrics.Gauge

	// Files
	NumberFileOpens  metrics.Gauge
	NumberFileErrors metrics.Gauge

	// Memory
	BlockCacheUsage         metrics.Gauge
	EstimateTableReadersMem metrics.Gauge
	CurSizeAllMemTables     metrics.Gauge
	BlockCachePinnedUsage   metrics.Gauge

	// Cache
	BlockCacheMiss        metrics.Gauge
	BlockCacheHit         metrics.Gauge
	BlockCacheAdd         metrics.Gauge
	BlockCacheAddFailures metrics.Gauge
}

// registerMetrics registers metrics in prometheus and initializes the rocksdbMetrics variable
func registerMetrics() {
	if rocksdbMetrics != nil {
		// metrics already registered
		return
	}

	labels := make([]string, 0)
	rocksdbMetrics = &Metrics{
		// Keys
		NumberKeysWritten: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "key",
			Name:      "number_keys_written",
			Help:      "",
		}, labels),
		NumberKeysRead: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "key",
			Name:      "number_keys_read",
			Help:      "",
		}, labels),
		NumberKeysUpdated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "key",
			Name:      "number_keys_updated",
			Help:      "",
		}, labels),
		EstimateNumKeys: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "key",
			Name:      "estimate_num_keys",
			Help:      "estimated number of total keys in the active and unflushed immutable memtables and storage",
		}, labels),

		// Files
		NumberFileOpens: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "file",
			Name:      "number_file_opens",
			Help:      "",
		}, labels),
		NumberFileErrors: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "file",
			Name:      "number_file_errors",
			Help:      "",
		}, labels),

		// Memory
		BlockCacheUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "memory",
			Name:      "block_cache_usage",
			Help:      "memory size for the entries residing in block cache",
		}, labels),
		EstimateTableReadersMem: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "memory",
			Name:      "estimate_table_readers_mem",
			Help:      "estimated memory used for reading SST tables, excluding memory used in block cache (e.g., filter and index blocks)",
		}, labels),
		CurSizeAllMemTables: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "memory",
			Name:      "cur_size_all_mem_tables",
			Help:      "approximate size of active and unflushed immutable memtables (bytes)",
		}, labels),
		BlockCachePinnedUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "memory",
			Name:      "block_cache_pinned_usage",
			Help:      "returns the memory size for the entries being pinned",
		}, labels),

		// Cache
		BlockCacheMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "cache",
			Name:      "block_cache_miss",
			Help:      "block_cache_miss == block_cache_index_miss + block_cache_filter_miss + block_cache_data_miss",
		}, labels),
		BlockCacheHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "cache",
			Name:      "block_cache_hit",
			Help:      "block_cache_hit == block_cache_index_hit + block_cache_filter_hit + block_cache_data_hit",
		}, labels),
		BlockCacheAdd: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "cache",
			Name:      "block_cache_add",
			Help:      "number of blocks added to block cache",
		}, labels),
		BlockCacheAddFailures: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: "rocksdb",
			Subsystem: "cache",
			Name:      "block_cache_add_failures",
			Help:      "number of failures when adding blocks to block cache",
		}, labels),
	}
}

// report reports metrics to prometheus based on rocksdb props and stats
func (m *Metrics) report(props *properties, stats *stats) {
	// Keys
	m.NumberKeysWritten.Set(float64(stats.NumberKeysWritten))
	m.NumberKeysRead.Set(float64(stats.NumberKeysRead))
	m.NumberKeysUpdated.Set(float64(stats.NumberKeysUpdated))
	m.EstimateNumKeys.Set(float64(props.EstimateNumKeys))

	// Files
	m.NumberFileOpens.Set(float64(stats.NumberFileOpens))
	m.NumberFileErrors.Set(float64(stats.NumberFileErrors))

	// Memory
	m.BlockCacheUsage.Set(float64(props.BlockCacheUsage))
	m.EstimateTableReadersMem.Set(float64(props.EstimateTableReadersMem))
	m.CurSizeAllMemTables.Set(float64(props.CurSizeAllMemTables))
	m.BlockCachePinnedUsage.Set(float64(props.BlockCachePinnedUsage))

	// Cache
	m.BlockCacheMiss.Set(float64(stats.BlockCacheMiss))
	m.BlockCacheHit.Set(float64(stats.BlockCacheHit))
	m.BlockCacheAdd.Set(float64(stats.BlockCacheAdd))
	m.BlockCacheAddFailures.Set(float64(stats.BlockCacheAddFailures))
}
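Each gauge above is created with go-kit's prometheus.NewGaugeFrom, which registers the collector with client_golang's default registry at construction time, so any handler serving the default registry will expose these series. A minimal, self-contained sketch of that path (illustrative only; the port and gauge name are made up and not part of this commit):

package main

import (
	"log"
	"net/http"

	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// NewGaugeFrom registers with the default registry, just like the
	// rocksdb gauges in metrics.go above
	g := prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
		Namespace: "rocksdb",
		Subsystem: "key",
		Name:      "example_gauge",
		Help:      "illustrative gauge, not part of this commit",
	}, []string{})

	g.Set(42)

	// the series appears as rocksdb_key_example_gauge 42 on /metrics
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}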
@@ -27,6 +27,7 @@ import (
 	"path/filepath"
 	"runtime"
 	"strings"
+	"time"

 	"github.com/cosmos/cosmos-sdk/server/types"
 	"github.com/linxGnu/grocksdb"
@@ -42,11 +43,15 @@ const (

 	defaultColumnFamilyName = "default"

-	maxOpenFilesDBOptName          = "max_open_files"
-	maxFileOpeningThreadsDBOptName = "max_file_opening_threads"
+	enableMetricsOptName             = "rocksdb.enable-metrics"
+	reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs"
+	defaultReportMetricsIntervalSecs = 15

-	writeBufferSizeCFOptName = "write_buffer_size"
-	numLevelsCFOptName       = "num_levels"
+	maxOpenFilesDBOptName          = "rocksdb.max-open-files"
+	maxFileOpeningThreadsDBOptName = "rocksdb.max-file-opening-threads"
+
+	writeBufferSizeCFOptName = "rocksdb.write-buffer-size"
+	numLevelsCFOptName       = "rocksdb.num-levels"
 )

 func OpenDB(appOpts types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
@@ -69,7 +74,13 @@ func openRocksdb(dir string, appOpts types.AppOptions) (dbm.DB, error) {
 	dbOpts = overrideDBOpts(dbOpts, appOpts)
 	cfOpts = overrideCFOpts(cfOpts, appOpts)

-	return newRocksDBWithOptions("application", dir, dbOpts, cfOpts)
+	enableMetrics := cast.ToBool(appOpts.Get(enableMetricsOptName))
+	reportMetricsIntervalSecs := cast.ToInt64(appOpts.Get(reportMetricsIntervalSecsOptName))
+	if reportMetricsIntervalSecs == 0 {
+		reportMetricsIntervalSecs = defaultReportMetricsIntervalSecs
+	}
+
+	return newRocksDBWithOptions("application", dir, dbOpts, cfOpts, enableMetrics, reportMetricsIntervalSecs)
 }

 // loadLatestOptions loads and returns database and column family options
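openRocksdb reads the two new settings through the generic appOpts.Get interface, which in a running node is backed by app.toml. A minimal sketch of supplying them programmatically, e.g. from a test harness; mapAppOptions is a hypothetical helper, not part of this commit:

// mapAppOptions is a hypothetical map-backed implementation of the
// cosmos-sdk server/types.AppOptions interface
type mapAppOptions map[string]interface{}

func (m mapAppOptions) Get(key string) interface{} { return m[key] }

// usage sketch:
//
//	opts := mapAppOptions{
//		"rocksdb.enable-metrics":               true,
//		"rocksdb.report-metrics-interval-secs": 30,
//	}
//	db, err := OpenDB(opts, homeDir, dbm.RocksDBBackend)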
@@ -128,7 +139,14 @@ func overrideCFOpts(cfOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {

 // newRocksDBWithOptions opens rocksdb with provided database and column family options
 // newRocksDBWithOptions expects that db has only one column family named default
-func newRocksDBWithOptions(name string, dir string, dbOpts, cfOpts *grocksdb.Options) (*dbm.RocksDB, error) {
+func newRocksDBWithOptions(
+	name string,
+	dir string,
+	dbOpts *grocksdb.Options,
+	cfOpts *grocksdb.Options,
+	enableMetrics bool,
+	reportMetricsIntervalSecs int64,
+) (*dbm.RocksDB, error) {
 	dbPath := filepath.Join(dir, name+".db")

 	// Ensure path exists
@@ -136,10 +154,21 @@ func newRocksDBWithOptions(name string, dir string, dbOpts, cfOpts *grocksdb.Options) (*dbm.RocksDB, error) {
 		return nil, fmt.Errorf("failed to create db path: %w", err)
 	}

+	// EnableStatistics adds overhead so shouldn't be enabled in production
+	if enableMetrics {
+		dbOpts.EnableStatistics()
+	}
+
 	db, _, err := grocksdb.OpenDbColumnFamilies(dbOpts, dbPath, []string{defaultColumnFamilyName}, []*grocksdb.Options{cfOpts})
 	if err != nil {
 		return nil, err
 	}
+
+	if enableMetrics {
+		registerMetrics()
+		go reportMetrics(db, time.Second*time.Duration(reportMetricsIntervalSecs))
+	}

 	ro := grocksdb.NewDefaultReadOptions()
 	wo := grocksdb.NewDefaultWriteOptions()
 	woSync := grocksdb.NewDefaultWriteOptions()
@@ -168,3 +197,42 @@ func newDefaultOptions() *grocksdb.Options {

 	return opts
 }
+
+// reportMetrics periodically requests stats from rocksdb and reports to prometheus
+// NOTE: should be launched as a goroutine
+func reportMetrics(db *grocksdb.DB, interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	for {
+		select {
+		case <-ticker.C:
+			props, stats, err := getPropsAndStats(db)
+			if err != nil {
+				continue
+			}
+
+			rocksdbMetrics.report(props, stats)
+		}
+	}
+}
+
+// getPropsAndStats gets statistics from rocksdb
+func getPropsAndStats(db *grocksdb.DB) (*properties, *stats, error) {
+	propsLoader := newPropsLoader(db)
+	props, err := propsLoader.load()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	statMap, err := parseSerializedStats(props.OptionsStatistics)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	statLoader := newStatLoader(statMap)
+	stats, err := statLoader.load()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return props, stats, nil
+}
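reportMetrics above never stops its ticker and lives for the whole process, which matches how it is launched from newRocksDBWithOptions. Where shutdown matters, a stoppable variant is easy to sketch (illustrative, not part of this commit; assumes the same opendb package plus a "context" import):

// sketch of a context-aware variant of reportMetrics (not part of this commit)
func reportMetricsUntilDone(ctx context.Context, db *grocksdb.DB, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop() // release the ticker once the loop exits
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			props, stats, err := getPropsAndStats(db)
			if err != nil {
				continue
			}
			rocksdbMetrics.report(props, stats)
		}
	}
}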
@@ -186,7 +186,7 @@ func TestLoadLatestOptions(t *testing.T) {
 			require.NoError(t, err)
 		}()

-		db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts)
+		db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts, true, defaultReportMetricsIntervalSecs)
 		require.NoError(t, err)
 		require.NoError(t, db.Close())

@@ -337,7 +337,7 @@ func TestNewRocksDBWithOptions(t *testing.T) {
 	cfOpts := newDefaultOptions()
 	cfOpts.SetWriteBufferSize(999_999)

-	db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts)
+	db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts, true, defaultReportMetricsIntervalSecs)
 	require.NoError(t, err)
 	require.NoError(t, db.Close())
cmd/kava/opendb/props_loader.go (new file, 87 lines)

@@ -0,0 +1,87 @@
//go:build rocksdb
// +build rocksdb

package opendb

import (
	"errors"
	"fmt"
	"strings"
)

type propsGetter interface {
	GetProperty(propName string) (value string)
	GetIntProperty(propName string) (value uint64, success bool)
}

type propsLoader struct {
	db        propsGetter
	errorMsgs []string
}

func newPropsLoader(db propsGetter) *propsLoader {
	return &propsLoader{
		db:        db,
		errorMsgs: make([]string, 0),
	}
}

func (l *propsLoader) load() (*properties, error) {
	props := &properties{
		BaseLevel:               l.getIntProperty("rocksdb.base-level"),
		BlockCacheCapacity:      l.getIntProperty("rocksdb.block-cache-capacity"),
		BlockCachePinnedUsage:   l.getIntProperty("rocksdb.block-cache-pinned-usage"),
		BlockCacheUsage:         l.getIntProperty("rocksdb.block-cache-usage"),
		CurSizeActiveMemTable:   l.getIntProperty("rocksdb.cur-size-active-mem-table"),
		CurSizeAllMemTables:     l.getIntProperty("rocksdb.cur-size-all-mem-tables"),
		EstimateLiveDataSize:    l.getIntProperty("rocksdb.estimate-live-data-size"),
		EstimateNumKeys:         l.getIntProperty("rocksdb.estimate-num-keys"),
		EstimateTableReadersMem: l.getIntProperty("rocksdb.estimate-table-readers-mem"),
		LiveSSTFilesSize:        l.getIntProperty("rocksdb.live-sst-files-size"),
		SizeAllMemTables:        l.getIntProperty("rocksdb.size-all-mem-tables"),
		OptionsStatistics:       l.getProperty("rocksdb.options-statistics"),
	}

	if len(l.errorMsgs) != 0 {
		errorMsg := strings.Join(l.errorMsgs, ";")
		return nil, errors.New(errorMsg)
	}

	return props, nil
}

func (l *propsLoader) getProperty(propName string) string {
	value := l.db.GetProperty(propName)
	if value == "" {
		l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("property %v is empty", propName))
		return ""
	}

	return value
}

func (l *propsLoader) getIntProperty(propName string) uint64 {
	value, ok := l.db.GetIntProperty(propName)
	if !ok {
		l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("can't get %v int property", propName))
		return 0
	}

	return value
}

type properties struct {
	BaseLevel               uint64
	BlockCacheCapacity      uint64
	BlockCachePinnedUsage   uint64
	BlockCacheUsage         uint64
	CurSizeActiveMemTable   uint64
	CurSizeAllMemTables     uint64
	EstimateLiveDataSize    uint64
	EstimateNumKeys         uint64
	EstimateTableReadersMem uint64
	LiveSSTFilesSize        uint64
	SizeAllMemTables        uint64
	OptionsStatistics       string
}
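propsLoader deliberately accumulates error messages instead of failing on the first missing property, so a single load() call surfaces every problem at once. Since Go 1.20 the standard library expresses the same accumulate-then-combine pattern with errors.Join; a sketch of the pattern (checkProps is a hypothetical helper, not part of this commit):

// sketch of the accumulate-then-join pattern with errors.Join (Go 1.20+);
// requires "errors" and "fmt"; checkProps is hypothetical
func checkProps(props map[string]string, names []string) error {
	var errs []error
	for _, name := range names {
		if props[name] == "" {
			errs = append(errs, fmt.Errorf("property %v is empty", name))
		}
	}
	return errors.Join(errs...) // nil when nothing was accumulated
}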
cmd/kava/opendb/props_loader_test.go (new file, 112 lines)

@@ -0,0 +1,112 @@
//go:build rocksdb
// +build rocksdb

package opendb

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type mockPropsGetter struct {
	props    map[string]string
	intProps map[string]uint64
}

func newMockPropsGetter(
	props map[string]string,
	intProps map[string]uint64,
) *mockPropsGetter {
	return &mockPropsGetter{
		props:    props,
		intProps: intProps,
	}
}

func (m *mockPropsGetter) GetProperty(propName string) string {
	return m.props[propName]
}

func (m *mockPropsGetter) GetIntProperty(propName string) (uint64, bool) {
	prop, ok := m.intProps[propName]
	return prop, ok
}

func TestPropsLoader(t *testing.T) {
	defaultProps := map[string]string{
		"rocksdb.options-statistics": "1",
	}
	defaultIntProps := map[string]uint64{
		"rocksdb.base-level":                 1,
		"rocksdb.block-cache-capacity":       2,
		"rocksdb.block-cache-pinned-usage":   3,
		"rocksdb.block-cache-usage":          4,
		"rocksdb.cur-size-active-mem-table":  5,
		"rocksdb.cur-size-all-mem-tables":    6,
		"rocksdb.estimate-live-data-size":    7,
		"rocksdb.estimate-num-keys":          8,
		"rocksdb.estimate-table-readers-mem": 9,
		"rocksdb.live-sst-files-size":        10,
		"rocksdb.size-all-mem-tables":        11,
	}
	missingProps := make(map[string]string)
	missingIntProps := make(map[string]uint64)
	defaultExpectedProps := properties{
		BaseLevel:               1,
		BlockCacheCapacity:      2,
		BlockCachePinnedUsage:   3,
		BlockCacheUsage:         4,
		CurSizeActiveMemTable:   5,
		CurSizeAllMemTables:     6,
		EstimateLiveDataSize:    7,
		EstimateNumKeys:         8,
		EstimateTableReadersMem: 9,
		LiveSSTFilesSize:        10,
		SizeAllMemTables:        11,
		OptionsStatistics:       "1",
	}

	for _, tc := range []struct {
		desc          string
		props         map[string]string
		intProps      map[string]uint64
		expectedProps *properties
		success       bool
	}{
		{
			desc:          "success case",
			props:         defaultProps,
			intProps:      defaultIntProps,
			expectedProps: &defaultExpectedProps,
			success:       true,
		},
		{
			desc:          "missing props",
			props:         missingProps,
			intProps:      defaultIntProps,
			expectedProps: nil,
			success:       false,
		},
		{
			desc:          "missing integer props",
			props:         defaultProps,
			intProps:      missingIntProps,
			expectedProps: nil,
			success:       false,
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			mockPropsGetter := newMockPropsGetter(tc.props, tc.intProps)

			propsLoader := newPropsLoader(mockPropsGetter)
			actualProps, err := propsLoader.load()
			if tc.success {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
			require.Equal(t, tc.expectedProps, actualProps)
		})
	}
}
cmd/kava/opendb/stat_parser.go (new file, 111 lines)

@@ -0,0 +1,111 @@
//go:build rocksdb
// +build rocksdb

package opendb

import (
	"errors"
	"fmt"
	"strings"
)

// stat represents one line from rocksdb statistics data; a stat may have one or more properties
// examples:
// - rocksdb.block.cache.miss COUNT : 5
// - rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
// `rocksdb.compaction.times.micros` is the name of the stat; P50, COUNT, SUM, etc. are its props
type stat struct {
	name  string
	props map[string]string
}

// parseSerializedStats parses serializedStats into a map of stat objects
// example of serializedStats:
// rocksdb.block.cache.miss COUNT : 5
// rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
func parseSerializedStats(serializedStats string) (map[string]*stat, error) {
	stats := make(map[string]*stat, 0)

	serializedStatList := strings.Split(serializedStats, "\n")
	if len(serializedStatList) == 0 {
		return nil, errors.New("serializedStats is empty")
	}
	serializedStatList = serializedStatList[:len(serializedStatList)-1]
	// iterate over stats line by line
	for _, serializedStat := range serializedStatList {
		stat, err := parseSerializedStat(serializedStat)
		if err != nil {
			return nil, err
		}

		stats[stat.name] = stat
	}

	return stats, nil
}

// parseSerializedStat parses serializedStat into a stat object
// example of serializedStat:
// rocksdb.block.cache.miss COUNT : 5
func parseSerializedStat(serializedStat string) (*stat, error) {
	tokens := strings.Split(serializedStat, " ")
	tokensNum := len(tokens)
	if err := validateTokens(tokens); err != nil {
		return nil, fmt.Errorf("tokens are invalid: %v", err)
	}

	props := make(map[string]string)
	for idx := 1; idx < tokensNum; idx += 3 {
		// should never happen, but double-check to avoid an unexpected panic
		if idx+2 >= tokensNum {
			break
		}

		key := tokens[idx]
		sep := tokens[idx+1]
		value := tokens[idx+2]

		if err := validateStatProperty(key, value, sep); err != nil {
			return nil, fmt.Errorf("invalid stat property: %v", err)
		}

		props[key] = value
	}

	return &stat{
		name:  tokens[0],
		props: props,
	}, nil
}

// validateTokens validates that tokens contains a name plus N (key, sep, value) triples
func validateTokens(tokens []string) error {
	tokensNum := len(tokens)
	if tokensNum < 4 {
		return fmt.Errorf("invalid number of tokens: %v, tokens: %v", tokensNum, tokens)
	}
	if (tokensNum-1)%3 != 0 {
		return fmt.Errorf("invalid number of tokens: %v, tokens: %v", tokensNum, tokens)
	}
	if tokens[0] == "" {
		return fmt.Errorf("stat name shouldn't be empty")
	}

	return nil
}

// validateStatProperty validates that key and value are divided by separator and aren't empty
func validateStatProperty(key, value, sep string) error {
	if key == "" {
		return fmt.Errorf("key shouldn't be empty")
	}
	if sep != ":" {
		return fmt.Errorf("separator should be :")
	}
	if value == "" {
		return fmt.Errorf("value shouldn't be empty")
	}

	return nil
}
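Note that parseSerializedStats drops the last element produced by splitting on "\n", so it expects input ending with a trailing newline, which is how rocksdb serializes its statistics (and how the tests below format theirs). A small usage sketch:

// usage sketch: the input ends with "\n", matching rocksdb's output format
statMap, err := parseSerializedStats("rocksdb.block.cache.miss COUNT : 5\n")
if err != nil {
	panic(err)
}
fmt.Println(statMap["rocksdb.block.cache.miss"].props["COUNT"]) // prints: 5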
cmd/kava/opendb/stat_parser_test.go (new file, 208 lines)

@@ -0,0 +1,208 @@
//go:build rocksdb
// +build rocksdb

package opendb

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseSerializedStats(t *testing.T) {
	defaultSerializedStats := `rocksdb.block.cache.miss COUNT : 1
rocksdb.block.cache.hit COUNT : 2
rocksdb.block.cache.add COUNT : 3
rocksdb.block.cache.add.failures COUNT : 4
rocksdb.compaction.times.micros P50 : 1 P95 : 2 P99 : 3 P100 : 4 COUNT : 5 SUM : 6
rocksdb.compaction.times.cpu_micros P50 : 7 P95 : 8 P99 : 9 P100 : 10 COUNT : 11 SUM : 12
`
	defaultExpectedStatMap := map[string]*stat{
		"rocksdb.block.cache.miss": {
			name: "rocksdb.block.cache.miss",
			props: map[string]string{
				"COUNT": "1",
			},
		},
		"rocksdb.block.cache.hit": {
			name: "rocksdb.block.cache.hit",
			props: map[string]string{
				"COUNT": "2",
			},
		},
		"rocksdb.block.cache.add": {
			name: "rocksdb.block.cache.add",
			props: map[string]string{
				"COUNT": "3",
			},
		},
		"rocksdb.block.cache.add.failures": {
			name: "rocksdb.block.cache.add.failures",
			props: map[string]string{
				"COUNT": "4",
			},
		},
		"rocksdb.compaction.times.micros": {
			name: "rocksdb.compaction.times.micros",
			props: map[string]string{
				"P50":   "1",
				"P95":   "2",
				"P99":   "3",
				"P100":  "4",
				"COUNT": "5",
				"SUM":   "6",
			},
		},
		"rocksdb.compaction.times.cpu_micros": {
			name: "rocksdb.compaction.times.cpu_micros",
			props: map[string]string{
				"P50":   "7",
				"P95":   "8",
				"P99":   "9",
				"P100":  "10",
				"COUNT": "11",
				"SUM":   "12",
			},
		},
	}

	for _, tc := range []struct {
		desc            string
		serializedStats string
		expectedStatMap map[string]*stat
		errMsg          string
	}{
		{
			desc:            "success case",
			serializedStats: defaultSerializedStats,
			expectedStatMap: defaultExpectedStatMap,
			errMsg:          "",
		},
		{
			desc: "missing value #1",
			serializedStats: `rocksdb.block.cache.miss COUNT :
`,
			expectedStatMap: nil,
			errMsg:          "invalid number of tokens",
		},
		{
			desc: "missing value #2",
			serializedStats: `rocksdb.compaction.times.micros P50 : 1 P95 :
`,
			expectedStatMap: nil,
			errMsg:          "invalid number of tokens",
		},
		{
			desc: "missing stat name",
			serializedStats: ` COUNT : 1
`,
			expectedStatMap: nil,
			errMsg:          "stat name shouldn't be empty",
		},
		{
			desc:            "empty stat",
			serializedStats: ``,
			expectedStatMap: make(map[string]*stat),
			errMsg:          "",
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			actualStatMap, err := parseSerializedStats(tc.serializedStats)
			if tc.errMsg == "" {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.errMsg)
			}
			require.Equal(t, tc.expectedStatMap, actualStatMap)
		})
	}
}

func TestValidateTokens(t *testing.T) {
	for _, tc := range []struct {
		desc   string
		tokens []string
		errMsg string
	}{
		{
			desc:   "success case",
			tokens: []string{"name", "key", ":", "value"},
			errMsg: "",
		},
		{
			desc:   "missing value #1",
			tokens: []string{"name", "key", ":"},
			errMsg: "invalid number of tokens",
		},
		{
			desc:   "missing value #2",
			tokens: []string{"name", "key", ":", "value", "key2", ":"},
			errMsg: "invalid number of tokens",
		},
		{
			desc:   "empty stat name",
			tokens: []string{"", "key", ":", "value"},
			errMsg: "stat name shouldn't be empty",
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			err := validateTokens(tc.tokens)
			if tc.errMsg == "" {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.errMsg)
			}
		})
	}
}

func TestValidateStatProperty(t *testing.T) {
	for _, tc := range []struct {
		desc   string
		key    string
		value  string
		sep    string
		errMsg string
	}{
		{
			desc:   "success case",
			key:    "key",
			value:  "value",
			sep:    ":",
			errMsg: "",
		},
		{
			desc:   "missing key",
			key:    "",
			value:  "value",
			sep:    ":",
			errMsg: "key shouldn't be empty",
		},
		{
			desc:   "missing value",
			key:    "key",
			value:  "",
			sep:    ":",
			errMsg: "value shouldn't be empty",
		},
		{
			desc:   "invalid separator",
			key:    "key",
			value:  "value",
			sep:    "#",
			errMsg: "separator should be :",
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			err := validateStatProperty(tc.key, tc.value, tc.sep)
			if tc.errMsg == "" {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.errMsg)
			}
		})
	}
}
cmd/kava/opendb/stats_loader.go (new file, 263 lines)

@@ -0,0 +1,263 @@
//go:build rocksdb
// +build rocksdb

package opendb

import (
	"fmt"
	"strconv"
)

const (
	sum   = "SUM"
	count = "COUNT"
	p50   = "P50"
	p95   = "P95"
	p99   = "P99"
	p100  = "P100"
)

type statLoader struct {
	// statMap contains a map of stat objects returned by the parseSerializedStats function
	// example of stats:
	// #1: rocksdb.block.cache.miss COUNT : 5
	// #2: rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
	// case #1 will be cast into an int64
	// case #2 will be cast into a float64Histogram
	statMap map[string]*stat

	// NOTE: some methods accumulate errors instead of returning them; these methods are private and not intended for use outside
	errors []error
}

func newStatLoader(statMap map[string]*stat) *statLoader {
	return &statLoader{
		statMap: statMap,
		errors:  make([]error, 0),
	}
}

type stats struct {
	NumberKeysWritten int64
	NumberKeysRead    int64
	NumberKeysUpdated int64

	// total block cache misses
	// BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
	//                     BLOCK_CACHE_FILTER_MISS +
	//                     BLOCK_CACHE_DATA_MISS;
	// BLOCK_CACHE_INDEX_MISS: # of times cache miss when accessing index block from block cache.
	// BLOCK_CACHE_FILTER_MISS: # of times cache miss when accessing filter block from block cache.
	// BLOCK_CACHE_DATA_MISS: # of times cache miss when accessing data block from block cache.
	BlockCacheMiss int64

	// total block cache hit
	// BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
	//                    BLOCK_CACHE_FILTER_HIT +
	//                    BLOCK_CACHE_DATA_HIT;
	// BLOCK_CACHE_INDEX_HIT: # of times cache hit when accessing index block from block cache.
	// BLOCK_CACHE_FILTER_HIT: # of times cache hit when accessing filter block from block cache.
	// BLOCK_CACHE_DATA_HIT: # of times cache hit when accessing data block from block cache.
	BlockCacheHit int64

	// # of blocks added to block cache.
	BlockCacheAdd int64
	// # of failures when adding blocks to block cache.
	BlockCacheAddFailures int64

	CompactReadBytes  int64 // Bytes read during compaction
	CompactWriteBytes int64 // Bytes written during compaction

	CompactionTimesMicros      *float64Histogram
	CompactionTimesCPUMicros   *float64Histogram
	NumFilesInSingleCompaction *float64Histogram

	// Read amplification statistics.
	// Read amplification can be calculated using this formula
	// (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
	//
	// REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
	// TODO(yevhenii): seems not working?
	ReadAmpEstimateUsefulBytes int64 // Estimate of total bytes actually used.
	ReadAmpTotalReadBytes      int64 // Total size of loaded data blocks.

	NumberFileOpens  int64
	NumberFileErrors int64

	// # of times bloom filter has avoided file reads, i.e., negatives.
	BloomFilterUseful int64
	// # of times bloom FullFilter has not avoided the reads.
	BloomFilterFullPositive int64
	// # of times bloom FullFilter has not avoided the reads and data actually
	// exist.
	BloomFilterFullTruePositive int64

	// # of memtable hits.
	MemtableHit int64
	// # of memtable misses.
	MemtableMiss int64

	// # of Get() queries served by L0
	GetHitL0 int64
	// # of Get() queries served by L1
	GetHitL1 int64
	// # of Get() queries served by L2 and up
	GetHitL2AndUp int64

	// The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
	// DB::Merge(), and DB::Write().
	BytesWritten int64
	// The number of uncompressed bytes read from DB::Get(). It could be
	// either from memtables, cache, or table files.
	// For the number of logical bytes read from DB::MultiGet(),
	// please use NUMBER_MULTIGET_BYTES_READ.
	BytesRead int64

	// Writer has to wait for compaction or flush to finish.
	StallMicros int64

	// Last level and non-last level read statistics
	LastLevelReadBytes    int64
	LastLevelReadCount    int64
	NonLastLevelReadBytes int64
	NonLastLevelReadCount int64

	DBGetMicros   *float64Histogram
	DBWriteMicros *float64Histogram

	// Value size distribution in each operation
	BytesPerRead     *float64Histogram
	BytesPerWrite    *float64Histogram
	BytesPerMultiget *float64Histogram

	// Time spent flushing memtable to disk
	FlushMicros *float64Histogram
}

type float64Histogram struct {
	Sum   float64
	Count float64
	P50   float64
	P95   float64
	P99   float64
	P100  float64
}

func (l *statLoader) error() error {
	if len(l.errors) != 0 {
		return fmt.Errorf("%v", l.errors)
	}

	return nil
}

func (l *statLoader) load() (*stats, error) {
	stats := &stats{
		NumberKeysWritten:           l.getInt64StatValue("rocksdb.number.keys.written", count),
		NumberKeysRead:              l.getInt64StatValue("rocksdb.number.keys.read", count),
		NumberKeysUpdated:           l.getInt64StatValue("rocksdb.number.keys.updated", count),
		BlockCacheMiss:              l.getInt64StatValue("rocksdb.block.cache.miss", count),
		BlockCacheHit:               l.getInt64StatValue("rocksdb.block.cache.hit", count),
		BlockCacheAdd:               l.getInt64StatValue("rocksdb.block.cache.add", count),
		BlockCacheAddFailures:       l.getInt64StatValue("rocksdb.block.cache.add.failures", count),
		CompactReadBytes:            l.getInt64StatValue("rocksdb.compact.read.bytes", count),
		CompactWriteBytes:           l.getInt64StatValue("rocksdb.compact.write.bytes", count),
		CompactionTimesMicros:       l.getFloat64HistogramStatValue("rocksdb.compaction.times.micros"),
		CompactionTimesCPUMicros:    l.getFloat64HistogramStatValue("rocksdb.compaction.times.cpu_micros"),
		NumFilesInSingleCompaction:  l.getFloat64HistogramStatValue("rocksdb.numfiles.in.singlecompaction"),
		ReadAmpEstimateUsefulBytes:  l.getInt64StatValue("rocksdb.read.amp.estimate.useful.bytes", count),
		ReadAmpTotalReadBytes:       l.getInt64StatValue("rocksdb.read.amp.total.read.bytes", count),
		NumberFileOpens:             l.getInt64StatValue("rocksdb.no.file.opens", count),
		NumberFileErrors:            l.getInt64StatValue("rocksdb.no.file.errors", count),
		BloomFilterUseful:           l.getInt64StatValue("rocksdb.bloom.filter.useful", count),
		BloomFilterFullPositive:     l.getInt64StatValue("rocksdb.bloom.filter.full.positive", count),
		BloomFilterFullTruePositive: l.getInt64StatValue("rocksdb.bloom.filter.full.true.positive", count),
		MemtableHit:                 l.getInt64StatValue("rocksdb.memtable.hit", count),
		MemtableMiss:                l.getInt64StatValue("rocksdb.memtable.miss", count),
		GetHitL0:                    l.getInt64StatValue("rocksdb.l0.hit", count),
		GetHitL1:                    l.getInt64StatValue("rocksdb.l1.hit", count),
		GetHitL2AndUp:               l.getInt64StatValue("rocksdb.l2andup.hit", count),
		BytesWritten:                l.getInt64StatValue("rocksdb.bytes.written", count),
		BytesRead:                   l.getInt64StatValue("rocksdb.bytes.read", count),
		StallMicros:                 l.getInt64StatValue("rocksdb.stall.micros", count),
		LastLevelReadBytes:          l.getInt64StatValue("rocksdb.last.level.read.bytes", count),
		LastLevelReadCount:          l.getInt64StatValue("rocksdb.last.level.read.count", count),
		NonLastLevelReadBytes:       l.getInt64StatValue("rocksdb.non.last.level.read.bytes", count),
		NonLastLevelReadCount:       l.getInt64StatValue("rocksdb.non.last.level.read.count", count),
		DBGetMicros:                 l.getFloat64HistogramStatValue("rocksdb.db.get.micros"),
		DBWriteMicros:               l.getFloat64HistogramStatValue("rocksdb.db.write.micros"),
		BytesPerRead:                l.getFloat64HistogramStatValue("rocksdb.bytes.per.read"),
		BytesPerWrite:               l.getFloat64HistogramStatValue("rocksdb.bytes.per.write"),
		BytesPerMultiget:            l.getFloat64HistogramStatValue("rocksdb.bytes.per.multiget"),
		FlushMicros:                 l.getFloat64HistogramStatValue("rocksdb.db.flush.micros"),
	}

	err := l.error()
	if err != nil {
		return nil, err
	}

	return stats, nil
}

// getFloat64HistogramStatValue converts a stat object into a float64Histogram
func (l *statLoader) getFloat64HistogramStatValue(statName string) *float64Histogram {
	return &float64Histogram{
		Sum:   l.getFloat64StatValue(statName, sum),
		Count: l.getFloat64StatValue(statName, count),
		P50:   l.getFloat64StatValue(statName, p50),
		P95:   l.getFloat64StatValue(statName, p95),
		P99:   l.getFloat64StatValue(statName, p99),
		P100:  l.getFloat64StatValue(statName, p100),
	}
}

// getInt64StatValue converts a property of a stat object into an int64
func (l *statLoader) getInt64StatValue(statName, propName string) int64 {
	stringVal := l.getStatValue(statName, propName)
	if stringVal == "" {
		l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName))
		return 0
	}

	intVal, err := strconv.ParseInt(stringVal, 10, 64)
	if err != nil {
		l.errors = append(l.errors, fmt.Errorf("can't parse int: %v", err))
		return 0
	}

	return intVal
}

// getFloat64StatValue converts a property of a stat object into a float64
func (l *statLoader) getFloat64StatValue(statName, propName string) float64 {
	stringVal := l.getStatValue(statName, propName)
	if stringVal == "" {
		l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName))
		return 0
	}

	floatVal, err := strconv.ParseFloat(stringVal, 64)
	if err != nil {
		l.errors = append(l.errors, fmt.Errorf("can't parse float: %v", err))
		return 0
	}

	return floatVal
}

// getStatValue gets a property of a stat object
func (l *statLoader) getStatValue(statName, propName string) string {
	stat, ok := l.statMap[statName]
	if !ok {
		l.errors = append(l.errors, fmt.Errorf("stat %v doesn't exist", statName))
		return ""
	}
	prop, ok := stat.props[propName]
	if !ok {
		l.errors = append(l.errors, fmt.Errorf("stat %v doesn't have %v property", statName, propName))
		return ""
	}

	return prop
}
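float64Histogram keeps the percentile snapshot exactly as rocksdb serializes it and leaves derived values to the consumer. For example, a mean can be computed from Sum and Count (a usage sketch, assuming stats came from a successful statLoader.load()):

// usage sketch: deriving a mean from the histogram snapshot
h := stats.CompactionTimesMicros
if h.Count > 0 {
	avgMicros := h.Sum / h.Count
	fmt.Printf("avg compaction time: %.0f us (p99: %.0f us)\n", avgMicros, h.P99)
}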
cmd/kava/opendb/stats_loader_test.go (new file, 80 lines)

@@ -0,0 +1,80 @@
//go:build rocksdb
// +build rocksdb

package opendb

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestStatsLoader(t *testing.T) {
	defaultStat := stat{
		props: map[string]string{
			"COUNT": "1",
		},
	}
	defaultHistogramStat := stat{
		props: map[string]string{
			"P50":   "1",
			"P95":   "2",
			"P99":   "3",
			"P100":  "4",
			"COUNT": "5",
			"SUM":   "6",
		},
	}
	defaultStatMap := map[string]*stat{
		"rocksdb.number.keys.written":              &defaultStat,
		"rocksdb.number.keys.read":                 &defaultStat,
		"rocksdb.number.keys.updated":              &defaultStat,
		"rocksdb.block.cache.miss":                 &defaultStat,
		"rocksdb.block.cache.hit":                  &defaultStat,
		"rocksdb.block.cache.add":                  &defaultStat,
		"rocksdb.block.cache.add.failures":         &defaultStat,
		"rocksdb.compact.read.bytes":               &defaultStat,
		"rocksdb.compact.write.bytes":              &defaultStat,
		"rocksdb.compaction.times.micros":          &defaultHistogramStat,
		"rocksdb.compaction.times.cpu_micros":      &defaultHistogramStat,
		"rocksdb.numfiles.in.singlecompaction":     &defaultHistogramStat,
		"rocksdb.read.amp.estimate.useful.bytes":   &defaultStat,
		"rocksdb.read.amp.total.read.bytes":        &defaultStat,
		"rocksdb.no.file.opens":                    &defaultStat,
		"rocksdb.no.file.errors":                   &defaultStat,
		"rocksdb.bloom.filter.useful":              &defaultStat,
		"rocksdb.bloom.filter.full.positive":       &defaultStat,
		"rocksdb.bloom.filter.full.true.positive":  &defaultStat,
		"rocksdb.memtable.hit":                     &defaultStat,
		"rocksdb.memtable.miss":                    &defaultStat,
		"rocksdb.l0.hit":                           &defaultStat,
		"rocksdb.l1.hit":                           &defaultStat,
		"rocksdb.l2andup.hit":                      &defaultStat,
		"rocksdb.bytes.written":                    &defaultStat,
		"rocksdb.bytes.read":                       &defaultStat,
		"rocksdb.stall.micros":                     &defaultStat,
		"rocksdb.last.level.read.bytes":            &defaultStat,
		"rocksdb.last.level.read.count":            &defaultStat,
		"rocksdb.non.last.level.read.bytes":        &defaultStat,
		"rocksdb.non.last.level.read.count":        &defaultStat,
		"rocksdb.db.get.micros":                    &defaultHistogramStat,
		"rocksdb.db.write.micros":                  &defaultHistogramStat,
		"rocksdb.bytes.per.read":                   &defaultHistogramStat,
		"rocksdb.bytes.per.write":                  &defaultHistogramStat,
		"rocksdb.bytes.per.multiget":               &defaultHistogramStat,
		"rocksdb.db.flush.micros":                  &defaultHistogramStat,
	}

	statLoader := newStatLoader(defaultStatMap)
	stats, err := statLoader.load()
	require.NoError(t, err)

	require.Equal(t, stats.NumberKeysWritten, int64(1))
	require.Equal(t, stats.NumberKeysRead, int64(1))
	require.Equal(t, stats.CompactionTimesMicros.P50, float64(1))
	require.Equal(t, stats.CompactionTimesMicros.P95, float64(2))
	require.Equal(t, stats.CompactionTimesMicros.P99, float64(3))
	require.Equal(t, stats.CompactionTimesMicros.P100, float64(4))
	require.Equal(t, stats.CompactionTimesMicros.Count, float64(5))
	require.Equal(t, stats.CompactionTimesMicros.Sum, float64(6))
}