diff --git a/.gitignore b/.gitignore
index 4e6417a74efe..d404dff495eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,5 +33,8 @@ hack/tls-setup/certs
/tools/local-tester/bridge/bridge
/tools/proto-annotations/proto-annotations
/tools/benchmark/benchmark
+/tools/rw-benchmark/rw-benchmark
+/tools/rw-benchmark/rw_benchmark.html
+/tools/rw-benchmark/*.csv
/out
/etcd-dump-logs
diff --git a/CHANGELOG/CHANGELOG-3.6.md b/CHANGELOG/CHANGELOG-3.6.md
index edd0ca37b21f..cb05db217c90 100644
--- a/CHANGELOG/CHANGELOG-3.6.md
+++ b/CHANGELOG/CHANGELOG-3.6.md
@@ -80,9 +80,10 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.6.0).
- Add [`etcd grpc-proxy start --endpoints-auto-sync-interval`](https://github.com/etcd-io/etcd/pull/14354) flag to enable and configure interval of auto sync of endpoints with server.
- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14308) flag to support adding configurable cipher list.
-### tools/benchmark
+### tools
-- [Add etcd client autoSync flag](https://github.com/etcd-io/etcd/pull/13416)
+- [Add etcd client autoSync flag for benchmark](https://github.com/etcd-io/etcd/pull/13416)
+- [Reimplement the rw-heatmaps tool in Go and rename it to rw-benchmark](https://github.com/etcd-io/etcd/pull/15060)
### Metrics, Monitoring
diff --git a/bill-of-materials.json b/bill-of-materials.json
index 9638e8e54aa2..3b7c4743cac8 100644
--- a/bill-of-materials.json
+++ b/bill-of-materials.json
@@ -8,6 +8,15 @@
}
]
},
+ {
+ "project": "github.com/ahrtr/gocontainer",
+ "licenses": [
+ {
+ "type": "MIT License",
+ "confidence": 1
+ }
+ ]
+ },
{
"project": "github.com/anishathalye/porcupine",
"licenses": [
@@ -125,6 +134,15 @@
}
]
},
+ {
+ "project": "github.com/go-echarts/go-echarts/v2",
+ "licenses": [
+ {
+ "type": "MIT License",
+ "confidence": 1
+ }
+ ]
+ },
{
"project": "github.com/go-logr/logr",
"licenses": [
diff --git a/go.mod b/go.mod
index 6457a5813b21..d391e9936472 100644
--- a/go.mod
+++ b/go.mod
@@ -15,10 +15,12 @@ replace (
)
require (
+ github.com/ahrtr/gocontainer v0.3.0
github.com/bgentry/speakeasy v0.1.0
github.com/cheggaaa/pb/v3 v3.1.2
github.com/coreos/go-semver v0.3.1
github.com/dustin/go-humanize v1.0.1
+ github.com/go-echarts/go-echarts/v2 v2.2.5-0.20211021024243-33ae1aa415d6
github.com/spf13/cobra v1.6.1
github.com/stretchr/testify v1.8.2
go.etcd.io/bbolt v1.3.7
diff --git a/go.sum b/go.sum
index d6511996e0c6..d61c33568b73 100644
--- a/go.sum
+++ b/go.sum
@@ -42,6 +42,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/ahrtr/gocontainer v0.3.0 h1:/4wM0VhaLEYZMoF6WT8ZHUmf2n9BVpCD3uMaKrA0iHY=
+github.com/ahrtr/gocontainer v0.3.0/go.mod h1:cQoR5/JTMoDNEkk5vGaohPZ+nnTQVB2nk2Y012WJWsM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -103,6 +105,8 @@ github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-echarts/go-echarts/v2 v2.2.5-0.20211021024243-33ae1aa415d6 h1:+p0u+1svKoBC2xS6GzpmcDHShkAGqD+wUQLpxIpygM0=
+github.com/go-echarts/go-echarts/v2 v2.2.5-0.20211021024243-33ae1aa415d6/go.mod h1:6TOomEztzGDVDkOSCFBq3ed7xOYfbOqhaBzD0YV771A=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -312,6 +316,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
diff --git a/scripts/build_lib.sh b/scripts/build_lib.sh
index 5fe6dd67e692..170137f45dc2 100755
--- a/scripts/build_lib.sh
+++ b/scripts/build_lib.sh
@@ -64,6 +64,7 @@ tools_build() {
out="bin"
if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi
tools_path="tools/benchmark
+ tools/rw-benchmark
tools/etcd-dump-db
tools/etcd-dump-logs
tools/local-tester/bridge"
diff --git a/tools/rw-benchmark/README.md b/tools/rw-benchmark/README.md
new file mode 100644
index 000000000000..0edb86389775
--- /dev/null
+++ b/tools/rw-benchmark/README.md
@@ -0,0 +1,62 @@
+# etcd/tools/rw-benchmark
+
+`etcd/tools/rw-benchmark` is the mixed read/write performance evaluation tool for etcd clusters.
+
+## Execute
+
+### Benchmark
+To get a mixed read/write performance evaluation result:
+```sh
+# run with default configurations and specify the working directory
+./rw-benchmark.sh -w ${WORKING_DIR}
+```
+`rw-benchmark.sh` will automatically use the etcd binary compiled under the `etcd/bin/tools` directory.
+
+Note: the result csv file is saved to the current directory, while the etcd database is stored in the specified working directory; the separate working directory option exists for scenarios where the database should live on a different mounted disk.
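+
+For reference, each `DATA` row in the result csv has the following shape (this sample mirrors the example documented in `plot_data.go`): record type, R/W ratio, number of connections, value size, then one `read_qps:write_qps` pair per repeat, and a trailing empty field:
+```
+DATA,0.007,32,16,245.3039:35907.7856,
+```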
+
+### Plot Graphs
+The `rw-benchmark` tool generates an HTML page containing line charts built from the benchmark result csv files. See the usage below:
+```sh
+$ ./rw-benchmark -h
+rw-benchmark is a tool for visualizing etcd read-write performance results.
+
+Usage:
+ rw-benchmark [options] result-file1.csv [result-file2.csv]
+
+Additional options:
+ -legend: Comma-separated names of legends, such as "main,pr"; defaults to "1" or "1,2" depending on the number of CSV files provided.
+ -layout: The layout of the page, valid values: none, center and flex; defaults to "flex".
+ -width: The width (in pixels) of each line chart; defaults to 600.
+ -height: The height (in pixels) of each line chart; defaults to 300.
+ -o: The HTML file name in which the benchmark data will be rendered; defaults to "rw_benchmark.html".
+ -h: Print usage.
+```
+
+See the examples below:
+```sh
+# Generate an HTML page where each chart includes one pair of read & write
+# benchmark results from a single data csv file.
+./rw-benchmark ${FIRST_CSV_FILE}
+
+# Generate an HTML page where each chart includes two pairs of read & write
+# benchmark results, one from each of the two data csv files.
+./rw-benchmark ${FIRST_CSV_FILE} ${SECOND_CSV_FILE}
+
+# Set the legend to "main,dev"
+./rw-benchmark -legend "main,dev" ${FIRST_CSV_FILE} ${SECOND_CSV_FILE}
+
+# Set the width and height of each line chart to 800px and 400px respectively
+./rw-benchmark -width 800 -height 400 ${FIRST_CSV_FILE} ${SECOND_CSV_FILE}
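+
+# Write the rendered page to a custom file ("my_benchmark.html" is only an example name)
+./rw-benchmark -o my_benchmark.html ${FIRST_CSV_FILE}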
+```
+
+Read QPS is displayed in blue, and write QPS in red.
+If a second CSV file is present, its data is rendered with dashed lines. See the example in [example/rw_benchmark.html](example/rw_benchmark.html).
+Note that each line in a chart can be hidden or shown by clicking its legend entry.
diff --git a/tools/rw-benchmark/plot_data.go b/tools/rw-benchmark/plot_data.go
new file mode 100644
index 000000000000..576950064c4e
--- /dev/null
+++ b/tools/rw-benchmark/plot_data.go
@@ -0,0 +1,501 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "encoding/csv"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/ahrtr/gocontainer/map/linkedmap"
+ "github.com/go-echarts/go-echarts/v2/charts"
+ "github.com/go-echarts/go-echarts/v2/components"
+ "github.com/go-echarts/go-echarts/v2/opts"
+)
+
+const (
+ minWidth = 400
+ minHeight = 200
+ defaultWidth = 600
+ defaultHeight = 300
+)
+
+type params struct {
+ csvFiles []string
+ outFile string
+
+	width  int // width (in pixels) of each line chart
+	height int // height (in pixels) of each line chart
+ legends []string
+ layout components.Layout
+}
+
+func usage() string {
+ return strings.TrimLeft(`
+rw-benchmark is a tool for visualizing etcd read-write performance results.
+
+Usage:
+ rw-benchmark [options] result-file1.csv [result-file2.csv]
+
+Additional options:
+ -legend: Comma-separated names of legends, such as "main,pr"; defaults to "1" or "1,2" depending on the number of CSV files provided.
+ -layout: The layout of the page, valid values: none, center and flex; defaults to "flex".
+ -width: The width (in pixels) of each line chart; defaults to 600.
+ -height: The height (in pixels) of each line chart; defaults to 300.
+ -o: The HTML file name in which the benchmark data will be rendered; defaults to "rw_benchmark.html".
+ -h: Print usage.
+`, "\n")
+}
+
+// parseParams parses all the options and arguments
+func parseParams(args ...string) (params, error) {
+ var ps params
+	// Require at least one argument.
+	//
+	// When users run a tool for the first time, they usually don't know
+	// how to use it and simply execute the command without any arguments,
+	// so it is friendlier to display the usage rather than an error
+	// message in this case.
+ if len(args) == 0 {
+ fmt.Fprintln(os.Stderr, usage())
+ os.Exit(2)
+ }
+
+ var (
+ help bool
+
+ legend string
+ layout string
+ width int
+ height int
+
+ outFile string
+
+ err error
+ )
+ fs := flag.NewFlagSet("", flag.ContinueOnError)
+ fs.BoolVar(&help, "h", false, "Print the usage")
+	fs.IntVar(&width, "width", defaultWidth, "The width (in pixels) of each line chart.")
+	fs.IntVar(&height, "height", defaultHeight, "The height (in pixels) of each line chart.")
+	fs.StringVar(&legend, "legend", "", "The comma-separated names of legends.")
+	fs.StringVar(&layout, "layout", "flex", "The layout of the page, valid values: none, center and flex.")
+ fs.StringVar(&outFile, "o", "rw_benchmark.html", "The HTML file name in which the benchmark data will be rendered.")
+ if err = fs.Parse(args); err != nil {
+ return ps, err
+ } else if help {
+ fmt.Fprint(os.Stderr, usage())
+ os.Exit(2)
+ }
+
+	// Exactly one or two csv files must be provided.
+ if fs.NArg() < 1 || fs.NArg() > 2 {
+ return ps, fmt.Errorf("unexpected number of arguments: %d, only 1 or 2 csv files are expected", fs.NArg())
+ }
+
+ if ps.layout, err = parseLayout(layout); err != nil {
+ return ps, err
+ }
+
+ ps.width = width
+ if ps.width < minWidth {
+ ps.width = minWidth
+ }
+ ps.height = height
+ if ps.height < minHeight {
+ ps.height = minHeight
+ }
+ ps.outFile = outFile
+
+ csvFile1 := fs.Arg(0)
+ ps.csvFiles = append(ps.csvFiles, csvFile1)
+ csvFile2 := fs.Arg(1)
+ if len(csvFile2) > 0 {
+ ps.csvFiles = append(ps.csvFiles, csvFile2)
+ }
+
+ if len(legend) > 0 {
+ ps.legends = strings.Split(legend, ",")
+ } else {
+		// If no legend is provided, default to "1" (and "2" for a second file).
+ for i := range ps.csvFiles {
+ ps.legends = append(ps.legends, fmt.Sprintf("%d", i+1))
+ }
+ }
+
+ if len(ps.legends) != len(ps.csvFiles) {
+ return ps, fmt.Errorf("the number of legends(%d) doesn't match the number of csv files(%d)", len(ps.legends), len(ps.csvFiles))
+ }
+
+ return ps, nil
+}
+
+func parseLayout(layout string) (components.Layout, error) {
+ switch layout {
+ case "none":
+ return components.PageNoneLayout, nil
+ case "center":
+ return components.PageCenterLayout, nil
+ case "flex":
+ return components.PageFlexLayout, nil
+ default:
+ return components.PageNoneLayout, fmt.Errorf("invalid layout %q", layout)
+ }
+}
+
+// loadCSV loads the data from a given csv file.
+//
+// The return value is a map with three levels:
+//
+// Level 1 (lmRatio): Ratio --> lmValueSize
+// Level 2 (lmValueSize): ValueSize --> lmConns
+// Level 3 (lmConns): ConnSize --> [2]uint64
+//
+// In the value (an array) of the third-level map, the first element is
+// the read QPS and the second is the write QPS.
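+//
+// For example, the row "DATA,0.007,32,16,245.3039:35907.7856," is stored as
+// 0.007 --> 16 --> 32 --> [245, 35907] (the QPS averages are truncated to uint64).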
+func loadCSV(filename string) (linkedmap.Interface, error) {
+ // Open the CSV file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open csv file %q, error: %w", filename, err)
+ }
+ defer f.Close()
+
+ // Read the CSV file
+ csvReader := csv.NewReader(f)
+ records, err := csvReader.ReadAll()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read csv file %q, error: %w", filename, err)
+ }
+
+ lmRatio := linkedmap.New()
+ // Parse the data
+ for i, rec := range records {
+		// When `REPEAT_COUNT` is 1, there are 6 fields in each record.
+ // Example:
+ // DATA,0.007,32,16,245.3039:35907.7856,
+ if len(rec) >= 6 && rec[0] == "DATA" {
+ // 0.007 in above example
+ ratio, err := strconv.ParseFloat(rec[1], 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse ratio %q at file %q:%d, error: %w", rec[1], filename, i, err)
+ }
+
+ // 32 in above example
+ conns, err := strconv.ParseUint(rec[2], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse conns %q at file %q:%d, error: %w", rec[2], filename, i, err)
+ }
+
+ // 16 in above example
+ valSize, err := strconv.ParseUint(rec[3], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse value_size %q at file %q:%d, error: %w", rec[3], filename, i, err)
+ }
+
+ // parse all the QPS values. Note: the last column is empty.
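+			// A record has 4 leading fields and 1 trailing empty field, so it holds len(rec)-5 qps pairs.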
+ var (
+ cnt = len(rec) - 5
+ avgReadQPS float64
+ avgWriteQPS float64
+ sumReadQPS float64
+ sumWriteQPS float64
+ )
+ for j := 4; j < len(rec)-1; j++ {
+ // 245.3039:35907.7856 in above example
+ qps := strings.Split(rec[j], ":")
+ if len(qps) != 2 {
+ return nil, fmt.Errorf("unexpected qps values %q at file %q:%d", rec[j], filename, i)
+ }
+ readQPS, err := strconv.ParseFloat(qps[0], 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse read qps %q at file %q:%d, error: %w", qps[0], filename, i, err)
+ }
+ sumReadQPS += readQPS
+ writeQPS, err := strconv.ParseFloat(qps[1], 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse write qps %q at file %q:%d, error: %w", qps[1], filename, i, err)
+ }
+ sumWriteQPS += writeQPS
+ }
+ avgReadQPS, avgWriteQPS = sumReadQPS/float64(cnt), sumWriteQPS/float64(cnt)
+
+ // Save the data into LinkedMap.
+ // The first level map: lmRatio
+ var (
+ lmValueSize linkedmap.Interface
+ lmConn linkedmap.Interface
+ )
+ lm := lmRatio.Get(ratio)
+ if lm == nil {
+ lmValueSize = linkedmap.New()
+ lmRatio.Put(ratio, lmValueSize)
+ } else {
+ lmValueSize = lm.(linkedmap.Interface)
+ }
+
+ // The second level map: lmValueSize
+ lm = lmValueSize.Get(valSize)
+ if lm == nil {
+ lmConn = linkedmap.New()
+ lmValueSize.Put(valSize, lmConn)
+ } else {
+ lmConn = lm.(linkedmap.Interface)
+ }
+
+ // The third level map: lmConns
+ lmConn.Put(conns, [2]uint64{uint64(avgReadQPS), uint64(avgWriteQPS)})
+ }
+ }
+ return lmRatio, nil
+}
+
+func loadData(files ...string) ([]linkedmap.Interface, error) {
+ var dataMaps []linkedmap.Interface
+ for _, f := range files {
+ lm, err := loadCSV(f)
+ if err != nil {
+ return nil, err
+ }
+ dataMaps = append(dataMaps, lm)
+ }
+
+ return dataMaps, nil
+}
+
+// convertBenchmarkData converts the benchmark data into a format
+// suitable for the line chart.
+func convertBenchmarkData(lmConn linkedmap.Interface) ([]uint64, []uint64, []uint64) {
+ var (
+ conns []uint64
+ rQPS []uint64
+ wQPS []uint64
+ )
+ it, hasNext := lmConn.Iterator()
+ var k, v interface{}
+ for hasNext {
+ k, v, hasNext = it()
+ connSize := k.(uint64)
+ rwQPS := v.([2]uint64)
+ conns = append(conns, connSize)
+ rQPS = append(rQPS, rwQPS[0])
+ wQPS = append(wQPS, rwQPS[1])
+ }
+
+ return conns, rQPS, wQPS
+}
+
+func generateLineData(qps []uint64) []opts.LineData {
+ items := make([]opts.LineData, 0)
+ for _, v := range qps {
+ items = append(items, opts.LineData{Value: v})
+ }
+ return items
+}
+
+// renderChart visualizes the benchmark data in a line chart.
+// Note:
+// 1. Each line chart corresponds to one ratio and valueSize combination.
+// 2. The data from both CSV files is rendered in one chart if the second file is present.
+func renderChart(page *components.Page, ratio float64, valueSize uint64, conns []uint64, rQPSs [][]uint64, wQPSs [][]uint64, ps params) {
+ // create a new line instance
+ line := charts.NewLine()
+
+ width := fmt.Sprintf("%dpx", ps.width)
+ height := fmt.Sprintf("%dpx", ps.height)
+ // set some global options like Title/Legend/ToolTip
+ line.SetGlobalOptions(
+ charts.WithInitializationOpts(opts.Initialization{
+ Width: width,
+ Height: height,
+ }),
+ charts.WithTitleOpts(opts.Title{
+ Title: fmt.Sprintf("R/W benchmark (RW Ratio: %g, Value Size: %d)", ratio, valueSize),
+ }),
+ charts.WithYAxisOpts(opts.YAxis{
+ Name: "QPS (Request/sec)",
+ }),
+ charts.WithXAxisOpts(opts.XAxis{
+ Name: "Connections",
+ }),
+ charts.WithTooltipOpts(opts.Tooltip{Show: true}),
+ charts.WithLegendOpts(opts.Legend{
+ Show: true,
+ Orient: "vertical",
+ Left: "right",
+ Top: "middle",
+ }),
+ charts.WithToolboxOpts(opts.Toolbox{
+ Show: true,
+ Orient: "horizontal",
+ Right: "100",
+ Feature: &opts.ToolBoxFeature{
+ SaveAsImage: &opts.ToolBoxFeatureSaveAsImage{
+ Show: true, Title: "Save as image"},
+ DataView: &opts.ToolBoxFeatureDataView{
+ Show: true,
+ Title: "Show as table",
+ Lang: []string{"Data view", "Turn off", "Refresh"},
+ },
+ }}))
+
+ // Set data for X axis
+ line.SetXAxis(conns)
+
+ // Render read QPS from the first CSV file
+ line.AddSeries(fmt.Sprintf("%s R", ps.legends[0]), generateLineData(rQPSs[0]),
+ charts.WithLineStyleOpts(opts.LineStyle{
+ Color: "Blue",
+ Type: "solid",
+ }))
+
+ // Render read QPS from the second CSV file
+ if len(rQPSs) > 1 {
+ line.AddSeries(fmt.Sprintf("%s R", ps.legends[1]), generateLineData(rQPSs[1]),
+ charts.WithLineStyleOpts(opts.LineStyle{
+ Color: "Blue",
+ Type: "dashed",
+ }))
+ }
+
+ // Render write QPS from the first CSV file
+ line.AddSeries(fmt.Sprintf("%s W", ps.legends[0]), generateLineData(wQPSs[0]),
+ charts.WithLineStyleOpts(opts.LineStyle{
+ Color: "Red",
+ Type: "solid",
+ }))
+
+ // Render write QPS from the second CSV file
+ if len(wQPSs) > 1 {
+ line.AddSeries(fmt.Sprintf("%s W", ps.legends[1]), generateLineData(wQPSs[1]),
+ charts.WithLineStyleOpts(opts.LineStyle{
+ Color: "Red",
+ Type: "dashed",
+ }))
+ }
+
+ page.AddCharts(line)
+}
+
+// renderPage renders all data in one HTML page, which may contain multiple
+// line charts, each of which is related to a read/write ratio and valueSize
+// combination.
+//
+// Each element in `dataMap` is a map with three levels; see the comment
+// on the `loadCSV` function.
+func renderPage(dataMap []linkedmap.Interface, ps params) error {
+ page := components.NewPage()
+ page.SetLayout(ps.layout)
+
+ it1, hasNext1 := dataMap[0].Iterator()
+ var k1, v1 interface{}
+ // Loop the first level map (lmRatio)
+ for hasNext1 {
+ k1, v1, hasNext1 = it1()
+
+ ratio := k1.(float64)
+ lmValueSize := v1.(linkedmap.Interface)
+
+ // Loop the second level map (lmValueSize)
+ it2, hasNext2 := lmValueSize.Iterator()
+ var k2, v2 interface{}
+ for hasNext2 {
+ k2, v2, hasNext2 = it2()
+ valueSize := k2.(uint64)
+ lmConn := v2.(linkedmap.Interface)
+
+ var (
+ conns []uint64
+ rQPSs [][]uint64
+ wQPSs [][]uint64
+ )
+ // Loop the third level map (lmConn) to convert the benchmark data
+ conns, rQPS1, wQPS1 := convertBenchmarkData(lmConn)
+ rQPSs = append(rQPSs, rQPS1)
+ wQPSs = append(wQPSs, wQPS1)
+
+ // Convert the related benchmark data in the second CSV file if present.
+ if len(dataMap) > 1 {
+ if lm1 := dataMap[1].Get(ratio); lm1 != nil {
+ lmValueSize2 := lm1.(linkedmap.Interface)
+
+ if lm2 := lmValueSize2.Get(valueSize); lm2 != nil {
+ lmConn2 := lm2.(linkedmap.Interface)
+ conn2, rQPS2, wQPS2 := convertBenchmarkData(lmConn2)
+ if reflect.DeepEqual(conns, conn2) {
+ rQPSs = append(rQPSs, rQPS2)
+ wQPSs = append(wQPSs, wQPS2)
+ } else {
+					fmt.Fprintf(os.Stderr, "[Ratio: %g, ValueSize: %d] ignoring the benchmark data in the second CSV file due to different conns, %v vs %v\n",
+ ratio, valueSize, conns, conn2)
+ }
+ } else {
+					fmt.Fprintf(os.Stderr, "[Ratio: %g, ValueSize: %d] ignoring the benchmark data in the second CSV file due to valueSize not found\n",
+ ratio, valueSize)
+ }
+ } else {
+				fmt.Fprintf(os.Stderr, "[Ratio: %g, ValueSize: %d] ignoring the benchmark data in the second CSV file due to ratio not found\n",
+ ratio, valueSize)
+ }
+ }
+
+ renderChart(page, ratio, valueSize, conns, rQPSs, wQPSs, ps)
+ }
+ }
+
+ f, err := os.Create(ps.outFile)
+ if err != nil {
+ return fmt.Errorf("failed to create file: %w", err)
+ }
+	defer f.Close()
+
+	return page.Render(io.MultiWriter(f))
+}
+
+func main() {
+ // parse CLI flags and arguments
+ ps, err := parseParams(os.Args[1:]...)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to parse the parameters: %v\n", err)
+ exit()
+ }
+
+ // load data of CSV files (1 or 2 files are expected)
+ dataMap, err := loadData(ps.csvFiles...)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to load data file(s): %v\n", err)
+ exit()
+ }
+
+ // render all data in one HTML page
+ if err = renderPage(dataMap, ps); err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to render data to HTML page: %v\n", err)
+ exit()
+ }
+}
+
+func exit() {
+ fmt.Fprintf(os.Stderr, "\n")
+ fmt.Fprintf(os.Stderr, "Run `rw-benchmark -h` to print usage info.\n")
+ os.Exit(1)
+}
diff --git a/tools/rw-heatmaps/rw-benchmark.sh b/tools/rw-benchmark/rw-benchmark.sh
similarity index 100%
rename from tools/rw-heatmaps/rw-benchmark.sh
rename to tools/rw-benchmark/rw-benchmark.sh
diff --git a/tools/rw-heatmaps/README.md b/tools/rw-heatmaps/README.md
deleted file mode 100644
index 893ea9871cd3..000000000000
--- a/tools/rw-heatmaps/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# etcd/tools/rw-heatmaps
-
-`etcd/tools/rw-heatmaps` is the mixed read/write performance evaluation tool for etcd clusters.
-
-## Execute
-
-### Benchmark
-To get a mixed read/write performance evaluation result:
-```sh
-# run with default configurations and specify the working directory
-./rw-benchmark.sh -w ${WORKING_DIR}
-```
-`rw-benchmark.sh` will automatically use the etcd binary compiled under `etcd/bin/` directory.
-
-Note: the result csv file will be saved to current working directory. The working directory is where etcd database is saved. The working directory is designed for scenarios where a different mounted disk is preferred.
-
-### Plot Graphs
-To generate two images (read and write) based on the benchmark result csv file:
-```sh
-# to generate a pair of read & write images from one data csv file
-./plot_data.py ${FIRST_CSV_FILE} -t ${IMAGE_TITLE} -o ${OUTPUT_IMAGE_NAME}
-
-
-# to generate a pair of read & write images by comparing two data csv files
-./plot_data.py ${FIRST_CSV_FILE} ${SECOND_CSV_FILE} -t ${IMAGE_TITLE} -o ${OUTPUT_IMAGE_NAME}
-```
diff --git a/tools/rw-heatmaps/plot_data.py b/tools/rw-heatmaps/plot_data.py
deleted file mode 100755
index 217eb6f8a8b1..000000000000
--- a/tools/rw-heatmaps/plot_data.py
+++ /dev/null
@@ -1,281 +0,0 @@
-#!/usr/bin/env python3
-import sys
-import os
-import argparse
-import logging
-import pandas as pd
-import numpy as np
-import matplotlib.pyplot as plt
-import matplotlib.colors as colors
-
-logging.basicConfig(format='[%(levelname)s %(asctime)s %(name)s] %(message)s')
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
-
-params = None
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='plot graph using mixed read/write result file.')
- parser.add_argument('input_file_a', type=str,
- help='first input data files in csv format. (required)')
- parser.add_argument('input_file_b', type=str, nargs='?',
- help='second input data files in csv format. (optional)')
- parser.add_argument('-t', '--title', dest='title', type=str, required=True,
- help='plot graph title string')
- parser.add_argument('-z', '--zero-centered', dest='zero', action='store_true', required=False,
- help='plot the improvement graph with white color represents 0.0',
- default=True)
- parser.add_argument('--no-zero-centered', dest='zero', action='store_false', required=False,
- help='plot the improvement graph without white color represents 0.0')
- parser.add_argument('-o', '--output-image-file', dest='output', type=str, required=True,
- help='output image filename')
- parser.add_argument('-F', '--output-format', dest='format', type=str, default='png',
- help='output image file format. default: jpg')
- return parser.parse_args()
-
-
-def load_data_files(*args):
- df_list = []
- try:
- for i in args:
- if i is not None:
- logger.debug('loading csv file {}'.format(i))
- df_list.append(pd.read_csv(i))
- except FileNotFoundError as e:
- logger.error(str(e))
- sys.exit(1)
- res = []
- try:
- for df in df_list:
- param_df = df[df['type'] == 'PARAM']
- param_str = ''
- if len(param_df) != 0:
- param_str = param_df['comment'].iloc[0]
- new_df = df[df['type'] == 'DATA'][[
- 'ratio', 'conn_size', 'value_size']].copy()
- cols = [x for x in df.columns if x.find('iter') != -1]
- tmp = [df[df['type'] == 'DATA'][x].str.split(':') for x in cols]
-
- read_df = [x.apply(lambda x: float(x[0])) for x in tmp]
- read_avg = sum(read_df) / len(read_df)
- new_df['read'] = read_avg
-
- write_df = [x.apply(lambda x: float(x[1])) for x in tmp]
- write_avg = sum(write_df) / len(write_df)
- new_df['write'] = write_avg
-
- new_df['ratio'] = new_df['ratio'].astype(float)
- new_df['conn_size'] = new_df['conn_size'].astype(int)
- new_df['value_size'] = new_df['value_size'].astype(int)
- res.append({
- 'dataframe': new_df,
- 'param': param_str
- })
- except Exception as e:
- logger.error(str(e))
- sys.exit(1)
- return res
-
-
-# This is copied directly from matplotlib source code. Some early versions of matplotlib
-# do not have CenteredNorm class
-class CenteredNorm(colors.Normalize):
-
- def __init__(self, vcenter=0, halfrange=None, clip=False):
- """
- Normalize symmetrical data around a center (0 by default).
-
- Unlike `TwoSlopeNorm`, `CenteredNorm` applies an equal rate of change
- around the center.
-
- Useful when mapping symmetrical data around a conceptual center
- e.g., data that range from -2 to 4, with 0 as the midpoint, and
- with equal rates of change around that midpoint.
-
- Parameters
- ----------
- vcenter : float, default: 0
- The data value that defines ``0.5`` in the normalization.
- halfrange : float, optional
- The range of data values that defines a range of ``0.5`` in the
- normalization, so that *vcenter* - *halfrange* is ``0.0`` and
- *vcenter* + *halfrange* is ``1.0`` in the normalization.
- Defaults to the largest absolute difference to *vcenter* for
- the values in the dataset.
-
- Examples
- --------
- This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0
- (assuming equal rates of change above and below 0.0):
-
- >>> import matplotlib.colors as mcolors
- >>> norm = mcolors.CenteredNorm(halfrange=4.0)
- >>> data = [-2., 0., 4.]
- >>> norm(data)
- array([0.25, 0.5 , 1. ])
- """
- self._vcenter = vcenter
- self.vmin = None
- self.vmax = None
- # calling the halfrange setter to set vmin and vmax
- self.halfrange = halfrange
- self.clip = clip
-
- def _set_vmin_vmax(self):
- """
- Set *vmin* and *vmax* based on *vcenter* and *halfrange*.
- """
- self.vmax = self._vcenter + self._halfrange
- self.vmin = self._vcenter - self._halfrange
-
- def autoscale(self, A):
- """
- Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*.
- """
- A = np.asanyarray(A)
- self._halfrange = max(self._vcenter-A.min(),
- A.max()-self._vcenter)
- self._set_vmin_vmax()
-
- def autoscale_None(self, A):
- """Set *vmin* and *vmax*."""
- A = np.asanyarray(A)
- if self._halfrange is None and A.size:
- self.autoscale(A)
-
- @property
- def vcenter(self):
- return self._vcenter
-
- @vcenter.setter
- def vcenter(self, vcenter):
- self._vcenter = vcenter
- if self.vmax is not None:
- # recompute halfrange assuming vmin and vmax represent
- # min and max of data
- self._halfrange = max(self._vcenter-self.vmin,
- self.vmax-self._vcenter)
- self._set_vmin_vmax()
-
- @property
- def halfrange(self):
- return self._halfrange
-
- @halfrange.setter
- def halfrange(self, halfrange):
- if halfrange is None:
- self._halfrange = None
- self.vmin = None
- self.vmax = None
- else:
- self._halfrange = abs(halfrange)
-
- def __call__(self, value, clip=None):
- if self._halfrange is not None:
- # enforce symmetry, reset vmin and vmax
- self._set_vmin_vmax()
- return super().__call__(value, clip=clip)
-
-
-# plot type is the type of the data to plot. Either 'read' or 'write'
-def plot_data(title, plot_type, cmap_name_default, *args):
- if len(args) == 1:
- fig_size = (12, 16)
- df0 = args[0]['dataframe']
- df0param = args[0]['param']
- fig = plt.figure(figsize=fig_size)
- count = 0
- for val, df in df0.groupby('ratio'):
- count += 1
- plt.subplot(4, 2, count)
- plt.tripcolor(df['conn_size'], df['value_size'], df[plot_type])
- plt.title('R/W Ratio {:.4f} [{:.2f}, {:.2f}]'.format(val, df[plot_type].min(),
- df[plot_type].max()))
- plt.yscale('log', base=2)
- plt.ylabel('Value Size')
- plt.xscale('log', base=2)
- plt.xlabel('Connections Amount')
- plt.colorbar()
- plt.tight_layout()
- fig.suptitle('{} [{}]\n{}'.format(title, plot_type.upper(), df0param))
- elif len(args) == 2:
- fig_size = (12, 26)
- df0 = args[0]['dataframe']
- df0param = args[0]['param']
- df1 = args[1]['dataframe']
- df1param = args[1]['param']
- fig = plt.figure(figsize=fig_size)
- col = 0
- delta_df = df1.copy()
- delta_df[[plot_type]] = ((df1[[plot_type]] - df0[[plot_type]]) /
- df0[[plot_type]]) * 100
- for tmp in [df0, df1, delta_df]:
- row = 0
- for val, df in tmp.groupby('ratio'):
- pos = row * 3 + col + 1
- plt.subplot(8, 3, pos)
- norm = None
- if col == 2:
- cmap_name = 'bwr'
- if params.zero:
- norm = CenteredNorm()
- else:
- cmap_name = cmap_name_default
- plt.tripcolor(df['conn_size'], df['value_size'], df[plot_type],
- norm=norm,
- cmap=plt.get_cmap(cmap_name))
- if row == 0:
- if col == 0:
- plt.title('{}\nR/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format(
- os.path.basename(params.input_file_a),
- val, df[plot_type].min(), df[plot_type].max()))
- elif col == 1:
- plt.title('{}\nR/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format(
- os.path.basename(params.input_file_b),
- val, df[plot_type].min(), df[plot_type].max()))
- elif col == 2:
- plt.title('Gain\nR/W Ratio {:.4f} [{:.2f}%, {:.2f}%]'.format(val, df[plot_type].min(),
- df[plot_type].max()))
- else:
- if col == 2:
- plt.title('R/W Ratio {:.4f} [{:.2f}%, {:.2f}%]'.format(val, df[plot_type].min(),
- df[plot_type].max()))
- else:
- plt.title('R/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format(val, df[plot_type].min(),
- df[plot_type].max()))
- plt.yscale('log', base=2)
- plt.ylabel('Value Size')
- plt.xscale('log', base=2)
- plt.xlabel('Connections Amount')
-
- if col == 2:
- plt.colorbar(format='%.2f%%')
- else:
- plt.colorbar()
- plt.tight_layout()
- row += 1
- col += 1
- fig.suptitle('{} [{}]\n{} {}\n{} {}'.format(
- title, plot_type.upper(), os.path.basename(params.input_file_a), df0param,
- os.path.basename(params.input_file_b), df1param))
- else:
- raise Exception('invalid plot input data')
- fig.subplots_adjust(top=0.93)
- plt.savefig("{}_{}.{}".format(params.output, plot_type,
- params.format), format=params.format)
-
-
-def main():
- global params
- logging.basicConfig()
- params = parse_args()
- result = load_data_files(params.input_file_a, params.input_file_b)
- for i in [('read', 'viridis'), ('write', 'plasma')]:
- plot_type, cmap_name = i
- plot_data(params.title, plot_type, cmap_name, *result)
-
-
-if __name__ == '__main__':
- main()