diff --git a/README.md b/README.md index ba2cbce7..804acb6b 100644 --- a/README.md +++ b/README.md @@ -9,20 +9,20 @@ S3 benchmarking tool. ## Build with source -Warp require minimum version is `go1.22`, please ensure you have compatible version for this build. +Warp requires minimum Go `go1.21`, please ensure you have compatible version for this build. You can follow easy step below to build project - Clone project ``` -git clone https://github.com/minio/warp.git +λ git clone https://github.com/minio/warp.git ``` - Change directory and build ``` -cd warp && go build +λ cd warp && go build ``` - To run a test, please run ``` -./warp [options] +λ ./warp [options] ``` # Configuration @@ -49,15 +49,28 @@ If your server is incompatible with [AWS v4 signatures](https://docs.aws.amazon. # Usage -`warp command [options]` +`λ warp command [options]` Example running a mixed type benchmark against 8 servers named `s3-server-1` to `s3-server-8` on port 9000 with the provided keys: -`warp mixed --host=s3-server{1...8}:9000 --access-key=minio --secret-key=minio123 --autoterm` +`λ warp mixed --host=s3-server{1...8}:9000 --access-key=minio --secret-key=minio123 --autoterm` This will run the benchmark for up to 5 minutes and print the results. +## YAML configuration + +As an alternative configuration option you can use an on-disk YAML configuration file. + +See [yml-samples](https://github.com/minio/warp/tree/master/yml-samples) for a collection of +configuration files for each benchmark type. + +To run a benchmark use `λ warp run `. + +Values can be injected from the commandline using one or multiple `-var VarName=Value`. +These values can be referenced inside YAML files with `{{.VarName}}`. +Go [text templates](https://pkg.go.dev/text/template) are used for this. + # Benchmarks All benchmarks operate concurrently. By default, 20 operations will run concurrently. @@ -110,7 +123,7 @@ WARNING: Never run warp clients on a publicly exposed port. Clients have the pot Clients are started with ``` -warp client [listenaddress:port] +λ warp client [listenaddress:port] ``` `warp client` Only accepts an optional host/ip to listen on, but otherwise no specific parameters. @@ -141,7 +154,7 @@ If no host port is specified the default is added. Example: ``` -warp get --duration=3m --warp-client=client-{1...10} --host=minio-server-{1...16} --access-key=minio --secret-key=minio123 +λ warp get --duration=3m --warp-client=client-{1...10} --host=minio-server-{1...16} --access-key=minio --secret-key=minio123 ``` Note that parameters apply to *each* client. @@ -309,8 +322,6 @@ will attempt to run `--concurrent` concurrent downloads. The analysis will include the upload stats as `PUT` operations and the `GET` operations. - - ``` Operation: GET * Average: 94.10 MiB/s, 9866.97 obj/s @@ -407,7 +418,7 @@ Since the object size is of little importance, only objects per second is report Example: ``` -$ warp stat --autoterm +λ warp stat --autoterm [...] 
------------------- Operation: STAT @@ -735,7 +746,7 @@ These are the data fields exported: | `ops_started` | Operations started within segment | | `ops_ended` | Operations ended within the segment | | `errors` | Errors logged on operations ending within the segment | -| `mb_per_sec` | MiB/s of operations within the segment (*distributed*) | +| `mb_per_sec` | MiB/s of operations within the segment (*distributed*) | | `ops_ended_per_sec` | Operations that ended within the segment per second | | `objs_per_sec` | Objects per second processed in the segment (*distributed*) | | `start_time` | Absolute start time of the segment | @@ -783,7 +794,7 @@ The usual analysis parameters can be applied to define segment lengths. ## Merging Benchmarks -It is possible to merge runs from several clients using the `warp merge (file1) (file2) [additional files...]` command. +It is possible to merge runs from several clients using the `λ warp merge (file1) (file2) [additional files...]` command. The command will output a combined data file with all data that overlap in time. diff --git a/cli/analyze.go b/cli/analyze.go index 80bedcff..073b2862 100644 --- a/cli/analyze.go +++ b/cli/analyze.go @@ -424,6 +424,12 @@ func writeSegs(ctx *cli.Context, wrSegs io.Writer, ops bench.Operations, allThre // Write segments per endpoint eps := ops.SortSplitByEndpoint() + if len(eps) == 1 { + cl := ops.SortSplitByClient() + if len(cl) > 1 { + eps = cl + } + } epsSorted := stringKeysSorted(eps) if details && len(eps) > 1 { for _, ep := range epsSorted { diff --git a/cli/benchmark.go b/cli/benchmark.go index e44ab15a..51cf323e 100644 --- a/cli/benchmark.go +++ b/cli/benchmark.go @@ -515,6 +515,8 @@ func checkBenchmark(ctx *cli.Context) { madmin.ProfilerBlock, madmin.ProfilerMutex, madmin.ProfilerTrace, + madmin.ProfilerCPUIO, + madmin.ProfilerThreads, } _, err := parseInfluxURL(ctx) diff --git a/cli/cli.go b/cli/cli.go index 4c34c889..8decd149 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -104,6 +104,7 @@ func init() { cmpCmd, mergeCmd, clientCmd, + runCmd, } appCmds = append(append(appCmds, a...), b...) benchCmds = a @@ -214,7 +215,7 @@ func registerApp(name string, appCmds []cli.Command) *cli.App { app.Commands = commands app.Author = "MinIO, Inc." app.Version = pkg.Version + " - " + pkg.ShortCommitID - app.Copyright = "(c) 2020-2023 MinIO, Inc." + app.Copyright = "(c) 2020-2024 MinIO, Inc." app.Compiled, _ = time.Parse(time.RFC3339, pkg.ReleaseTime) app.Flags = append(app.Flags, profileFlags...) app.Flags = append(app.Flags, globalFlags...) diff --git a/cli/client.go b/cli/client.go index d423d2fb..b90bdb96 100644 --- a/cli/client.go +++ b/cli/client.go @@ -187,7 +187,7 @@ func clientTransport(ctx *cli.Context) http.RoundTripper { }).DialContext, MaxIdleConnsPerHost: ctx.Int("concurrent"), WriteBufferSize: ctx.Int("sndbuf"), // Configure beyond 4KiB default buffer size. - ReadBufferSize: ctx.Int("sndbuf"), // Configure beyond 4KiB default buffer size. + ReadBufferSize: ctx.Int("rcvbuf"), // Configure beyond 4KiB default buffer size. IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 15 * time.Second, ExpectContinueTimeout: 10 * time.Second, diff --git a/cli/flags.go b/cli/flags.go index 7da08db7..31259e1a 100644 --- a/cli/flags.go +++ b/cli/flags.go @@ -105,10 +105,10 @@ var globalWG sync.WaitGroup // Set global states. NOTE: It is deliberately kept monolithic to ensure we dont miss out any flags. 
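+// Flag values are read with ctx.Bool (rather than ctx.IsSet) so that explicit
+// values such as --quiet=false are honored instead of only checking flag presence.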
func setGlobalsFromContext(ctx *cli.Context) error { - quiet := ctx.IsSet("quiet") - debug := ctx.IsSet("debug") - json := ctx.IsSet("json") - noColor := ctx.IsSet("no-color") + quiet := ctx.Bool("quiet") + debug := ctx.Bool("debug") + json := ctx.Bool("json") + noColor := ctx.Bool("no-color") setGlobals(quiet, debug, json, noColor) return nil } @@ -209,9 +209,16 @@ var ioFlags = []cli.Flag{ Usage: "Run this many concurrent operations per warp client", }, cli.IntFlag{ - Name: "sndbuf", - Value: 32 * 1024, // 32KiB up from 4KiB default - Usage: "specify custom read/write socket buffer size in bytes", + Name: "sndbuf", + Value: 32 * 1024, // 32KiB up from 4KiB default + Usage: "specify custom write socket buffer size in bytes", + Hidden: true, + }, + cli.IntFlag{ + Name: "rcvbuf", + Value: 32 * 1024, // 32KiB up from 4KiB default + Usage: "specify custom read socket buffer size in bytes", + Hidden: true, }, cli.BoolFlag{ Name: "noprefix", diff --git a/cli/mixed.go b/cli/mixed.go index f40ad72e..0d082d59 100644 --- a/cli/mixed.go +++ b/cli/mixed.go @@ -55,7 +55,7 @@ var mixedFlags = []cli.Flag{ }, cli.Float64Flag{ Name: "delete-distrib", - Usage: "The amount of DELETE operations. Must be at least the same as PUT.", + Usage: "The amount of DELETE operations. Must be same or lower than -put-distrib", Value: 10, }, } diff --git a/cli/run.go b/cli/run.go new file mode 100644 index 00000000..449398ce --- /dev/null +++ b/cli/run.go @@ -0,0 +1,304 @@ +/* + * Warp (C) 2019-2024 MinIO, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package cli + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "strings" + "text/template" + + "github.com/minio/cli" + "github.com/minio/mc/pkg/probe" + "gopkg.in/yaml.v3" +) + +var runCmd = cli.Command{ + Name: "run", + Usage: "run benchmark defined in YAML file", + Action: mainExec, + Before: setGlobalsFromContext, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "debug", + Usage: "enable debug logging before executing", + Hidden: true, + }, + cli.StringSliceFlag{ + Name: "var", + Usage: "Set variables for template replacement. Can be used multiple times. Example: ObjSize=1KB", + }, + }, + CustomHelpTemplate: `NAME: + {{.HelpName}} - {{.Usage}} + + Execute the benchmark as defined in YAML file. +USAGE: + {{.HelpName}} + -> see https://github.com/minio/warp#run + +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}`, +} + +// mainExec is the entry point for exe command. 
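+// It reads the YAML benchmark definition, applies any -var template
+// replacements, translates the document keys into flags for the selected
+// benchmark command and finally executes that command.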
+func mainExec(ctx *cli.Context) error { + var yFile []byte + switch ctx.NArg() { + case 1: + b, err := os.ReadFile(ctx.Args()[0]) + if err != nil { + fatal(probe.NewError(err), "error reading input file") + } + yFile = b + default: + fatal(errInvalidArgument(), "No YAML file specified") + } + + // Do template replacements + if vars := ctx.StringSlice("var"); len(vars) > 0 { + replacements := make(map[string]string) + for _, v := range vars { + idx := strings.Index(v, "=") + name := v[:idx] + replacements[name] = v[idx+1:] + } + t, err := template.New("benchmark").Parse(string(yFile)) + if err != nil { + fatal(probe.NewError(err), "error parsing template") + } + dst := new(bytes.Buffer) + err = t.ExecuteTemplate(dst, "benchmark", replacements) + if err != nil { + fatal(probe.NewError(err), "error parsing template") + } + yFile = dst.Bytes() + } + + // Unmarshal into map. + var doc map[string]any + err := yaml.Unmarshal(yFile, &doc) + if err != nil { + fatal(probe.NewError(err), "error parsing YAML file") + } + doc, ok := doc["warp"].(map[string]any) + if !ok { + fatal(probe.NewError(err), "Expected top level 'warp' element could not be found") + } + switch ver := mustGetString(doc, "api"); ver { + case "v1": + default: + fatal(probe.NewError(fmt.Errorf("unsupported api: %s", ver)), "Incompatible API version") + } + delete(doc, "api") + op := mustGetString(doc, "benchmark") + var benchCmd *cli.Command + for i, cmd := range benchCmds { + if cmd.Name == op { + benchCmd = &benchCmds[i] + break + } + } + if benchCmd == nil { + fatal(probe.NewError(fmt.Errorf("unknown benchmark: %s", op)), "Unknown benchmark") + } + delete(doc, "benchmark") + + // Rename input fields to commandline params: + rename := map[string]string{ + "sse-c-encrypt": "encrypt", + "analyze.verbose": "analyze.v", + "obj.part-size": "part.size", + "analyze.skip-duration": "analyze.skip", + "analyze.segment-duration": "analyze.dur", + "analyze.filter-op": "analyze.op", + "server-profile": "serverprof", + "bench-data": "benchdata", + "autoterm.enabled": "autoterm", + "obj.versions": "versions", + "obj.rand-size": "obj.randsize", + "no-prefix": "noprefix", + "no-clear": "noclear", + "distribution.get": "get-distrib", + "distribution.stat": "stat-distrib", + "distribution.put": "put-distrib", + "distribution.delete": "delete-distrib", + "obj.parts": "parts", + } + + // Allow some fields to be string lists + commaListed := map[string]bool{ + "server-profile": true, + "remote.host": true, + "warp-client": true, + } + + var prefixStack []string + var currDept []string + var flags = map[string]string{} + setFlag := func(key string, value any) { + name := strings.Join(append(prefixStack, key), ".") + ogName := strings.Join(append(currDept, key), ".") + + if alt := rename[name]; alt != "" { + name = alt + } + var flag cli.Flag + for _, f := range benchCmd.Flags { + if strings.Split(f.GetName(), ",")[0] == name { + flag = f + break + } + } + if value == nil { + if globalDebug { + fmt.Printf("%s => --%s=(default)\n", ogName, name) + } + // Ignore unset values + return + } + if flag == nil { + section := strings.Join(currDept, ".") + fatal(probe.NewError(fmt.Errorf("unknown key: %q in section %q", name, section)), "Unknown benchmark flag") + } + + var err error + var setFlag []byte + switch flag.(type) { + case cli.BoolFlag: + if v, ok := value.(bool); !ok { + err = fmt.Errorf("value of %s must be a bool", ogName) + } else { + setFlag, err = json.Marshal(v) + } + case cli.StringFlag, cli.DurationFlag: + var wasList bool + if 
commaListed[ogName] { + if v, ok := value.([]any); ok { + wasList = true + var all []string + for i, v := range v { + value, ok := v.(string) + if !ok { + err = fmt.Errorf("value of %s item %d must be a string", ogName, i+1) + break + } + all = append(all, value) + } + setFlag = []byte(strings.Join(all, ",")) + } + } + if !wasList { + if v, ok := value.(string); !ok { + err = fmt.Errorf("value of %s must be a string, was %T", ogName, value) + } else { + setFlag = []byte(v) + } + } + case cli.IntFlag, cli.Float64Flag, cli.UintFlag, cli.Uint64Flag: + switch v := value.(type) { + case float64, int: + setFlag, err = json.Marshal(v) + default: + err = fmt.Errorf("value of %s must be a number, was %T", ogName, value) + } + + default: + err = fmt.Errorf("unknown flag type %T for key %s", flag, ogName) + } + if err != nil { + fatal(probe.NewError(err), "error parsing config") + return + } + if _, ok := flags[name]; ok { + fatal(probe.NewError(fmt.Errorf("duplicate benchmark flag: %s", ogName)), "duplicate benchmark flag") + } + flags[name] = string(setFlag) + if globalDebug { + fmt.Printf("%s => --%s=%s\n", ogName, name, string(setFlag)) + } + } + parseDoc(doc, &prefixStack, &currDept, setFlag) + + // Reconstruct command + app := registerApp("warp", benchCmds) + fs, err := flagSet(benchCmd.Name, benchCmd.Flags, nil) + if err != nil { + fatal(probe.NewError(err), "error setting flags") + } + ctx2 := cli.NewContext(app, fs, ctx) + ctx2.Command = *benchCmd + for k, v := range flags { + err := ctx2.Set(k, v) + if err != nil { + err := fmt.Errorf("parsing parameters (%v:%v): %w", k, v, err) + fatal(probe.NewError(err), "error setting flags") + } + } + + return runCommand(ctx2, benchCmd) +} + +func parseDoc(doc map[string]any, prefixStack, printStack *[]string, setFlag func(key string, value any)) { + push := func(stack *[]string, s string) func() { + v := *stack + v = append(v, s) + *stack = v + return func() { + v := *stack + v = v[:len(v)-1] + *stack = v + } + } + for k, v := range doc { + switch k { + case "analyze", "obj", "autoterm", "distribution": + // These automatically adds the prefix to the flag name. + pop := push(prefixStack, k) + pop2 := push(printStack, k) + for k, v := range v.(map[string]any) { + setFlag(k, v) + } + pop() + pop2() + + case "io", "remote", "params", "advanced": + // These are just added to the top level. 
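+			// The flag prefix is not extended for these sections; only the printed
+			// path is, so error messages and --debug output still show the section.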
+ pop := push(printStack, k) + parseDoc(v.(map[string]any), prefixStack, printStack, setFlag) + pop() + default: + setFlag(k, v) + } + } +} + +func mustGetString(m map[string]any, key string) string { + v, ok := m[key] + if !ok { + fatal(probe.NewError(fmt.Errorf("value of '%s' not found", key)), "Missing key") + } + val, ok := v.(string) + if !ok { + fatal(probe.NewError(fmt.Errorf("value of '%s' must be a string", key)), "Invalid type") + } + return val +} diff --git a/go.mod b/go.mod index 39374cfd..23a100c7 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/secure-io/sio-go v0.3.1 golang.org/x/net v0.25.0 golang.org/x/time v0.5.0 + gopkg.in/yaml.v3 v3.0.1 ) require ( diff --git a/pkg/aggregate/aggregate.go b/pkg/aggregate/aggregate.go index 083f2b5c..53e79b63 100644 --- a/pkg/aggregate/aggregate.go +++ b/pkg/aggregate/aggregate.go @@ -134,6 +134,12 @@ func Aggregate(o bench.Operations, opts Options) Aggregated { } eps := o.SortSplitByEndpoint() + if len(eps) == 1 { + cl := ops.SortSplitByClient() + if len(cl) > 1 { + eps = cl + } + } a.MixedThroughputByHost = make(map[string]Throughput, len(eps)) var wg sync.WaitGroup var mu sync.Mutex @@ -229,6 +235,12 @@ func Aggregate(o bench.Operations, opts Options) Aggregated { } eps := allOps.SortSplitByEndpoint() + if len(eps) == 1 { + cl := ops.SortSplitByClient() + if len(cl) > 1 { + eps = cl + } + } a.ThroughputByHost = make(map[string]Throughput, len(eps)) var epMu sync.Mutex var epWg sync.WaitGroup diff --git a/pkg/aggregate/requests.go b/pkg/aggregate/requests.go index 9e285dd2..bddfb7a4 100644 --- a/pkg/aggregate/requests.go +++ b/pkg/aggregate/requests.go @@ -233,13 +233,21 @@ func RequestAnalysisSingleSized(o bench.Operations, allThreads bool) *SingleSize res.fillFirstLast(o) res.HostNames = o.Endpoints() res.ByHost = RequestAnalysisHostsSingleSized(o) - + if len(res.HostNames) != len(res.ByHost) { + res.HostNames = o.ClientIDs() + } return &res } // RequestAnalysisHostsSingleSized performs host analysis where all objects have equal size. 
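+// If all operations report a single endpoint, the result is instead split by
+// client ID when more than one client contributed operations.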
func RequestAnalysisHostsSingleSized(o bench.Operations) map[string]SingleSizedRequests { eps := o.SortSplitByEndpoint() + if len(eps) == 1 { + cl := o.SortSplitByClient() + if len(cl) > 1 { + eps = cl + } + } res := make(map[string]SingleSizedRequests, len(eps)) var wg sync.WaitGroup var mu sync.Mutex @@ -276,6 +284,9 @@ func RequestAnalysisMultiSized(o bench.Operations, allThreads bool) *MultiSizedR res.fill(active) res.ByHost = RequestAnalysisHostsMultiSized(active) res.HostNames = active.Endpoints() + if len(res.HostNames) != len(res.ByHost) { + res.HostNames = o.ClientIDs() + } return &res } @@ -283,6 +294,12 @@ func RequestAnalysisMultiSized(o bench.Operations, allThreads bool) *MultiSizedR func RequestAnalysisHostsMultiSized(o bench.Operations) map[string]RequestSizeRange { start, end := o.TimeRange() eps := o.SortSplitByEndpoint() + if len(eps) == 1 { + cl := o.SortSplitByClient() + if len(cl) > 1 { + eps = cl + } + } res := make(map[string]RequestSizeRange, len(eps)) var wg sync.WaitGroup var mu sync.Mutex diff --git a/pkg/bench/multipart.go b/pkg/bench/multipart.go index 43ca7ac0..69065708 100644 --- a/pkg/bench/multipart.go +++ b/pkg/bench/multipart.go @@ -80,8 +80,11 @@ func (g *Multipart) Prepare(ctx context.Context) error { var wg sync.WaitGroup wg.Add(g.Concurrency) g.addCollector() - objs := splitObjs(g.CreateParts, g.Concurrency) - + obj := make(chan int, g.CreateParts) + for i := 0; i < g.CreateParts; i++ { + obj <- i + g.PartStart + } + close(obj) rcv := g.Collector.rcv var groupErr error var mu sync.Mutex @@ -89,8 +92,8 @@ func (g *Multipart) Prepare(ctx context.Context) error { if g.Custom == nil { g.Custom = make(map[string]string, g.CreateParts) } - for i, obj := range objs { - go func(i int, obj []struct{}) { + for i := 0; i < g.Concurrency; i++ { + go func(i int) { defer wg.Done() src := g.Source() opts := g.PutOpts @@ -161,7 +164,7 @@ func (g *Multipart) Prepare(ctx context.Context) error { mu.Unlock() rcv <- op } - }(i, obj) + }(i) } wg.Wait() return groupErr diff --git a/pkg/bench/ops.go b/pkg/bench/ops.go index 2236e7a5..5eb7891b 100644 --- a/pkg/bench/ops.go +++ b/pkg/bench/ops.go @@ -222,6 +222,25 @@ func (o Operations) SortByEndpoint() { }) } +// SortByClient will sort the operations by client. +// Earliest operations first. +func (o Operations) SortByClient() { + if sort.SliceIsSorted(o, func(i, j int) bool { + if o[i].ClientID == o[j].ClientID { + return o[i].Start.Before(o[j].Start) + } + return o[i].ClientID < o[j].ClientID + }) { + return + } + sort.Slice(o, func(i, j int) bool { + if o[i].ClientID == o[j].ClientID { + return o[i].Start.Before(o[j].Start) + } + return o[i].ClientID < o[j].ClientID + }) +} + // SortByOpType will sort the operations by operation type. // Earliest operations first. func (o Operations) SortByOpType() { @@ -366,6 +385,30 @@ func (o Operations) SortSplitByEndpoint() map[string]Operations { return dst } +// SortSplitByClient will sort operations by endpoint and split by host. +func (o Operations) SortSplitByClient() map[string]Operations { + clients := o.Clients() + o.SortByClient() + dst := make(map[string]Operations, clients) + cl := "" + start := 0 + for i, op := range o { + if op.ClientID == cl { + continue + } + if cl != "" { + dst[cl] = o[start:i] + } + cl = op.ClientID + start = i + } + if cl != "" { + dst[cl] = o[start:] + } + + return dst +} + // SortSplitByOpType will sort operations by op + start time and split by op. 
func (o Operations) SortSplitByOpType() map[string]Operations { o.SortByOpType() @@ -848,6 +891,22 @@ func (o Operations) Endpoints() []string { return dst } +func (o Operations) ClientIDs() []string { + if len(o) == 0 { + return nil + } + found := make(map[string]struct{}, 1) + for _, op := range o { + found[op.ClientID] = struct{}{} + } + dst := make([]string, 0, len(found)) + for k := range found { + dst = append(dst, k) + } + sort.Strings(dst) + return dst +} + // Errors returns the errors found. func (o Operations) Errors() []string { if len(o) == 0 { diff --git a/pkg/generator/options.go b/pkg/generator/options.go index 749c9822..24d200be 100644 --- a/pkg/generator/options.go +++ b/pkg/generator/options.go @@ -99,7 +99,7 @@ func WithSize(n int64) Option { // WithRandomSize will randomize the size from 1 byte to the total size set. func WithRandomSize(b bool) Option { return func(o *Options) error { - if o.totalSize > 0 && o.totalSize < 256 { + if b && o.totalSize > 0 && o.totalSize < 256 { return errors.New("WithRandomSize: Random sized objects should be at least 256 bytes") } o.randSize = b diff --git a/yml-samples/delete.yml b/yml-samples/delete.yml new file mode 100644 index 00000000..a6737ba3 --- /dev/null +++ b/yml-samples/delete.yml @@ -0,0 +1,196 @@ +warp: + api: v1 + + # Benchmark to run. + # Corresponds to warp [benchmark] command. + benchmark: delete + + # Do not print any output. + quiet: false + + # Disable terminal color output. + no-color: false + + # Print results and errors as JSON. + json: false + + # Output benchmark+profile data to this file. + # By default a unique filename is generated. + bench-data: + + # Connect to warp clients and run benchmarks there. + # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking + # Can be a single value or a list. + warp-client: + + # Run MinIO server profiling during benchmark; + # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'. + # Can be single value or a list. + server-profile: + + # Remote host parameters and connection info. + remote: + # Specify custom region + region: us-east-1 + + # Access key and Secret key + access-key: 'Q3AM3UQ867SPQQA43P2F' + secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' + + # Specify one or more hosts. + # The benchmark will be run against all hosts concurrently. + # Multiple servers can be specified with elipsis notation; + # for example '10.0.0.{1...10}:9000' specifies 10 hosts. + # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts + host: + - 'play.min.io' + + # Use TLS for calls. + tls: true + + # Allow TLS with unverified certificates. + insecure: false + + # Stream benchmark statistics to Influx DB instance. + # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output + influxdb: '' + + # Bucket to use for benchmark data. + # + # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET! + # + # By default, 'warp-benchmark-bucket' will be created or used. + bucket: + + # params specifies the benchmark parameters. + # The fields here depend on the benchmark type. + params: + # Duration to run the benchmark. + # Use 's' and 'm' to specify seconds and minutes. + duration: 5m + + # Concurrent operations to run per warp instance. + concurrent: 8 + + # The number of objects to upload before starting the benchmark. + # Upload enough objects to ensure that any remote caching is bypassed. + objects: 32000 + + # Number of DELETE operations per batch. + batch: 100 + + # Properties of uploaded objects. 
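+    # Any value in this file can use Go template syntax, e.g. 'size: {{.ObjSize}}',
+    # filled in at run time with: warp run -var ObjSize=1KiB yml-samples/delete.yml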
+ obj: + # Size of each uploaded object + size: 1KiB + + # Randomize the size of each object within certain constraints. + # See https://github.com/minio/warp?tab=readme-ov-file#random-file-sizes + rand-size: false + + # Force specific size of each multipart part. + # Must be '5MB' or bigger. + part-size: + + # Use automatic termination when traffic stabilizes. + # Can not be used with distributed warp setup. + # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination + autoterm: + enabled: false + dur: 10s + pct: 7.5 + + # Instead of preparing the bench by PUTing some objects, + # only use objects already in the bucket. + # Does not perform any deletes before or after benchmark. + list-existing: false + + # When using list-existing, do not use recursive listing + list-flat: false + + # Do not clear bucket before or after running benchmarks. + no-clear: false + + # Leave benchmark data. Do not run cleanup after benchmark. + # Bucket will still be cleaned prior to benchmark. + keep-data: false + + + # The io section specifies custom IO properties for uploaded objects. + io: + # Use a custom prefix + prefix: + + # Do not use separate prefix for each thread + no-prefix: false + + # Add MD5 sum to uploads + md5: false + + # Disable multipart uploads + disable-multipart: false + + # Disable calculating sha256 on client side for uploads + disable-sha256-payload: false + + # Server-side sse-s3 encrypt/decrypt objects + sse-s3-encrypt: false + + # Encrypt/decrypt objects (using server-side encryption with random keys) + sse-c-encrypt: false + + # Override storage class. + # Default storage class will be used unless specified. + storage-class: + + analyze: + # Display additional analysis data. + verbose: false + # Only output for this host. + host: '' + # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc. + filter-op: '' + # Split analysis into durations of this length. + # Can be '1s', '5s', '1m', etc. + segment-duration: + # Output aggregated data as to file. + out: + # Additional time duration to skip when analyzing data. + skip-duration: + # Max operations to load for analysis. + limit: + # Skip this number of operations before starting analysis. + offset: + + advanced: + # Stress test only and discard output. + stress: false + + # Print requests. + debug: false + + # Disable HTTP Keep-Alive + disable-http-keepalive: false + + # Enable HTTP2 support if server supports it + http2: false + + # Rate limit each instance to this number of requests per second + rps-limit: + + # Host selection algorithm. + # Can be 'weighed' or 'roundrobin' + host-select: weighed + + # "Resolve the host(s) ip(s) (including multiple A/AAAA records). + # This can break SSL certificates, use --insecure if so + resolve-host: false + + # Specify custom write socket buffer size in bytes + sndbuf: 32768 + + # Specify custom read socket buffer size in bytes + rcvbuf: 32768 + + # When running benchmarks open a webserver to fetch results remotely, eg: localhost:7762 + serve: diff --git a/yml-samples/get.yml b/yml-samples/get.yml new file mode 100644 index 00000000..36bce56d --- /dev/null +++ b/yml-samples/get.yml @@ -0,0 +1,203 @@ +warp: + api: v1 + + # Benchmark to run. + # Corresponds to warp [benchmark] command. + benchmark: get + + # Do not print any output. + quiet: false + + # Disable terminal color output. + no-color: false + + # Print results and errors as JSON. + json: false + + # Output benchmark+profile data to this file. + # By default a unique filename is generated. 
+ bench-data: + + # Connect to warp clients and run benchmarks there. + # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking + # Can be a single value or a list. + warp-client: + + # Run MinIO server profiling during benchmark; + # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'. + # Can be single value or a list. + server-profile: + + # Remote host parameters and connection info. + remote: + # Specify custom region + region: us-east-1 + + # Access key and Secret key + access-key: 'Q3AM3UQ867SPQQA43P2F' + secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' + + # Specify one or more hosts. + # The benchmark will be run against all hosts concurrently. + # Multiple servers can be specified with elipsis notation; + # for example '10.0.0.{1...10}:9000' specifies 10 hosts. + # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts + host: + - 'play.min.io' + + # Use TLS for calls. + tls: true + + # Allow TLS with unverified certificates. + insecure: false + + # Stream benchmark statistics to Influx DB instance. + # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output + influxdb: '' + + # Bucket to use for benchmark data. + # + # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET! + # + # By default, 'warp-benchmark-bucket' will be created or used. + bucket: + + # params specifies the benchmark parameters. + # The fields here depend on the benchmark type. + params: + # Duration to run the benchmark. + # Use 's' and 'm' to specify seconds and minutes. + duration: 1m + + # Concurrent operations to run per warp instance. + concurrent: 8 + + # The number of objects to upload before starting the benchmark. + # Upload enough objects to ensure that any remote caching is bypassed. + objects: 1000 + + # Properties of uploaded objects. + obj: + # Size of each uploaded object + size: 100KiB + + # Number of versions to upload of each object + versions: 2 + + # Randomize the size of each object within certain constraints. + # See https://github.com/minio/warp?tab=readme-ov-file#random-file-sizes + rand-size: false + + # Force specific size of each multipart part. + # Must be '5MB' or bigger. + part-size: + + # Use automatic termination when traffic stabilizes. + # Can not be used with distributed warp setup. + # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination + autoterm: + enabled: false + dur: 10s + pct: 7.5 + + # Do RANGE get operations. Will request with random offset and length. + range: false + + # Use a fixed range size while doing random range offsets. + # Sizes can be '1KB', '2MB', etc. 'range' is implied. + range-size: + + # Instead of preparing the bench by PUTing some objects, + # only use objects already in the bucket. + # Does not perform any deletes before or after benchmark. + list-existing: false + + # When using list-existing, do not use recursive listing + list-flat: false + + # Do not clear bucket before or after running benchmarks. + no-clear: false + + # Leave benchmark data. Do not run cleanup after benchmark. + # Bucket will still be cleaned prior to benchmark. + keep-data: false + + + # The io section specifies custom IO properties for uploaded objects. 
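+  # Keys in this section are passed to warp as top-level flags,
+  # for example no-prefix corresponds to --noprefix and md5 to --md5.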
+ io: + # Use a custom prefix + prefix: + + # Do not use separate prefix for each thread + no-prefix: false + + # Add MD5 sum to uploads + md5: false + + # Disable multipart uploads + disable-multipart: false + + # Disable calculating sha256 on client side for uploads + disable-sha256-payload: false + + # Server-side sse-s3 encrypt/decrypt objects + sse-s3-encrypt: false + + # Encrypt/decrypt objects (using server-side encryption with random keys) + sse-c-encrypt: false + + # Override storage class. + # Default storage class will be used unless specified. + storage-class: + + analyze: + # Display additional analysis data. + verbose: false + # Only output for this host. + host: '' + # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc. + filter-op: '' + # Split analysis into durations of this length. + # Can be '1s', '5s', '1m', etc. + segment-duration: + # Output aggregated data as to file. + out: + # Additional time duration to skip when analyzing data. + skip-duration: + # Max operations to load for analysis. + limit: + # Skip this number of operations before starting analysis. + offset: + + advanced: + # Stress test only and discard output. + stress: false + + # Print requests. + debug: false + + # Disable HTTP Keep-Alive + disable-http-keepalive: false + + # Enable HTTP2 support if server supports it + http2: false + + # Rate limit each instance to this number of requests per second + rps-limit: + + # Host selection algorithm. + # Can be 'weighed' or 'roundrobin' + host-select: weighed + + # "Resolve the host(s) ip(s) (including multiple A/AAAA records). + # This can break SSL certificates, use --insecure if so + resolve-host: false + + # Specify custom write socket buffer size in bytes + sndbuf: 32768 + + # Specify custom read socket buffer size in bytes + rcvbuf: 32768 + + # When running benchmarks open a webserver to fetch results remotely, eg: localhost:7762 + serve: diff --git a/yml-samples/list.yml b/yml-samples/list.yml new file mode 100644 index 00000000..4921a5be --- /dev/null +++ b/yml-samples/list.yml @@ -0,0 +1,192 @@ +warp: + api: v1 + + # Benchmark to run. + # Corresponds to warp [benchmark] command. + benchmark: list + + # Do not print any output. + quiet: false + + # Disable terminal color output. + no-color: false + + # Print results and errors as JSON. + json: false + + # Output benchmark+profile data to this file. + # By default a unique filename is generated. + bench-data: + + # Connect to warp clients and run benchmarks there. + # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking + # Can be a single value or a list. + warp-client: + + # Run MinIO server profiling during benchmark; + # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'. + # Can be single value or a list. + server-profile: + + # Remote host parameters and connection info. + remote: + # Specify custom region + region: us-east-1 + + # Access key and Secret key + access-key: 'Q3AM3UQ867SPQQA43P2F' + secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' + + # Specify one or more hosts. + # The benchmark will be run against all hosts concurrently. + # Multiple servers can be specified with elipsis notation; + # for example '10.0.0.{1...10}:9000' specifies 10 hosts. + # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts + host: + - 'play.min.io' + + # Use TLS for calls. + tls: true + + # Allow TLS with unverified certificates. + insecure: false + + # Stream benchmark statistics to Influx DB instance. 
+ # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output + influxdb: '' + + # Bucket to use for benchmark data. + # + # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET! + # + # By default, 'warp-benchmark-bucket' will be created or used. + bucket: + + # params specifies the benchmark parameters. + # The fields here depend on the benchmark type. + params: + # Duration to run the benchmark. + # Use 's' and 'm' to specify seconds and minutes. + duration: 1m + + # Concurrent operations to run per warp instance. + concurrent: 8 + + # The number of objects to upload before starting the benchmark. + # Upload enough objects to ensure that any remote caching is bypassed. + objects: 10000 + + # Enable extended MinIO ListObjects with metadata, + # by default this benchmarking uses ListObjectsV2 API + metadata: false + + # Properties of uploaded objects. + obj: + # Size of each uploaded object + size: 1B + + # Number of versions to upload of each object + versions: 2 + + # Randomize the size of each object within certain constraints. + # See https://github.com/minio/warp?tab=readme-ov-file#random-file-sizes + rand-size: false + + # Force specific size of each multipart part. + # Must be '5MB' or bigger. + part-size: + + # Use automatic termination when traffic stabilizes. + # Can not be used with distributed warp setup. + # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination + autoterm: + enabled: false + dur: 10s + pct: 7.5 + + # Do not clear bucket before or after running benchmarks. + no-clear: false + + # Leave benchmark data. Do not run cleanup after benchmark. + # Bucket will still be cleaned prior to benchmark. + keep-data: false + + + # The io section specifies custom IO properties for uploaded objects. + io: + # Use a custom prefix + prefix: + + # Do not use separate prefix for each thread + no-prefix: false + + # Add MD5 sum to uploads + md5: false + + # Disable multipart uploads + disable-multipart: false + + # Disable calculating sha256 on client side for uploads + disable-sha256-payload: false + + # Server-side sse-s3 encrypt/decrypt objects + sse-s3-encrypt: false + + # Encrypt/decrypt objects (using server-side encryption with random keys) + sse-c-encrypt: false + + # Override storage class. + # Default storage class will be used unless specified. + storage-class: + + analyze: + # Display additional analysis data. + verbose: false + # Only output for this host. + host: '' + # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc. + filter-op: '' + # Split analysis into durations of this length. + # Can be '1s', '5s', '1m', etc. + segment-duration: + # Output aggregated data as to file. + out: + # Additional time duration to skip when analyzing data. + skip-duration: + # Max operations to load for analysis. + limit: + # Skip this number of operations before starting analysis. + offset: + + advanced: + # Stress test only and discard output. + stress: false + + # Print requests. + debug: false + + # Disable HTTP Keep-Alive + disable-http-keepalive: false + + # Enable HTTP2 support if server supports it + http2: false + + # Rate limit each instance to this number of requests per second + rps-limit: + + # Host selection algorithm. + # Can be 'weighed' or 'roundrobin' + host-select: weighed + + # "Resolve the host(s) ip(s) (including multiple A/AAAA records). 
+ # This can break SSL certificates, use --insecure if so + resolve-host: false + + # Specify custom write socket buffer size in bytes + sndbuf: 32768 + + # Specify custom read socket buffer size in bytes + rcvbuf: 32768 + + # When running benchmarks open a webserver to fetch results remotely, eg: localhost:7762 + serve: diff --git a/yml-samples/mixed.yml b/yml-samples/mixed.yml new file mode 100644 index 00000000..74599535 --- /dev/null +++ b/yml-samples/mixed.yml @@ -0,0 +1,193 @@ +warp: + api: v1 + + # Benchmark to run. + # Corresponds to warp [benchmark] command. + benchmark: mixed + + # Do not print any output. + quiet: false + + # Disable terminal color output. + no-color: false + + # Print results and errors as JSON. + json: false + + # Output benchmark+profile data to this file. + # By default a unique filename is generated. + bench-data: + + # Connect to warp clients and run benchmarks there. + # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking + # Can be a single value or a list. + warp-client: + + # Run MinIO server profiling during benchmark; + # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'. + # Can be single value or a list. + server-profile: + + # Remote host parameters and connection info. + remote: + # Specify custom region + region: us-east-1 + + # Access key and Secret key + access-key: 'Q3AM3UQ867SPQQA43P2F' + secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' + + # Specify one or more hosts. + # The benchmark will be run against all hosts concurrently. + # Multiple servers can be specified with elipsis notation; + # for example '10.0.0.{1...10}:9000' specifies 10 hosts. + # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts + host: + - 'play.min.io' + + # Use TLS for calls. + tls: true + + # Allow TLS with unverified certificates. + insecure: false + + # Stream benchmark statistics to Influx DB instance. + # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output + influxdb: '' + + # Bucket to use for benchmark data. + # + # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET! + # + # By default, 'warp-benchmark-bucket' will be created or used. + bucket: + + # params specifies the benchmark parameters. + # The fields here depend on the benchmark type. + params: + # Duration to run the benchmark. + # Use 's' and 'm' to specify seconds and minutes. + duration: 1m + + # Concurrent operations to run per warp instance. + concurrent: 8 + + # The number of objects to upload before starting the benchmark. + # Upload enough objects to ensure that any remote caching is bypassed. + objects: 1000 + + # Adjust the distribution of each operation type + # The final distribution will be determined by the fraction of each value of the total. + distribution: + get: 45.0 + stat: 30.0 + put: 15.0 + delete: 10.0 # Must be same or lower than 'put'. + + # Properties of uploaded objects. + obj: + # Size of each uploaded object + size: 100KiB + + # Randomize the size of each object within certain constraints. + # See https://github.com/minio/warp?tab=readme-ov-file#random-file-sizes + rand-size: false + + # Force specific size of each multipart part. + # Must be '5MB' or bigger. + part-size: + + # Use automatic termination when traffic stabilizes. + # Can not be used with distributed warp setup. + # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination + autoterm: + enabled: false + dur: 10s + pct: 7.5 + + # Do not clear bucket before or after running benchmarks. 
+ no-clear: false + + # Leave benchmark data. Do not run cleanup after benchmark. + # Bucket will still be cleaned prior to benchmark. + keep-data: false + + + # The io section specifies custom IO properties for uploaded objects. + io: + # Use a custom prefix + prefix: + + # Do not use separate prefix for each thread + no-prefix: false + + # Add MD5 sum to uploads + md5: false + + # Disable multipart uploads + disable-multipart: false + + # Disable calculating sha256 on client side for uploads + disable-sha256-payload: false + + # Server-side sse-s3 encrypt/decrypt objects + sse-s3-encrypt: false + + # Encrypt/decrypt objects (using server-side encryption with random keys) + sse-c-encrypt: false + + # Override storage class. + # Default storage class will be used unless specified. + storage-class: + + analyze: + # Display additional analysis data. + verbose: false + # Only output for this host. + host: '' + # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc. + filter-op: '' + # Split analysis into durations of this length. + # Can be '1s', '5s', '1m', etc. + segment-duration: + # Output aggregated data as to file. + out: + # Additional time duration to skip when analyzing data. + skip-duration: + # Max operations to load for analysis. + limit: + # Skip this number of operations before starting analysis. + offset: + + advanced: + # Stress test only and discard output. + stress: false + + # Print requests. + debug: false + + # Disable HTTP Keep-Alive + disable-http-keepalive: false + + # Enable HTTP2 support if server supports it + http2: false + + # Rate limit each instance to this number of requests per second + rps-limit: + + # Host selection algorithm. + # Can be 'weighed' or 'roundrobin' + host-select: weighed + + # "Resolve the host(s) ip(s) (including multiple A/AAAA records). + # This can break SSL certificates, use --insecure if so + resolve-host: false + + # Specify custom write socket buffer size in bytes + sndbuf: 32768 + + # Specify custom read socket buffer size in bytes + rcvbuf: 32768 + + # When running benchmarks open a webserver to fetch results remotely, eg: localhost:7762 + serve: diff --git a/yml-samples/multipart.yml b/yml-samples/multipart.yml new file mode 100644 index 00000000..1bfcec7d --- /dev/null +++ b/yml-samples/multipart.yml @@ -0,0 +1,181 @@ +warp: + api: v1 + + # Benchmark to run. + # Corresponds to warp [benchmark] command. + benchmark: multipart + + # Do not print any output. + quiet: false + + # Disable terminal color output. + no-color: false + + # Print results and errors as JSON. + json: false + + # Output benchmark+profile data to this file. + # By default a unique filename is generated. + bench-data: + + # Connect to warp clients and run benchmarks there. + # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking + # Can be a single value or a list. + warp-client: + + # Run MinIO server profiling during benchmark; + # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'. + # Can be single value or a list. + server-profile: + + # Remote host parameters and connection info. + remote: + # Specify custom region + region: us-east-1 + + # Access key and Secret key + access-key: 'Q3AM3UQ867SPQQA43P2F' + secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' + + # Specify one or more hosts. + # The benchmark will be run against all hosts concurrently. + # Multiple servers can be specified with elipsis notation; + # for example '10.0.0.{1...10}:9000' specifies 10 hosts. 
+ # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts + host: + - 'play.min.io' + + # Use TLS for calls. + tls: true + + # Allow TLS with unverified certificates. + insecure: false + + # Stream benchmark statistics to Influx DB instance. + # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output + influxdb: '' + + # Bucket to use for benchmark data. + # + # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET! + # + # By default, 'warp-benchmark-bucket' will be created or used. + bucket: + + # params specifies the benchmark parameters. + # The fields here depend on the benchmark type. + params: + # Duration to run the benchmark. + # Use 's' and 'm' to specify seconds and minutes. + duration: 1m + + # Concurrent operations to run per warp instance. + concurrent: 8 + + # Properties of uploaded object. + obj: + # Object name + name: 'warp-multipart.bin' + + # Parts to add per warp client + parts: 16 + + # Size of each multipart part. + # Size of each part. Can be a number or MiB/GiB. + # Must be greater than or equal to 5MiB. + part-size: 5MiB + + # Use automatic termination when traffic stabilizes. + # Can not be used with distributed warp setup. + # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination + autoterm: + enabled: false + dur: 10s + pct: 7.5 + + # Do not clear bucket before or after running benchmarks. + no-clear: false + + # Leave benchmark data. Do not run cleanup after benchmark. + # Bucket will still be cleaned prior to benchmark. + keep-data: false + + + # The io section specifies custom IO properties for uploaded objects. + io: + # Use a custom prefix + prefix: + + # Do not use separate prefix for each thread + no-prefix: false + + # Add MD5 sum to uploads + md5: false + + # Disable multipart uploads + disable-multipart: false + + # Disable calculating sha256 on client side for uploads + disable-sha256-payload: false + + # Server-side sse-s3 encrypt/decrypt objects + sse-s3-encrypt: false + + # Encrypt/decrypt objects (using server-side encryption with random keys) + sse-c-encrypt: false + + # Override storage class. + # Default storage class will be used unless specified. + storage-class: + + analyze: + # Display additional analysis data. + verbose: false + # Only output for this host. + host: '' + # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc. + filter-op: '' + # Split analysis into durations of this length. + # Can be '1s', '5s', '1m', etc. + segment-duration: + # Output aggregated data as to file. + out: + # Additional time duration to skip when analyzing data. + skip-duration: + # Max operations to load for analysis. + limit: + # Skip this number of operations before starting analysis. + offset: + + advanced: + # Stress test only and discard output. + stress: false + + # Print requests. + debug: false + + # Disable HTTP Keep-Alive + disable-http-keepalive: false + + # Enable HTTP2 support if server supports it + http2: false + + # Rate limit each instance to this number of requests per second + rps-limit: + + # Host selection algorithm. + # Can be 'weighed' or 'roundrobin' + host-select: weighed + + # "Resolve the host(s) ip(s) (including multiple A/AAAA records). 
+ # This can break SSL certificates, use --insecure if so + resolve-host: false + + # Specify custom write socket buffer size in bytes + sndbuf: 32768 + + # Specify custom read socket buffer size in bytes + rcvbuf: 32768 + + # When running benchmarks open a webserver to fetch results remotely, eg: localhost:7762 + serve: diff --git a/yml-samples/put.yml b/yml-samples/put.yml new file mode 100644 index 00000000..d9d8417f --- /dev/null +++ b/yml-samples/put.yml @@ -0,0 +1,181 @@ +warp: + api: v1 + + # Benchmark to run. + # Corresponds to warp [benchmark] command. + benchmark: put + + # Do not print any output. + quiet: false + + # Disable terminal color output. + no-color: false + + # Print results and errors as JSON. + json: false + + # Output benchmark+profile data to this file. + # By default a unique filename is generated. + bench-data: + + # Connect to warp clients and run benchmarks there. + # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking + # Can be a single value or a list. + warp-client: + + # Run MinIO server profiling during benchmark; + # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'. + # Can be single value or a list. + server-profile: + + # Remote host parameters and connection info. + remote: + # Specify custom region + region: us-east-1 + + # Access key and Secret key + access-key: 'Q3AM3UQ867SPQQA43P2F' + secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' + + # Specify one or more hosts. + # The benchmark will be run against all hosts concurrently. + # Multiple servers can be specified with elipsis notation; + # for example '10.0.0.{1...10}:9000' specifies 10 hosts. + # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts + host: + - 'play.min.io' + + # Use TLS for calls. + tls: true + + # Allow TLS with unverified certificates. + insecure: false + + # Stream benchmark statistics to Influx DB instance. + # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output + influxdb: '' + + # Bucket to use for benchmark data. + # + # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET! + # + # By default, 'warp-benchmark-bucket' will be created or used. + bucket: + + # params specifies the benchmark parameters. + # The fields here depend on the benchmark type. + params: + # Duration to run the benchmark. + # Use 's' and 'm' to specify seconds and minutes. + duration: 1m + + # Concurrent operations to run per warp instance. + concurrent: 8 + + # Properties of uploaded objects. + obj: + # Size of each uploaded object + size: 100KiB + + # Randomize the size of each object within certain constraints. + # See https://github.com/minio/warp?tab=readme-ov-file#random-file-sizes + rand-size: false + + # Force specific size of each multipart part. + # Must be '5MB' or bigger. + part-size: + + # Use automatic termination when traffic stabilizes. + # Can not be used with distributed warp setup. + # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination + autoterm: + enabled: false + dur: 10s + pct: 7.5 + + # Do not clear bucket before or after running benchmarks. + no-clear: false + + # Leave benchmark data. Do not run cleanup after benchmark. + # Bucket will still be cleaned prior to benchmark. + keep-data: false + + + # The io section specifies custom IO properties for uploaded objects. 
+ io: + # Use a custom prefix + prefix: + + # Do not use separate prefix for each thread + no-prefix: false + + # Add MD5 sum to uploads + md5: false + + # Disable multipart uploads + disable-multipart: false + + # Disable calculating sha256 on client side for uploads + disable-sha256-payload: false + + # Server-side sse-s3 encrypt/decrypt objects + sse-s3-encrypt: false + + # Encrypt/decrypt objects (using server-side encryption with random keys) + sse-c-encrypt: false + + # Override storage class. + # Default storage class will be used unless specified. + storage-class: + + analyze: + # Display additional analysis data. + verbose: false + # Only output for this host. + host: '' + # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc. + filter-op: '' + # Split analysis into durations of this length. + # Can be '1s', '5s', '1m', etc. + segment-duration: + # Output aggregated data as to file. + out: + # Additional time duration to skip when analyzing data. + skip-duration: + # Max operations to load for analysis. + limit: + # Skip this number of operations before starting analysis. + offset: + + advanced: + # Stress test only and discard output. + stress: false + + # Print requests. + debug: false + + # Disable HTTP Keep-Alive + disable-http-keepalive: false + + # Enable HTTP2 support if server supports it + http2: false + + # Rate limit each instance to this number of requests per second + rps-limit: + + # Host selection algorithm. + # Can be 'weighed' or 'roundrobin' + host-select: weighed + + # "Resolve the host(s) ip(s) (including multiple A/AAAA records). + # This can break SSL certificates, use --insecure if so + resolve-host: false + + # Specify custom write socket buffer size in bytes + sndbuf: 32768 + + # Specify custom read socket buffer size in bytes + rcvbuf: 32768 + + # When running benchmarks open a webserver to fetch results remotely, eg: localhost:7762 + serve: diff --git a/yml-samples/stat.yml b/yml-samples/stat.yml new file mode 100644 index 00000000..dd123320 --- /dev/null +++ b/yml-samples/stat.yml @@ -0,0 +1,188 @@ +warp: + api: v1 + + # Benchmark to run. + # Corresponds to warp [benchmark] command. + benchmark: stat + + # Do not print any output. + quiet: false + + # Disable terminal color output. + no-color: false + + # Print results and errors as JSON. + json: false + + # Output benchmark+profile data to this file. + # By default a unique filename is generated. + bench-data: + + # Connect to warp clients and run benchmarks there. + # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking + # Can be a single value or a list. + warp-client: + + # Run MinIO server profiling during benchmark; + # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'. + # Can be single value or a list. + server-profile: + + # Remote host parameters and connection info. + remote: + # Specify custom region + region: us-east-1 + + # Access key and Secret key + access-key: 'Q3AM3UQ867SPQQA43P2F' + secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' + + # Specify one or more hosts. + # The benchmark will be run against all hosts concurrently. + # Multiple servers can be specified with elipsis notation; + # for example '10.0.0.{1...10}:9000' specifies 10 hosts. + # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts + host: + - 'play.min.io' + + # Use TLS for calls. + tls: true + + # Allow TLS with unverified certificates. + insecure: false + + # Stream benchmark statistics to Influx DB instance. 
+ # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output + influxdb: '' + + # Bucket to use for benchmark data. + # + # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET! + # + # By default, 'warp-benchmark-bucket' will be created or used. + bucket: + + # params specifies the benchmark parameters. + # The fields here depend on the benchmark type. + params: + # Duration to run the benchmark. + # Use 's' and 'm' to specify seconds and minutes. + duration: 1m + + # Concurrent operations to run per warp instance. + concurrent: 16 + + # The number of objects to upload before starting the benchmark. + # Upload enough objects to ensure that any remote caching is bypassed. + objects: 5000 + + # Properties of uploaded objects. + obj: + # Size of each uploaded object + size: 1B + + # Number of versions to upload of each object + versions: 2 + + # Randomize the size of each object within certain constraints. + # See https://github.com/minio/warp?tab=readme-ov-file#random-file-sizes + rand-size: false + + # Force specific size of each multipart part. + # Must be '5MB' or bigger. + part-size: + + # Use automatic termination when traffic stabilizes. + # Can not be used with distributed warp setup. + # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination + autoterm: + enabled: false + dur: 10s + pct: 7.5 + + # Do not clear bucket before or after running benchmarks. + no-clear: false + + # Leave benchmark data. Do not run cleanup after benchmark. + # Bucket will still be cleaned prior to benchmark. + keep-data: false + + + # The io section specifies custom IO properties for uploaded objects. + io: + # Use a custom prefix + prefix: + + # Do not use separate prefix for each thread + no-prefix: false + + # Add MD5 sum to uploads + md5: false + + # Disable multipart uploads + disable-multipart: false + + # Disable calculating sha256 on client side for uploads + disable-sha256-payload: false + + # Server-side sse-s3 encrypt/decrypt objects + sse-s3-encrypt: false + + # Encrypt/decrypt objects (using server-side encryption with random keys) + sse-c-encrypt: false + + # Override storage class. + # Default storage class will be used unless specified. + storage-class: + + analyze: + # Display additional analysis data. + verbose: false + # Only output for this host. + host: '' + # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc. + filter-op: '' + # Split analysis into durations of this length. + # Can be '1s', '5s', '1m', etc. + segment-duration: + # Output aggregated data as to file. + out: + # Additional time duration to skip when analyzing data. + skip-duration: + # Max operations to load for analysis. + limit: + # Skip this number of operations before starting analysis. + offset: + + advanced: + # Stress test only and discard output. + stress: false + + # Print requests. + debug: false + + # Disable HTTP Keep-Alive + disable-http-keepalive: false + + # Enable HTTP2 support if server supports it + http2: false + + # Rate limit each instance to this number of requests per second + rps-limit: + + # Host selection algorithm. + # Can be 'weighed' or 'roundrobin' + host-select: weighed + + # "Resolve the host(s) ip(s) (including multiple A/AAAA records). 
+    # This can break SSL certificates; use --insecure if so.
+    resolve-host: false
+
+    # Specify custom write socket buffer size in bytes
+    sndbuf: 32768
+
+    # Specify custom read socket buffer size in bytes
+    rcvbuf: 32768
+
+    # When running benchmarks open a webserver to fetch results remotely, e.g. localhost:7762
+    serve:
diff --git a/yml-samples/versioned.yml b/yml-samples/versioned.yml
new file mode 100644
index 00000000..5cd4ac5e
--- /dev/null
+++ b/yml-samples/versioned.yml
@@ -0,0 +1,193 @@
+warp:
+  api: v1
+
+  # Benchmark to run.
+  # Corresponds to warp [benchmark] command.
+  benchmark: versioned
+
+  # Do not print any output.
+  quiet: false
+
+  # Disable terminal color output.
+  no-color: false
+
+  # Print results and errors as JSON.
+  json: false
+
+  # Output benchmark+profile data to this file.
+  # By default a unique filename is generated.
+  bench-data:
+
+  # Connect to warp clients and run benchmarks there.
+  # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking
+  # Can be a single value or a list.
+  warp-client:
+
+  # Run MinIO server profiling during benchmark;
+  # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'.
+  # Can be single value or a list.
+  server-profile:
+
+  # Remote host parameters and connection info.
+  remote:
+    # Specify custom region
+    region: us-east-1
+
+    # Access key and Secret key
+    access-key: 'Q3AM3UQ867SPQQA43P2F'
+    secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG'
+
+    # Specify one or more hosts.
+    # The benchmark will be run against all hosts concurrently.
+    # Multiple servers can be specified with ellipsis notation;
+    # for example '10.0.0.{1...10}:9000' specifies 10 hosts.
+    # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts
+    host:
+      - 'play.min.io'
+
+    # Use TLS for calls.
+    tls: true
+
+    # Allow TLS with unverified certificates.
+    insecure: false
+
+    # Stream benchmark statistics to an InfluxDB instance.
+    # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output
+    influxdb: ''
+
+  # Bucket to use for benchmark data.
+  #
+  # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET!
+  #
+  # By default, 'warp-benchmark-bucket' will be created or used.
+  bucket:
+
+  # params specifies the benchmark parameters.
+  # The fields here depend on the benchmark type.
+  params:
+    # Duration to run the benchmark.
+    # Use 's' and 'm' to specify seconds and minutes.
+    duration: 1m
+
+    # Concurrent operations to run per warp instance.
+    concurrent: 8
+
+    # The number of objects to upload before starting the benchmark.
+    # Upload enough objects to ensure that any remote caching is bypassed.
+    objects: 1000
+
+    # Adjust the distribution of each operation type.
+    # The final distribution will be determined by the fraction of each value of the total.
+    distribution:
+      get: 45.0
+      stat: 30.0
+      put: 15.0
+      delete: 10.0 # Must be the same as or lower than 'put'.
+
+    # Properties of uploaded objects.
+    obj:
+      # Size of each uploaded object
+      size: 100KiB
+
+      # Randomize the size of each object within certain constraints.
+      # See https://github.com/minio/warp?tab=readme-ov-file#random-file-sizes
+      rand-size: false
+
+      # Force specific size of each multipart part.
+      # Must be '5MB' or bigger.
+      part-size:
+
+    # Use automatic termination when traffic stabilizes.
+    # Cannot be used with a distributed warp setup.
+    # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination
+    autoterm:
+      enabled: false
+      dur: 10s
+      pct: 7.5
+
+    # Do not clear bucket before or after running benchmarks.
+    no-clear: false
+
+    # Leave benchmark data. Do not run cleanup after benchmark.
+    # Bucket will still be cleaned prior to benchmark.
+    keep-data: false
+
+
+  # The io section specifies custom IO properties for uploaded objects.
+  io:
+    # Use a custom prefix
+    prefix:
+
+    # Do not use separate prefix for each thread
+    no-prefix: false
+
+    # Add MD5 sum to uploads
+    md5: false
+
+    # Disable multipart uploads
+    disable-multipart: false
+
+    # Disable calculating sha256 on client side for uploads
+    disable-sha256-payload: false
+
+    # Server-side sse-s3 encrypt/decrypt objects
+    sse-s3-encrypt: false
+
+    # Encrypt/decrypt objects (using server-side encryption with random keys)
+    sse-c-encrypt: false
+
+    # Override storage class.
+    # Default storage class will be used unless specified.
+    storage-class:
+
+  analyze:
+    # Display additional analysis data.
+    verbose: false
+    # Only output for this host.
+    host: ''
+    # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc.
+    filter-op: ''
+    # Split analysis into durations of this length.
+    # Can be '1s', '5s', '1m', etc.
+    segment-duration:
+    # Output aggregated data to this file.
+    out:
+    # Additional time duration to skip when analyzing data.
+    skip-duration:
+    # Max operations to load for analysis.
+    limit:
+    # Skip this number of operations before starting analysis.
+    offset:
+
+  advanced:
+    # Stress test only and discard output.
+    stress: false
+
+    # Print requests.
+    debug: false
+
+    # Disable HTTP Keep-Alive
+    disable-http-keepalive: false
+
+    # Enable HTTP2 support if server supports it
+    http2: false
+
+    # Rate limit each instance to this number of requests per second
+    rps-limit:
+
+    # Host selection algorithm.
+    # Can be 'weighed' or 'roundrobin'
+    host-select: weighed
+
+    # Resolve the host(s) IP(s) (including multiple A/AAAA records).
+    # This can break SSL certificates; use --insecure if so.
+    resolve-host: false
+
+    # Specify custom write socket buffer size in bytes
+    sndbuf: 32768
+
+    # Specify custom read socket buffer size in bytes
+    rcvbuf: 32768
+
+    # When running benchmarks open a webserver to fetch results remotely, e.g. localhost:7762
+    serve:
diff --git a/yml-samples/zip.yml b/yml-samples/zip.yml
new file mode 100644
index 00000000..12d9637e
--- /dev/null
+++ b/yml-samples/zip.yml
@@ -0,0 +1,180 @@
+warp:
+  api: v1
+
+  # Benchmark to run.
+  # Corresponds to warp [benchmark] command.
+  benchmark: zip
+
+  # Do not print any output.
+  quiet: false
+
+  # Disable terminal color output.
+  no-color: false
+
+  # Print results and errors as JSON.
+  json: false
+
+  # Output benchmark+profile data to this file.
+  # By default a unique filename is generated.
+  bench-data:
+
+  # Connect to warp clients and run benchmarks there.
+  # See https://github.com/minio/warp?tab=readme-ov-file#distributed-benchmarking
+  # Can be a single value or a list.
+  warp-client:
+
+  # Run MinIO server profiling during benchmark;
+  # possible values are 'cpu', 'cpuio', 'mem', 'block', 'mutex', 'threads' and 'trace'.
+  # Can be single value or a list.
+  server-profile:
+
+  # Remote host parameters and connection info.
+  remote:
+    # Specify custom region
+    region: us-east-1
+
+    # Access key and Secret key
+    access-key: 'Q3AM3UQ867SPQQA43P2F'
+    secret-key: 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG'
+
+    # Specify one or more hosts.
+    # The benchmark will be run against all hosts concurrently.
+    # Multiple servers can be specified with ellipsis notation;
+    # for example '10.0.0.{1...10}:9000' specifies 10 hosts.
+    # See more at https://github.com/minio/warp?tab=readme-ov-file#multiple-hosts
+    host:
+      - 'play.min.io'
+
+    # Use TLS for calls.
+    tls: true
+
+    # Allow TLS with unverified certificates.
+    insecure: false
+
+    # Stream benchmark statistics to an InfluxDB instance.
+    # See more at https://github.com/minio/warp?tab=readme-ov-file#influxdb-output
+    influxdb: ''
+
+  # Bucket to use for benchmark data.
+  #
+  # CAREFUL: ALL DATA WILL BE DELETED IN BUCKET!
+  #
+  # By default, 'warp-benchmark-bucket' will be created or used.
+  bucket:
+
+  # params specifies the benchmark parameters.
+  # The fields here depend on the benchmark type.
+  params:
+    # Duration to run the benchmark.
+    # Use 's' and 'm' to specify seconds and minutes.
+    duration: 1m
+
+    # Concurrent operations to run per warp instance.
+    concurrent: 8
+
+    # Number of files to upload in the zip file
+    files: 10000
+
+    # Properties of uploaded objects.
+    obj:
+      # Size of each uploaded object inside the zip file.
+      size: 1KiB
+
+      # Randomize the size of each object within certain constraints.
+      # See https://github.com/minio/warp?tab=readme-ov-file#random-file-sizes
+      rand-size: false
+
+    # Use automatic termination when traffic stabilizes.
+    # Cannot be used with a distributed warp setup.
+    # See https://github.com/minio/warp?tab=readme-ov-file#automatic-termination
+    autoterm:
+      enabled: false
+      dur: 10s
+      pct: 7.5
+
+    # Do not clear bucket before or after running benchmarks.
+    no-clear: false
+
+    # Leave benchmark data. Do not run cleanup after benchmark.
+    # Bucket will still be cleaned prior to benchmark.
+    keep-data: false
+
+
+  # The io section specifies custom IO properties for uploaded objects.
+  io:
+    # Use a custom prefix
+    prefix:
+
+    # Do not use separate prefix for each thread
+    no-prefix: false
+
+    # Add MD5 sum to uploads
+    md5: false
+
+    # Disable multipart uploads
+    disable-multipart: false
+
+    # Disable calculating sha256 on client side for uploads
+    disable-sha256-payload: false
+
+    # Server-side sse-s3 encrypt/decrypt objects
+    sse-s3-encrypt: false
+
+    # Encrypt/decrypt objects (using server-side encryption with random keys)
+    sse-c-encrypt: false
+
+    # Override storage class.
+    # Default storage class will be used unless specified.
+    storage-class:
+
+  analyze:
+    # Display additional analysis data.
+    verbose: false
+    # Only output for this host.
+    host: ''
+    # Only output for this operation. Can be 'GET', 'PUT', 'DELETE', etc.
+    filter-op: ''
+    # Split analysis into durations of this length.
+    # Can be '1s', '5s', '1m', etc.
+    segment-duration:
+    # Output aggregated data to this file.
+    out:
+    # Additional time duration to skip when analyzing data.
+    skip-duration:
+    # Max operations to load for analysis.
+    limit:
+    # Skip this number of operations before starting analysis.
+    offset:
+
+  advanced:
+    # Stress test only and discard output.
+    stress: false
+
+    # Print requests.
+    debug: false
+
+    # Disable HTTP Keep-Alive
+    disable-http-keepalive: false
+
+    # Enable HTTP2 support if server supports it
+    http2: false
+
+    # Rate limit each instance to this number of requests per second
+    rps-limit:
+
+    # Host selection algorithm.
+    # Can be 'weighed' or 'roundrobin'
+    host-select: weighed
+
+    # Resolve the host(s) IP(s) (including multiple A/AAAA records).
+    # This can break SSL certificates; use --insecure if so.
+    resolve-host: false
+
+    # Specify custom write socket buffer size in bytes
+    sndbuf: 32768
+
+    # Specify custom read socket buffer size in bytes
+    rcvbuf: 32768
+
+    # When running benchmarks open a webserver to fetch results remotely, e.g. localhost:7762
+    serve:
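
For reference, a minimal sketch of invoking one of these samples, assuming `warp run` accepts the path to a YAML configuration file and that the play.min.io credentials embedded in the samples are still usable:

```
λ warp run yml-samples/stat.yml
```

Any of the other samples (for example `versioned.yml` or `zip.yml`) can be pointed at a different deployment by editing the `remote:` section before running.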