diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 515c78e5..44a5fffe 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -18,7 +18,7 @@ jobs: matrix: rust: - stable - - 1.60.0 # MSRV + - 1.64.0 # MSRV - nightly steps: diff --git a/Cargo.toml b/Cargo.toml index 03da2380..d686a6bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ serde_json = "1.0" serde_derive = "1.0" ciborium = "0.2.0" is-terminal = "0.4.6" -clap = { version = "3.1", default-features = false, features = ["std"] } +clap = { version = "4", default-features = false, features = ["std"] } walkdir = "2.3" tinytemplate = "1.1" cast = "0.3" diff --git a/src/lib.rs b/src/lib.rs index d81fe453..855c68ff 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -768,7 +768,7 @@ impl Criterion { #[must_use] #[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))] pub fn configure_from_args(mut self) -> Criterion { - use clap::{Arg, Command}; + use clap::{value_parser, Arg, Command}; let matches = Command::new("Criterion Benchmark") .arg(Arg::new("FILTER") .help("Skip benchmarks whose names do not contain FILTER.") @@ -777,21 +777,23 @@ impl Criterion { .short('c') .long("color") .alias("colour") - .takes_value(true) - .possible_values(["auto", "always", "never"]) + .value_parser(["auto", "always", "never"]) .default_value("auto") .help("Configure coloring of output. 
always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix.")) .arg(Arg::new("verbose") .short('v') .long("verbose") + .num_args(0) .help("Print additional statistical information.")) .arg(Arg::new("quiet") .long("quiet") + .num_args(0) .conflicts_with("verbose") .help("Print only the benchmark results.")) .arg(Arg::new("noplot") .short('n') .long("noplot") + .num_args(0) .help("Disable plot and HTML generation.")) .arg(Arg::new("save-baseline") .short('s') @@ -800,110 +802,115 @@ impl Criterion { .help("Save results under a named baseline.")) .arg(Arg::new("discard-baseline") .long("discard-baseline") - .conflicts_with_all(&["save-baseline", "baseline", "baseline-lenient"]) + .num_args(0) + .conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"]) .help("Discard benchmark results.")) .arg(Arg::new("baseline") .short('b') .long("baseline") - .takes_value(true) - .conflicts_with_all(&["save-baseline", "baseline-lenient"]) + .conflicts_with_all(["save-baseline", "baseline-lenient"]) .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails.")) .arg(Arg::new("baseline-lenient") .long("baseline-lenient") - .takes_value(true) - .conflicts_with_all(&["save-baseline", "baseline"]) + .conflicts_with_all(["save-baseline", "baseline"]) .help("Compare to a named baseline. 
If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline.")) .arg(Arg::new("list") .long("list") + .num_args(0) .help("List all benchmarks") - .conflicts_with_all(&["test", "profile-time"])) + .conflicts_with_all(["test", "profile-time"])) .arg(Arg::new("format") .long("format") - .possible_values(["pretty", "terse"]) + .value_parser(["pretty", "terse"]) .default_value("pretty") // Note that libtest's --format also works during test execution, but criterion // doesn't support that at the moment. .help("Output formatting")) .arg(Arg::new("ignored") .long("ignored") + .num_args(0) .help("List or run ignored benchmarks (currently means skip all benchmarks)")) .arg(Arg::new("exact") .long("exact") + .num_args(0) .help("Run benchmarks that exactly match the provided filter")) .arg(Arg::new("profile-time") .long("profile-time") - .takes_value(true) + .value_parser(value_parser!(f64)) .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.") - .conflicts_with_all(&["test", "list"])) + .conflicts_with_all(["test", "list"])) .arg(Arg::new("load-baseline") .long("load-baseline") - .takes_value(true) .conflicts_with("profile-time") .requires("baseline") .help("Load a previous baseline instead of sampling new data.")) .arg(Arg::new("sample-size") .long("sample-size") - .takes_value(true) - .help(&*format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size))) + .value_parser(value_parser!(usize)) + .help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size))) .arg(Arg::new("warm-up-time") .long("warm-up-time") - .takes_value(true) - .help(&*format!("Changes the default warm up time for this run. 
[default: {}]", self.config.warm_up_time.as_secs()))) + .value_parser(value_parser!(f64)) + .help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs()))) .arg(Arg::new("measurement-time") .long("measurement-time") - .takes_value(true) - .help(&*format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs()))) + .value_parser(value_parser!(f64)) + .help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs()))) .arg(Arg::new("nresamples") .long("nresamples") - .takes_value(true) - .help(&*format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples))) + .value_parser(value_parser!(usize)) + .help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples))) .arg(Arg::new("noise-threshold") .long("noise-threshold") - .takes_value(true) - .help(&*format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold))) + .value_parser(value_parser!(f64)) + .help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold))) .arg(Arg::new("confidence-level") .long("confidence-level") - .takes_value(true) - .help(&*format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level))) + .value_parser(value_parser!(f64)) + .help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level))) .arg(Arg::new("significance-level") .long("significance-level") - .takes_value(true) - .help(&*format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level))) + .value_parser(value_parser!(f64)) + .help(format!("Changes the default significance level for this run. 
[default: {}]", self.config.significance_level))) .arg(Arg::new("quick") .long("quick") + .num_args(0) .conflicts_with("sample-size") - .help(&*format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode))) + .help(format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode))) .arg(Arg::new("test") .hide(true) .long("test") + .num_args(0) .help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.") - .conflicts_with_all(&["list", "profile-time"])) + .conflicts_with_all(["list", "profile-time"])) .arg(Arg::new("bench") .hide(true) - .long("bench")) + .long("bench") + .num_args(0)) .arg(Arg::new("plotting-backend") .long("plotting-backend") - .takes_value(true) - .possible_values(["gnuplot", "plotters"]) + .value_parser(["gnuplot", "plotters"]) .help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't.")) .arg(Arg::new("output-format") .long("output-format") - .takes_value(true) - .possible_values(["criterion", "bencher"]) + .value_parser(["criterion", "bencher"]) .default_value("criterion") .help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate.")) .arg(Arg::new("nocapture") .long("nocapture") + .num_args(0) .hide(true) .help("Ignored, but added for compatibility with libtest.")) .arg(Arg::new("show-output") .long("show-output") - .hidden(true) + .num_args(0) + .hide(true) .help("Ignored, but added for compatibility with libtest.")) .arg(Arg::new("version") - .hidden(true) + .hide(true) .short('V') - .long("version")) + .long("version") + .num_args(0)) .after_help(" This executable is a Criterion.rs benchmark. See https://github.com/bheisler/criterion.rs for more details. 
@@ -920,50 +927,50 @@ https://bheisler.github.io/criterion.rs/book/faq.html .get_matches(); if self.connection.is_some() { - if let Some(color) = matches.value_of("color") { + if let Some(color) = matches.get_one::<String>("color") { if color != "auto" { eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color); } } - if matches.is_present("verbose") { + if matches.get_flag("verbose") { eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead."); } - if matches.is_present("noplot") { + if matches.get_flag("noplot") { eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead."); } - if let Some(backend) = matches.value_of("plotting-backend") { + if let Some(backend) = matches.get_one::<String>("plotting-backend") { eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend); } - if let Some(format) = matches.value_of("output-format") { + if let Some(format) = matches.get_one::<String>("output-format") { if format != "criterion" { eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. 
Use `cargo criterion --output-format {} -- <args>` instead.", format); } } - if matches.is_present("baseline") + if matches.contains_id("baseline") || matches - .value_of("save-baseline") - .map(|base| base != "base") - .unwrap_or(false) - || matches.is_present("load-baseline") + .get_one::<String>("save-baseline") + .map_or(false, |base| base != "base") + || matches.contains_id("load-baseline") { eprintln!("Error: baselines are not supported when running with cargo-criterion."); std::process::exit(1); } } - let bench = matches.is_present("bench"); - let test = matches.is_present("test"); + let bench = matches.get_flag("bench"); + let test = matches.get_flag("test"); let test_mode = match (bench, test) { (true, true) => true, // cargo bench -- --test should run tests (true, false) => false, // cargo bench should run benchmarks (false, _) => true, // cargo test --benches should run tests }; - self.mode = if matches.is_present("list") { + self.mode = if matches.get_flag("list") { let list_format = match matches - .value_of("format") + .get_one::<String>("format") .expect("a default value was provided for this") + .as_str() { "pretty" => ListFormat::Pretty, "terse" => ListFormat::Terse, @@ -975,9 +982,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html Mode::List(list_format) } else if test_mode { Mode::Test - } else if matches.is_present("profile-time") { - let num_seconds = matches.value_of_t_or_exit("profile-time"); - + } else if let Some(&num_seconds) = matches.get_one("profile-time") { if num_seconds < 1.0 { eprintln!("Profile time must be at least one second."); std::process::exit(1); @@ -993,11 +998,11 @@ https://bheisler.github.io/criterion.rs/book/faq.html self.connection = None; } - let filter = if matches.is_present("ignored") { + let filter = if matches.get_flag("ignored") { // --ignored overwrites any name-based filters passed in. 
BenchmarkFilter::RejectAll - } else if let Some(filter) = matches.value_of("FILTER") { - if matches.is_present("exact") { + } else if let Some(filter) = matches.get_one::<String>("FILTER") { + if matches.get_flag("exact") { BenchmarkFilter::Exact(filter.to_owned()) } else { let regex = Regex::new(filter).unwrap_or_else(|err| { @@ -1013,7 +1018,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html }; self = self.with_benchmark_filter(filter); - match matches.value_of("plotting-backend") { + match matches.get_one("plotting-backend").map(String::as_str) { // Use plotting_backend() here to re-use the panic behavior if Gnuplot is not available. Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot), Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters), @@ -1021,22 +1026,22 @@ https://bheisler.github.io/criterion.rs/book/faq.html None => {} } - if matches.is_present("noplot") { + if matches.get_flag("noplot") { self = self.without_plots(); } - if let Some(dir) = matches.value_of("save-baseline") { + if let Some(dir) = matches.get_one::<String>("save-baseline") { self.baseline = Baseline::Save; self.baseline_directory = dir.to_owned() } - if matches.is_present("discard-baseline") { + if matches.get_flag("discard-baseline") { self.baseline = Baseline::Discard; } - if let Some(dir) = matches.value_of("baseline") { + if let Some(dir) = matches.get_one::<String>("baseline") { self.baseline = Baseline::CompareStrict; self.baseline_directory = dir.to_owned(); } - if let Some(dir) = matches.value_of("baseline-lenient") { + if let Some(dir) = matches.get_one::<String>("baseline-lenient") { self.baseline = Baseline::CompareLenient; self.baseline_directory = dir.to_owned(); } @@ -1048,16 +1053,16 @@ https://bheisler.github.io/criterion.rs/book/faq.html self.report.csv_enabled = false; self.report.html = None; } else { - match matches.value_of("output-format") { + match matches.get_one("output-format").map(String::as_str) { Some("bencher") => { 
self.report.bencher_enabled = true; self.report.cli_enabled = false; } _ => { - let verbose = matches.is_present("verbose"); + let verbose = matches.get_flag("verbose"); let verbosity = if verbose { CliVerbosity::Verbose - } else if matches.is_present("quiet") { + } else if matches.get_flag("quiet") { CliVerbosity::Quiet } else { CliVerbosity::Normal @@ -1065,7 +1070,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html let stdout_isatty = stdout().is_terminal(); let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled(); let enable_text_coloring; - match matches.value_of("color") { + match matches.get_one("color").map(String::as_str) { Some("always") => { enable_text_coloring = true; } @@ -1083,62 +1088,48 @@ https://bheisler.github.io/criterion.rs/book/faq.html }; } - if let Some(dir) = matches.value_of("load-baseline") { + if let Some(dir) = matches.get_one::<String>("load-baseline") { self.load_baseline = Some(dir.to_owned()); } - if matches.is_present("sample-size") { - let num_size = matches.value_of_t_or_exit("sample-size"); - + if let Some(&num_size) = matches.get_one("sample-size") { assert!(num_size >= 10); self.config.sample_size = num_size; } - if matches.is_present("warm-up-time") { - let num_seconds = matches.value_of_t_or_exit("warm-up-time"); - + if let Some(&num_seconds) = matches.get_one("warm-up-time") { let dur = std::time::Duration::from_secs_f64(num_seconds); assert!(dur.as_nanos() > 0); self.config.warm_up_time = dur; } - if matches.is_present("measurement-time") { - let num_seconds = matches.value_of_t_or_exit("measurement-time"); - + if let Some(&num_seconds) = matches.get_one("measurement-time") { let dur = std::time::Duration::from_secs_f64(num_seconds); assert!(dur.as_nanos() > 0); self.config.measurement_time = dur; } - if matches.is_present("nresamples") { - let num_resamples = matches.value_of_t_or_exit("nresamples"); - + if let Some(&num_resamples) = matches.get_one("nresamples") { assert!(num_resamples > 0); 
self.config.nresamples = num_resamples; } - if matches.is_present("noise-threshold") { - let num_noise_threshold = matches.value_of_t_or_exit("noise-threshold"); - + if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") { assert!(num_noise_threshold > 0.0); self.config.noise_threshold = num_noise_threshold; } - if matches.is_present("confidence-level") { - let num_confidence_level = matches.value_of_t_or_exit("confidence-level"); - + if let Some(&num_confidence_level) = matches.get_one("confidence-level") { assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0); self.config.confidence_level = num_confidence_level; } - if matches.is_present("significance-level") { - let num_significance_level = matches.value_of_t_or_exit("significance-level"); - + if let Some(&num_significance_level) = matches.get_one("significance-level") { assert!(num_significance_level > 0.0 && num_significance_level < 1.0); self.config.significance_level = num_significance_level; } - if matches.is_present("quick") { + if matches.get_flag("quick") { self.config.quick_mode = true; }