From 0bc4f0447b05468f043e06278a3ca2b1c5646f9b Mon Sep 17 00:00:00 2001 From: Andrew Gallant Date: Mon, 17 Feb 2020 18:08:47 -0500 Subject: [PATCH] style: rustfmt everything This is why I was so intent on clearing the PR queue. This will effectively invalidate all existing patches, so I wanted to start from a clean slate. We do make one little tweak: we put the default type definitions in their own file and tell rustfmt to keep its grubby mits off of it. We also sort it lexicographically and hopefully will enforce that from here on. --- build.rs | 18 +- globset/src/glob.rs | 231 ++-- globset/src/lib.rs | 71 +- globset/src/pathutil.rs | 2 +- grep-cli/src/decompress.rs | 30 +- grep-cli/src/escape.rs | 92 +- grep-cli/src/human.rs | 50 +- grep-cli/src/lib.rs | 16 +- grep-cli/src/pattern.rs | 32 +- grep-cli/src/process.rs | 31 +- grep-matcher/src/interpolate.rs | 8 +- grep-matcher/src/lib.rs | 69 +- grep-matcher/tests/test_matcher.rs | 172 +-- grep-matcher/tests/util.rs | 23 +- grep-pcre2/src/matcher.rs | 58 +- grep-printer/src/color.rs | 105 +- grep-printer/src/json.rs | 105 +- grep-printer/src/jsont.rs | 34 +- grep-printer/src/lib.rs | 4 +- grep-printer/src/standard.rs | 251 ++-- grep-printer/src/stats.rs | 4 +- grep-printer/src/summary.rs | 167 +-- grep-printer/src/util.rs | 61 +- grep-regex/src/ast.rs | 2 +- grep-regex/src/config.rs | 16 +- grep-regex/src/crlf.rs | 7 +- grep-regex/src/literal.rs | 60 +- grep-regex/src/matcher.rs | 93 +- grep-regex/src/multi.rs | 18 +- grep-regex/src/non_matching.rs | 32 +- grep-regex/src/strip.rs | 13 +- grep-regex/src/util.rs | 2 +- grep-regex/src/word.rs | 26 +- grep-searcher/examples/search-stdin.rs | 20 +- grep-searcher/src/lib.rs | 9 +- grep-searcher/src/line_buffer.rs | 28 +- grep-searcher/src/lines.rs | 39 +- grep-searcher/src/searcher/core.rs | 64 +- grep-searcher/src/searcher/glue.rs | 97 +- grep-searcher/src/searcher/mod.rs | 104 +- grep-searcher/src/sink.rs | 35 +- grep-searcher/src/testutil.rs | 50 +- grep/examples/simplegrep.rs | 12 +- ignore/src/default_types.rs | 244 ++++ ignore/src/dir.rs | 77 +- ignore/src/gitignore.rs | 55 +- ignore/src/lib.rs | 98 +- ignore/src/overrides.rs | 18 +- ignore/src/pathutil.rs | 4 +- ignore/src/types.rs | 288 +---- ignore/src/walk.rs | 161 +-- ...gnore_matched_path_or_any_parents_tests.rs | 43 +- rustfmt.toml | 3 +- src/app.rs | 1092 ++++++++++------- src/args.rs | 234 ++-- src/config.rs | 47 +- src/path_printer.rs | 5 +- src/search.rs | 81 +- src/subject.rs | 10 +- tests/binary.rs | 111 +- tests/feature.rs | 140 +-- tests/json.rs | 70 +- tests/macros.rs | 2 +- tests/misc.rs | 41 +- tests/multiline.rs | 36 +- tests/regression.rs | 102 +- tests/util.rs | 67 +- 67 files changed, 2711 insertions(+), 2679 deletions(-) create mode 100644 ignore/src/default_types.rs diff --git a/build.rs b/build.rs index 43f667cad..53f7259e5 100644 --- a/build.rs +++ b/build.rs @@ -21,7 +21,8 @@ fn main() { eprintln!( "OUT_DIR environment variable not defined. \ Please file a bug: \ - https://github.com/BurntSushi/ripgrep/issues/new"); + https://github.com/BurntSushi/ripgrep/issues/new" + ); process::exit(1); } }; @@ -90,8 +91,10 @@ fn generate_man_page>(outdir: P) -> io::Result<()> { File::create(&txt_path)?.write_all(tpl.as_bytes())?; let result = process::Command::new("a2x") .arg("--no-xmllint") - .arg("--doctype").arg("manpage") - .arg("--format").arg("manpage") + .arg("--doctype") + .arg("manpage") + .arg("--format") + .arg("manpage") .arg(&txt_path) .spawn()? 
.wait()?; @@ -114,7 +117,7 @@ fn formatted_options() -> io::Result { // ripgrep only has two positional arguments, and probably will only // ever have two positional arguments, so we just hardcode them into // the template. - if let app::RGArgKind::Positional{..} = arg.kind { + if let app::RGArgKind::Positional { .. } = arg.kind { continue; } formatted.push(formatted_arg(&arg)?); @@ -124,7 +127,9 @@ fn formatted_options() -> io::Result { fn formatted_arg(arg: &RGArg) -> io::Result { match arg.kind { - RGArgKind::Positional{..} => panic!("unexpected positional argument"), + RGArgKind::Positional { .. } => { + panic!("unexpected positional argument") + } RGArgKind::Switch { long, short, multiple } => { let mut out = vec![]; @@ -163,7 +168,8 @@ fn formatted_arg(arg: &RGArg) -> io::Result { } fn formatted_doc_txt(arg: &RGArg) -> io::Result { - let paragraphs: Vec = arg.doc_long + let paragraphs: Vec = arg + .doc_long .replace("{", "{") .replace("}", r"}") .split("\n\n") diff --git a/globset/src/glob.rs b/globset/src/glob.rs index c8dedba2a..50502b962 100644 --- a/globset/src/glob.rs +++ b/globset/src/glob.rs @@ -2,13 +2,13 @@ use std::fmt; use std::hash; use std::iter; use std::ops::{Deref, DerefMut}; -use std::path::{Path, is_separator}; +use std::path::{is_separator, Path}; use std::str; use regex; use regex::bytes::Regex; -use {Candidate, Error, ErrorKind, new_regex}; +use {new_regex, Candidate, Error, ErrorKind}; /// Describes a matching strategy for a particular pattern. /// @@ -85,16 +85,16 @@ pub struct Glob { } impl PartialEq for Glob { - fn eq(&self, other: &Glob) -> bool { - self.glob == other.glob && self.opts == other.opts - } + fn eq(&self, other: &Glob) -> bool { + self.glob == other.glob && self.opts == other.opts + } } impl hash::Hash for Glob { - fn hash(&self, state: &mut H) { - self.glob.hash(state); - self.opts.hash(state); - } + fn hash(&self, state: &mut H) { + self.glob.hash(state); + self.opts.hash(state); + } } impl fmt::Display for Glob { @@ -227,11 +227,15 @@ struct Tokens(Vec); impl Deref for Tokens { type Target = Vec; - fn deref(&self) -> &Vec { &self.0 } + fn deref(&self) -> &Vec { + &self.0 + } } impl DerefMut for Tokens { - fn deref_mut(&mut self) -> &mut Vec { &mut self.0 } + fn deref_mut(&mut self) -> &mut Vec { + &mut self.0 + } } #[derive(Clone, Debug, Eq, PartialEq)] @@ -242,10 +246,7 @@ enum Token { RecursivePrefix, RecursiveSuffix, RecursiveZeroOrMore, - Class { - negated: bool, - ranges: Vec<(char, char)>, - }, + Class { negated: bool, ranges: Vec<(char, char)> }, Alternates(Vec), } @@ -257,12 +258,9 @@ impl Glob { /// Returns a matcher for this pattern. pub fn compile_matcher(&self) -> GlobMatcher { - let re = new_regex(&self.re) - .expect("regex compilation shouldn't fail"); - GlobMatcher { - pat: self.clone(), - re: re, - } + let re = + new_regex(&self.re).expect("regex compilation shouldn't fail"); + GlobMatcher { pat: self.clone(), re: re } } /// Returns a strategic matcher. @@ -273,13 +271,9 @@ impl Glob { #[cfg(test)] fn compile_strategic_matcher(&self) -> GlobStrategic { let strategy = MatchStrategy::new(self); - let re = new_regex(&self.re) - .expect("regex compilation shouldn't fail"); - GlobStrategic { - strategy: strategy, - pat: self.clone(), - re: re, - } + let re = + new_regex(&self.re).expect("regex compilation shouldn't fail"); + GlobStrategic { strategy: strategy, pat: self.clone(), re: re } } /// Returns the original glob pattern used to build this pattern. 
@@ -537,7 +531,7 @@ impl Glob { | Token::RecursiveZeroOrMore => { return None; } - Token::Class{..} | Token::Alternates(..) => { + Token::Class { .. } | Token::Alternates(..) => { // We *could* be a little smarter here, but either one // of these is going to prevent our literal optimizations // anyway, so give up. @@ -574,10 +568,7 @@ impl<'a> GlobBuilder<'a> { /// /// The pattern is not compiled until `build` is called. pub fn new(glob: &'a str) -> GlobBuilder<'a> { - GlobBuilder { - glob: glob, - opts: GlobOptions::default(), - } + GlobBuilder { glob: glob, opts: GlobOptions::default() } } /// Parses and builds the pattern. @@ -875,25 +866,22 @@ impl<'a> Parser<'a> { return Ok(()); } } - let is_suffix = - match self.peek() { - None => { - assert!(self.bump().is_none()); - true - } - Some(',') | Some('}') if self.stack.len() >= 2 => { - true - } - Some(c) if is_separator(c) => { - assert!(self.bump().map(is_separator).unwrap_or(false)); - false - } - _ => { - self.push_token(Token::ZeroOrMore)?; - self.push_token(Token::ZeroOrMore)?; - return Ok(()); - } - }; + let is_suffix = match self.peek() { + None => { + assert!(self.bump().is_none()); + true + } + Some(',') | Some('}') if self.stack.len() >= 2 => true, + Some(c) if is_separator(c) => { + assert!(self.bump().map(is_separator).unwrap_or(false)); + false + } + _ => { + self.push_token(Token::ZeroOrMore)?; + self.push_token(Token::ZeroOrMore)?; + return Ok(()); + } + }; match self.pop_token()? { Token::RecursivePrefix => { self.push_token(Token::RecursivePrefix)?; @@ -973,7 +961,10 @@ impl<'a> Parser<'a> { // invariant: in_range is only set when there is // already at least one character seen. add_to_last_range( - &self.glob, ranges.last_mut().unwrap(), c)?; + &self.glob, + ranges.last_mut().unwrap(), + c, + )?; } else { ranges.push((c, c)); } @@ -987,10 +978,7 @@ impl<'a> Parser<'a> { // it as a literal. ranges.push(('-', '-')); } - self.push_token(Token::Class { - negated: negated, - ranges: ranges, - }) + self.push_token(Token::Class { negated: negated, ranges: ranges }) } fn bump(&mut self) -> Option { @@ -1019,9 +1007,9 @@ fn ends_with(needle: &[u8], haystack: &[u8]) -> bool { #[cfg(test)] mod tests { - use {GlobSetBuilder, ErrorKind}; - use super::{Glob, GlobBuilder, Token}; use super::Token::*; + use super::{Glob, GlobBuilder, Token}; + use {ErrorKind, GlobSetBuilder}; #[derive(Clone, Copy, Debug, Default)] struct Options { @@ -1037,7 +1025,7 @@ mod tests { let pat = Glob::new($pat).unwrap(); assert_eq!($tokens, pat.tokens.0); } - } + }; } macro_rules! syntaxerr { @@ -1047,7 +1035,7 @@ mod tests { let err = Glob::new($pat).unwrap_err(); assert_eq!(&$err, err.kind()); } - } + }; } macro_rules! 
toregex { @@ -1129,7 +1117,9 @@ mod tests { }; } - fn s(string: &str) -> String { string.to_string() } + fn s(string: &str) -> String { + string.to_string() + } fn class(s: char, e: char) -> Token { Class { negated: false, ranges: vec![(s, e)] } @@ -1153,16 +1143,20 @@ mod tests { syntax!(any2, "a?b", vec![Literal('a'), Any, Literal('b')]); syntax!(seq1, "*", vec![ZeroOrMore]); syntax!(seq2, "a*b", vec![Literal('a'), ZeroOrMore, Literal('b')]); - syntax!(seq3, "*a*b*", vec![ - ZeroOrMore, Literal('a'), ZeroOrMore, Literal('b'), ZeroOrMore, - ]); + syntax!( + seq3, + "*a*b*", + vec![ZeroOrMore, Literal('a'), ZeroOrMore, Literal('b'), ZeroOrMore,] + ); syntax!(rseq1, "**", vec![RecursivePrefix]); syntax!(rseq2, "**/", vec![RecursivePrefix]); syntax!(rseq3, "/**", vec![RecursiveSuffix]); syntax!(rseq4, "/**/", vec![RecursiveZeroOrMore]); - syntax!(rseq5, "a/**/b", vec![ - Literal('a'), RecursiveZeroOrMore, Literal('b'), - ]); + syntax!( + rseq5, + "a/**/b", + vec![Literal('a'), RecursiveZeroOrMore, Literal('b'),] + ); syntax!(cls1, "[a]", vec![class('a', 'a')]); syntax!(cls2, "[!a]", vec![classn('a', 'a')]); syntax!(cls3, "[a-z]", vec![class('a', 'z')]); @@ -1174,9 +1168,11 @@ mod tests { syntax!(cls9, "[a-]", vec![rclass(&[('a', 'a'), ('-', '-')])]); syntax!(cls10, "[-a-z]", vec![rclass(&[('-', '-'), ('a', 'z')])]); syntax!(cls11, "[a-z-]", vec![rclass(&[('a', 'z'), ('-', '-')])]); - syntax!(cls12, "[-a-z-]", vec![ - rclass(&[('-', '-'), ('a', 'z'), ('-', '-')]), - ]); + syntax!( + cls12, + "[-a-z-]", + vec![rclass(&[('-', '-'), ('a', 'z'), ('-', '-')]),] + ); syntax!(cls13, "[]-z]", vec![class(']', 'z')]); syntax!(cls14, "[--z]", vec![class('-', 'z')]); syntax!(cls15, "[ --]", vec![class(' ', '-')]); @@ -1194,26 +1190,14 @@ mod tests { syntaxerr!(err_range1, "[z-a]", ErrorKind::InvalidRange('z', 'a')); syntaxerr!(err_range2, "[z--]", ErrorKind::InvalidRange('z', '-')); - const CASEI: Options = Options { - casei: Some(true), - litsep: None, - bsesc: None, - }; - const SLASHLIT: Options = Options { - casei: None, - litsep: Some(true), - bsesc: None, - }; - const NOBSESC: Options = Options { - casei: None, - litsep: None, - bsesc: Some(false), - }; - const BSESC: Options = Options { - casei: None, - litsep: None, - bsesc: Some(true), - }; + const CASEI: Options = + Options { casei: Some(true), litsep: None, bsesc: None }; + const SLASHLIT: Options = + Options { casei: None, litsep: Some(true), bsesc: None }; + const NOBSESC: Options = + Options { casei: None, litsep: None, bsesc: Some(false) }; + const BSESC: Options = + Options { casei: None, litsep: None, bsesc: Some(true) }; toregex!(re_casei, "a", "(?i)^a$", &CASEI); @@ -1311,8 +1295,11 @@ mod tests { matches!(matchpat4, "*hello.txt", "some\\path\\to\\hello.txt"); matches!(matchpat5, "*hello.txt", "/an/absolute/path/to/hello.txt"); matches!(matchpat6, "*some/path/to/hello.txt", "some/path/to/hello.txt"); - matches!(matchpat7, "*some/path/to/hello.txt", - "a/bigger/some/path/to/hello.txt"); + matches!( + matchpat7, + "*some/path/to/hello.txt", + "a/bigger/some/path/to/hello.txt" + ); matches!(matchescape, "_[[]_[]]_[?]_[*]_!_", "_[_]_?_*_!_"); @@ -1375,28 +1362,44 @@ mod tests { nmatches!(matchnot15, "[!-]", "-"); nmatches!(matchnot16, "*hello.txt", "hello.txt-and-then-some"); nmatches!(matchnot17, "*hello.txt", "goodbye.txt"); - nmatches!(matchnot18, "*some/path/to/hello.txt", - "some/path/to/hello.txt-and-then-some"); - nmatches!(matchnot19, "*some/path/to/hello.txt", - "some/other/path/to/hello.txt"); + nmatches!( + matchnot18, + 
"*some/path/to/hello.txt", + "some/path/to/hello.txt-and-then-some" + ); + nmatches!( + matchnot19, + "*some/path/to/hello.txt", + "some/other/path/to/hello.txt" + ); nmatches!(matchnot20, "a", "foo/a"); nmatches!(matchnot21, "./foo", "foo"); nmatches!(matchnot22, "**/foo", "foofoo"); nmatches!(matchnot23, "**/foo/bar", "foofoo/bar"); nmatches!(matchnot24, "/*.c", "mozilla-sha1/sha1.c"); nmatches!(matchnot25, "*.c", "mozilla-sha1/sha1.c", SLASHLIT); - nmatches!(matchnot26, "**/m4/ltoptions.m4", - "csharp/src/packages/repositories.config", SLASHLIT); + nmatches!( + matchnot26, + "**/m4/ltoptions.m4", + "csharp/src/packages/repositories.config", + SLASHLIT + ); nmatches!(matchnot27, "a[^0-9]b", "a0b"); nmatches!(matchnot28, "a[^0-9]b", "a9b"); nmatches!(matchnot29, "[^-]", "-"); nmatches!(matchnot30, "some/*/needle.txt", "some/needle.txt"); nmatches!( matchrec31, - "some/*/needle.txt", "some/one/two/needle.txt", SLASHLIT); + "some/*/needle.txt", + "some/one/two/needle.txt", + SLASHLIT + ); nmatches!( matchrec32, - "some/*/needle.txt", "some/one/two/three/needle.txt", SLASHLIT); + "some/*/needle.txt", + "some/one/two/three/needle.txt", + SLASHLIT + ); macro_rules! extract { ($which:ident, $name:ident, $pat:expr, $expect:expr) => { @@ -1458,19 +1461,27 @@ mod tests { literal!(extract_lit7, "foo/bar", Some(s("foo/bar"))); literal!(extract_lit8, "**/foo/bar", None); - basetokens!(extract_basetoks1, "**/foo", Some(&*vec![ - Literal('f'), Literal('o'), Literal('o'), - ])); + basetokens!( + extract_basetoks1, + "**/foo", + Some(&*vec![Literal('f'), Literal('o'), Literal('o'),]) + ); basetokens!(extract_basetoks2, "**/foo", None, CASEI); - basetokens!(extract_basetoks3, "**/foo", Some(&*vec![ - Literal('f'), Literal('o'), Literal('o'), - ]), SLASHLIT); + basetokens!( + extract_basetoks3, + "**/foo", + Some(&*vec![Literal('f'), Literal('o'), Literal('o'),]), + SLASHLIT + ); basetokens!(extract_basetoks4, "*foo", None, SLASHLIT); basetokens!(extract_basetoks5, "*foo", None); basetokens!(extract_basetoks6, "**/fo*o", None); - basetokens!(extract_basetoks7, "**/fo*o", Some(&*vec![ - Literal('f'), Literal('o'), ZeroOrMore, Literal('o'), - ]), SLASHLIT); + basetokens!( + extract_basetoks7, + "**/fo*o", + Some(&*vec![Literal('f'), Literal('o'), ZeroOrMore, Literal('o'),]), + SLASHLIT + ); ext!(extract_ext1, "**/*.rs", Some(s(".rs"))); ext!(extract_ext2, "**/*.rs.bak", None); diff --git a/globset/src/lib.rs b/globset/src/lib.rs index 0b3b7b6cb..862be689e 100644 --- a/globset/src/lib.rs +++ b/globset/src/lib.rs @@ -119,12 +119,12 @@ use std::path::Path; use std::str; use aho_corasick::AhoCorasick; -use bstr::{B, ByteSlice, ByteVec}; +use bstr::{ByteSlice, ByteVec, B}; use regex::bytes::{Regex, RegexBuilder, RegexSet}; -use pathutil::{file_name, file_name_ext, normalize_path}; use glob::MatchStrategy; pub use glob::{Glob, GlobBuilder, GlobMatcher}; +use pathutil::{file_name, file_name_ext, normalize_path}; mod glob; mod pathutil; @@ -202,9 +202,7 @@ impl ErrorKind { ErrorKind::UnclosedClass => { "unclosed character class; missing ']'" } - ErrorKind::InvalidRange(_, _) => { - "invalid character range" - } + ErrorKind::InvalidRange(_, _) => "invalid character range", ErrorKind::UnopenedAlternates => { "unopened alternate group; missing '{' \ (maybe escape '}' with '[}]'?)" @@ -216,9 +214,7 @@ impl ErrorKind { ErrorKind::NestedAlternates => { "nested alternate groups are not allowed" } - ErrorKind::DanglingEscape => { - "dangling '\\'" - } + ErrorKind::DanglingEscape => "dangling '\\'", ErrorKind::Regex(ref err) 
=> err, ErrorKind::__Nonexhaustive => unreachable!(), } @@ -245,9 +241,7 @@ impl fmt::Display for ErrorKind { | ErrorKind::UnclosedAlternates | ErrorKind::NestedAlternates | ErrorKind::DanglingEscape - | ErrorKind::Regex(_) => { - write!(f, "{}", self.description()) - } + | ErrorKind::Regex(_) => write!(f, "{}", self.description()), ErrorKind::InvalidRange(s, e) => { write!(f, "invalid range; '{}' > '{}'", s, e) } @@ -262,21 +256,20 @@ fn new_regex(pat: &str) -> Result { .size_limit(10 * (1 << 20)) .dfa_size_limit(10 * (1 << 20)) .build() - .map_err(|err| { - Error { - glob: Some(pat.to_string()), - kind: ErrorKind::Regex(err.to_string()), - } + .map_err(|err| Error { + glob: Some(pat.to_string()), + kind: ErrorKind::Regex(err.to_string()), }) } fn new_regex_set(pats: I) -> Result - where S: AsRef, I: IntoIterator { - RegexSet::new(pats).map_err(|err| { - Error { - glob: None, - kind: ErrorKind::Regex(err.to_string()), - } +where + S: AsRef, + I: IntoIterator, +{ + RegexSet::new(pats).map_err(|err| Error { + glob: None, + kind: ErrorKind::Regex(err.to_string()), }) } @@ -294,10 +287,7 @@ impl GlobSet { /// Create an empty `GlobSet`. An empty set matches nothing. #[inline] pub fn empty() -> GlobSet { - GlobSet { - len: 0, - strats: vec![], - } + GlobSet { len: 0, strats: vec![] } } /// Returns true if this set is empty, and therefore matches nothing. @@ -432,11 +422,17 @@ impl GlobSet { } } } - debug!("built glob set; {} literals, {} basenames, {} extensions, \ + debug!( + "built glob set; {} literals, {} basenames, {} extensions, \ {} prefixes, {} suffixes, {} required extensions, {} regexes", - lits.0.len(), base_lits.0.len(), exts.0.len(), - prefixes.literals.len(), suffixes.literals.len(), - required_exts.0.len(), regexes.literals.len()); + lits.0.len(), + base_lits.0.len(), + exts.0.len(), + prefixes.literals.len(), + suffixes.literals.len(), + required_exts.0.len(), + regexes.literals.len() + ); Ok(GlobSet { len: pats.len(), strats: vec![ @@ -446,7 +442,8 @@ impl GlobSet { GlobSetMatchStrategy::Suffix(suffixes.suffix()), GlobSetMatchStrategy::Prefix(prefixes.prefix()), GlobSetMatchStrategy::RequiredExtension( - required_exts.build()?), + required_exts.build()?, + ), GlobSetMatchStrategy::Regex(regexes.regex_set()?), ], }) @@ -501,11 +498,7 @@ impl<'a> Candidate<'a> { let path = normalize_path(Vec::from_path_lossy(path.as_ref())); let basename = file_name(&path).unwrap_or(Cow::Borrowed(B(""))); let ext = file_name_ext(&basename).unwrap_or(Cow::Borrowed(B(""))); - Candidate { - path: path, - basename: basename, - ext: ext, - } + Candidate { path: path, basename: basename, ext: ext } } fn path_prefix(&self, max: usize) -> &[u8] { @@ -767,11 +760,7 @@ struct MultiStrategyBuilder { impl MultiStrategyBuilder { fn new() -> MultiStrategyBuilder { - MultiStrategyBuilder { - literals: vec![], - map: vec![], - longest: 0, - } + MultiStrategyBuilder { literals: vec![], map: vec![], longest: 0 } } fn add(&mut self, global_index: usize, literal: String) { diff --git a/globset/src/pathutil.rs b/globset/src/pathutil.rs index 6b4609e83..26b496d3b 100644 --- a/globset/src/pathutil.rs +++ b/globset/src/pathutil.rs @@ -84,7 +84,7 @@ pub fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> { mod tests { use std::borrow::Cow; - use bstr::{B, ByteVec}; + use bstr::{ByteVec, B}; use super::{file_name_ext, normalize_path}; diff --git a/grep-cli/src/decompress.rs b/grep-cli/src/decompress.rs index e53d2a591..c2b2738bb 100644 --- a/grep-cli/src/decompress.rs +++ b/grep-cli/src/decompress.rs @@ -38,10 +38,7 @@ 
impl Default for DecompressionMatcherBuilder { impl DecompressionMatcherBuilder { /// Create a new builder for configuring a decompression matcher. pub fn new() -> DecompressionMatcherBuilder { - DecompressionMatcherBuilder { - commands: vec![], - defaults: true, - } + DecompressionMatcherBuilder { commands: vec![], defaults: true } } /// Build a matcher for determining how to decompress files. @@ -49,12 +46,11 @@ impl DecompressionMatcherBuilder { /// If there was a problem compiling the matcher, then an error is /// returned. pub fn build(&self) -> Result { - let defaults = - if !self.defaults { - vec![] - } else { - default_decompression_commands() - }; + let defaults = if !self.defaults { + vec![] + } else { + default_decompression_commands() + }; let mut glob_builder = GlobSetBuilder::new(); let mut commands = vec![]; for decomp_cmd in defaults.iter().chain(&self.commands) { @@ -93,17 +89,15 @@ impl DecompressionMatcherBuilder { program: P, args: I, ) -> &mut DecompressionMatcherBuilder - where P: AsRef, - I: IntoIterator, - A: AsRef, + where + P: AsRef, + I: IntoIterator, + A: AsRef, { - let glob = glob.to_string(); let bin = program.as_ref().to_os_string(); - let args = args - .into_iter() - .map(|a| a.as_ref().to_os_string()) - .collect(); + let args = + args.into_iter().map(|a| a.as_ref().to_os_string()).collect(); self.commands.push(DecompressionCommand { glob, bin, args }); self } diff --git a/grep-cli/src/escape.rs b/grep-cli/src/escape.rs index bf0d5b4f1..6d06abb5d 100644 --- a/grep-cli/src/escape.rs +++ b/grep-cli/src/escape.rs @@ -95,51 +95,61 @@ pub fn unescape(s: &str) -> Vec { let mut state = Literal; for c in s.chars() { match state { - Escape => { - match c { - '\\' => { bytes.push(b'\\'); state = Literal; } - 'n' => { bytes.push(b'\n'); state = Literal; } - 'r' => { bytes.push(b'\r'); state = Literal; } - 't' => { bytes.push(b'\t'); state = Literal; } - 'x' => { state = HexFirst; } - c => { - bytes.extend(format!(r"\{}", c).into_bytes()); - state = Literal; - } + Escape => match c { + '\\' => { + bytes.push(b'\\'); + state = Literal; } - } - HexFirst => { - match c { - '0'..='9' | 'A'..='F' | 'a'..='f' => { - state = HexSecond(c); - } - c => { - bytes.extend(format!(r"\x{}", c).into_bytes()); - state = Literal; - } + 'n' => { + bytes.push(b'\n'); + state = Literal; } - } - HexSecond(first) => { - match c { - '0'..='9' | 'A'..='F' | 'a'..='f' => { - let ordinal = format!("{}{}", first, c); - let byte = u8::from_str_radix(&ordinal, 16).unwrap(); - bytes.push(byte); - state = Literal; - } - c => { - let original = format!(r"\x{}{}", first, c); - bytes.extend(original.into_bytes()); - state = Literal; - } + 'r' => { + bytes.push(b'\r'); + state = Literal; } - } - Literal => { - match c { - '\\' => { state = Escape; } - c => { bytes.extend(c.to_string().as_bytes()); } + 't' => { + bytes.push(b'\t'); + state = Literal; } - } + 'x' => { + state = HexFirst; + } + c => { + bytes.extend(format!(r"\{}", c).into_bytes()); + state = Literal; + } + }, + HexFirst => match c { + '0'..='9' | 'A'..='F' | 'a'..='f' => { + state = HexSecond(c); + } + c => { + bytes.extend(format!(r"\x{}", c).into_bytes()); + state = Literal; + } + }, + HexSecond(first) => match c { + '0'..='9' | 'A'..='F' | 'a'..='f' => { + let ordinal = format!("{}{}", first, c); + let byte = u8::from_str_radix(&ordinal, 16).unwrap(); + bytes.push(byte); + state = Literal; + } + c => { + let original = format!(r"\x{}{}", first, c); + bytes.extend(original.into_bytes()); + state = Literal; + } + }, + Literal => match c { 
+ '\\' => { + state = Escape; + } + c => { + bytes.extend(c.to_string().as_bytes()); + } + }, } } match state { diff --git a/grep-cli/src/human.rs b/grep-cli/src/human.rs index a69fd3761..68d50ab7a 100644 --- a/grep-cli/src/human.rs +++ b/grep-cli/src/human.rs @@ -46,7 +46,9 @@ impl ParseSizeError { } impl error::Error for ParseSizeError { - fn description(&self) -> &str { "invalid size" } + fn description(&self) -> &str { + "invalid size" + } } impl fmt::Display for ParseSizeError { @@ -54,26 +56,19 @@ impl fmt::Display for ParseSizeError { use self::ParseSizeErrorKind::*; match self.kind { - InvalidFormat => { - write!( - f, - "invalid format for size '{}', which should be a sequence \ + InvalidFormat => write!( + f, + "invalid format for size '{}', which should be a sequence \ of digits followed by an optional 'K', 'M' or 'G' \ suffix", - self.original - ) - } - InvalidInt(ref err) => { - write!( - f, - "invalid integer found in size '{}': {}", - self.original, - err - ) - } - Overflow => { - write!(f, "size too big in '{}'", self.original) - } + self.original + ), + InvalidInt(ref err) => write!( + f, + "invalid integer found in size '{}': {}", + self.original, err + ), + Overflow => write!(f, "size too big in '{}'", self.original), } } } @@ -104,17 +99,16 @@ pub fn parse_human_readable_size(size: &str) -> Result { Some(caps) => caps, None => return Err(ParseSizeError::format(size)), }; - let value: u64 = caps[1].parse().map_err(|err| { - ParseSizeError::int(size, err) - })?; + let value: u64 = + caps[1].parse().map_err(|err| ParseSizeError::int(size, err))?; let suffix = match caps.get(2) { None => return Ok(value), Some(cap) => cap.as_str(), }; let bytes = match suffix { - "K" => value.checked_mul(1<<10), - "M" => value.checked_mul(1<<20), - "G" => value.checked_mul(1<<30), + "K" => value.checked_mul(1 << 10), + "M" => value.checked_mul(1 << 20), + "G" => value.checked_mul(1 << 30), // Because if the regex matches this group, it must be [KMG]. 
_ => unreachable!(), }; @@ -134,19 +128,19 @@ mod tests { #[test] fn suffix_k() { let x = parse_human_readable_size("123K").unwrap(); - assert_eq!(123 * (1<<10), x); + assert_eq!(123 * (1 << 10), x); } #[test] fn suffix_m() { let x = parse_human_readable_size("123M").unwrap(); - assert_eq!(123 * (1<<20), x); + assert_eq!(123 * (1 << 20), x); } #[test] fn suffix_g() { let x = parse_human_readable_size("123G").unwrap(); - assert_eq!(123 * (1<<30), x); + assert_eq!(123 * (1 << 30), x); } #[test] diff --git a/grep-cli/src/lib.rs b/grep-cli/src/lib.rs index 9c5d71ad0..452ea141b 100644 --- a/grep-cli/src/lib.rs +++ b/grep-cli/src/lib.rs @@ -179,20 +179,18 @@ mod process; mod wtr; pub use decompress::{ - DecompressionMatcher, DecompressionMatcherBuilder, - DecompressionReader, DecompressionReaderBuilder, + DecompressionMatcher, DecompressionMatcherBuilder, DecompressionReader, + DecompressionReaderBuilder, }; pub use escape::{escape, escape_os, unescape, unescape_os}; -pub use human::{ParseSizeError, parse_human_readable_size}; +pub use human::{parse_human_readable_size, ParseSizeError}; pub use pattern::{ - InvalidPatternError, - pattern_from_os, pattern_from_bytes, - patterns_from_path, patterns_from_reader, patterns_from_stdin, + pattern_from_bytes, pattern_from_os, patterns_from_path, + patterns_from_reader, patterns_from_stdin, InvalidPatternError, }; pub use process::{CommandError, CommandReader, CommandReaderBuilder}; pub use wtr::{ - StandardStream, - stdout, stdout_buffered_line, stdout_buffered_block, + stdout, stdout_buffered_block, stdout_buffered_line, StandardStream, }; /// Returns true if and only if stdin is believed to be readable. @@ -205,8 +203,8 @@ pub use wtr::{ pub fn is_readable_stdin() -> bool { #[cfg(unix)] fn imp() -> bool { - use std::os::unix::fs::FileTypeExt; use same_file::Handle; + use std::os::unix::fs::FileTypeExt; let ft = match Handle::stdin().and_then(|h| h.as_file().metadata()) { Err(_) => return false, diff --git a/grep-cli/src/pattern.rs b/grep-cli/src/pattern.rs index eb5e73117..8341e4daf 100644 --- a/grep-cli/src/pattern.rs +++ b/grep-cli/src/pattern.rs @@ -29,7 +29,9 @@ impl InvalidPatternError { } impl error::Error for InvalidPatternError { - fn description(&self) -> &str { "invalid pattern" } + fn description(&self) -> &str { + "invalid pattern" + } } impl fmt::Display for InvalidPatternError { @@ -39,8 +41,7 @@ impl fmt::Display for InvalidPatternError { "found invalid UTF-8 in pattern at byte offset {} \ (use hex escape sequences to match arbitrary bytes \ in a pattern, e.g., \\xFF): '{}'", - self.valid_up_to, - self.original, + self.valid_up_to, self.original, ) } } @@ -79,11 +80,9 @@ pub fn pattern_from_os(pattern: &OsStr) -> Result<&str, InvalidPatternError> { pub fn pattern_from_bytes( pattern: &[u8], ) -> Result<&str, InvalidPatternError> { - str::from_utf8(pattern).map_err(|err| { - InvalidPatternError { - original: escape(pattern), - valid_up_to: err.valid_up_to(), - } + str::from_utf8(pattern).map_err(|err| InvalidPatternError { + original: escape(pattern), + valid_up_to: err.valid_up_to(), }) } @@ -119,10 +118,7 @@ pub fn patterns_from_stdin() -> io::Result> { let stdin = io::stdin(); let locked = stdin.lock(); patterns_from_reader(locked).map_err(|err| { - io::Error::new( - io::ErrorKind::Other, - format!(":{}", err), - ) + io::Error::new(io::ErrorKind::Other, format!(":{}", err)) }) } @@ -166,12 +162,10 @@ pub fn patterns_from_reader(rdr: R) -> io::Result> { patterns.push(pattern.to_string()); Ok(true) } - Err(err) => { - 
Err(io::Error::new( - io::ErrorKind::Other, - format!("{}: {}", line_number, err), - )) - } + Err(err) => Err(io::Error::new( + io::ErrorKind::Other, + format!("{}: {}", line_number, err), + )), } })?; Ok(patterns) @@ -191,8 +185,8 @@ mod tests { #[test] #[cfg(unix)] fn os() { - use std::os::unix::ffi::OsStrExt; use std::ffi::OsStr; + use std::os::unix::ffi::OsStrExt; let pat = OsStr::from_bytes(b"abc\xFFxyz"); let err = pattern_from_os(pat).unwrap_err(); diff --git a/grep-cli/src/process.rs b/grep-cli/src/process.rs index 017dd0c3a..4ec5af7f2 100644 --- a/grep-cli/src/process.rs +++ b/grep-cli/src/process.rs @@ -33,7 +33,9 @@ impl CommandError { } impl error::Error for CommandError { - fn description(&self) -> &str { "command error" } + fn description(&self) -> &str { + "command error" + } } impl fmt::Display for CommandError { @@ -46,7 +48,12 @@ impl fmt::Display for CommandError { write!(f, "") } else { let div = iter::repeat('-').take(79).collect::(); - write!(f, "\n{div}\n{msg}\n{div}", div=div, msg=msg.trim()) + write!( + f, + "\n{div}\n{msg}\n{div}", + div = div, + msg = msg.trim() + ) } } } @@ -101,12 +108,11 @@ impl CommandReaderBuilder { .stderr(process::Stdio::piped()) .spawn()?; let stdout = child.stdout.take().unwrap(); - let stderr = - if self.async_stderr { - StderrReader::async(child.stderr.take().unwrap()) - } else { - StderrReader::sync(child.stderr.take().unwrap()) - }; + let stderr = if self.async_stderr { + StderrReader::async(child.stderr.take().unwrap()) + } else { + StderrReader::sync(child.stderr.take().unwrap()) + }; Ok(CommandReader { child: child, stdout: stdout, @@ -226,9 +232,8 @@ enum StderrReader { impl StderrReader { /// Create a reader for stderr that reads contents asynchronously. fn async(mut stderr: process::ChildStderr) -> StderrReader { - let handle = thread::spawn(move || { - stderr_to_command_error(&mut stderr) - }); + let handle = + thread::spawn(move || stderr_to_command_error(&mut stderr)); StderrReader::Async(Some(handle)) } @@ -247,9 +252,7 @@ impl StderrReader { let handle = handle .take() .expect("read_to_end cannot be called more than once"); - handle - .join() - .expect("stderr reading thread does not panic") + handle.join().expect("stderr reading thread does not panic") } StderrReader::Sync(ref mut stderr) => { stderr_to_command_error(stderr) diff --git a/grep-matcher/src/interpolate.rs b/grep-matcher/src/interpolate.rs index 445518de1..c768bfddf 100644 --- a/grep-matcher/src/interpolate.rs +++ b/grep-matcher/src/interpolate.rs @@ -19,7 +19,7 @@ pub fn interpolate( dst: &mut Vec, ) where A: FnMut(usize, &mut Vec), - N: FnMut(&str) -> Option + N: FnMut(&str) -> Option, { while !replacement.is_empty() { match memchr(b'$', replacement) { @@ -134,14 +134,14 @@ fn find_cap_ref(replacement: &[u8]) -> Option { /// Returns true if and only if the given byte is allowed in a capture name. fn is_valid_cap_letter(b: &u8) -> bool { match *b { - b'0' ..= b'9' | b'a' ..= b'z' | b'A' ..= b'Z' | b'_' => true, + b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_' => true, _ => false, } } #[cfg(test)] mod tests { - use super::{CaptureRef, find_cap_ref, interpolate}; + use super::{find_cap_ref, interpolate, CaptureRef}; macro_rules! 
find { ($name:ident, $text:expr) => { @@ -211,7 +211,7 @@ mod tests { fn $name() { assert_eq!($expected, interpolate_string($map, $caps, $hay)); } - } + }; } interp!( diff --git a/grep-matcher/src/lib.rs b/grep-matcher/src/lib.rs index 9a067efae..2bcd0c12c 100644 --- a/grep-matcher/src/lib.rs +++ b/grep-matcher/src/lib.rs @@ -278,7 +278,7 @@ impl LineTerminator { } } -impl Default for LineTerminator { +impl Default for LineTerminator { #[inline] fn default() -> LineTerminator { LineTerminator::byte(b'\n') @@ -439,7 +439,8 @@ pub trait Captures { haystack: &[u8], replacement: &[u8], dst: &mut Vec, - ) where F: FnMut(&str) -> Option + ) where + F: FnMut(&str) -> Option, { interpolate( replacement, @@ -463,12 +464,18 @@ pub struct NoCaptures(()); impl NoCaptures { /// Create an empty set of capturing groups. - pub fn new() -> NoCaptures { NoCaptures(()) } + pub fn new() -> NoCaptures { + NoCaptures(()) + } } impl Captures for NoCaptures { - fn len(&self) -> usize { 0 } - fn get(&self, _: usize) -> Option { None } + fn len(&self) -> usize { + 0 + } + fn get(&self, _: usize) -> Option { + None + } } /// NoError provides an error type for matchers that never produce errors. @@ -481,7 +488,9 @@ impl Captures for NoCaptures { pub struct NoError(()); impl ::std::error::Error for NoError { - fn description(&self) -> &str { "no error" } + fn description(&self) -> &str { + "no error" + } } impl fmt::Display for NoError { @@ -599,10 +608,7 @@ pub trait Matcher { /// /// The text encoding of `haystack` is not strictly specified. Matchers are /// advised to assume UTF-8, or at worst, some ASCII compatible encoding. - fn find( - &self, - haystack: &[u8], - ) -> Result, Self::Error> { + fn find(&self, haystack: &[u8]) -> Result, Self::Error> { self.find_at(haystack, 0) } @@ -614,7 +620,8 @@ pub trait Matcher { haystack: &[u8], mut matched: F, ) -> Result<(), Self::Error> - where F: FnMut(Match) -> bool + where + F: FnMut(Match) -> bool, { self.try_find_iter(haystack, |m| Ok(matched(m))) .map(|r: Result<(), ()>| r.unwrap()) @@ -632,7 +639,8 @@ pub trait Matcher { haystack: &[u8], mut matched: F, ) -> Result, Self::Error> - where F: FnMut(Match) -> Result + where + F: FnMut(Match) -> Result, { let mut last_end = 0; let mut last_match = None; @@ -690,7 +698,8 @@ pub trait Matcher { caps: &mut Self::Captures, mut matched: F, ) -> Result<(), Self::Error> - where F: FnMut(&Self::Captures) -> bool + where + F: FnMut(&Self::Captures) -> bool, { self.try_captures_iter(haystack, caps, |caps| Ok(matched(caps))) .map(|r: Result<(), ()>| r.unwrap()) @@ -709,7 +718,8 @@ pub trait Matcher { caps: &mut Self::Captures, mut matched: F, ) -> Result, Self::Error> - where F: FnMut(&Self::Captures) -> Result + where + F: FnMut(&Self::Captures) -> Result, { let mut last_end = 0; let mut last_match = None; @@ -787,7 +797,8 @@ pub trait Matcher { dst: &mut Vec, mut append: F, ) -> Result<(), Self::Error> - where F: FnMut(Match, &mut Vec) -> bool + where + F: FnMut(Match, &mut Vec) -> bool, { let mut last_match = 0; self.find_iter(haystack, |m| { @@ -810,7 +821,8 @@ pub trait Matcher { dst: &mut Vec, mut append: F, ) -> Result<(), Self::Error> - where F: FnMut(&Self::Captures, &mut Vec) -> bool + where + F: FnMut(&Self::Captures, &mut Vec) -> bool, { let mut last_match = 0; self.captures_iter(haystack, caps, |caps| { @@ -1012,10 +1024,7 @@ impl<'a, M: Matcher> Matcher for &'a M { (*self).capture_count() } - fn find( - &self, - haystack: &[u8] - ) -> Result, Self::Error> { + fn find(&self, haystack: &[u8]) -> Result, Self::Error> { 
(*self).find(haystack) } @@ -1024,7 +1033,8 @@ impl<'a, M: Matcher> Matcher for &'a M { haystack: &[u8], matched: F, ) -> Result<(), Self::Error> - where F: FnMut(Match) -> bool + where + F: FnMut(Match) -> bool, { (*self).find_iter(haystack, matched) } @@ -1034,7 +1044,8 @@ impl<'a, M: Matcher> Matcher for &'a M { haystack: &[u8], matched: F, ) -> Result, Self::Error> - where F: FnMut(Match) -> Result + where + F: FnMut(Match) -> Result, { (*self).try_find_iter(haystack, matched) } @@ -1053,7 +1064,8 @@ impl<'a, M: Matcher> Matcher for &'a M { caps: &mut Self::Captures, matched: F, ) -> Result<(), Self::Error> - where F: FnMut(&Self::Captures) -> bool + where + F: FnMut(&Self::Captures) -> bool, { (*self).captures_iter(haystack, caps, matched) } @@ -1064,7 +1076,8 @@ impl<'a, M: Matcher> Matcher for &'a M { caps: &mut Self::Captures, matched: F, ) -> Result, Self::Error> - where F: FnMut(&Self::Captures) -> Result + where + F: FnMut(&Self::Captures) -> Result, { (*self).try_captures_iter(haystack, caps, matched) } @@ -1075,7 +1088,8 @@ impl<'a, M: Matcher> Matcher for &'a M { dst: &mut Vec, append: F, ) -> Result<(), Self::Error> - where F: FnMut(Match, &mut Vec) -> bool + where + F: FnMut(Match, &mut Vec) -> bool, { (*self).replace(haystack, dst, append) } @@ -1087,7 +1101,8 @@ impl<'a, M: Matcher> Matcher for &'a M { dst: &mut Vec, append: F, ) -> Result<(), Self::Error> - where F: FnMut(&Self::Captures, &mut Vec) -> bool + where + F: FnMut(&Self::Captures, &mut Vec) -> bool, { (*self).replace_with_captures(haystack, caps, dst, append) } @@ -1099,7 +1114,7 @@ impl<'a, M: Matcher> Matcher for &'a M { fn is_match_at( &self, haystack: &[u8], - at: usize + at: usize, ) -> Result { (*self).is_match_at(haystack, at) } diff --git a/grep-matcher/tests/test_matcher.rs b/grep-matcher/tests/test_matcher.rs index 9edbdf696..b550df6b3 100644 --- a/grep-matcher/tests/test_matcher.rs +++ b/grep-matcher/tests/test_matcher.rs @@ -25,18 +25,22 @@ fn find() { fn find_iter() { let matcher = matcher(r"(\w+)\s+(\w+)"); let mut matches = vec![]; - matcher.find_iter(b"aa bb cc dd", |m| { - matches.push(m); - true - }).unwrap(); + matcher + .find_iter(b"aa bb cc dd", |m| { + matches.push(m); + true + }) + .unwrap(); assert_eq!(matches, vec![m(0, 5), m(6, 11)]); // Test that find_iter respects short circuiting. 
matches.clear(); - matcher.find_iter(b"aa bb cc dd", |m| { - matches.push(m); - false - }).unwrap(); + matcher + .find_iter(b"aa bb cc dd", |m| { + matches.push(m); + false + }) + .unwrap(); assert_eq!(matches, vec![m(0, 5)]); } @@ -47,14 +51,17 @@ fn try_find_iter() { let matcher = matcher(r"(\w+)\s+(\w+)"); let mut matches = vec![]; - let err = matcher.try_find_iter(b"aa bb cc dd", |m| { - if matches.is_empty() { - matches.push(m); - Ok(true) - } else { - Err(MyError) - } - }).unwrap().unwrap_err(); + let err = matcher + .try_find_iter(b"aa bb cc dd", |m| { + if matches.is_empty() { + matches.push(m); + Ok(true) + } else { + Err(MyError) + } + }) + .unwrap() + .unwrap_err(); assert_eq!(matches, vec![m(0, 5)]); assert_eq!(err, MyError); } @@ -89,28 +96,30 @@ fn captures_iter() { let matcher = matcher(r"(?P\w+)\s+(?P\w+)"); let mut caps = matcher.new_captures().unwrap(); let mut matches = vec![]; - matcher.captures_iter(b"aa bb cc dd", &mut caps, |caps| { - matches.push(caps.get(0).unwrap()); - matches.push(caps.get(1).unwrap()); - matches.push(caps.get(2).unwrap()); - true - }).unwrap(); - assert_eq!(matches, vec![ - m(0, 5), m(0, 2), m(3, 5), - m(6, 11), m(6, 8), m(9, 11), - ]); + matcher + .captures_iter(b"aa bb cc dd", &mut caps, |caps| { + matches.push(caps.get(0).unwrap()); + matches.push(caps.get(1).unwrap()); + matches.push(caps.get(2).unwrap()); + true + }) + .unwrap(); + assert_eq!( + matches, + vec![m(0, 5), m(0, 2), m(3, 5), m(6, 11), m(6, 8), m(9, 11),] + ); // Test that captures_iter respects short circuiting. matches.clear(); - matcher.captures_iter(b"aa bb cc dd", &mut caps, |caps| { - matches.push(caps.get(0).unwrap()); - matches.push(caps.get(1).unwrap()); - matches.push(caps.get(2).unwrap()); - false - }).unwrap(); - assert_eq!(matches, vec![ - m(0, 5), m(0, 2), m(3, 5), - ]); + matcher + .captures_iter(b"aa bb cc dd", &mut caps, |caps| { + matches.push(caps.get(0).unwrap()); + matches.push(caps.get(1).unwrap()); + matches.push(caps.get(2).unwrap()); + false + }) + .unwrap(); + assert_eq!(matches, vec![m(0, 5), m(0, 2), m(3, 5),]); } #[test] @@ -121,16 +130,19 @@ fn try_captures_iter() { let matcher = matcher(r"(?P\w+)\s+(?P\w+)"); let mut caps = matcher.new_captures().unwrap(); let mut matches = vec![]; - let err = matcher.try_captures_iter(b"aa bb cc dd", &mut caps, |caps| { - if matches.is_empty() { - matches.push(caps.get(0).unwrap()); - matches.push(caps.get(1).unwrap()); - matches.push(caps.get(2).unwrap()); - Ok(true) - } else { - Err(MyError) - } - }).unwrap().unwrap_err(); + let err = matcher + .try_captures_iter(b"aa bb cc dd", &mut caps, |caps| { + if matches.is_empty() { + matches.push(caps.get(0).unwrap()); + matches.push(caps.get(1).unwrap()); + matches.push(caps.get(2).unwrap()); + Ok(true) + } else { + Err(MyError) + } + }) + .unwrap() + .unwrap_err(); assert_eq!(matches, vec![m(0, 5), m(0, 2), m(3, 5)]); assert_eq!(err, MyError); } @@ -150,10 +162,12 @@ fn no_captures() { assert!(!matcher.captures(b"homer simpson", &mut caps).unwrap()); let mut called = false; - matcher.captures_iter(b"homer simpson", &mut caps, |_| { - called = true; - true - }).unwrap(); + matcher + .captures_iter(b"homer simpson", &mut caps, |_| { + called = true; + true + }) + .unwrap(); assert!(!called); } @@ -161,18 +175,22 @@ fn no_captures() { fn replace() { let matcher = matcher(r"(\w+)\s+(\w+)"); let mut dst = vec![]; - matcher.replace(b"aa bb cc dd", &mut dst, |_, dst| { - dst.push(b'z'); - true - }).unwrap(); + matcher + .replace(b"aa bb cc dd", &mut dst, |_, dst| { + 
dst.push(b'z'); + true + }) + .unwrap(); assert_eq!(dst, b"z z"); // Test that replacements respect short circuiting. dst.clear(); - matcher.replace(b"aa bb cc dd", &mut dst, |_, dst| { - dst.push(b'z'); - false - }).unwrap(); + matcher + .replace(b"aa bb cc dd", &mut dst, |_, dst| { + dst.push(b'z'); + false + }) + .unwrap(); assert_eq!(dst, b"z cc dd"); } @@ -182,27 +200,31 @@ fn replace_with_captures() { let haystack = b"aa bb cc dd"; let mut caps = matcher.new_captures().unwrap(); let mut dst = vec![]; - matcher.replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| { - caps.interpolate( - |name| matcher.capture_index(name), - haystack, - b"$2 $1", - dst, - ); - true - }).unwrap(); + matcher + .replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| { + caps.interpolate( + |name| matcher.capture_index(name), + haystack, + b"$2 $1", + dst, + ); + true + }) + .unwrap(); assert_eq!(dst, b"bb aa dd cc"); // Test that replacements respect short circuiting. dst.clear(); - matcher.replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| { - caps.interpolate( - |name| matcher.capture_index(name), - haystack, - b"$2 $1", - dst, - ); - false - }).unwrap(); + matcher + .replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| { + caps.interpolate( + |name| matcher.capture_index(name), + haystack, + b"$2 $1", + dst, + ); + false + }) + .unwrap(); assert_eq!(dst, b"bb aa cc dd"); } diff --git a/grep-matcher/tests/util.rs b/grep-matcher/tests/util.rs index 57b8fc602..c99d55c7c 100644 --- a/grep-matcher/tests/util.rs +++ b/grep-matcher/tests/util.rs @@ -18,10 +18,7 @@ impl RegexMatcher { names.insert(name.to_string(), i); } } - RegexMatcher { - re: re, - names: names, - } + RegexMatcher { re: re, names: names } } } @@ -31,12 +28,9 @@ impl Matcher for RegexMatcher { type Captures = RegexCaptures; type Error = NoError; - fn find_at( - &self, - haystack: &[u8], - at: usize, - ) -> Result> { - Ok(self.re + fn find_at(&self, haystack: &[u8], at: usize) -> Result> { + Ok(self + .re .find_at(haystack, at) .map(|m| Match::new(m.start(), m.end()))) } @@ -75,12 +69,9 @@ impl Matcher for RegexMatcherNoCaps { type Captures = NoCaptures; type Error = NoError; - fn find_at( - &self, - haystack: &[u8], - at: usize, - ) -> Result> { - Ok(self.0 + fn find_at(&self, haystack: &[u8], at: usize) -> Result> { + Ok(self + .0 .find_at(haystack, at) .map(|m| Match::new(m.start(), m.end()))) } diff --git a/grep-pcre2/src/matcher.rs b/grep-pcre2/src/matcher.rs index 2c87fa1ae..a921c91b6 100644 --- a/grep-pcre2/src/matcher.rs +++ b/grep-pcre2/src/matcher.rs @@ -33,13 +33,12 @@ impl RegexMatcherBuilder { if self.case_smart && !has_uppercase_literal(pattern) { builder.caseless(true); } - let res = - if self.word { - let pattern = format!(r"(? Result, Error> { - Ok(self.regex + Ok(self + .regex .find_at(haystack, at) .map_err(Error::regex)? .map(|m| Match::new(m.start(), m.end()))) @@ -297,7 +297,8 @@ impl Matcher for RegexMatcher { haystack: &[u8], mut matched: F, ) -> Result, Error> - where F: FnMut(Match) -> Result + where + F: FnMut(Match) -> Result, { for result in self.regex.find_iter(haystack) { let m = result.map_err(Error::regex)?; @@ -316,10 +317,11 @@ impl Matcher for RegexMatcher { at: usize, caps: &mut RegexCaptures, ) -> Result { - Ok(self.regex - .captures_read_at(&mut caps.locs, haystack, at) - .map_err(Error::regex)? - .is_some()) + Ok(self + .regex + .captures_read_at(&mut caps.locs, haystack, at) + .map_err(Error::regex)? 
+ .is_some()) } } @@ -383,23 +385,19 @@ fn has_uppercase_literal(pattern: &str) -> bool { #[cfg(test)] mod tests { - use grep_matcher::{LineMatchKind, Matcher}; use super::*; + use grep_matcher::{LineMatchKind, Matcher}; // Test that enabling word matches does the right thing and demonstrate // the difference between it and surrounding the regex in `\b`. #[test] fn word() { - let matcher = RegexMatcherBuilder::new() - .word(true) - .build(r"-2") - .unwrap(); + let matcher = + RegexMatcherBuilder::new().word(true).build(r"-2").unwrap(); assert!(matcher.is_match(b"abc -2 foo").unwrap()); - let matcher = RegexMatcherBuilder::new() - .word(false) - .build(r"\b-2\b") - .unwrap(); + let matcher = + RegexMatcherBuilder::new().word(false).build(r"\b-2\b").unwrap(); assert!(!matcher.is_match(b"abc -2 foo").unwrap()); } @@ -432,16 +430,12 @@ mod tests { // Test that smart case works. #[test] fn case_smart() { - let matcher = RegexMatcherBuilder::new() - .case_smart(true) - .build(r"abc") - .unwrap(); + let matcher = + RegexMatcherBuilder::new().case_smart(true).build(r"abc").unwrap(); assert!(matcher.is_match(b"ABC").unwrap()); - let matcher = RegexMatcherBuilder::new() - .case_smart(true) - .build(r"aBc") - .unwrap(); + let matcher = + RegexMatcherBuilder::new().case_smart(true).build(r"aBc").unwrap(); assert!(!matcher.is_match(b"ABC").unwrap()); } @@ -455,9 +449,7 @@ mod tests { } } - let matcher = RegexMatcherBuilder::new() - .build(r"\wfoo\s") - .unwrap(); + let matcher = RegexMatcherBuilder::new().build(r"\wfoo\s").unwrap(); let m = matcher.find_candidate_line(b"afoo ").unwrap().unwrap(); assert!(is_confirmed(m)); } diff --git a/grep-printer/src/color.rs b/grep-printer/src/color.rs index 394f5ccf6..fb091465b 100644 --- a/grep-printer/src/color.rs +++ b/grep-printer/src/color.rs @@ -62,42 +62,32 @@ impl ColorError { impl fmt::Display for ColorError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - ColorError::UnrecognizedOutType(ref name) => { - write!( - f, - "unrecognized output type '{}'. Choose from: \ + ColorError::UnrecognizedOutType(ref name) => write!( + f, + "unrecognized output type '{}'. Choose from: \ path, line, column, match.", - name, - ) - } - ColorError::UnrecognizedSpecType(ref name) => { - write!( - f, - "unrecognized spec type '{}'. Choose from: \ + name, + ), + ColorError::UnrecognizedSpecType(ref name) => write!( + f, + "unrecognized spec type '{}'. Choose from: \ fg, bg, style, none.", - name, - ) - } - ColorError::UnrecognizedColor(_, ref msg) => { - write!(f, "{}", msg) - } - ColorError::UnrecognizedStyle(ref name) => { - write!( - f, - "unrecognized style attribute '{}'. Choose from: \ + name, + ), + ColorError::UnrecognizedColor(_, ref msg) => write!(f, "{}", msg), + ColorError::UnrecognizedStyle(ref name) => write!( + f, + "unrecognized style attribute '{}'. Choose from: \ nobold, bold, nointense, intense, nounderline, \ underline.", - name, - ) - } - ColorError::InvalidFormat(ref original) => { - write!( - f, - "invalid color spec format: '{}'. Valid format \ + name, + ), + ColorError::InvalidFormat(ref original) => write!( + f, + "invalid color spec format: '{}'. 
Valid format \ is '(path|line|column|match):(fg|bg|style):(value)'.", - original, - ) - } + original, + ), } } } @@ -227,7 +217,7 @@ enum Style { Intense, NoIntense, Underline, - NoUnderline + NoUnderline, } impl ColorSpecs { @@ -288,18 +278,32 @@ impl SpecValue { fn merge_into(&self, cspec: &mut ColorSpec) { match *self { SpecValue::None => cspec.clear(), - SpecValue::Fg(ref color) => { cspec.set_fg(Some(color.clone())); } - SpecValue::Bg(ref color) => { cspec.set_bg(Some(color.clone())); } - SpecValue::Style(ref style) => { - match *style { - Style::Bold => { cspec.set_bold(true); } - Style::NoBold => { cspec.set_bold(false); } - Style::Intense => { cspec.set_intense(true); } - Style::NoIntense => { cspec.set_intense(false); } - Style::Underline => { cspec.set_underline(true); } - Style::NoUnderline => { cspec.set_underline(false); } - } + SpecValue::Fg(ref color) => { + cspec.set_fg(Some(color.clone())); + } + SpecValue::Bg(ref color) => { + cspec.set_bg(Some(color.clone())); } + SpecValue::Style(ref style) => match *style { + Style::Bold => { + cspec.set_bold(true); + } + Style::NoBold => { + cspec.set_bold(false); + } + Style::Intense => { + cspec.set_intense(true); + } + Style::NoIntense => { + cspec.set_intense(false); + } + Style::Underline => { + cspec.set_underline(true); + } + Style::NoUnderline => { + cspec.set_underline(false); + } + }, } } } @@ -315,10 +319,7 @@ impl FromStr for UserColorSpec { let otype: OutType = pieces[0].parse()?; match pieces[1].parse()? { SpecType::None => { - Ok(UserColorSpec { - ty: otype, - value: SpecValue::None, - }) + Ok(UserColorSpec { ty: otype, value: SpecValue::None }) } SpecType::Style => { if pieces.len() < 3 { @@ -331,18 +332,16 @@ impl FromStr for UserColorSpec { if pieces.len() < 3 { return Err(ColorError::InvalidFormat(s.to_string())); } - let color: Color = pieces[2] - .parse() - .map_err(ColorError::from_parse_error)?; + let color: Color = + pieces[2].parse().map_err(ColorError::from_parse_error)?; Ok(UserColorSpec { ty: otype, value: SpecValue::Fg(color) }) } SpecType::Bg => { if pieces.len() < 3 { return Err(ColorError::InvalidFormat(s.to_string())); } - let color: Color = pieces[2] - .parse() - .map_err(ColorError::from_parse_error)?; + let color: Color = + pieces[2].parse().map_err(ColorError::from_parse_error)?; Ok(UserColorSpec { ty: otype, value: SpecValue::Bg(color) }) } } diff --git a/grep-printer/src/json.rs b/grep-printer/src/json.rs index 50387bf8c..1b3bc4f15 100644 --- a/grep-printer/src/json.rs +++ b/grep-printer/src/json.rs @@ -4,8 +4,8 @@ use std::time::Instant; use grep_matcher::{Match, Matcher}; use grep_searcher::{ - Searcher, - Sink, SinkError, SinkContext, SinkContextKind, SinkFinish, SinkMatch, + Searcher, Sink, SinkContext, SinkContextKind, SinkError, SinkFinish, + SinkMatch, }; use serde_json as json; @@ -27,11 +27,7 @@ struct Config { impl Default for Config { fn default() -> Config { - Config { - pretty: false, - max_matches: None, - always_begin_end: false, - } + Config { pretty: false, max_matches: None, always_begin_end: false } } } @@ -492,8 +488,9 @@ impl JSON { matcher: M, path: &'p P, ) -> JSONSink<'p, 's, M, W> - where M: Matcher, - P: ?Sized + AsRef, + where + M: Matcher, + P: ?Sized + AsRef, { JSONSink { matcher: matcher, @@ -615,10 +612,12 @@ impl<'p, 's, M: Matcher, W: io::Write> JSONSink<'p, 's, M, W> { // the extent that it's easy to ensure that we never do more than // one search to find the matches. 
let matches = &mut self.json.matches; - self.matcher.find_iter(bytes, |m| { - matches.push(m); - true - }).map_err(io::Error::error_message)?; + self.matcher + .find_iter(bytes, |m| { + matches.push(m); + true + }) + .map_err(io::Error::error_message)?; // Don't report empty matches appearing at the end of the bytes. if !matches.is_empty() && matches.last().unwrap().is_empty() @@ -650,9 +649,7 @@ impl<'p, 's, M: Matcher, W: io::Write> JSONSink<'p, 's, M, W> { if self.begin_printed { return Ok(()); } - let msg = jsont::Message::Begin(jsont::Begin { - path: self.path, - }); + let msg = jsont::Message::Begin(jsont::Begin { path: self.path }); self.json.write_message(&msg)?; self.begin_printed = true; Ok(()) @@ -699,13 +696,12 @@ impl<'p, 's, M: Matcher, W: io::Write> Sink for JSONSink<'p, 's, M, W> { self.after_context_remaining = self.after_context_remaining.saturating_sub(1); } - let submatches = - if searcher.invert_match() { - self.record_matches(ctx.bytes())?; - SubMatches::new(ctx.bytes(), &self.json.matches) - } else { - SubMatches::empty() - }; + let submatches = if searcher.invert_match() { + self.record_matches(ctx.bytes())?; + SubMatches::new(ctx.bytes(), &self.json.matches) + } else { + SubMatches::empty() + }; let msg = jsont::Message::Context(jsont::Context { path: self.path, lines: ctx.bytes(), @@ -717,10 +713,7 @@ impl<'p, 's, M: Matcher, W: io::Write> Sink for JSONSink<'p, 's, M, W> { Ok(!self.should_quit()) } - fn begin( - &mut self, - _searcher: &Searcher, - ) -> Result { + fn begin(&mut self, _searcher: &Searcher) -> Result { self.json.wtr.reset_count(); self.start_time = Instant::now(); self.match_count = 0; @@ -779,7 +772,7 @@ enum SubMatches<'a> { impl<'a> SubMatches<'a> { /// Create a new set of match ranges from a set of matches and the /// corresponding bytes that those matches apply to. - fn new(bytes: &'a[u8], matches: &[Match]) -> SubMatches<'a> { + fn new(bytes: &'a [u8], matches: &[Match]) -> SubMatches<'a> { if matches.len() == 1 { let mat = matches[0]; SubMatches::Small([jsont::SubMatch { @@ -817,11 +810,11 @@ impl<'a> SubMatches<'a> { #[cfg(test)] mod tests { - use grep_regex::{RegexMatcher, RegexMatcherBuilder}; use grep_matcher::LineTerminator; + use grep_regex::{RegexMatcher, RegexMatcherBuilder}; use grep_searcher::SearcherBuilder; - use super::{JSON, JSONBuilder}; + use super::{JSONBuilder, JSON}; const SHERLOCK: &'static [u8] = b"\ For the Doctor Watsons of this world, as opposed to the Sherlock @@ -832,9 +825,7 @@ but Doctor Watson has to have it taken out for him and dusted, and exhibited clearly, with a label attached. 
"; - fn printer_contents( - printer: &mut JSON>, - ) -> String { + fn printer_contents(printer: &mut JSON>) -> String { String::from_utf8(printer.get_mut().to_owned()).unwrap() } @@ -851,11 +842,8 @@ but Doctor Watson has to have it taken out for him and dusted, and exhibited clearly, with a label attached.\ "; - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); - let mut printer = JSONBuilder::new() - .build(vec![]); + let matcher = RegexMatcher::new(r"Watson").unwrap(); + let mut printer = JSONBuilder::new().build(vec![]); SearcherBuilder::new() .binary_detection(BinaryDetection::quit(b'\x00')) .heap_limit(Some(80)) @@ -871,12 +859,9 @@ and exhibited clearly, with a label attached.\ #[test] fn max_matches() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); - let mut printer = JSONBuilder::new() - .max_matches(Some(1)) - .build(vec![]); + let matcher = RegexMatcher::new(r"Watson").unwrap(); + let mut printer = + JSONBuilder::new().max_matches(Some(1)).build(vec![]); SearcherBuilder::new() .build() .search_reader(&matcher, SHERLOCK, printer.sink(&matcher)) @@ -888,11 +873,8 @@ and exhibited clearly, with a label attached.\ #[test] fn no_match() { - let matcher = RegexMatcher::new( - r"DOES NOT MATCH" - ).unwrap(); - let mut printer = JSONBuilder::new() - .build(vec![]); + let matcher = RegexMatcher::new(r"DOES NOT MATCH").unwrap(); + let mut printer = JSONBuilder::new().build(vec![]); SearcherBuilder::new() .build() .search_reader(&matcher, SHERLOCK, printer.sink(&matcher)) @@ -904,12 +886,9 @@ and exhibited clearly, with a label attached.\ #[test] fn always_begin_end_no_match() { - let matcher = RegexMatcher::new( - r"DOES NOT MATCH" - ).unwrap(); - let mut printer = JSONBuilder::new() - .always_begin_end(true) - .build(vec![]); + let matcher = RegexMatcher::new(r"DOES NOT MATCH").unwrap(); + let mut printer = + JSONBuilder::new().always_begin_end(true).build(vec![]); SearcherBuilder::new() .build() .search_reader(&matcher, SHERLOCK, printer.sink(&matcher)) @@ -924,11 +903,8 @@ and exhibited clearly, with a label attached.\ fn missing_crlf() { let haystack = "test\r\n".as_bytes(); - let matcher = RegexMatcherBuilder::new() - .build("test") - .unwrap(); - let mut printer = JSONBuilder::new() - .build(vec![]); + let matcher = RegexMatcherBuilder::new().build("test").unwrap(); + let mut printer = JSONBuilder::new().build(vec![]); SearcherBuilder::new() .build() .search_reader(&matcher, haystack, printer.sink(&matcher)) @@ -941,12 +917,9 @@ and exhibited clearly, with a label attached.\ got.lines().nth(1).unwrap(), ); - let matcher = RegexMatcherBuilder::new() - .crlf(true) - .build("test") - .unwrap(); - let mut printer = JSONBuilder::new() - .build(vec![]); + let matcher = + RegexMatcherBuilder::new().crlf(true).build("test").unwrap(); + let mut printer = JSONBuilder::new().build(vec![]); SearcherBuilder::new() .line_terminator(LineTerminator::crlf()) .build() diff --git a/grep-printer/src/jsont.rs b/grep-printer/src/jsont.rs index f98dabbbe..f42447477 100644 --- a/grep-printer/src/jsont.rs +++ b/grep-printer/src/jsont.rs @@ -80,7 +80,9 @@ pub struct SubMatch<'a> { #[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize)] #[serde(untagged)] enum Data<'a> { - Text { text: Cow<'a, str> }, + Text { + text: Cow<'a, str>, + }, Bytes { #[serde(serialize_with = "to_base64")] bytes: &'a [u8], @@ -116,32 +118,26 @@ impl<'a> Data<'a> { } } -fn to_base64( - bytes: T, - ser: S, -) -> Result -where T: AsRef<[u8]>, - S: Serializer +fn to_base64(bytes: T, ser: S) -> Result +where + T: 
AsRef<[u8]>, + S: Serializer, { ser.serialize_str(&base64::encode(&bytes)) } -fn ser_bytes<T, S>( - bytes: T, - ser: S, -) -> Result<S::Ok, S::Error> -where T: AsRef<[u8]>, - S: Serializer +fn ser_bytes<T, S>(bytes: T, ser: S) -> Result<S::Ok, S::Error> +where + T: AsRef<[u8]>, + S: Serializer, { Data::from_bytes(bytes.as_ref()).serialize(ser) } -fn ser_path<P, S>( - path: &Option<P>
, - ser: S, -) -> Result<S::Ok, S::Error> -where P: AsRef<Path>, - S: Serializer +fn ser_path<P, S>(path: &Option<P>
, ser: S) -> Result +where + P: AsRef, + S: Serializer, { path.as_ref().map(|p| Data::from_path(p.as_ref())).serialize(ser) } diff --git a/grep-printer/src/lib.rs b/grep-printer/src/lib.rs index 1e7d5c6ed..7309dc8b6 100644 --- a/grep-printer/src/lib.rs +++ b/grep-printer/src/lib.rs @@ -84,9 +84,9 @@ extern crate serde_derive; extern crate serde_json; extern crate termcolor; -pub use color::{ColorError, ColorSpecs, UserColorSpec, default_color_specs}; +pub use color::{default_color_specs, ColorError, ColorSpecs, UserColorSpec}; #[cfg(feature = "serde1")] -pub use json::{JSON, JSONBuilder, JSONSink}; +pub use json::{JSONBuilder, JSONSink, JSON}; pub use standard::{Standard, StandardBuilder, StandardSink}; pub use stats::Stats; pub use summary::{Summary, SummaryBuilder, SummaryKind, SummarySink}; diff --git a/grep-printer/src/standard.rs b/grep-printer/src/standard.rs index 89f44ad44..a0e9668af 100644 --- a/grep-printer/src/standard.rs +++ b/grep-printer/src/standard.rs @@ -8,16 +8,15 @@ use std::time::Instant; use bstr::ByteSlice; use grep_matcher::{Match, Matcher}; use grep_searcher::{ - LineStep, Searcher, - Sink, SinkError, - SinkContext, SinkContextKind, SinkFinish, SinkMatch, + LineStep, Searcher, Sink, SinkContext, SinkContextKind, SinkError, + SinkFinish, SinkMatch, }; use termcolor::{ColorSpec, NoColor, WriteColor}; use color::ColorSpecs; use counter::CounterWriter; use stats::Stats; -use util::{PrinterPath, Replacer, Sunk, trim_ascii_prefix}; +use util::{trim_ascii_prefix, PrinterPath, Replacer, Sunk}; /// The configuration for the standard printer. /// @@ -151,10 +150,7 @@ impl StandardBuilder { /// This completely overrides any previous color specifications. This does /// not add to any previously provided color specifications on this /// builder. - pub fn color_specs( - &mut self, - specs: ColorSpecs, - ) -> &mut StandardBuilder { + pub fn color_specs(&mut self, specs: ColorSpecs) -> &mut StandardBuilder { self.config.colors = specs; self } @@ -409,10 +405,7 @@ impl StandardBuilder { /// A typical use for this option is to permit cygwin users on Windows to /// set the path separator to `/` instead of using the system default of /// `\`. 
- pub fn separator_path( - &mut self, - sep: Option, - ) -> &mut StandardBuilder { + pub fn separator_path(&mut self, sep: Option) -> &mut StandardBuilder { self.config.separator_path = sep; self } @@ -487,12 +480,7 @@ impl Standard { &'s mut self, matcher: M, ) -> StandardSink<'static, 's, M, W> { - let stats = - if self.config.stats { - Some(Stats::new()) - } else { - None - }; + let stats = if self.config.stats { Some(Stats::new()) } else { None }; let needs_match_granularity = self.needs_match_granularity(); StandardSink { matcher: matcher, @@ -517,20 +505,18 @@ impl Standard { matcher: M, path: &'p P, ) -> StandardSink<'p, 's, M, W> - where M: Matcher, - P: ?Sized + AsRef, + where + M: Matcher, + P: ?Sized + AsRef, { if !self.config.path { return self.sink(matcher); } - let stats = - if self.config.stats { - Some(Stats::new()) - } else { - None - }; + let stats = if self.config.stats { Some(Stats::new()) } else { None }; let ppath = PrinterPath::with_separator( - path.as_ref(), self.config.separator_path); + path.as_ref(), + self.config.separator_path, + ); let needs_match_granularity = self.needs_match_granularity(); StandardSink { matcher: matcher, @@ -689,10 +675,12 @@ impl<'p, 's, M: Matcher, W: WriteColor> StandardSink<'p, 's, M, W> { // one search to find the matches (well, for replacements, we do one // additional search to perform the actual replacement). let matches = &mut self.standard.matches; - self.matcher.find_iter(bytes, |m| { - matches.push(m); - true - }).map_err(io::Error::error_message)?; + self.matcher + .find_iter(bytes, |m| { + matches.push(m); + true + }) + .map_err(io::Error::error_message)?; // Don't report empty matches appearing at the end of the bytes. if !matches.is_empty() && matches.last().unwrap().is_empty() @@ -714,11 +702,7 @@ impl<'p, 's, M: Matcher, W: WriteColor> StandardSink<'p, 's, M, W> { .as_ref() .map(|r| &*r) .unwrap(); - self.replacer.replace_all( - &self.matcher, - bytes, - replacement, - )?; + self.replacer.replace_all(&self.matcher, bytes, replacement)?; } Ok(()) } @@ -811,10 +795,7 @@ impl<'p, 's, M: Matcher, W: WriteColor> Sink for StandardSink<'p, 's, M, W> { Ok(true) } - fn begin( - &mut self, - _searcher: &Searcher, - ) -> Result { + fn begin(&mut self, _searcher: &Searcher) -> Result { self.standard.wtr.borrow_mut().reset_count(); self.start_time = Instant::now(); self.match_count = 0; @@ -887,10 +868,7 @@ impl<'a, M: Matcher, W: WriteColor> StandardImpl<'a, M, W> { &sink.standard.matches, sink.replacer.replacement(), ); - StandardImpl { - sunk: sunk, - ..StandardImpl::new(searcher, sink) - } + StandardImpl { sunk: sunk, ..StandardImpl::new(searcher, sink) } } /// Bundle self with a searcher and return the core implementation of Sink @@ -905,10 +883,7 @@ impl<'a, M: Matcher, W: WriteColor> StandardImpl<'a, M, W> { &sink.standard.matches, sink.replacer.replacement(), ); - StandardImpl { - sunk: sunk, - ..StandardImpl::new(searcher, sink) - } + StandardImpl { sunk: sunk, ..StandardImpl::new(searcher, sink) } } fn sink(&self) -> io::Result<()> { @@ -1084,10 +1059,7 @@ impl<'a, M: Matcher, W: WriteColor> StandardImpl<'a, M, W> { line = line.with_start(upto); if self.exceeds_max_columns(&bytes[this_line]) { self.write_exceeded_line( - bytes, - this_line, - matches, - &mut midx, + bytes, this_line, matches, &mut midx, )?; } else { self.write_spec(spec, &bytes[this_line])?; @@ -1178,14 +1150,14 @@ impl<'a, M: Matcher, W: WriteColor> StandardImpl<'a, M, W> { } #[inline(always)] - fn write_line( - &self, - line: &[u8], - ) -> io::Result<()> 
{ + fn write_line(&self, line: &[u8]) -> io::Result<()> { if self.exceeds_max_columns(line) { let range = Match::new(0, line.len()); self.write_exceeded_line( - line, range, self.sunk.matches(), &mut 0, + line, + range, + self.sunk.matches(), + &mut 0, )?; } else { self.write_trim(line)?; @@ -1279,7 +1251,8 @@ impl<'a, M: Matcher, W: WriteColor> StandardImpl<'a, M, W> { .map(|(_, end, _)| end) .take(self.config().max_columns.unwrap_or(0) as usize) .last() - .unwrap_or(0) + line.start(); + .unwrap_or(0) + + line.start(); line = line.with_end(end); self.write_colored_matches(bytes, line, matches, match_index)?; @@ -1292,16 +1265,12 @@ impl<'a, M: Matcher, W: WriteColor> StandardImpl<'a, M, W> { m.start() >= line.end() && m.start() < original.end() }) .count(); - let tense = - if remaining == 1 { - "match" - } else { - "matches" - }; + let tense = if remaining == 1 { "match" } else { "matches" }; write!( self.wtr().borrow_mut(), " [... {} more {}]", - remaining, tense, + remaining, + tense, )?; } self.write_line_term()?; @@ -1396,7 +1365,8 @@ impl<'a, M: Matcher, W: WriteColor> StandardImpl<'a, M, W> { } let remainder = format!( "after match (found {:?} byte around offset {})\n", - [byte].as_bstr(), offset, + [byte].as_bstr(), + offset, ); self.write(remainder.as_bytes())?; } else if let Some(byte) = bin.convert_byte() { @@ -1407,7 +1377,8 @@ impl<'a, M: Matcher, W: WriteColor> StandardImpl<'a, M, W> { } let remainder = format!( "matches (found {:?} byte around offset {})\n", - [byte].as_bstr(), offset, + [byte].as_bstr(), + offset, ); self.write(remainder.as_bytes())?; } @@ -1600,17 +1571,14 @@ but Doctor Watson has to have it taken out for him and dusted,\r and exhibited clearly, with a label attached.\ "; - fn printer_contents( - printer: &mut Standard>>, - ) -> String { + fn printer_contents(printer: &mut Standard>>) -> String { String::from_utf8(printer.get_mut().get_ref().to_owned()).unwrap() } #[test] fn reports_match() { let matcher = RegexMatcher::new("Sherlock").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); let mut sink = printer.sink(&matcher); SearcherBuilder::new() .line_number(false) @@ -1620,8 +1588,7 @@ and exhibited clearly, with a label attached.\ assert!(sink.has_match()); let matcher = RegexMatcher::new("zzzzz").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); let mut sink = printer.sink(&matcher); SearcherBuilder::new() .line_number(false) @@ -1636,8 +1603,7 @@ and exhibited clearly, with a label attached.\ use grep_searcher::BinaryDetection; let matcher = RegexMatcher::new("Sherlock").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); let mut sink = printer.sink(&matcher); SearcherBuilder::new() .line_number(false) @@ -1647,8 +1613,7 @@ and exhibited clearly, with a label attached.\ assert!(sink.binary_byte_offset().is_none()); let matcher = RegexMatcher::new(".+").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); let mut sink = printer.sink(&matcher); SearcherBuilder::new() .line_number(false) @@ -1664,9 +1629,8 @@ and exhibited clearly, with a label attached.\ use std::time::Duration; let matcher = RegexMatcher::new("Sherlock|opposed").unwrap(); 
- let mut printer = StandardBuilder::new() - .stats(true) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().stats(true).build(NoColor::new(vec![])); let stats = { let mut sink = printer.sink(&matcher); SearcherBuilder::new() @@ -1685,7 +1649,6 @@ and exhibited clearly, with a label attached.\ assert_eq!(stats.bytes_printed(), buf.len() as u64); assert_eq!(stats.matched_lines(), 2); assert_eq!(stats.matches(), 3); - } #[test] @@ -1693,9 +1656,8 @@ and exhibited clearly, with a label attached.\ use std::time::Duration; let matcher = RegexMatcher::new("Sherlock|opposed").unwrap(); - let mut printer = StandardBuilder::new() - .stats(true) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().stats(true).build(NoColor::new(vec![])); let stats = { let mut sink = printer.sink(&matcher); SearcherBuilder::new() @@ -1860,9 +1822,8 @@ and exhibited clearly, with a label attached. #[test] fn path() { let matcher = RegexMatcher::new("Watson").unwrap(); - let mut printer = StandardBuilder::new() - .path(false) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().path(false).build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(true) .build() @@ -1963,9 +1924,8 @@ books/sherlockZbut Doctor Watson has to have it taken out for him and dusted, #[test] fn heading() { let matcher = RegexMatcher::new("Watson").unwrap(); - let mut printer = StandardBuilder::new() - .heading(true) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().heading(true).build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(false) .build() @@ -1988,9 +1948,8 @@ but Doctor Watson has to have it taken out for him and dusted, #[test] fn no_heading() { let matcher = RegexMatcher::new("Watson").unwrap(); - let mut printer = StandardBuilder::new() - .heading(false) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().heading(false).build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(false) .build() @@ -2012,9 +1971,8 @@ sherlock:but Doctor Watson has to have it taken out for him and dusted, #[test] fn no_heading_multiple() { let matcher = RegexMatcher::new("Watson").unwrap(); - let mut printer = StandardBuilder::new() - .heading(false) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().heading(false).build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(false) .build() @@ -2049,9 +2007,8 @@ sherlock:be, to a very large extent, the result of luck. 
Sherlock Holmes #[test] fn heading_multiple() { let matcher = RegexMatcher::new("Watson").unwrap(); - let mut printer = StandardBuilder::new() - .heading(true) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().heading(true).build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(false) .build() @@ -2161,8 +2118,7 @@ Watson #[test] fn line_number() { let matcher = RegexMatcher::new("Watson").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(true) .build() @@ -2184,8 +2140,7 @@ Watson #[test] fn line_number_multi_line() { let matcher = RegexMatcher::new("(?s)Watson.+Watson").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(true) .multi_line(true) @@ -2211,9 +2166,8 @@ Watson #[test] fn column_number() { let matcher = RegexMatcher::new("Watson").unwrap(); - let mut printer = StandardBuilder::new() - .column(true) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().column(true).build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(false) .build() @@ -2235,9 +2189,8 @@ Watson #[test] fn column_number_multi_line() { let matcher = RegexMatcher::new("(?s)Watson.+Watson").unwrap(); - let mut printer = StandardBuilder::new() - .column(true) - .build(NoColor::new(vec![])); + let mut printer = + StandardBuilder::new().column(true).build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(false) .multi_line(true) @@ -2440,9 +2393,8 @@ and exhibited clearly, with a label attached. #[test] fn max_columns_with_count_preview_two_matches() { - let matcher = RegexMatcher::new( - "exhibited|dusted|has to have it", - ).unwrap(); + let matcher = + RegexMatcher::new("exhibited|dusted|has to have it").unwrap(); let mut printer = StandardBuilder::new() .stats(true) .max_columns(Some(46)) @@ -2493,9 +2445,9 @@ but Doctor Watson has to have it taken out for him and dusted, #[test] fn max_columns_multi_line_preview() { - let matcher = RegexMatcher::new( - "(?s)clew|cigar ash.+have it|exhibited", - ).unwrap(); + let matcher = + RegexMatcher::new("(?s)clew|cigar ash.+have it|exhibited") + .unwrap(); let mut printer = StandardBuilder::new() .stats(true) .max_columns(Some(46)) @@ -2673,9 +2625,8 @@ For the Doctor Watsons of this world, as opposed to the Sherlock #[test] fn max_matches_multi_line2() { - let matcher = RegexMatcher::new( - r"(?s)Watson.+?(Holmeses|clearly)" - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s)Watson.+?(Holmeses|clearly)").unwrap(); let mut printer = StandardBuilder::new() .max_matches(Some(1)) .build(NoColor::new(vec![])); @@ -2726,9 +2677,8 @@ Holmeses, success in the province of detective work must always #[test] fn only_matching_multi_line1() { - let matcher = RegexMatcher::new( - r"(?s:.{0})(Doctor Watsons|Sherlock)" - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s:.{0})(Doctor Watsons|Sherlock)").unwrap(); let mut printer = StandardBuilder::new() .only_matching(true) .column(true) @@ -2755,9 +2705,8 @@ Holmeses, success in the province of detective work must always #[test] fn only_matching_multi_line2() { - let matcher = RegexMatcher::new( - r"(?s)Watson.+?(Holmeses|clearly)" - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s)Watson.+?(Holmeses|clearly)").unwrap(); let mut printer = StandardBuilder::new() 
.only_matching(true) .column(true) @@ -2844,9 +2793,8 @@ Holmeses, success in the province of detective work must always // can match across multiple lines without actually doing so. This is // so we can test multi-line handling in the case of a match on only // one line. - let matcher = RegexMatcher::new( - r"(?s:.{0})(Doctor Watsons|Sherlock)" - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s:.{0})(Doctor Watsons|Sherlock)").unwrap(); let mut printer = StandardBuilder::new() .only_matching(true) .max_columns(Some(10)) @@ -2878,9 +2826,8 @@ Holmeses, success in the province of detective work must always // can match across multiple lines without actually doing so. This is // so we can test multi-line handling in the case of a match on only // one line. - let matcher = RegexMatcher::new( - r"(?s:.{0})(Doctor Watsons|Sherlock)" - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s:.{0})(Doctor Watsons|Sherlock)").unwrap(); let mut printer = StandardBuilder::new() .only_matching(true) .max_columns(Some(10)) @@ -2909,9 +2856,8 @@ Holmeses, success in the province of detective work must always #[test] fn only_matching_max_columns_multi_line2() { - let matcher = RegexMatcher::new( - r"(?s)Watson.+?(Holmeses|clearly)" - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s)Watson.+?(Holmeses|clearly)").unwrap(); let mut printer = StandardBuilder::new() .only_matching(true) .max_columns(Some(50)) @@ -2940,9 +2886,8 @@ Holmeses, success in the province of detective work must always #[test] fn only_matching_max_columns_preview_multi_line2() { - let matcher = RegexMatcher::new( - r"(?s)Watson.+?(Holmeses|clearly)" - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s)Watson.+?(Holmeses|clearly)").unwrap(); let mut printer = StandardBuilder::new() .only_matching(true) .max_columns(Some(50)) @@ -2998,9 +2943,8 @@ Holmeses, success in the province of detective work must always #[test] fn per_match_multi_line1() { - let matcher = RegexMatcher::new( - r"(?s:.{0})(Doctor Watsons|Sherlock)" - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s:.{0})(Doctor Watsons|Sherlock)").unwrap(); let mut printer = StandardBuilder::new() .per_match(true) .column(true) @@ -3027,9 +2971,8 @@ Holmeses, success in the province of detective work must always #[test] fn per_match_multi_line2() { - let matcher = RegexMatcher::new( - r"(?s)Watson.+?(Holmeses|clearly)", - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s)Watson.+?(Holmeses|clearly)").unwrap(); let mut printer = StandardBuilder::new() .per_match(true) .column(true) @@ -3057,9 +3000,8 @@ Holmeses, success in the province of detective work must always #[test] fn per_match_multi_line3() { - let matcher = RegexMatcher::new( - r"(?s)Watson.+?Holmeses|always.+?be", - ).unwrap(); + let matcher = + RegexMatcher::new(r"(?s)Watson.+?Holmeses|always.+?be").unwrap(); let mut printer = StandardBuilder::new() .per_match(true) .column(true) @@ -3194,9 +3136,8 @@ Holmeses, success in the province of detective work must always #[test] fn replacement_max_columns_preview2() { - let matcher = RegexMatcher::new( - "exhibited|dusted|has to have it", - ).unwrap(); + let matcher = + RegexMatcher::new("exhibited|dusted|has to have it").unwrap(); let mut printer = StandardBuilder::new() .max_columns(Some(43)) .max_columns_preview(true) @@ -3277,8 +3218,7 @@ and xxx clearly, with a label attached. 
#[test] fn invert() { let matcher = RegexMatcher::new(r"Sherlock").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(true) .invert_match(true) @@ -3303,8 +3243,7 @@ and xxx clearly, with a label attached. #[test] fn invert_multi_line() { let matcher = RegexMatcher::new(r"(?s:.{0})Sherlock").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); SearcherBuilder::new() .multi_line(true) .line_number(true) @@ -3330,8 +3269,7 @@ and xxx clearly, with a label attached. #[test] fn invert_context() { let matcher = RegexMatcher::new(r"Sherlock").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); SearcherBuilder::new() .line_number(true) .invert_match(true) @@ -3360,8 +3298,7 @@ and xxx clearly, with a label attached. #[test] fn invert_context_multi_line() { let matcher = RegexMatcher::new(r"(?s:.{0})Sherlock").unwrap(); - let mut printer = StandardBuilder::new() - .build(NoColor::new(vec![])); + let mut printer = StandardBuilder::new().build(NoColor::new(vec![])); SearcherBuilder::new() .multi_line(true) .line_number(true) diff --git a/grep-printer/src/stats.rs b/grep-printer/src/stats.rs index a62aead97..0fd723a1f 100644 --- a/grep-printer/src/stats.rs +++ b/grep-printer/src/stats.rs @@ -34,8 +34,8 @@ impl<'a> Add<&'a Stats> for Stats { Stats { elapsed: NiceDuration(self.elapsed.0 + rhs.elapsed.0), searches: self.searches + rhs.searches, - searches_with_match: - self.searches_with_match + rhs.searches_with_match, + searches_with_match: self.searches_with_match + + rhs.searches_with_match, bytes_searched: self.bytes_searched + rhs.bytes_searched, bytes_printed: self.bytes_printed + rhs.bytes_printed, matched_lines: self.matched_lines + rhs.matched_lines, diff --git a/grep-printer/src/summary.rs b/grep-printer/src/summary.rs index d10ca17ab..a27fc0050 100644 --- a/grep-printer/src/summary.rs +++ b/grep-printer/src/summary.rs @@ -168,10 +168,7 @@ impl SummaryBuilder { /// /// This is a convenience routine for /// `SummaryBuilder::build(termcolor::NoColor::new(wtr))`. - pub fn build_no_color( - &self, - wtr: W, - ) -> Summary> { + pub fn build_no_color(&self, wtr: W) -> Summary> { self.build(NoColor::new(wtr)) } @@ -204,10 +201,7 @@ impl SummaryBuilder { /// builder. /// /// The default color specifications provide no styling. - pub fn color_specs( - &mut self, - specs: ColorSpecs, - ) -> &mut SummaryBuilder { + pub fn color_specs(&mut self, specs: ColorSpecs) -> &mut SummaryBuilder { self.config.colors = specs; self } @@ -281,10 +275,7 @@ impl SummaryBuilder { /// `CountMatches` modes. /// /// By default, this is set to `:`. - pub fn separator_field( - &mut self, - sep: Vec, - ) -> &mut SummaryBuilder { + pub fn separator_field(&mut self, sep: Vec) -> &mut SummaryBuilder { self.config.separator_field = Arc::new(sep); self } @@ -300,10 +291,7 @@ impl SummaryBuilder { /// `\`. /// /// This is disabled by default. 
- pub fn separator_path( - &mut self, - sep: Option, - ) -> &mut SummaryBuilder { + pub fn separator_path(&mut self, sep: Option) -> &mut SummaryBuilder { self.config.separator_path = sep; self } @@ -382,12 +370,11 @@ impl Summary { &'s mut self, matcher: M, ) -> SummarySink<'static, 's, M, W> { - let stats = - if self.config.stats || self.config.kind.requires_stats() { - Some(Stats::new()) - } else { - None - }; + let stats = if self.config.stats || self.config.kind.requires_stats() { + Some(Stats::new()) + } else { + None + }; SummarySink { matcher: matcher, summary: self, @@ -408,20 +395,22 @@ impl Summary { matcher: M, path: &'p P, ) -> SummarySink<'p, 's, M, W> - where M: Matcher, - P: ?Sized + AsRef, + where + M: Matcher, + P: ?Sized + AsRef, { if !self.config.path && !self.config.kind.requires_path() { return self.sink(matcher); } - let stats = - if self.config.stats || self.config.kind.requires_stats() { - Some(Stats::new()) - } else { - None - }; + let stats = if self.config.stats || self.config.kind.requires_stats() { + Some(Stats::new()) + } else { + None + }; let ppath = PrinterPath::with_separator( - path.as_ref(), self.config.separator_path); + path.as_ref(), + self.config.separator_path, + ); SummarySink { matcher: matcher, summary: self, @@ -596,10 +585,12 @@ impl<'p, 's, M: Matcher, W: WriteColor> Sink for SummarySink<'p, 's, M, W> { self.match_count += 1; if let Some(ref mut stats) = self.stats { let mut match_count = 0; - self.matcher.find_iter(mat.bytes(), |_| { - match_count += 1; - true - }).map_err(io::Error::error_message)?; + self.matcher + .find_iter(mat.bytes(), |_| { + match_count += 1; + true + }) + .map_err(io::Error::error_message)?; stats.add_matches(match_count); stats.add_matched_lines(mat.lines().count() as u64); } else if self.summary.config.kind.quit_early() { @@ -608,10 +599,7 @@ impl<'p, 's, M: Matcher, W: WriteColor> Sink for SummarySink<'p, 's, M, W> { Ok(!self.should_quit()) } - fn begin( - &mut self, - _searcher: &Searcher, - ) -> Result { + fn begin(&mut self, _searcher: &Searcher) -> Result { if self.path.is_none() && self.summary.config.kind.requires_path() { return Err(io::Error::error_message(format!( "output kind {:?} requires a file path", @@ -674,8 +662,7 @@ impl<'p, 's, M: Matcher, W: WriteColor> Sink for SummarySink<'p, 's, M, W> { } let show_count = - !self.summary.config.exclude_zero - || self.match_count > 0; + !self.summary.config.exclude_zero || self.match_count > 0; match self.summary.config.kind { SummaryKind::Count => { if show_count { @@ -686,7 +673,8 @@ impl<'p, 's, M: Matcher, W: WriteColor> Sink for SummarySink<'p, 's, M, W> { } SummaryKind::CountMatches => { if show_count { - let stats = self.stats + let stats = self + .stats .as_ref() .expect("CountMatches should enable stats tracking"); self.write_path_field()?; @@ -716,7 +704,7 @@ mod tests { use grep_searcher::SearcherBuilder; use termcolor::NoColor; - use super::{Summary, SummaryKind, SummaryBuilder}; + use super::{Summary, SummaryBuilder, SummaryKind}; const SHERLOCK: &'static [u8] = b"\ For the Doctor Watsons of this world, as opposed to the Sherlock @@ -727,45 +715,41 @@ but Doctor Watson has to have it taken out for him and dusted, and exhibited clearly, with a label attached. 
"; - fn printer_contents( - printer: &mut Summary>>, - ) -> String { + fn printer_contents(printer: &mut Summary>>) -> String { String::from_utf8(printer.get_mut().get_ref().to_owned()).unwrap() } #[test] fn path_with_match_error() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::PathWithMatch) .build_no_color(vec![]); - let res = SearcherBuilder::new() - .build() - .search_reader(&matcher, SHERLOCK, printer.sink(&matcher)); + let res = SearcherBuilder::new().build().search_reader( + &matcher, + SHERLOCK, + printer.sink(&matcher), + ); assert!(res.is_err()); } #[test] fn path_without_match_error() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::PathWithoutMatch) .build_no_color(vec![]); - let res = SearcherBuilder::new() - .build() - .search_reader(&matcher, SHERLOCK, printer.sink(&matcher)); + let res = SearcherBuilder::new().build().search_reader( + &matcher, + SHERLOCK, + printer.sink(&matcher), + ); assert!(res.is_err()); } #[test] fn count_no_path() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .build_no_color(vec![]); @@ -780,9 +764,7 @@ and exhibited clearly, with a label attached. #[test] fn count_no_path_even_with_path() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .path(false) @@ -802,9 +784,7 @@ and exhibited clearly, with a label attached. #[test] fn count_path() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .build_no_color(vec![]); @@ -823,9 +803,7 @@ and exhibited clearly, with a label attached. #[test] fn count_path_with_zero() { - let matcher = RegexMatcher::new( - r"NO MATCH" - ).unwrap(); + let matcher = RegexMatcher::new(r"NO MATCH").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .exclude_zero(false) @@ -845,9 +823,7 @@ and exhibited clearly, with a label attached. #[test] fn count_path_without_zero() { - let matcher = RegexMatcher::new( - r"NO MATCH" - ).unwrap(); + let matcher = RegexMatcher::new(r"NO MATCH").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .exclude_zero(true) @@ -867,9 +843,7 @@ and exhibited clearly, with a label attached. #[test] fn count_path_field_separator() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .separator_field(b"ZZ".to_vec()) @@ -889,9 +863,7 @@ and exhibited clearly, with a label attached. #[test] fn count_path_terminator() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .path_terminator(Some(b'\x00')) @@ -911,9 +883,7 @@ and exhibited clearly, with a label attached. 
#[test] fn count_path_separator() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .separator_path(Some(b'\\')) @@ -933,9 +903,7 @@ and exhibited clearly, with a label attached. #[test] fn count_max_matches() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .max_matches(Some(1)) @@ -951,9 +919,7 @@ and exhibited clearly, with a label attached. #[test] fn count_matches() { - let matcher = RegexMatcher::new( - r"Watson|Sherlock" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson|Sherlock").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::CountMatches) .build_no_color(vec![]); @@ -972,9 +938,7 @@ and exhibited clearly, with a label attached. #[test] fn path_with_match_found() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::PathWithMatch) .build_no_color(vec![]); @@ -993,9 +957,7 @@ and exhibited clearly, with a label attached. #[test] fn path_with_match_not_found() { - let matcher = RegexMatcher::new( - r"ZZZZZZZZ" - ).unwrap(); + let matcher = RegexMatcher::new(r"ZZZZZZZZ").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::PathWithMatch) .build_no_color(vec![]); @@ -1012,12 +974,9 @@ and exhibited clearly, with a label attached. assert_eq_printed!("", got); } - #[test] fn path_without_match_found() { - let matcher = RegexMatcher::new( - r"ZZZZZZZZZ" - ).unwrap(); + let matcher = RegexMatcher::new(r"ZZZZZZZZZ").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::PathWithoutMatch) .build_no_color(vec![]); @@ -1036,9 +995,7 @@ and exhibited clearly, with a label attached. #[test] fn path_without_match_not_found() { - let matcher = RegexMatcher::new( - r"Watson" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::PathWithoutMatch) .build_no_color(vec![]); @@ -1057,9 +1014,7 @@ and exhibited clearly, with a label attached. #[test] fn quiet() { - let matcher = RegexMatcher::new( - r"Watson|Sherlock" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson|Sherlock").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Quiet) .build_no_color(vec![]); @@ -1081,9 +1036,7 @@ and exhibited clearly, with a label attached. 
#[test] fn quiet_with_stats() { - let matcher = RegexMatcher::new( - r"Watson|Sherlock" - ).unwrap(); + let matcher = RegexMatcher::new(r"Watson|Sherlock").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Quiet) .stats(true) diff --git a/grep-printer/src/util.rs b/grep-printer/src/util.rs index d4a19eb73..3948d970a 100644 --- a/grep-printer/src/util.rs +++ b/grep-printer/src/util.rs @@ -7,8 +7,7 @@ use std::time; use bstr::{ByteSlice, ByteVec}; use grep_matcher::{Captures, LineTerminator, Match, Matcher}; use grep_searcher::{ - LineIter, - SinkError, SinkContext, SinkContextKind, SinkMatch, + LineIter, SinkContext, SinkContextKind, SinkError, SinkMatch, }; #[cfg(feature = "serde1")] use serde::{Serialize, Serializer}; @@ -58,19 +57,13 @@ impl Replacer { replacement: &[u8], ) -> io::Result<()> { { - let &mut Space { - ref mut dst, - ref mut caps, - ref mut matches, - } = self.allocate(matcher)?; + let &mut Space { ref mut dst, ref mut caps, ref mut matches } = + self.allocate(matcher)?; dst.clear(); matches.clear(); - matcher.replace_with_captures( - subject, - caps, - dst, - |caps, dst| { + matcher + .replace_with_captures(subject, caps, dst, |caps, dst| { let start = dst.len(); caps.interpolate( |name| matcher.capture_index(name), @@ -81,8 +74,8 @@ impl Replacer { let end = dst.len(); matches.push(Match::new(start, end)); true - }, - ).map_err(io::Error::error_message)?; + }) + .map_err(io::Error::error_message)?; } Ok(()) } @@ -122,14 +115,10 @@ impl Replacer { /// matcher fails. fn allocate(&mut self, matcher: &M) -> io::Result<&mut Space> { if self.space.is_none() { - let caps = matcher - .new_captures() - .map_err(io::Error::error_message)?; - self.space = Some(Space { - caps: caps, - dst: vec![], - matches: vec![], - }); + let caps = + matcher.new_captures().map_err(io::Error::error_message)?; + self.space = + Some(Space { caps: caps, dst: vec![], matches: vec![] }); } Ok(self.space.as_mut().unwrap()) } @@ -176,9 +165,8 @@ impl<'a> Sunk<'a> { original_matches: &'a [Match], replacement: Option<(&'a [u8], &'a [Match])>, ) -> Sunk<'a> { - let (bytes, matches) = replacement.unwrap_or_else(|| { - (sunk.bytes(), original_matches) - }); + let (bytes, matches) = + replacement.unwrap_or_else(|| (sunk.bytes(), original_matches)); Sunk { bytes: bytes, absolute_byte_offset: sunk.absolute_byte_offset(), @@ -195,9 +183,8 @@ impl<'a> Sunk<'a> { original_matches: &'a [Match], replacement: Option<(&'a [u8], &'a [Match])>, ) -> Sunk<'a> { - let (bytes, matches) = replacement.unwrap_or_else(|| { - (sunk.bytes(), original_matches) - }); + let (bytes, matches) = + replacement.unwrap_or_else(|| (sunk.bytes(), original_matches)); Sunk { bytes: bytes, absolute_byte_offset: sunk.absolute_byte_offset(), @@ -289,13 +276,17 @@ impl<'a> PrinterPath<'a> { /// path separators that are both replaced by `new_sep`. In all other /// environments, only `/` is treated as a path separator. 
fn replace_separator(&mut self, new_sep: u8) { - let transformed_path: Vec = self.0.bytes().map(|b| { - if b == b'/' || (cfg!(windows) && b == b'\\') { - new_sep - } else { - b - } - }).collect(); + let transformed_path: Vec = self + .0 + .bytes() + .map(|b| { + if b == b'/' || (cfg!(windows) && b == b'\\') { + new_sep + } else { + b + } + }) + .collect(); self.0 = Cow::Owned(transformed_path); } diff --git a/grep-regex/src/ast.rs b/grep-regex/src/ast.rs index 4e6067ee9..a4979deda 100644 --- a/grep-regex/src/ast.rs +++ b/grep-regex/src/ast.rs @@ -1,5 +1,5 @@ -use regex_syntax::ast::{self, Ast}; use regex_syntax::ast::parse::Parser; +use regex_syntax::ast::{self, Ast}; /// The results of analyzing AST of a regular expression (e.g., for supporting /// smart case). diff --git a/grep-regex/src/config.rs b/grep-regex/src/config.rs index e04582e43..1f81a8028 100644 --- a/grep-regex/src/config.rs +++ b/grep-regex/src/config.rs @@ -51,8 +51,8 @@ impl Default for Config { octal: false, // These size limits are much bigger than what's in the regex // crate. - size_limit: 100 * (1<<20), - dfa_size_limit: 1000 * (1<<20), + size_limit: 100 * (1 << 20), + dfa_size_limit: 1000 * (1 << 20), nest_limit: 250, line_terminator: None, crlf: false, @@ -95,10 +95,7 @@ impl Config { /// Accounting for the `smart_case` config knob, return true if and only if /// this pattern should be matched case insensitively. - fn is_case_insensitive( - &self, - analysis: &AstAnalysis, - ) -> bool { + fn is_case_insensitive(&self, analysis: &AstAnalysis) -> bool { if self.case_insensitive { return true; } @@ -116,9 +113,7 @@ impl Config { /// are enabled, since if multi-line can impact the match semantics of a /// regex, then it is by definition not a simple alternation of literals. pub fn can_plain_aho_corasick(&self) -> bool { - !self.word - && !self.case_insensitive - && !self.case_smart + !self.word && !self.case_insensitive && !self.case_smart } /// Perform analysis on the AST of this pattern. @@ -203,8 +198,7 @@ impl ConfiguredHIR { pub fn with_pattern String>( &self, mut f: F, - ) -> Result - { + ) -> Result { self.pattern_to_hir(&f(&self.expr.to_string())) } diff --git a/grep-regex/src/crlf.rs b/grep-regex/src/crlf.rs index 361e087ed..09e78b9fc 100644 --- a/grep-regex/src/crlf.rs +++ b/grep-regex/src/crlf.rs @@ -76,9 +76,8 @@ impl Matcher for CRLFMatcher { caps: &mut RegexCaptures, ) -> Result { caps.strip_crlf(false); - let r = self.regex.captures_read_at( - caps.locations_mut(), haystack, at, - ); + let r = + self.regex.captures_read_at(caps.locations_mut(), haystack, at); if !r.is_some() { return Ok(false); } @@ -163,8 +162,8 @@ pub fn crlfify(expr: Hir) -> Hir { #[cfg(test)] mod tests { - use regex_syntax::Parser; use super::crlfify; + use regex_syntax::Parser; fn roundtrip(pattern: &str) -> String { let expr1 = Parser::new().parse(pattern).unwrap(); diff --git a/grep-regex/src/literal.rs b/grep-regex/src/literal.rs index 52f0bc7ff..f690bde29 100644 --- a/grep-regex/src/literal.rs +++ b/grep-regex/src/literal.rs @@ -5,8 +5,8 @@ the regex engine doesn't look for inner literals. Since we're doing line based searching, we can use them, so we need to do it ourselves. 
*/ -use regex_syntax::hir::{self, Hir, HirKind}; use regex_syntax::hir::literal::{Literal, Literals}; +use regex_syntax::hir::{self, Hir, HirKind}; use util; @@ -159,10 +159,8 @@ impl LiteralSets { }; debug!("prefix/suffix literals found: {:?}", lits); - let alts: Vec = lits - .into_iter() - .map(|x| util::bytes_to_regex(x)) - .collect(); + let alts: Vec = + lits.into_iter().map(|x| util::bytes_to_regex(x)).collect(); // We're matching raw bytes, so disable Unicode mode. Some(format!("(?-u:{})", alts.join("|"))) } else { @@ -194,24 +192,28 @@ fn union_required(expr: &Hir, lits: &mut Literals) { HirKind::Group(hir::Group { ref hir, .. }) => { union_required(&**hir, lits); } - HirKind::Repetition(ref x) => { - match x.kind { - hir::RepetitionKind::ZeroOrOne => lits.cut(), - hir::RepetitionKind::ZeroOrMore => lits.cut(), - hir::RepetitionKind::OneOrMore => { - union_required(&x.hir, lits); - } - hir::RepetitionKind::Range(ref rng) => { - let (min, max) = match *rng { - hir::RepetitionRange::Exactly(m) => (m, Some(m)), - hir::RepetitionRange::AtLeast(m) => (m, None), - hir::RepetitionRange::Bounded(m, n) => (m, Some(n)), - }; - repeat_range_literals( - &x.hir, min, max, x.greedy, lits, union_required); - } + HirKind::Repetition(ref x) => match x.kind { + hir::RepetitionKind::ZeroOrOne => lits.cut(), + hir::RepetitionKind::ZeroOrMore => lits.cut(), + hir::RepetitionKind::OneOrMore => { + union_required(&x.hir, lits); } - } + hir::RepetitionKind::Range(ref rng) => { + let (min, max) = match *rng { + hir::RepetitionRange::Exactly(m) => (m, Some(m)), + hir::RepetitionRange::AtLeast(m) => (m, None), + hir::RepetitionRange::Bounded(m, n) => (m, Some(n)), + }; + repeat_range_literals( + &x.hir, + min, + max, + x.greedy, + lits, + union_required, + ); + } + }, HirKind::Concat(ref es) if es.is_empty() => {} HirKind::Concat(ref es) if es.len() == 1 => { union_required(&es[0], lits) @@ -310,9 +312,9 @@ fn is_simple(expr: &Hir) -> bool { | HirKind::Repetition(_) | HirKind::Concat(_) | HirKind::Alternation(_) => true, - HirKind::Anchor(_) - | HirKind::WordBoundary(_) - | HirKind::Group(_) => false, + HirKind::Anchor(_) | HirKind::WordBoundary(_) | HirKind::Group(_) => { + false + } } } @@ -328,8 +330,8 @@ fn count_byte_class(cls: &hir::ClassBytes) -> u32 { #[cfg(test)] mod tests { - use regex_syntax::Parser; use super::LiteralSets; + use regex_syntax::Parser; fn sets(pattern: &str) -> LiteralSets { let hir = Parser::new().parse(pattern).unwrap(); @@ -380,8 +382,10 @@ mod tests { fn regression_1319() { // Regression from: // https://github.com/BurntSushi/ripgrep/issues/1319 - assert_eq!(one_regex(r"TTGAGTCCAGGAG[ATCG]{2}C"), + assert_eq!( + one_regex(r"TTGAGTCCAGGAG[ATCG]{2}C"), pat("TTGAGTCCAGGAGA|TTGAGTCCAGGAGC|\ - TTGAGTCCAGGAGG|TTGAGTCCAGGAGT")); + TTGAGTCCAGGAGG|TTGAGTCCAGGAGT") + ); } } diff --git a/grep-regex/src/matcher.rs b/grep-regex/src/matcher.rs index 425046566..a85bca66c 100644 --- a/grep-regex/src/matcher.rs +++ b/grep-regex/src/matcher.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use grep_matcher::{ - Captures, LineMatchKind, LineTerminator, Match, Matcher, NoError, ByteSet, + ByteSet, Captures, LineMatchKind, LineTerminator, Match, Matcher, NoError, }; use regex::bytes::{CaptureLocations, Regex}; @@ -34,9 +34,7 @@ impl Default for RegexMatcherBuilder { impl RegexMatcherBuilder { /// Create a new builder for configuring a regex matcher. 
pub fn new() -> RegexMatcherBuilder { - RegexMatcherBuilder { - config: Config::default(), - } + RegexMatcherBuilder { config: Config::default() } } /// Build a new matcher using the current configuration for the provided @@ -382,9 +380,7 @@ impl RegexMatcher { /// given pattern contains a literal `\n`. Other uses of `\n` (such as in /// `\s`) are removed transparently. pub fn new_line_matcher(pattern: &str) -> Result { - RegexMatcherBuilder::new() - .line_terminator(Some(b'\n')) - .build(pattern) + RegexMatcherBuilder::new().line_terminator(Some(b'\n')).build(pattern) } } @@ -499,12 +495,9 @@ impl Matcher for RegexMatcher { } } - fn find_iter( - &self, - haystack: &[u8], - matched: F, - ) -> Result<(), NoError> - where F: FnMut(Match) -> bool + fn find_iter(&self, haystack: &[u8], matched: F) -> Result<(), NoError> + where + F: FnMut(Match) -> bool, { use self::RegexMatcherImpl::*; match self.matcher { @@ -520,7 +513,8 @@ impl Matcher for RegexMatcher { haystack: &[u8], matched: F, ) -> Result, NoError> - where F: FnMut(Match) -> Result + where + F: FnMut(Match) -> Result, { use self::RegexMatcherImpl::*; match self.matcher { @@ -551,7 +545,8 @@ impl Matcher for RegexMatcher { caps: &mut RegexCaptures, matched: F, ) -> Result<(), NoError> - where F: FnMut(&RegexCaptures) -> bool + where + F: FnMut(&RegexCaptures) -> bool, { use self::RegexMatcherImpl::*; match self.matcher { @@ -568,7 +563,8 @@ impl Matcher for RegexMatcher { caps: &mut RegexCaptures, matched: F, ) -> Result, NoError> - where F: FnMut(&RegexCaptures) -> Result + where + F: FnMut(&RegexCaptures) -> Result, { use self::RegexMatcherImpl::*; match self.matcher { @@ -602,7 +598,8 @@ impl Matcher for RegexMatcher { dst: &mut Vec, append: F, ) -> Result<(), NoError> - where F: FnMut(Match, &mut Vec) -> bool + where + F: FnMut(Match, &mut Vec) -> bool, { use self::RegexMatcherImpl::*; match self.matcher { @@ -620,7 +617,8 @@ impl Matcher for RegexMatcher { dst: &mut Vec, append: F, ) -> Result<(), NoError> - where F: FnMut(&Self::Captures, &mut Vec) -> bool + where + F: FnMut(&Self::Captures, &mut Vec) -> bool, { use self::RegexMatcherImpl::*; match self.matcher { @@ -745,7 +743,8 @@ impl Matcher for StandardMatcher { haystack: &[u8], at: usize, ) -> Result, NoError> { - Ok(self.regex + Ok(self + .regex .find_at(haystack, at) .map(|m| Match::new(m.start(), m.end()))) } @@ -767,7 +766,8 @@ impl Matcher for StandardMatcher { haystack: &[u8], mut matched: F, ) -> Result, NoError> - where F: FnMut(Match) -> Result + where + F: FnMut(Match) -> Result, { for m in self.regex.find_iter(haystack) { match matched(Match::new(m.start(), m.end())) { @@ -785,9 +785,10 @@ impl Matcher for StandardMatcher { at: usize, caps: &mut RegexCaptures, ) -> Result { - Ok(self.regex.captures_read_at( - &mut caps.locations_mut(), haystack, at, - ).is_some()) + Ok(self + .regex + .captures_read_at(&mut caps.locations_mut(), haystack, at) + .is_some()) } fn shortest_match_at( @@ -901,7 +902,9 @@ impl RegexCaptures { offset: usize, ) -> RegexCaptures { RegexCaptures(RegexCapturesImp::Regex { - locs, offset, strip_crlf: false, + locs, + offset, + strip_crlf: false, }) } @@ -910,9 +913,7 @@ impl RegexCaptures { RegexCapturesImp::AhoCorasick { .. } => { panic!("getting locations for simple captures is invalid") } - RegexCapturesImp::Regex { ref locs, .. } => { - locs - } + RegexCapturesImp::Regex { ref locs, .. } => locs, } } @@ -921,9 +922,7 @@ impl RegexCaptures { RegexCapturesImp::AhoCorasick { .. 
} => { panic!("getting locations for simple captures is invalid") } - RegexCapturesImp::Regex { ref mut locs, .. } => { - locs - } + RegexCapturesImp::Regex { ref mut locs, .. } => locs, } } @@ -952,23 +951,19 @@ impl RegexCaptures { #[cfg(test)] mod tests { - use grep_matcher::{LineMatchKind, Matcher}; use super::*; + use grep_matcher::{LineMatchKind, Matcher}; // Test that enabling word matches does the right thing and demonstrate // the difference between it and surrounding the regex in `\b`. #[test] fn word() { - let matcher = RegexMatcherBuilder::new() - .word(true) - .build(r"-2") - .unwrap(); + let matcher = + RegexMatcherBuilder::new().word(true).build(r"-2").unwrap(); assert!(matcher.is_match(b"abc -2 foo").unwrap()); - let matcher = RegexMatcherBuilder::new() - .word(false) - .build(r"\b-2\b") - .unwrap(); + let matcher = + RegexMatcherBuilder::new().word(false).build(r"\b-2\b").unwrap(); assert!(!matcher.is_match(b"abc -2 foo").unwrap()); } @@ -977,9 +972,7 @@ mod tests { #[test] fn line_terminator() { // This works, because there's no line terminator specified. - let matcher = RegexMatcherBuilder::new() - .build(r"abc\sxyz") - .unwrap(); + let matcher = RegexMatcherBuilder::new().build(r"abc\sxyz").unwrap(); assert!(matcher.is_match(b"abc\nxyz").unwrap()); // This doesn't. @@ -1029,16 +1022,12 @@ mod tests { // Test that smart case works. #[test] fn case_smart() { - let matcher = RegexMatcherBuilder::new() - .case_smart(true) - .build(r"abc") - .unwrap(); + let matcher = + RegexMatcherBuilder::new().case_smart(true).build(r"abc").unwrap(); assert!(matcher.is_match(b"ABC").unwrap()); - let matcher = RegexMatcherBuilder::new() - .case_smart(true) - .build(r"aBc") - .unwrap(); + let matcher = + RegexMatcherBuilder::new().case_smart(true).build(r"aBc").unwrap(); assert!(!matcher.is_match(b"ABC").unwrap()); } @@ -1060,9 +1049,7 @@ mod tests { // With no line terminator set, we can't employ any optimizations, // so we get a confirmed match. - let matcher = RegexMatcherBuilder::new() - .build(r"\wfoo\s") - .unwrap(); + let matcher = RegexMatcherBuilder::new().build(r"\wfoo\s").unwrap(); let m = matcher.find_candidate_line(b"afoo ").unwrap().unwrap(); assert!(is_confirmed(m)); diff --git a/grep-regex/src/multi.rs b/grep-regex/src/multi.rs index 6e43e9759..ef4e62c2c 100644 --- a/grep-regex/src/multi.rs +++ b/grep-regex/src/multi.rs @@ -1,5 +1,5 @@ use aho_corasick::{AhoCorasick, AhoCorasickBuilder, MatchKind}; -use grep_matcher::{Matcher, Match, NoError}; +use grep_matcher::{Match, Matcher, NoError}; use regex_syntax::hir::Hir; use error::Error; @@ -93,15 +93,13 @@ pub fn alternation_literals(expr: &Hir) -> Option>> { _ => return None, // one literal isn't worth it }; - let extendlit = |lit: &Literal, dst: &mut Vec| { - match *lit { - Literal::Unicode(c) => { - let mut buf = [0; 4]; - dst.extend_from_slice(c.encode_utf8(&mut buf).as_bytes()); - } - Literal::Byte(b) => { - dst.push(b); - } + let extendlit = |lit: &Literal, dst: &mut Vec| match *lit { + Literal::Unicode(c) => { + let mut buf = [0; 4]; + dst.extend_from_slice(c.encode_utf8(&mut buf).as_bytes()); + } + Literal::Byte(b) => { + dst.push(b); } }; diff --git a/grep-regex/src/non_matching.rs b/grep-regex/src/non_matching.rs index f2db2252e..2270f94d6 100644 --- a/grep-regex/src/non_matching.rs +++ b/grep-regex/src/non_matching.rs @@ -11,14 +11,9 @@ pub fn non_matching_bytes(expr: &Hir) -> ByteSet { /// Remove any bytes from the given set that can occur in a matched produced by /// the given expression. 
-fn remove_matching_bytes( - expr: &Hir, - set: &mut ByteSet, -) { +fn remove_matching_bytes(expr: &Hir, set: &mut ByteSet) { match *expr.kind() { - HirKind::Empty - | HirKind::Anchor(_) - | HirKind::WordBoundary(_) => {} + HirKind::Empty | HirKind::Anchor(_) | HirKind::WordBoundary(_) => {} HirKind::Literal(hir::Literal::Unicode(c)) => { for &b in c.encode_utf8(&mut [0; 4]).as_bytes() { set.remove(b); @@ -105,15 +100,20 @@ mod tests { #[test] fn dot() { - assert_eq!(sparse(&extract(".")), vec![ - b'\n', - 192, 193, 245, 246, 247, 248, 249, - 250, 251, 252, 253, 254, 255, - ]); - assert_eq!(sparse(&extract("(?s).")), vec![ - 192, 193, 245, 246, 247, 248, 249, - 250, 251, 252, 253, 254, 255, - ]); + assert_eq!( + sparse(&extract(".")), + vec![ + b'\n', 192, 193, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, + ] + ); + assert_eq!( + sparse(&extract("(?s).")), + vec![ + 192, 193, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, + ] + ); assert_eq!(sparse(&extract("(?-u).")), vec![b'\n']); assert_eq!(sparse(&extract("(?s-u).")), vec![]); } diff --git a/grep-regex/src/strip.rs b/grep-regex/src/strip.rs index 6cf3e47f9..5322e5c93 100644 --- a/grep-regex/src/strip.rs +++ b/grep-regex/src/strip.rs @@ -33,10 +33,7 @@ pub fn strip_from_match( /// The implementation of strip_from_match. The given byte must be ASCII. This /// function panics otherwise. -fn strip_from_match_ascii( - expr: Hir, - byte: u8, -) -> Result { +fn strip_from_match_ascii(expr: Hir, byte: u8) -> Result { assert!(byte <= 0x7F); let chr = byte as char; assert_eq!(chr.len_utf8(), 1); @@ -88,13 +85,15 @@ fn strip_from_match_ascii( Hir::group(x) } HirKind::Concat(xs) => { - let xs = xs.into_iter() + let xs = xs + .into_iter() .map(|e| strip_from_match_ascii(e, byte)) .collect::, Error>>()?; Hir::concat(xs) } HirKind::Alternation(xs) => { - let xs = xs.into_iter() + let xs = xs + .into_iter() .map(|e| strip_from_match_ascii(e, byte)) .collect::, Error>>()?; Hir::alternation(xs) @@ -106,8 +105,8 @@ fn strip_from_match_ascii( mod tests { use regex_syntax::Parser; + use super::{strip_from_match, LineTerminator}; use error::Error; - use super::{LineTerminator, strip_from_match}; fn roundtrip(pattern: &str, byte: u8) -> String { roundtrip_line_term(pattern, LineTerminator::byte(byte)).unwrap() diff --git a/grep-regex/src/util.rs b/grep-regex/src/util.rs index 9b4e67c7b..71b4ad7e7 100644 --- a/grep-regex/src/util.rs +++ b/grep-regex/src/util.rs @@ -1,8 +1,8 @@ /// Converts an arbitrary sequence of bytes to a literal suitable for building /// a regular expression. pub fn bytes_to_regex(bs: &[u8]) -> String { - use std::fmt::Write; use regex_syntax::is_meta_character; + use std::fmt::Write; let mut s = String::with_capacity(bs.len()); for &b in bs { diff --git a/grep-regex/src/word.rs b/grep-regex/src/word.rs index 09a1f8feb..18e2fe6d3 100644 --- a/grep-regex/src/word.rs +++ b/grep-regex/src/word.rs @@ -1,5 +1,5 @@ -use std::collections::HashMap; use std::cell::RefCell; +use std::collections::HashMap; use std::sync::Arc; use grep_matcher::{Match, Matcher, NoError}; @@ -45,9 +45,8 @@ impl WordMatcher { /// The given options are used to construct the regular expression /// internally. 
pub fn new(expr: &ConfiguredHIR) -> Result { - let original = expr.with_pattern(|pat| { - format!("^(?:{})$", pat) - })?.regex()?; + let original = + expr.with_pattern(|pat| format!("^(?:{})$", pat))?.regex()?; let word_expr = expr.with_pattern(|pat| { let pat = format!(r"(?:(?-m:^)|\W)({})(?:(?-m:$)|\W)", pat); debug!("word regex: {:?}", pat); @@ -112,9 +111,8 @@ impl WordMatcher { } let (_, slen) = bstr::decode_utf8(&haystack[cand]); let (_, elen) = bstr::decode_last_utf8(&haystack[cand]); - cand = cand - .with_start(cand.start() + slen) - .with_end(cand.end() - elen); + cand = + cand.with_start(cand.start() + slen).with_end(cand.end() - elen); if self.original.is_match(&haystack[cand]) { Ok(Some(cand)) } else { @@ -148,9 +146,8 @@ impl Matcher for WordMatcher { Err(()) => {} } - let cell = self.locs.get_or(|| { - RefCell::new(self.regex.capture_locations()) - }); + let cell = + self.locs.get_or(|| RefCell::new(self.regex.capture_locations())); let mut caps = cell.borrow_mut(); self.regex.captures_read_at(&mut caps, haystack, at); Ok(caps.get(1).map(|m| Match::new(m.0, m.1))) @@ -174,9 +171,8 @@ impl Matcher for WordMatcher { at: usize, caps: &mut RegexCaptures, ) -> Result { - let r = self.regex.captures_read_at( - caps.locations_mut(), haystack, at, - ); + let r = + self.regex.captures_read_at(caps.locations_mut(), haystack, at); Ok(r.is_some()) } @@ -187,9 +183,9 @@ impl Matcher for WordMatcher { #[cfg(test)] mod tests { - use grep_matcher::{Captures, Match, Matcher}; - use config::Config; use super::WordMatcher; + use config::Config; + use grep_matcher::{Captures, Match, Matcher}; fn matcher(pattern: &str) -> WordMatcher { let chir = Config::default().hir(pattern).unwrap(); diff --git a/grep-searcher/examples/search-stdin.rs b/grep-searcher/examples/search-stdin.rs index 9ce149151..cd0bc4e84 100644 --- a/grep-searcher/examples/search-stdin.rs +++ b/grep-searcher/examples/search-stdin.rs @@ -7,8 +7,8 @@ use std::io; use std::process; use grep_regex::RegexMatcher; -use grep_searcher::Searcher; use grep_searcher::sinks::UTF8; +use grep_searcher::Searcher; fn main() { if let Err(err) = example() { @@ -20,14 +20,18 @@ fn main() { fn example() -> Result<(), Box> { let pattern = match env::args().nth(1) { Some(pattern) => pattern, - None => return Err(From::from(format!( - "Usage: search-stdin " - ))), + None => { + return Err(From::from(format!("Usage: search-stdin "))) + } }; let matcher = RegexMatcher::new(&pattern)?; - Searcher::new().search_reader(&matcher, io::stdin(), UTF8(|lnum, line| { - print!("{}:{}", lnum, line); - Ok(true) - }))?; + Searcher::new().search_reader( + &matcher, + io::stdin(), + UTF8(|lnum, line| { + print!("{}:{}", lnum, line); + Ok(true) + }), + )?; Ok(()) } diff --git a/grep-searcher/src/lib.rs b/grep-searcher/src/lib.rs index 6a9f4ba7e..c37eace13 100644 --- a/grep-searcher/src/lib.rs +++ b/grep-searcher/src/lib.rs @@ -112,14 +112,13 @@ extern crate regex; pub use lines::{LineIter, LineStep}; pub use searcher::{ - BinaryDetection, ConfigError, Encoding, MmapChoice, - Searcher, SearcherBuilder, + BinaryDetection, ConfigError, Encoding, MmapChoice, Searcher, + SearcherBuilder, }; +pub use sink::sinks; pub use sink::{ - Sink, SinkError, - SinkContext, SinkContextKind, SinkFinish, SinkMatch, + Sink, SinkContext, SinkContextKind, SinkError, SinkFinish, SinkMatch, }; -pub use sink::sinks; #[macro_use] mod macros; diff --git a/grep-searcher/src/line_buffer.rs b/grep-searcher/src/line_buffer.rs index 96be997a5..bd3a273d0 100644 --- a/grep-searcher/src/line_buffer.rs +++ 
b/grep-searcher/src/line_buffer.rs @@ -4,7 +4,7 @@ use std::io; use bstr::ByteSlice; /// The default buffer capacity that we use for the line buffer. -pub(crate) const DEFAULT_BUFFER_CAPACITY: usize = 8 * (1<<10); // 8 KB +pub(crate) const DEFAULT_BUFFER_CAPACITY: usize = 8 * (1 << 10); // 8 KB /// The behavior of a searcher in the face of long lines and big contexts. /// @@ -442,16 +442,15 @@ impl LineBuffer { } } BinaryDetection::Convert(byte) => { - if let Some(i) = replace_bytes( - newbytes, - byte, - self.config.lineterm, - ) { + if let Some(i) = + replace_bytes(newbytes, byte, self.config.lineterm) + { // Record only the first binary offset. if self.binary_byte_offset.is_none() { - self.binary_byte_offset = - Some(self.absolute_byte_offset - + (oldend + i) as u64); + self.binary_byte_offset = Some( + self.absolute_byte_offset + + (oldend + i) as u64, + ); } } } @@ -542,9 +541,9 @@ fn replace_bytes(bytes: &mut [u8], src: u8, replacement: u8) -> Option { #[cfg(test)] mod tests { - use std::str; - use bstr::{ByteSlice, ByteVec}; use super::*; + use bstr::{ByteSlice, ByteVec}; + use std::str; const SHERLOCK: &'static str = "\ For the Doctor Watsons of this world, as opposed to the Sherlock @@ -858,10 +857,13 @@ and exhibited clearly, with a label attached.\ assert!(rdr.buffer().is_empty()); assert!(rdr.fill().unwrap()); - assert_eq!(rdr.bstr(), "\ + assert_eq!( + rdr.bstr(), + "\ For the Doctor Watsons of this world, as opposed to the Sherlock Holmeses, s\ -"); +" + ); rdr.consume_all(); assert!(!rdr.fill().unwrap()); diff --git a/grep-searcher/src/lines.rs b/grep-searcher/src/lines.rs index 57680b404..387a1b460 100644 --- a/grep-searcher/src/lines.rs +++ b/grep-searcher/src/lines.rs @@ -130,14 +130,9 @@ pub fn without_terminator(bytes: &[u8], line_term: LineTerminator) -> &[u8] { /// /// Line terminators are considered part of the line they terminate. 
#[inline(always)] -pub fn locate( - bytes: &[u8], - line_term: u8, - range: Match, -) -> Match { - let line_start = bytes[..range.start()] - .rfind_byte(line_term) - .map_or(0, |i| i + 1); +pub fn locate(bytes: &[u8], line_term: u8, range: Match) -> Match { + let line_start = + bytes[..range.start()].rfind_byte(line_term).map_or(0, |i| i + 1); let line_end = if range.end() > line_start && bytes[range.end() - 1] == line_term { range.end() @@ -201,10 +196,10 @@ fn preceding_by_pos( #[cfg(test)] mod tests { + use super::*; + use grep_matcher::Match; use std::ops::Range; use std::str; - use grep_matcher::Match; - use super::*; const SHERLOCK: &'static str = "\ For the Doctor Watsons of this world, as opposed to the Sherlock @@ -260,29 +255,37 @@ and exhibited clearly, with a label attached.\ assert_eq!( loc(t, lines[0].start, lines[0].end), - m(lines[0].start, lines[0].end)); + m(lines[0].start, lines[0].end) + ); assert_eq!( loc(t, lines[0].start + 1, lines[0].end), - m(lines[0].start, lines[0].end)); + m(lines[0].start, lines[0].end) + ); assert_eq!( loc(t, lines[0].end - 1, lines[0].end), - m(lines[0].start, lines[0].end)); + m(lines[0].start, lines[0].end) + ); assert_eq!( loc(t, lines[0].end, lines[0].end), - m(lines[1].start, lines[1].end)); + m(lines[1].start, lines[1].end) + ); assert_eq!( loc(t, lines[5].start, lines[5].end), - m(lines[5].start, lines[5].end)); + m(lines[5].start, lines[5].end) + ); assert_eq!( loc(t, lines[5].start + 1, lines[5].end), - m(lines[5].start, lines[5].end)); + m(lines[5].start, lines[5].end) + ); assert_eq!( loc(t, lines[5].end - 1, lines[5].end), - m(lines[5].start, lines[5].end)); + m(lines[5].start, lines[5].end) + ); assert_eq!( loc(t, lines[5].end, lines[5].end), - m(lines[5].start, lines[5].end)); + m(lines[5].start, lines[5].end) + ); } #[test] diff --git a/grep-searcher/src/searcher/core.rs b/grep-searcher/src/searcher/core.rs index d534b663a..fe4254ead 100644 --- a/grep-searcher/src/searcher/core.rs +++ b/grep-searcher/src/searcher/core.rs @@ -3,12 +3,11 @@ use std::cmp; use bstr::ByteSlice; use grep_matcher::{LineMatchKind, Matcher}; -use lines::{self, LineStep}; use line_buffer::BinaryDetection; +use lines::{self, LineStep}; use searcher::{Config, Range, Searcher}; use sink::{ - Sink, SinkError, - SinkFinish, SinkContext, SinkContextKind, SinkMatch, + Sink, SinkContext, SinkContextKind, SinkError, SinkFinish, SinkMatch, }; #[derive(Debug)] @@ -36,11 +35,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> { binary: bool, ) -> Core<'s, M, S> { let line_number = - if searcher.config.line_number { - Some(1) - } else { - None - }; + if searcher.config.line_number { Some(1) } else { None }; let core = Core { config: &searcher.config, matcher: matcher, @@ -108,10 +103,8 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> { ) -> Result<(), S::Error> { self.sink.finish( &self.searcher, - &SinkFinish { - byte_count, - binary_byte_offset, - }) + &SinkFinish { byte_count, binary_byte_offset }, + ) } pub fn match_by_line(&mut self, buf: &[u8]) -> Result { @@ -123,23 +116,22 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> { } pub fn roll(&mut self, buf: &[u8]) -> usize { - let consumed = - if self.config.max_context() == 0 { - buf.len() - } else { - // It might seem like all we need to care about here is just - // the "before context," but in order to sink the context - // separator (when before_context==0 and after_context>0), we - // need to know something about the position of the previous - // line visited, even if we're at the beginning of the buffer. 
- let context_start = lines::preceding( - buf, - self.config.line_term.as_byte(), - self.config.max_context(), - ); - let consumed = cmp::max(context_start, self.last_line_visited); - consumed - }; + let consumed = if self.config.max_context() == 0 { + buf.len() + } else { + // It might seem like all we need to care about here is just + // the "before context," but in order to sink the context + // separator (when before_context==0 and after_context>0), we + // need to know something about the position of the previous + // line visited, even if we're at the beginning of the buffer. + let context_start = lines::preceding( + buf, + self.config.line_term.as_byte(), + self.config.max_context(), + ); + let consumed = cmp::max(context_start, self.last_line_visited); + consumed + }; self.count_lines(buf, consumed); self.absolute_byte_offset += consumed as u64; self.last_line_counted = 0; @@ -185,11 +177,12 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> { if range.is_empty() { return Ok(true); } - let before_context_start = range.start() + lines::preceding( - &buf[range], - self.config.line_term.as_byte(), - self.config.before_context - 1, - ); + let before_context_start = range.start() + + lines::preceding( + &buf[range], + self.config.line_term.as_byte(), + self.config.before_context - 1, + ); let range = Range::new(before_context_start, range.end()); let mut stepper = LineStep::new( @@ -552,8 +545,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> { ) -> Result { let is_gap = self.last_line_visited < start_of_line; let any_context = - self.config.before_context > 0 - || self.config.after_context > 0; + self.config.before_context > 0 || self.config.after_context > 0; if !any_context || !self.has_sunk || !is_gap { Ok(true) diff --git a/grep-searcher/src/searcher/glue.rs b/grep-searcher/src/searcher/glue.rs index 4f362dabb..d82499a31 100644 --- a/grep-searcher/src/searcher/glue.rs +++ b/grep-searcher/src/searcher/glue.rs @@ -2,12 +2,12 @@ use std::cmp; use std::io; use grep_matcher::Matcher; +use line_buffer::{LineBufferReader, DEFAULT_BUFFER_CAPACITY}; use lines::{self, LineStep}; -use line_buffer::{DEFAULT_BUFFER_CAPACITY, LineBufferReader}; use sink::{Sink, SinkError}; -use searcher::{Config, Range, Searcher}; use searcher::core::Core; +use searcher::{Config, Range, Searcher}; #[derive(Debug)] pub struct ReadByLine<'s, M: 's, R, S> { @@ -17,9 +17,10 @@ pub struct ReadByLine<'s, M: 's, R, S> { } impl<'s, M, R, S> ReadByLine<'s, M, R, S> -where M: Matcher, - R: io::Read, - S: Sink +where + M: Matcher, + R: io::Read, + S: Sink, { pub fn new( searcher: &'s Searcher, @@ -38,9 +39,8 @@ where M: Matcher, pub fn run(mut self) -> Result<(), S::Error> { if self.core.begin()? { - while - self.fill()? && self.core.match_by_line(self.rdr.buffer())? - {} + while self.fill()? && self.core.match_by_line(self.rdr.buffer())? { + } } self.core.finish( self.rdr.absolute_byte_offset(), @@ -82,7 +82,7 @@ where M: Matcher, fn should_binary_quit(&self) -> bool { self.rdr.binary_byte_offset().is_some() - && self.config.binary.quit_byte().is_some() + && self.config.binary.quit_byte().is_some() } } @@ -111,14 +111,11 @@ impl<'s, M: Matcher, S: Sink> SliceByLine<'s, M, S> { pub fn run(mut self) -> Result<(), S::Error> { if self.core.begin()? { - let binary_upto = cmp::min( - self.slice.len(), - DEFAULT_BUFFER_CAPACITY, - ); + let binary_upto = + cmp::min(self.slice.len(), DEFAULT_BUFFER_CAPACITY); let binary_range = Range::new(0, binary_upto); if !self.core.detect_binary(self.slice, &binary_range)? 
{ - while - !self.slice[self.core.pos()..].is_empty() + while !self.slice[self.core.pos()..].is_empty() && self.core.match_by_line(self.slice)? {} } @@ -163,10 +160,8 @@ impl<'s, M: Matcher, S: Sink> MultiLine<'s, M, S> { pub fn run(mut self) -> Result<(), S::Error> { if self.core.begin()? { - let binary_upto = cmp::min( - self.slice.len(), - DEFAULT_BUFFER_CAPACITY, - ); + let binary_upto = + cmp::min(self.slice.len(), DEFAULT_BUFFER_CAPACITY); let binary_range = Range::new(0, binary_upto); if !self.core.detect_binary(self.slice, &binary_range)? { let mut keepgoing = true; @@ -218,11 +213,8 @@ impl<'s, M: Matcher, S: Sink> MultiLine<'s, M, S> { }; self.advance(&mat); - let line = lines::locate( - self.slice, - self.config.line_term.as_byte(), - mat, - ); + let line = + lines::locate(self.slice, self.config.line_term.as_byte(), mat); // We delay sinking the match to make sure we group adjacent matches // together in a single sink. Adjacent matches are distinct matches // that start and end on the same line, respectively. This guarantees @@ -502,7 +494,8 @@ byte count:366 let byte_count = haystack.len(); let exp = format!( "4:abc\n8:defxxxabc\n18:defxxx\n\nbyte count:{}\n", - byte_count); + byte_count + ); SearcherTester::new(haystack, "abc\ndef") .by_line(false) @@ -517,7 +510,8 @@ byte count:366 let byte_count = haystack.len(); let exp = format!( "4:abc\n8:defabc\n15:defxxx\n\nbyte count:{}\n", - byte_count); + byte_count + ); SearcherTester::new(haystack, "abc\ndef") .by_line(false) @@ -571,9 +565,8 @@ d "; let byte_count = haystack.len(); let exp = format!("4:\n7:\n8:\n\nbyte count:{}\n", byte_count); - let exp_line = format!( - "3:4:\n5:7:\n6:8:\n\nbyte count:{}\n", - byte_count); + let exp_line = + format!("3:4:\n5:7:\n6:8:\n\nbyte count:{}\n", byte_count); SearcherTester::new(haystack, r"^$") .expected_no_line_number(&exp) @@ -595,9 +588,8 @@ c d"; let byte_count = haystack.len(); let exp = format!("4:\n7:\n8:\n\nbyte count:{}\n", byte_count); - let exp_line = format!( - "3:4:\n5:7:\n6:8:\n\nbyte count:{}\n", - byte_count); + let exp_line = + format!("3:4:\n5:7:\n6:8:\n\nbyte count:{}\n", byte_count); SearcherTester::new(haystack, r"^$") .expected_no_line_number(&exp) @@ -620,12 +612,9 @@ d "; let byte_count = haystack.len(); - let exp = format!( - "4:\n7:\n8:\n11:\n\nbyte count:{}\n", - byte_count); - let exp_line = format!( - "3:4:\n5:7:\n6:8:\n8:11:\n\nbyte count:{}\n", - byte_count); + let exp = format!("4:\n7:\n8:\n11:\n\nbyte count:{}\n", byte_count); + let exp_line = + format!("3:4:\n5:7:\n6:8:\n8:11:\n\nbyte count:{}\n", byte_count); SearcherTester::new(haystack, r"^$") .expected_no_line_number(&exp) @@ -667,11 +656,8 @@ d let mut searcher = SearcherBuilder::new() .heap_limit(Some(3)) // max line length is 4, one byte short .build(); - let result = searcher.search_reader( - &matcher, - haystack.as_bytes(), - &mut sink, - ); + let result = + searcher.search_reader(&matcher, haystack.as_bytes(), &mut sink); assert!(result.is_err()); } @@ -691,11 +677,8 @@ d .multi_line(true) .heap_limit(Some(haystack.len())) // actually need one more byte .build(); - let result = searcher.search_reader( - &matcher, - haystack.as_bytes(), - &mut sink, - ); + let result = + searcher.search_reader(&matcher, haystack.as_bytes(), &mut sink); assert!(result.is_err()); } @@ -1508,12 +1491,16 @@ and exhibited clearly, with a label attached.\ let haystack = SHERLOCK; let matcher = RegexMatcher::new("Sherlock"); - let mut searcher = SearcherBuilder::new() - .line_number(true) - .build(); - 
searcher.search_reader(&matcher, haystack, sinks::Lossy(|n, line| { - print!("{}:{}", n, line); - Ok(true) - })).unwrap(); + let mut searcher = SearcherBuilder::new().line_number(true).build(); + searcher + .search_reader( + &matcher, + haystack, + sinks::Lossy(|n, line| { + print!("{}:{}", n, line); + Ok(true) + }), + ) + .unwrap(); } } diff --git a/grep-searcher/src/searcher/mod.rs b/grep-searcher/src/searcher/mod.rs index e20e04a39..b64a85868 100644 --- a/grep-searcher/src/searcher/mod.rs +++ b/grep-searcher/src/searcher/mod.rs @@ -9,10 +9,10 @@ use encoding_rs; use encoding_rs_io::DecodeReaderBytesBuilder; use grep_matcher::{LineTerminator, Match, Matcher}; use line_buffer::{ - self, BufferAllocation, LineBuffer, LineBufferBuilder, LineBufferReader, - DEFAULT_BUFFER_CAPACITY, alloc_error, + self, alloc_error, BufferAllocation, LineBuffer, LineBufferBuilder, + LineBufferReader, DEFAULT_BUFFER_CAPACITY, }; -use searcher::glue::{ReadByLine, SliceByLine, MultiLine}; +use searcher::glue::{MultiLine, ReadByLine, SliceByLine}; use sink::{Sink, SinkError}; pub use self::mmap::MmapChoice; @@ -211,12 +211,11 @@ impl Config { .binary_detection(self.binary.0); if let Some(limit) = self.heap_limit { - let (capacity, additional) = - if limit <= DEFAULT_BUFFER_CAPACITY { - (limit, 0) - } else { - (DEFAULT_BUFFER_CAPACITY, limit - DEFAULT_BUFFER_CAPACITY) - }; + let (capacity, additional) = if limit <= DEFAULT_BUFFER_CAPACITY { + (limit, 0) + } else { + (DEFAULT_BUFFER_CAPACITY, limit - DEFAULT_BUFFER_CAPACITY) + }; builder .capacity(capacity) .buffer_alloc(BufferAllocation::Error(additional)); @@ -258,7 +257,9 @@ pub enum ConfigError { } impl ::std::error::Error for ConfigError { - fn description(&self) -> &str { "grep-searcher configuration error" } + fn description(&self) -> &str { + "grep-searcher configuration error" + } } impl fmt::Display for ConfigError { @@ -272,17 +273,14 @@ impl fmt::Display for ConfigError { f, "grep config error: mismatched line terminators, \ matcher has {:?} but searcher has {:?}", - matcher, - searcher - ) - } - ConfigError::UnknownEncoding { ref label } => { - write!( - f, - "grep config error: unknown encoding: {}", - String::from_utf8_lossy(label), + matcher, searcher ) } + ConfigError::UnknownEncoding { ref label } => write!( + f, + "grep config error: unknown encoding: {}", + String::from_utf8_lossy(label), + ), _ => panic!("BUG: unexpected variant found"), } } @@ -310,9 +308,7 @@ impl Default for SearcherBuilder { impl SearcherBuilder { /// Create a new searcher builder with a default configuration. pub fn new() -> SearcherBuilder { - SearcherBuilder { - config: Config::default(), - } + SearcherBuilder { config: Config::default() } } /// Build a searcher with the given matcher. 
@@ -334,7 +330,7 @@ impl SearcherBuilder { Searcher { config: config, decode_builder: decode_builder, - decode_buffer: RefCell::new(vec![0; 8 * (1<<10)]), + decode_buffer: RefCell::new(vec![0; 8 * (1 << 10)]), line_buffer: RefCell::new(self.config.line_buffer()), multi_line_buffer: RefCell::new(vec![]), } @@ -622,9 +618,10 @@ impl Searcher { path: P, write_to: S, ) -> Result<(), S::Error> - where P: AsRef, - M: Matcher, - S: Sink, + where + P: AsRef, + M: Matcher, + S: Sink, { let path = path.as_ref(); let file = File::open(path).map_err(S::Error::error_io)?; @@ -643,8 +640,9 @@ impl Searcher { file: &File, write_to: S, ) -> Result<(), S::Error> - where M: Matcher, - S: Sink, + where + M: Matcher, + S: Sink, { self.search_file_maybe_path(matcher, None, file, write_to) } @@ -656,8 +654,9 @@ impl Searcher { file: &File, write_to: S, ) -> Result<(), S::Error> - where M: Matcher, - S: Sink, + where + M: Matcher, + S: Sink, { if let Some(mmap) = self.config.mmap.open(file, path) { trace!("{:?}: searching via memory map", path); @@ -675,7 +674,8 @@ impl Searcher { matcher, &*self.multi_line_buffer.borrow(), write_to, - ).run() + ) + .run() } else { trace!("{:?}: searching using generic reader", path); self.search_reader(matcher, file, write_to) @@ -699,14 +699,16 @@ impl Searcher { read_from: R, write_to: S, ) -> Result<(), S::Error> - where M: Matcher, - R: io::Read, - S: Sink, + where + M: Matcher, + R: io::Read, + S: Sink, { self.check_config(&matcher).map_err(S::Error::error_config)?; let mut decode_buffer = self.decode_buffer.borrow_mut(); - let read_from = self.decode_builder + let read_from = self + .decode_builder .build_with_buffer(read_from, &mut *decode_buffer) .map_err(S::Error::error_io)?; @@ -719,7 +721,8 @@ impl Searcher { matcher, &*self.multi_line_buffer.borrow(), write_to, - ).run() + ) + .run() } else { let mut line_buffer = self.line_buffer.borrow_mut(); let rdr = LineBufferReader::new(read_from, &mut *line_buffer); @@ -736,8 +739,9 @@ impl Searcher { slice: &[u8], write_to: S, ) -> Result<(), S::Error> - where M: Matcher, - S: Sink, + where + M: Matcher, + S: Sink, { self.check_config(&matcher).map_err(S::Error::error_config)?; @@ -764,8 +768,7 @@ impl Searcher { /// Check that the searcher's configuration and the matcher are consistent /// with each other. fn check_config(&self, matcher: M) -> Result<(), ConfigError> { - if self.config.heap_limit == Some(0) - && !self.config.mmap.is_enabled() + if self.config.heap_limit == Some(0) && !self.config.mmap.is_enabled() { return Err(ConfigError::SearchUnavailable); } @@ -785,7 +788,7 @@ impl Searcher { /// Returns true if and only if the given slice needs to be transcoded. 
fn slice_needs_transcoding(&self, slice: &[u8]) -> bool { self.config.encoding.is_some() - || (self.config.bom_sniffing && slice_has_utf16_bom(slice)) + || (self.config.bom_sniffing && slice_has_utf16_bom(slice)) } } @@ -886,7 +889,8 @@ impl Searcher { assert!(self.config.multi_line); let mut decode_buffer = self.decode_buffer.borrow_mut(); - let mut read_from = self.decode_builder + let mut read_from = self + .decode_builder .build_with_buffer(file, &mut *decode_buffer) .map_err(S::Error::error_io)?; @@ -900,10 +904,8 @@ impl Searcher { if self.config.heap_limit.is_none() { let mut buf = self.multi_line_buffer.borrow_mut(); buf.clear(); - let cap = file - .metadata() - .map(|m| m.len() as usize + 1) - .unwrap_or(0); + let cap = + file.metadata().map(|m| m.len() as usize + 1).unwrap_or(0); buf.reserve(cap); read_from.read_to_end(&mut *buf).map_err(S::Error::error_io)?; return Ok(()); @@ -929,7 +931,9 @@ impl Searcher { let heap_limit = match self.config.heap_limit { Some(heap_limit) => heap_limit, None => { - read_from.read_to_end(&mut *buf).map_err(S::Error::error_io)?; + read_from + .read_to_end(&mut *buf) + .map_err(S::Error::error_io)?; return Ok(()); } }; @@ -983,16 +987,14 @@ fn slice_has_utf16_bom(slice: &[u8]) -> bool { #[cfg(test)] mod tests { - use testutil::{KitchenSink, RegexMatcher}; use super::*; + use testutil::{KitchenSink, RegexMatcher}; #[test] fn config_error_heap_limit() { let matcher = RegexMatcher::new(""); let sink = KitchenSink::new(); - let mut searcher = SearcherBuilder::new() - .heap_limit(Some(0)) - .build(); + let mut searcher = SearcherBuilder::new().heap_limit(Some(0)).build(); let res = searcher.search_slice(matcher, &[], sink); assert!(res.is_err()); } diff --git a/grep-searcher/src/sink.rs b/grep-searcher/src/sink.rs index 74fba00bc..750aefbeb 100644 --- a/grep-searcher/src/sink.rs +++ b/grep-searcher/src/sink.rs @@ -200,10 +200,7 @@ pub trait Sink { /// `finish` is not called and the error is bubbled back up to the caller /// of the searcher. #[inline] - fn begin( - &mut self, - _searcher: &Searcher, - ) -> Result { + fn begin(&mut self, _searcher: &Searcher) -> Result { Ok(true) } @@ -261,10 +258,7 @@ impl<'a, S: Sink> Sink for &'a mut S { } #[inline] - fn begin( - &mut self, - searcher: &Searcher, - ) -> Result { + fn begin(&mut self, searcher: &Searcher) -> Result { (**self).begin(searcher) } @@ -317,10 +311,7 @@ impl Sink for Box { } #[inline] - fn begin( - &mut self, - searcher: &Searcher, - ) -> Result { + fn begin(&mut self, searcher: &Searcher) -> Result { (**self).begin(searcher) } @@ -508,8 +499,8 @@ pub mod sinks { use std::io; use std::str; - use searcher::Searcher; use super::{Sink, SinkError, SinkMatch}; + use searcher::Searcher; /// A sink that provides line numbers and matches as strings while ignoring /// everything else. @@ -528,10 +519,12 @@ pub mod sinks { /// number of the first line in the match. #[derive(Clone, Debug)] pub struct UTF8(pub F) - where F: FnMut(u64, &str) -> Result; + where + F: FnMut(u64, &str) -> Result; impl Sink for UTF8 - where F: FnMut(u64, &str) -> Result + where + F: FnMut(u64, &str) -> Result, { type Error = io::Error; @@ -574,10 +567,12 @@ pub mod sinks { /// number of the first line in the match. 
#[derive(Clone, Debug)] pub struct Lossy(pub F) - where F: FnMut(u64, &str) -> Result; + where + F: FnMut(u64, &str) -> Result; impl Sink for Lossy - where F: FnMut(u64, &str) -> Result + where + F: FnMut(u64, &str) -> Result, { type Error = io::Error; @@ -622,10 +617,12 @@ pub mod sinks { /// number of the first line in the match. #[derive(Clone, Debug)] pub struct Bytes(pub F) - where F: FnMut(u64, &[u8]) -> Result; + where + F: FnMut(u64, &[u8]) -> Result; impl Sink for Bytes - where F: FnMut(u64, &[u8]) -> Result + where + F: FnMut(u64, &[u8]) -> Result, { type Error = io::Error; diff --git a/grep-searcher/src/testutil.rs b/grep-searcher/src/testutil.rs index ec7e29ba8..807e8dc60 100644 --- a/grep-searcher/src/testutil.rs +++ b/grep-searcher/src/testutil.rs @@ -52,10 +52,7 @@ impl RegexMatcher { /// Whether to return every line as a candidate or not. /// /// This forces searchers to handle the case of reporting a false positive. - pub fn every_line_is_candidate( - &mut self, - yes: bool, - ) -> &mut RegexMatcher { + pub fn every_line_is_candidate(&mut self, yes: bool) -> &mut RegexMatcher { self.every_line_is_candidate = yes; self } @@ -70,9 +67,10 @@ impl Matcher for RegexMatcher { haystack: &[u8], at: usize, ) -> Result, NoError> { - Ok(self.regex - .find_at(haystack, at) - .map(|m| Match::new(m.start(), m.end()))) + Ok(self + .regex + .find_at(haystack, at) + .map(|m| Match::new(m.start(), m.end()))) } fn new_captures(&self) -> Result { @@ -253,8 +251,10 @@ impl SearcherTester { panic!("an 'expected' string with NO line numbers must be given"); } if self.line_number && self.expected_with_line_number.is_none() { - panic!("an 'expected' string with line numbers must be given, \ - or disable testing with line numbers"); + panic!( + "an 'expected' string with line numbers must be given, \ + or disable testing with line numbers" + ); } let configs = self.configs(); @@ -465,18 +465,17 @@ impl SearcherTester { lens.sort(); lens.reverse(); - let context_count = - if self.passthru { - self.haystack.lines().count() - } else { - // Why do we add 2 here? Well, we need to add 1 in order to - // have room to search at least one line. We add another - // because the implementation will occasionally include - // an additional line when handling the context. There's - // no particularly good reason, other than keeping the - // implementation simple. - 2 + self.before_context + self.after_context - }; + let context_count = if self.passthru { + self.haystack.lines().count() + } else { + // Why do we add 2 here? Well, we need to add 1 in order to + // have room to search at least one line. We add another + // because the implementation will occasionally include + // an additional line when handling the context. There's + // no particularly good reason, other than keeping the + // implementation simple. + 2 + self.before_context + self.after_context + }; // We add 1 to each line since `str::lines` doesn't include the // line terminator. 
@@ -635,10 +634,11 @@ impl SearcherTester { if self.multi_line && self.line_number { let mut builder = builder.clone(); let expected_slice = match self.expected_slice_with_line_number { - None => { - self.expected_with_line_number - .as_ref().unwrap().to_string() - } + None => self + .expected_with_line_number + .as_ref() + .unwrap() + .to_string(), Some(ref e) => e.to_string(), }; diff --git a/grep/examples/simplegrep.rs b/grep/examples/simplegrep.rs index 37b6a0c42..749cff269 100644 --- a/grep/examples/simplegrep.rs +++ b/grep/examples/simplegrep.rs @@ -40,13 +40,11 @@ fn search(pattern: &str, paths: &[OsString]) -> Result<(), Box> { .build(); let mut printer = StandardBuilder::new() .color_specs(ColorSpecs::default_with_color()) - .build(cli::stdout( - if cli::is_tty_stdout() { - ColorChoice::Auto - } else { - ColorChoice::Never - } - )); + .build(cli::stdout(if cli::is_tty_stdout() { + ColorChoice::Auto + } else { + ColorChoice::Never + })); for path in paths { for result in WalkDir::new(path) { diff --git a/ignore/src/default_types.rs b/ignore/src/default_types.rs new file mode 100644 index 000000000..8a077796e --- /dev/null +++ b/ignore/src/default_types.rs @@ -0,0 +1,244 @@ +/// This list represents the default file types that ripgrep ships with. In +/// general, any file format is fair game, although it should generally be +/// limited to reasonably popular open formats. For other cases, you can add +/// types to each invocation of ripgrep with the '--type-add' flag. +/// +/// If you would like to add or improve this list, please file a PR: +/// https://github.com/BurntSushi/ripgrep +/// +/// Please try to keep this list sorted lexicographically and wrapped to 79 +/// columns (inclusive). +#[rustfmt::skip] +pub const DEFAULT_TYPES: &[(&str, &[&str])] = &[ + ("agda", &["*.agda", "*.lagda"]), + ("aidl", &["*.aidl"]), + ("amake", &["*.mk", "*.bp"]), + ("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]), + ("asm", &["*.asm", "*.s", "*.S"]), + ("asp", &[ + "*.aspx", "*.aspx.cs", "*.aspx.cs", "*.ascx", "*.ascx.cs", "*.ascx.vb", + ]), + ("ats", &["*.ats", "*.dats", "*.sats", "*.hats"]), + ("avro", &["*.avdl", "*.avpr", "*.avsc"]), + ("awk", &["*.awk"]), + ("bazel", &["*.bzl", "WORKSPACE", "BUILD", "BUILD.bazel"]), + ("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]), + ("brotli", &["*.br"]), + ("buildstream", &["*.bst"]), + ("bzip2", &["*.bz2", "*.tbz2"]), + ("c", &["*.[chH]", "*.[chH].in", "*.cats"]), + ("cabal", &["*.cabal"]), + ("cbor", &["*.cbor"]), + ("ceylon", &["*.ceylon"]), + ("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]), + ("cmake", &["*.cmake", "CMakeLists.txt"]), + ("coffeescript", &["*.coffee"]), + ("config", &["*.cfg", "*.conf", "*.config", "*.ini"]), + ("cpp", &[ + "*.[ChH]", "*.cc", "*.[ch]pp", "*.[ch]xx", "*.hh", "*.inl", + "*.[ChH].in", "*.cc.in", "*.[ch]pp.in", "*.[ch]xx.in", "*.hh.in", + ]), + ("creole", &["*.creole"]), + ("crystal", &["Projectfile", "*.cr"]), + ("cs", &["*.cs"]), + ("csharp", &["*.cs"]), + ("cshtml", &["*.cshtml"]), + ("css", &["*.css", "*.scss"]), + ("csv", &["*.csv"]), + ("cython", &["*.pyx", "*.pxi", "*.pxd"]), + ("d", &["*.d"]), + ("dart", &["*.dart"]), + ("dhall", &["*.dhall"]), + ("diff", &["*.patch", "*.diff"]), + ("docker", &["*Dockerfile*"]), + ("edn", &["*.edn"]), + ("elisp", &["*.el"]), + ("elixir", &["*.ex", "*.eex", "*.exs"]), + ("elm", &["*.elm"]), + ("erb", &["*.erb"]), + ("erlang", &["*.erl", "*.hrl"]), + ("fidl", &["*.fidl"]), + ("fish", &["*.fish"]), + ("fortran", &[ + "*.f", "*.F", "*.f77", "*.F77", "*.pfo", + 
"*.f90", "*.F90", "*.f95", "*.F95", + ]), + ("fsharp", &["*.fs", "*.fsx", "*.fsi"]), + ("gap", &["*.g", "*.gap", "*.gi", "*.gd", "*.tst"]), + ("gn", &["*.gn", "*.gni"]), + ("go", &["*.go"]), + ("gradle", &["*.gradle"]), + ("groovy", &["*.groovy", "*.gradle"]), + ("gzip", &["*.gz", "*.tgz"]), + ("h", &["*.h", "*.hpp"]), + ("haml", &["*.haml"]), + ("haskell", &["*.hs", "*.lhs", "*.cpphs", "*.c2hs", "*.hsc"]), + ("hbs", &["*.hbs"]), + ("hs", &["*.hs", "*.lhs"]), + ("html", &["*.htm", "*.html", "*.ejs"]), + ("idris", &["*.idr", "*.lidr"]), + ("java", &["*.java", "*.jsp", "*.jspx", "*.properties"]), + ("jinja", &["*.j2", "*.jinja", "*.jinja2"]), + ("jl", &["*.jl"]), + ("js", &["*.js", "*.jsx", "*.vue"]), + ("json", &["*.json", "composer.lock"]), + ("jsonl", &["*.jsonl"]), + ("julia", &["*.jl"]), + ("jupyter", &["*.ipynb", "*.jpynb"]), + ("kotlin", &["*.kt", "*.kts"]), + ("less", &["*.less"]), + ("license", &[ + // General + "COPYING", "COPYING[.-]*", + "COPYRIGHT", "COPYRIGHT[.-]*", + "EULA", "EULA[.-]*", + "licen[cs]e", "licen[cs]e.*", + "LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*", + "NOTICE", "NOTICE[.-]*", + "PATENTS", "PATENTS[.-]*", + "UNLICEN[CS]E", "UNLICEN[CS]E[.-]*", + // GPL (gpl.txt, etc.) + "agpl[.-]*", + "gpl[.-]*", + "lgpl[.-]*", + // Other license-specific (APACHE-2.0.txt, etc.) + "AGPL-*[0-9]*", + "APACHE-*[0-9]*", + "BSD-*[0-9]*", + "CC-BY-*", + "GFDL-*[0-9]*", + "GNU-*[0-9]*", + "GPL-*[0-9]*", + "LGPL-*[0-9]*", + "MIT-*[0-9]*", + "MPL-*[0-9]*", + "OFL-*[0-9]*", + ]), + ("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]), + ("lock", &["*.lock", "package-lock.json"]), + ("log", &["*.log"]), + ("lua", &["*.lua"]), + ("lz4", &["*.lz4"]), + ("lzma", &["*.lzma"]), + ("m4", &["*.ac", "*.m4"]), + ("make", &[ + "[Gg][Nn][Uu]makefile", "[Mm]akefile", + "[Gg][Nn][Uu]makefile.am", "[Mm]akefile.am", + "[Gg][Nn][Uu]makefile.in", "[Mm]akefile.in", + "*.mk", "*.mak" + ]), + ("mako", &["*.mako", "*.mao"]), + ("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]), + ("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]), + ("matlab", &["*.m"]), + ("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]), + ("mk", &["mkfile"]), + ("ml", &["*.ml"]), + ("msbuild", &[ + "*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets", + ]), + ("nim", &["*.nim", "*.nimf", "*.nimble", "*.nims"]), + ("nix", &["*.nix"]), + ("objc", &["*.h", "*.m"]), + ("objcpp", &["*.h", "*.mm"]), + ("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]), + ("org", &["*.org", "*.org_archive"]), + ("pascal", &["*.pas", "*.dpr", "*.lpr", "*.pp", "*.inc"]), + ("pdf", &["*.pdf"]), + ("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]), + ("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]), + ("pod", &["*.pod"]), + ("postscript", &["*.eps", "*.ps"]), + ("protobuf", &["*.proto"]), + ("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]), + ("puppet", &["*.erb", "*.pp", "*.rb"]), + ("purs", &["*.purs"]), + ("py", &["*.py"]), + ("qmake", &["*.pro", "*.pri", "*.prf"]), + ("qml", &["*.qml"]), + ("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]), + ("rdoc", &["*.rdoc"]), + ("readme", &["README*", "*README"]), + ("robot", &["*.robot"]), + ("rst", &["*.rst"]), + ("ruby", &["Gemfile", "*.gemspec", ".irbrc", "Rakefile", "*.rb"]), + ("rust", &["*.rs"]), + ("sass", &["*.sass", "*.scss"]), + ("scala", &["*.scala", "*.sbt"]), + ("sh", &[ + // Portable/misc. 
init files + ".login", ".logout", ".profile", "profile", + // bash-specific init files + ".bash_login", "bash_login", + ".bash_logout", "bash_logout", + ".bash_profile", "bash_profile", + ".bashrc", "bashrc", "*.bashrc", + // csh-specific init files + ".cshrc", "*.cshrc", + // ksh-specific init files + ".kshrc", "*.kshrc", + // tcsh-specific init files + ".tcshrc", + // zsh-specific init files + ".zshenv", "zshenv", + ".zlogin", "zlogin", + ".zlogout", "zlogout", + ".zprofile", "zprofile", + ".zshrc", "zshrc", + // Extensions + "*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh", + ]), + ("slim", &["*.skim", "*.slim", "*.slime"]), + ("smarty", &["*.tpl"]), + ("sml", &["*.sml", "*.sig"]), + ("soy", &["*.soy"]), + ("spark", &["*.spark"]), + ("spec", &["*.spec"]), + ("sql", &["*.sql", "*.psql"]), + ("stylus", &["*.styl"]), + ("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]), + ("svg", &["*.svg"]), + ("swift", &["*.swift"]), + ("swig", &["*.def", "*.i"]), + ("systemd", &[ + "*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path", + "*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target", + "*.timer", + ]), + ("taskpaper", &["*.taskpaper"]), + ("tcl", &["*.tcl"]), + ("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]), + ("textile", &["*.textile"]), + ("tf", &["*.tf"]), + ("thrift", &["*.thrift"]), + ("toml", &["*.toml", "Cargo.lock"]), + ("ts", &["*.ts", "*.tsx"]), + ("twig", &["*.twig"]), + ("txt", &["*.txt"]), + ("typoscript", &["*.typoscript", "*.ts"]), + ("vala", &["*.vala"]), + ("vb", &["*.vb"]), + ("verilog", &["*.v", "*.vh", "*.sv", "*.svh"]), + ("vhdl", &["*.vhd", "*.vhdl"]), + ("vim", &["*.vim"]), + ("vimscript", &["*.vim"]), + ("webidl", &["*.idl", "*.webidl", "*.widl"]), + ("wiki", &["*.mediawiki", "*.wiki"]), + ("xml", &[ + "*.xml", "*.xml.dist", "*.dtd", "*.xsl", "*.xslt", "*.xsd", "*.xjb", + "*.rng", "*.sch", "*.xhtml", + ]), + ("xz", &["*.xz", "*.txz"]), + ("yacc", &["*.y"]), + ("yaml", &["*.yaml", "*.yml"]), + ("zig", &["*.zig"]), + ("zsh", &[ + ".zshenv", "zshenv", + ".zlogin", "zlogin", + ".zlogout", "zlogout", + ".zprofile", "zprofile", + ".zshrc", "zshrc", + "*.zsh", + ]), + ("zstd", &["*.zst", "*.zstd"]), +]; diff --git a/ignore/src/dir.rs b/ignore/src/dir.rs index 3f4d10bba..83a1faf91 100644 --- a/ignore/src/dir.rs +++ b/ignore/src/dir.rs @@ -157,7 +157,10 @@ impl Ignore { /// /// Note that this can only be called on an `Ignore` matcher with no /// parents (i.e., `is_root` returns `true`). This will panic otherwise. - pub fn add_parents>(&self, path: P) -> (Ignore, Option) { + pub fn add_parents>( + &self, + path: P, + ) -> (Ignore, Option) { if !self.0.opts.parents && !self.0.opts.git_ignore && !self.0.opts.git_exclude @@ -218,7 +221,10 @@ impl Ignore { /// returned if it exists. /// /// Note that all I/O errors are completely ignored. - pub fn add_child>(&self, dir: P) -> (Ignore, Option) { + pub fn add_child>( + &self, + dir: P, + ) -> (Ignore, Option) { let (ig, err) = self.add_child_path(dir.as_ref()); (Ignore(Arc::new(ig)), err) } @@ -313,7 +319,8 @@ impl Ignore { /// Returns true if at least one type of ignore rule should be matched. fn has_any_ignore_rules(&self) -> bool { let opts = self.0.opts; - let has_custom_ignore_files = !self.0.custom_ignore_filenames.is_empty(); + let has_custom_ignore_files = + !self.0.custom_ignore_filenames.is_empty(); let has_explicit_ignores = !self.0.explicit_ignores.is_empty(); opts.ignore @@ -325,7 +332,10 @@ impl Ignore { } /// Like `matched`, but works with a directory entry instead. 
- pub fn matched_dir_entry<'a>(&'a self, dent: &DirEntry) -> Match> { + pub fn matched_dir_entry<'a>( + &'a self, + dent: &DirEntry, + ) -> Match> { let m = self.matched(dent.path(), dent.is_dir()); if m.is_none() && self.0.opts.hidden && is_hidden(dent) { return Match::Ignore(IgnoreMatch::hidden()); @@ -337,7 +347,11 @@ impl Ignore { /// ignored or not. /// /// The match contains information about its origin. - fn matched<'a, P: AsRef>(&'a self, path: P, is_dir: bool) -> Match> { + fn matched<'a, P: AsRef>( + &'a self, + path: P, + is_dir: bool, + ) -> Match> { // We need to be careful with our path. If it has a leading ./, then // strip it because it causes nothing but trouble. let mut path = path.as_ref(); @@ -368,7 +382,8 @@ impl Ignore { } } if !self.0.types.is_empty() { - let mat = self.0.types.matched(path, is_dir).map(IgnoreMatch::types); + let mat = + self.0.types.matched(path, is_dir).map(IgnoreMatch::types); if mat.is_ignore() { return mat; } else if mat.is_whitelist() { @@ -380,17 +395,20 @@ impl Ignore { /// Performs matching only on the ignore files for this directory and /// all parent directories. - fn matched_ignore<'a>(&'a self, path: &Path, is_dir: bool) -> Match> { - let (mut m_custom_ignore, mut m_ignore, mut m_gi, mut m_gi_exclude, mut m_explicit) = ( - Match::None, - Match::None, - Match::None, - Match::None, - Match::None, - ); + fn matched_ignore<'a>( + &'a self, + path: &Path, + is_dir: bool, + ) -> Match> { + let ( + mut m_custom_ignore, + mut m_ignore, + mut m_gi, + mut m_gi_exclude, + mut m_explicit, + ) = (Match::None, Match::None, Match::None, Match::None, Match::None); let any_git = - !self.0.opts.require_git - || self.parents().any(|ig| ig.0.has_git); + !self.0.opts.require_git || self.parents().any(|ig| ig.0.has_git); let mut saw_git = false; for ig in self.parents().take_while(|ig| !ig.0.is_absolute_parent) { if m_custom_ignore.is_none() { @@ -422,7 +440,9 @@ impl Ignore { if self.0.opts.parents { if let Some(abs_parent_path) = self.absolute_base() { let path = abs_parent_path.join(path); - for ig in self.parents().skip_while(|ig| !ig.0.is_absolute_parent) { + for ig in + self.parents().skip_while(|ig| !ig.0.is_absolute_parent) + { if m_custom_ignore.is_none() { m_custom_ignore = ig.0.custom_ignore_matcher @@ -575,7 +595,9 @@ impl IgnoreBuilder { is_absolute_parent: true, absolute_base: None, explicit_ignores: Arc::new(self.explicit_ignores.clone()), - custom_ignore_filenames: Arc::new(self.custom_ignore_filenames.clone()), + custom_ignore_filenames: Arc::new( + self.custom_ignore_filenames.clone(), + ), custom_ignore_matcher: Gitignore::empty(), ignore_matcher: Gitignore::empty(), git_global_matcher: Arc::new(git_global_matcher), @@ -622,8 +644,7 @@ impl IgnoreBuilder { &mut self, file_name: S, ) -> &mut IgnoreBuilder { - self.custom_ignore_filenames - .push(file_name.as_ref().to_os_string()); + self.custom_ignore_filenames.push(file_name.as_ref().to_os_string()); self } @@ -705,7 +726,10 @@ impl IgnoreBuilder { /// Process ignore files case insensitively /// /// This is disabled by default. 
- pub fn ignore_case_insensitive(&mut self, yes: bool) -> &mut IgnoreBuilder { + pub fn ignore_case_insensitive( + &mut self, + yes: bool, + ) -> &mut IgnoreBuilder { self.opts.ignore_case_insensitive = yes; self } @@ -850,10 +874,8 @@ mod tests { let (gi, err) = Gitignore::new(td.path().join("not-an-ignore")); assert!(err.is_none()); - let (ig, err) = IgnoreBuilder::new() - .add_ignore(gi) - .build() - .add_child(td.path()); + let (ig, err) = + IgnoreBuilder::new().add_ignore(gi).build().add_child(td.path()); assert!(err.is_none()); assert!(ig.matched("foo", false).is_ignore()); assert!(ig.matched("bar", false).is_whitelist()); @@ -1125,9 +1147,8 @@ mod tests { mkdirp(git_dir.join("info")); wfile(git_dir.join("info/exclude"), "ignore_me"); mkdirp(git_dir.join("worktrees/linked-worktree")); - let commondir_path = || { - git_dir.join("worktrees/linked-worktree/commondir") - }; + let commondir_path = + || git_dir.join("worktrees/linked-worktree/commondir"); mkdirp(td.path().join("linked-worktree")); let worktree_git_dir_abs = format!( "gitdir: {}", diff --git a/ignore/src/gitignore.rs b/ignore/src/gitignore.rs index e9510dd05..2b8844b91 100644 --- a/ignore/src/gitignore.rs +++ b/ignore/src/gitignore.rs @@ -332,13 +332,10 @@ impl GitignoreBuilder { pub fn build(&self) -> Result { let nignore = self.globs.iter().filter(|g| !g.is_whitelist()).count(); let nwhite = self.globs.iter().filter(|g| g.is_whitelist()).count(); - let set = - self.builder.build().map_err(|err| { - Error::Glob { - glob: None, - err: err.to_string(), - } - })?; + let set = self + .builder + .build() + .map_err(|err| Error::Glob { glob: None, err: err.to_string() })?; Ok(Gitignore { set: set, root: self.root.clone(), @@ -499,18 +496,15 @@ impl GitignoreBuilder { if glob.actual.ends_with("/**") { glob.actual = format!("{}/*", glob.actual); } - let parsed = - GlobBuilder::new(&glob.actual) - .literal_separator(true) - .case_insensitive(self.case_insensitive) - .backslash_escape(true) - .build() - .map_err(|err| { - Error::Glob { - glob: Some(glob.original.clone()), - err: err.kind().to_string(), - } - })?; + let parsed = GlobBuilder::new(&glob.actual) + .literal_separator(true) + .case_insensitive(self.case_insensitive) + .backslash_escape(true) + .build() + .map_err(|err| Error::Glob { + glob: Some(glob.original.clone()), + err: err.kind().to_string(), + })?; self.builder.add(parsed); self.globs.push(glob); Ok(self) @@ -599,9 +593,8 @@ fn parse_excludes_file(data: &[u8]) -> Option { // probably works in more circumstances. I guess we would ideally have // a full INI parser. Yuck. lazy_static! 
{ - static ref RE: Regex = Regex::new( - r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$" - ).unwrap(); + static ref RE: Regex = + Regex::new(r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap(); }; let caps = match RE.captures(data) { None => return None, @@ -630,8 +623,8 @@ fn home_dir() -> Option { #[cfg(test)] mod tests { - use std::path::Path; use super::{Gitignore, GitignoreBuilder}; + use std::path::Path; fn gi_from_str>(root: P, s: &str) -> Gitignore { let mut builder = GitignoreBuilder::new(root); @@ -726,8 +719,11 @@ mod tests { not_ignored!(ignot12, ROOT, "\n\n\n", "foo"); not_ignored!(ignot13, ROOT, "foo/**", "foo", true); not_ignored!( - ignot14, "./third_party/protobuf", "m4/ltoptions.m4", - "./third_party/protobuf/csharp/src/packages/repositories.config"); + ignot14, + "./third_party/protobuf", + "m4/ltoptions.m4", + "./third_party/protobuf/csharp/src/packages/repositories.config" + ); not_ignored!(ignot15, ROOT, "!/bar", "foo/bar"); not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true); not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs"); @@ -771,9 +767,12 @@ mod tests { #[test] fn case_insensitive() { let gi = GitignoreBuilder::new(ROOT) - .case_insensitive(true).unwrap() - .add_str(None, "*.html").unwrap() - .build().unwrap(); + .case_insensitive(true) + .unwrap() + .add_str(None, "*.html") + .unwrap() + .build() + .unwrap(); assert!(gi.matched("foo.html", false).is_ignore()); assert!(gi.matched("foo.HTML", false).is_ignore()); assert!(!gi.matched("foo.htm", false).is_ignore()); diff --git a/ignore/src/lib.rs b/ignore/src/lib.rs index 6cbf4af24..71b112c7c 100644 --- a/ignore/src/lib.rs +++ b/ignore/src/lib.rs @@ -66,10 +66,11 @@ use std::io; use std::path::{Path, PathBuf}; pub use walk::{ - DirEntry, Walk, WalkBuilder, WalkParallel, WalkState, - ParallelVisitorBuilder, ParallelVisitor, + DirEntry, ParallelVisitor, ParallelVisitorBuilder, Walk, WalkBuilder, + WalkParallel, WalkState, }; +mod default_types; mod dir; pub mod gitignore; pub mod overrides; @@ -137,22 +138,16 @@ impl Clone for Error { fn clone(&self) -> Error { match *self { Error::Partial(ref errs) => Error::Partial(errs.clone()), - Error::WithLineNumber { line, ref err } => Error::WithLineNumber { - line: line, - err: err.clone(), - }, - Error::WithPath { ref path, ref err } => Error::WithPath { - path: path.clone(), - err: err.clone(), - }, - Error::WithDepth { depth, ref err } => Error::WithDepth { - depth: depth, - err: err.clone(), - }, - Error::Loop { - ref ancestor, - ref child, - } => Error::Loop { + Error::WithLineNumber { line, ref err } => { + Error::WithLineNumber { line: line, err: err.clone() } + } + Error::WithPath { ref path, ref err } => { + Error::WithPath { path: path.clone(), err: err.clone() } + } + Error::WithDepth { depth, ref err } => { + Error::WithDepth { depth: depth, err: err.clone() } + } + Error::Loop { ref ancestor, ref child } => Error::Loop { ancestor: ancestor.clone(), child: child.clone(), }, @@ -160,11 +155,12 @@ impl Clone for Error { Some(e) => Error::Io(io::Error::from_raw_os_error(e)), None => Error::Io(io::Error::new(err.kind(), err.to_string())), }, - Error::Glob { ref glob, ref err } => Error::Glob { - glob: glob.clone(), - err: err.clone(), - }, - Error::UnrecognizedFileType(ref err) => Error::UnrecognizedFileType(err.clone()), + Error::Glob { ref glob, ref err } => { + Error::Glob { glob: glob.clone(), err: err.clone() } + } + Error::UnrecognizedFileType(ref err) => { + Error::UnrecognizedFileType(err.clone()) + } Error::InvalidDefinition => Error::InvalidDefinition, 
} } @@ -221,19 +217,14 @@ impl Error { /// Turn an error into a tagged error with the given depth. fn with_depth(self, depth: usize) -> Error { - Error::WithDepth { - depth: depth, - err: Box::new(self), - } + Error::WithDepth { depth: depth, err: Box::new(self) } } /// Turn an error into a tagged error with the given file path and line /// number. If path is empty, then it is omitted from the error. fn tagged>(self, path: P, lineno: u64) -> Error { - let errline = Error::WithLineNumber { - line: lineno, - err: Box::new(self), - }; + let errline = + Error::WithLineNumber { line: lineno, err: Box::new(self) }; if path.as_ref().as_os_str().is_empty() { return errline; } @@ -255,10 +246,7 @@ impl Error { let path = err.path().map(|p| p.to_path_buf()); let mut ig_err = Error::Io(io::Error::from(err)); if let Some(path) = path { - ig_err = Error::WithPath { - path: path, - err: Box::new(ig_err), - }; + ig_err = Error::WithPath { path: path, err: Box::new(ig_err) }; } ig_err } @@ -285,16 +273,18 @@ impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::Partial(ref errs) => { - let msgs: Vec = errs.iter().map(|err| err.to_string()).collect(); + let msgs: Vec = + errs.iter().map(|err| err.to_string()).collect(); write!(f, "{}", msgs.join("\n")) } - Error::WithLineNumber { line, ref err } => write!(f, "line {}: {}", line, err), - Error::WithPath { ref path, ref err } => write!(f, "{}: {}", path.display(), err), + Error::WithLineNumber { line, ref err } => { + write!(f, "line {}: {}", line, err) + } + Error::WithPath { ref path, ref err } => { + write!(f, "{}: {}", path.display(), err) + } Error::WithDepth { ref err, .. } => err.fmt(f), - Error::Loop { - ref ancestor, - ref child, - } => write!( + Error::Loop { ref ancestor, ref child } => write!( f, "File system loop found: \ {} points to an ancestor {}", @@ -302,15 +292,13 @@ impl fmt::Display for Error { ancestor.display() ), Error::Io(ref err) => err.fmt(f), - Error::Glob { - glob: None, - ref err, - } => write!(f, "{}", err), - Error::Glob { - glob: Some(ref glob), - ref err, - } => write!(f, "error parsing glob '{}': {}", glob, err), - Error::UnrecognizedFileType(ref ty) => write!(f, "unrecognized file type: {}", ty), + Error::Glob { glob: None, ref err } => write!(f, "{}", err), + Error::Glob { glob: Some(ref glob), ref err } => { + write!(f, "error parsing glob '{}': {}", glob, err) + } + Error::UnrecognizedFileType(ref ty) => { + write!(f, "unrecognized file type: {}", ty) + } Error::InvalidDefinition => write!( f, "invalid definition (format is type:glob, e.g., \ @@ -456,7 +444,8 @@ mod tests { use std::result; /// A convenient result type alias. - pub type Result = result::Result>; + pub type Result = + result::Result>; macro_rules! err { ($($tt:tt)*) => { @@ -494,8 +483,9 @@ mod tests { if path.is_dir() { continue; } - fs::create_dir_all(&path) - .map_err(|e| err!("failed to create {}: {}", path.display(), e))?; + fs::create_dir_all(&path).map_err(|e| { + err!("failed to create {}: {}", path.display(), e) + })?; return Ok(TempDir(path)); } Err(err!("failed to create temp dir after {} tries", TRIES)) diff --git a/ignore/src/overrides.rs b/ignore/src/overrides.rs index 08dbdac24..7c03fbf5d 100644 --- a/ignore/src/overrides.rs +++ b/ignore/src/overrides.rs @@ -115,9 +115,7 @@ impl OverrideBuilder { /// /// Matching is done relative to the directory path provided. 
pub fn new>(path: P) -> OverrideBuilder { - OverrideBuilder { - builder: GitignoreBuilder::new(path), - } + OverrideBuilder { builder: GitignoreBuilder::new(path) } } /// Builds a new override matcher from the globs added so far. @@ -240,9 +238,12 @@ mod tests { #[test] fn case_insensitive() { let ov = OverrideBuilder::new(ROOT) - .case_insensitive(true).unwrap() - .add("*.html").unwrap() - .build().unwrap(); + .case_insensitive(true) + .unwrap() + .add("*.html") + .unwrap() + .build() + .unwrap(); assert!(ov.matched("foo.html", false).is_whitelist()); assert!(ov.matched("foo.HTML", false).is_whitelist()); assert!(ov.matched("foo.htm", false).is_ignore()); @@ -251,9 +252,8 @@ mod tests { #[test] fn default_case_sensitive() { - let ov = OverrideBuilder::new(ROOT) - .add("*.html").unwrap() - .build().unwrap(); + let ov = + OverrideBuilder::new(ROOT).add("*.html").unwrap().build().unwrap(); assert!(ov.matched("foo.html", false).is_whitelist()); assert!(ov.matched("foo.HTML", false).is_ignore()); assert!(ov.matched("foo.htm", false).is_ignore()); diff --git a/ignore/src/pathutil.rs b/ignore/src/pathutil.rs index fbbc0f89b..210d1ecbb 100644 --- a/ignore/src/pathutil.rs +++ b/ignore/src/pathutil.rs @@ -91,8 +91,8 @@ pub fn strip_prefix<'a, P: AsRef + ?Sized>( /// the empty string. #[cfg(unix)] pub fn is_file_name>(path: P) -> bool { - use std::os::unix::ffi::OsStrExt; use memchr::memchr; + use std::os::unix::ffi::OsStrExt; let path = path.as_ref().as_os_str().as_bytes(); memchr(b'/', path).is_none() @@ -113,8 +113,8 @@ pub fn is_file_name>(path: P) -> bool { pub fn file_name<'a, P: AsRef + ?Sized>( path: &'a P, ) -> Option<&'a OsStr> { - use std::os::unix::ffi::OsStrExt; use memchr::memrchr; + use std::os::unix::ffi::OsStrExt; let path = path.as_ref().as_os_str().as_bytes(); if path.is_empty() { diff --git a/ignore/src/types.rs b/ignore/src/types.rs index 0b4464473..d77e54ebe 100644 --- a/ignore/src/types.rs +++ b/ignore/src/types.rs @@ -93,243 +93,10 @@ use globset::{GlobBuilder, GlobSet, GlobSetBuilder}; use regex::Regex; use thread_local::ThreadLocal; +use default_types::DEFAULT_TYPES; use pathutil::file_name; use {Error, Match}; -const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[ - ("agda", &["*.agda", "*.lagda"]), - ("ats", &["*.ats", "*.dats", "*.sats", "*.hats"]), - ("aidl", &["*.aidl"]), - ("amake", &["*.mk", "*.bp"]), - ("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]), - ("asm", &["*.asm", "*.s", "*.S"]), - ("asp", &["*.aspx", "*.aspx.cs", "*.aspx.cs", "*.ascx", "*.ascx.cs", "*.ascx.vb"]), - ("avro", &["*.avdl", "*.avpr", "*.avsc"]), - ("awk", &["*.awk"]), - ("bazel", &["*.bzl", "WORKSPACE", "BUILD", "BUILD.bazel"]), - ("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]), - ("brotli", &["*.br"]), - ("buildstream", &["*.bst"]), - ("bzip2", &["*.bz2", "*.tbz2"]), - ("c", &["*.[chH]", "*.[chH].in", "*.cats"]), - ("cabal", &["*.cabal"]), - ("cbor", &["*.cbor"]), - ("ceylon", &["*.ceylon"]), - ("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]), - ("cmake", &["*.cmake", "CMakeLists.txt"]), - ("coffeescript", &["*.coffee"]), - ("creole", &["*.creole"]), - ("config", &["*.cfg", "*.conf", "*.config", "*.ini"]), - ("cpp", &[ - "*.[ChH]", "*.cc", "*.[ch]pp", "*.[ch]xx", "*.hh", "*.inl", - "*.[ChH].in", "*.cc.in", "*.[ch]pp.in", "*.[ch]xx.in", "*.hh.in", - ]), - ("crystal", &["Projectfile", "*.cr"]), - ("cs", &["*.cs"]), - ("csharp", &["*.cs"]), - ("cshtml", &["*.cshtml"]), - ("css", &["*.css", "*.scss"]), - ("csv", &["*.csv"]), - ("cython", &["*.pyx", 
"*.pxi", "*.pxd"]), - ("dart", &["*.dart"]), - ("d", &["*.d"]), - ("dhall", &["*.dhall"]), - ("diff", &["*.patch", "*.diff"]), - ("docker", &["*Dockerfile*"]), - ("edn", &["*.edn"]), - ("elisp", &["*.el"]), - ("elixir", &["*.ex", "*.eex", "*.exs"]), - ("elm", &["*.elm"]), - ("erb", &["*.erb"]), - ("erlang", &["*.erl", "*.hrl"]), - ("fidl", &["*.fidl"]), - ("fish", &["*.fish"]), - ("fortran", &[ - "*.f", "*.F", "*.f77", "*.F77", "*.pfo", - "*.f90", "*.F90", "*.f95", "*.F95", - ]), - ("fsharp", &["*.fs", "*.fsx", "*.fsi"]), - ("gap", &["*.g", "*.gap", "*.gi", "*.gd", "*.tst"]), - ("gn", &["*.gn", "*.gni"]), - ("go", &["*.go"]), - ("gzip", &["*.gz", "*.tgz"]), - ("groovy", &["*.groovy", "*.gradle"]), - ("gradle", &["*.gradle"]), - ("h", &["*.h", "*.hpp"]), - ("hbs", &["*.hbs"]), - ("haskell", &["*.hs", "*.lhs", "*.cpphs", "*.c2hs", "*.hsc"]), - ("haml", &["*.haml"]), - ("hs", &["*.hs", "*.lhs"]), - ("html", &["*.htm", "*.html", "*.ejs"]), - ("idris", &["*.idr", "*.lidr"]), - ("java", &["*.java", "*.jsp", "*.jspx", "*.properties"]), - ("jinja", &["*.j2", "*.jinja", "*.jinja2"]), - ("js", &[ - "*.js", "*.jsx", "*.vue", - ]), - ("json", &["*.json", "composer.lock"]), - ("jsonl", &["*.jsonl"]), - ("julia", &["*.jl"]), - ("jupyter", &["*.ipynb", "*.jpynb"]), - ("jl", &["*.jl"]), - ("kotlin", &["*.kt", "*.kts"]), - ("less", &["*.less"]), - ("license", &[ - // General - "COPYING", "COPYING[.-]*", - "COPYRIGHT", "COPYRIGHT[.-]*", - "EULA", "EULA[.-]*", - "licen[cs]e", "licen[cs]e.*", - "LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*", - "NOTICE", "NOTICE[.-]*", - "PATENTS", "PATENTS[.-]*", - "UNLICEN[CS]E", "UNLICEN[CS]E[.-]*", - // GPL (gpl.txt, etc.) - "agpl[.-]*", - "gpl[.-]*", - "lgpl[.-]*", - // Other license-specific (APACHE-2.0.txt, etc.) - "AGPL-*[0-9]*", - "APACHE-*[0-9]*", - "BSD-*[0-9]*", - "CC-BY-*", - "GFDL-*[0-9]*", - "GNU-*[0-9]*", - "GPL-*[0-9]*", - "LGPL-*[0-9]*", - "MIT-*[0-9]*", - "MPL-*[0-9]*", - "OFL-*[0-9]*", - ]), - ("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]), - ("lock", &["*.lock", "package-lock.json"]), - ("log", &["*.log"]), - ("lua", &["*.lua"]), - ("lzma", &["*.lzma"]), - ("lz4", &["*.lz4"]), - ("m4", &["*.ac", "*.m4"]), - ("make", &[ - "[Gg][Nn][Uu]makefile", "[Mm]akefile", - "[Gg][Nn][Uu]makefile.am", "[Mm]akefile.am", - "[Gg][Nn][Uu]makefile.in", "[Mm]akefile.in", - "*.mk", "*.mak" - ]), - ("mako", &["*.mako", "*.mao"]), - ("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]), - ("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]), - ("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]), - ("matlab", &["*.m"]), - ("mk", &["mkfile"]), - ("ml", &["*.ml"]), - ("msbuild", &[ - "*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets" - ]), - ("nim", &["*.nim", "*.nimf", "*.nimble", "*.nims"]), - ("nix", &["*.nix"]), - ("objc", &["*.h", "*.m"]), - ("objcpp", &["*.h", "*.mm"]), - ("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]), - ("org", &["*.org", "*.org_archive"]), - ("pascal", &["*.pas", "*.dpr", "*.lpr", "*.pp", "*.inc"]), - ("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]), - ("pdf", &["*.pdf"]), - ("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]), - ("pod", &["*.pod"]), - ("postscript", &["*.eps", "*.ps"]), - ("protobuf", &["*.proto"]), - ("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]), - ("puppet", &["*.erb", "*.pp", "*.rb"]), - ("purs", &["*.purs"]), - ("py", &["*.py"]), - ("qmake", &["*.pro", "*.pri", "*.prf"]), - ("qml", &["*.qml"]), - ("readme", &["README*", "*README"]), - ("r", &["*.R", 
"*.r", "*.Rmd", "*.Rnw"]), - ("rdoc", &["*.rdoc"]), - ("robot", &["*.robot"]), - ("rst", &["*.rst"]), - ("ruby", &["Gemfile", "*.gemspec", ".irbrc", "Rakefile", "*.rb"]), - ("rust", &["*.rs"]), - ("sass", &["*.sass", "*.scss"]), - ("scala", &["*.scala", "*.sbt"]), - ("sh", &[ - // Portable/misc. init files - ".login", ".logout", ".profile", "profile", - // bash-specific init files - ".bash_login", "bash_login", - ".bash_logout", "bash_logout", - ".bash_profile", "bash_profile", - ".bashrc", "bashrc", "*.bashrc", - // csh-specific init files - ".cshrc", "*.cshrc", - // ksh-specific init files - ".kshrc", "*.kshrc", - // tcsh-specific init files - ".tcshrc", - // zsh-specific init files - ".zshenv", "zshenv", - ".zlogin", "zlogin", - ".zlogout", "zlogout", - ".zprofile", "zprofile", - ".zshrc", "zshrc", - // Extensions - "*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh", - ]), - ("slim", &["*.skim", "*.slim", "*.slime"]), - ("smarty", &["*.tpl"]), - ("sml", &["*.sml", "*.sig"]), - ("soy", &["*.soy"]), - ("spark", &["*.spark"]), - ("spec", &["*.spec"]), - ("sql", &["*.sql", "*.psql"]), - ("stylus", &["*.styl"]), - ("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]), - ("svg", &["*.svg"]), - ("swift", &["*.swift"]), - ("swig", &["*.def", "*.i"]), - ("systemd", &[ - "*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path", - "*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target", - "*.timer", - ]), - ("taskpaper", &["*.taskpaper"]), - ("tcl", &["*.tcl"]), - ("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]), - ("textile", &["*.textile"]), - ("thrift", &["*.thrift"]), - ("tf", &["*.tf"]), - ("ts", &["*.ts", "*.tsx"]), - ("txt", &["*.txt"]), - ("toml", &["*.toml", "Cargo.lock"]), - ("twig", &["*.twig"]), - ("typoscript", &["*.typoscript", "*.ts"]), - ("vala", &["*.vala"]), - ("vb", &["*.vb"]), - ("verilog", &["*.v", "*.vh", "*.sv", "*.svh"]), - ("vhdl", &["*.vhd", "*.vhdl"]), - ("vim", &["*.vim"]), - ("vimscript", &["*.vim"]), - ("wiki", &["*.mediawiki", "*.wiki"]), - ("webidl", &["*.idl", "*.webidl", "*.widl"]), - ("xml", &[ - "*.xml", "*.xml.dist", "*.dtd", "*.xsl", "*.xslt", "*.xsd", "*.xjb", - "*.rng", "*.sch", "*.xhtml", - ]), - ("xz", &["*.xz", "*.txz"]), - ("yacc", &["*.y"]), - ("yaml", &["*.yaml", "*.yml"]), - ("zig", &["*.zig"]), - ("zsh", &[ - ".zshenv", "zshenv", - ".zlogin", "zlogin", - ".zlogout", "zlogout", - ".zprofile", "zprofile", - ".zshrc", "zshrc", - "*.zsh", - ]), - ("zstd", &["*.zst", "*.zstd"]), -]; - /// Glob represents a single glob in a set of file type definitions. /// /// There may be more than one glob for a particular file type. @@ -359,7 +126,7 @@ enum GlobInner<'a> { which: usize, /// Whether the selection was negated or not. negated: bool, - } + }, } impl<'a> Glob<'a> { @@ -373,9 +140,7 @@ impl<'a> Glob<'a> { pub fn file_type_def(&self) -> Option<&FileTypeDef> { match self { Glob(GlobInner::UnmatchedIgnore) => None, - Glob(GlobInner::Matched { def, .. }) => { - Some(def) - }, + Glob(GlobInner::Matched { def, .. }) => Some(def), } } } @@ -561,10 +326,7 @@ impl TypesBuilder { /// of default type definitions can be added with `add_defaults`, and /// additional type definitions can be added with `select` and `negate`. 
pub fn new() -> TypesBuilder { - TypesBuilder { - types: HashMap::new(), - selections: vec![], - } + TypesBuilder { types: HashMap::new(), selections: vec![] } } /// Build the current set of file type definitions *and* selections into @@ -589,19 +351,18 @@ impl TypesBuilder { GlobBuilder::new(glob) .literal_separator(true) .build() - .map_err(|err| { - Error::Glob { - glob: Some(glob.to_string()), - err: err.kind().to_string(), - } - })?); + .map_err(|err| Error::Glob { + glob: Some(glob.to_string()), + err: err.kind().to_string(), + })?, + ); glob_to_selection.push((isel, iglob)); } selections.push(selection.clone().map(move |_| def)); } - let set = build_set.build().map_err(|err| { - Error::Glob { glob: None, err: err.to_string() } - })?; + let set = build_set + .build() + .map_err(|err| Error::Glob { glob: None, err: err.to_string() })?; Ok(Types { defs: defs, selections: selections, @@ -673,9 +434,14 @@ impl TypesBuilder { return Err(Error::InvalidDefinition); } let (key, glob) = (name.to_string(), glob.to_string()); - self.types.entry(key).or_insert_with(|| { - FileTypeDef { name: name.to_string(), globs: vec![] } - }).globs.push(glob); + self.types + .entry(key) + .or_insert_with(|| FileTypeDef { + name: name.to_string(), + globs: vec![], + }) + .globs + .push(glob); Ok(()) } @@ -702,7 +468,10 @@ impl TypesBuilder { 3 => { let name = parts[0]; let types_string = parts[2]; - if name.is_empty() || parts[1] != "include" || types_string.is_empty() { + if name.is_empty() + || parts[1] != "include" + || types_string.is_empty() + { return Err(Error::InvalidDefinition); } let types = types_string.split(','); @@ -712,14 +481,15 @@ impl TypesBuilder { return Err(Error::InvalidDefinition); } for type_name in types { - let globs = self.types.get(type_name).unwrap().globs.clone(); + let globs = + self.types.get(type_name).unwrap().globs.clone(); for glob in globs { self.add(name, &glob)?; } } Ok(()) } - _ => Err(Error::InvalidDefinition) + _ => Err(Error::InvalidDefinition), } } @@ -776,7 +546,7 @@ mod tests { "rust:*.rs", "js:*.js", "foo:*.{rs,foo}", - "combo:include:html,rust" + "combo:include:html,rust", ] } @@ -810,7 +580,7 @@ mod tests { "combo:include:html,python", // Bad format "combo:foobar:html,rust", - "" + "", ]; for def in bad_defs { assert!(btypes.add_def(def).is_err()); diff --git a/ignore/src/walk.rs b/ignore/src/walk.rs index ed321eedb..b2063cded 100644 --- a/ignore/src/walk.rs +++ b/ignore/src/walk.rs @@ -102,24 +102,15 @@ impl DirEntry { } fn new_stdin() -> DirEntry { - DirEntry { - dent: DirEntryInner::Stdin, - err: None, - } + DirEntry { dent: DirEntryInner::Stdin, err: None } } fn new_walkdir(dent: walkdir::DirEntry, err: Option) -> DirEntry { - DirEntry { - dent: DirEntryInner::Walkdir(dent), - err: err, - } + DirEntry { dent: DirEntryInner::Walkdir(dent), err: err } } fn new_raw(dent: DirEntryRaw, err: Option) -> DirEntry { - DirEntry { - dent: DirEntryInner::Raw(dent), - err: err, - } + DirEntry { dent: DirEntryInner::Raw(dent), err: err } } } @@ -185,9 +176,9 @@ impl DirEntryInner { )); Err(err.with_path("")) } - Walkdir(ref x) => x - .metadata() - .map_err(|err| Error::Io(io::Error::from(err)).with_path(x.path())), + Walkdir(ref x) => x.metadata().map_err(|err| { + Error::Io(io::Error::from(err)).with_path(x.path()) + }), Raw(ref x) => x.metadata(), } } @@ -314,9 +305,7 @@ impl DirEntryRaw { } fn file_name(&self) -> &OsStr { - self.path - .file_name() - .unwrap_or_else(|| self.path.as_os_str()) + self.path.file_name().unwrap_or_else(|| self.path.as_os_str()) } fn 
depth(&self) -> usize { @@ -328,13 +317,13 @@ impl DirEntryRaw { self.ino } - fn from_entry(depth: usize, ent: &fs::DirEntry) -> Result { + fn from_entry( + depth: usize, + ent: &fs::DirEntry, + ) -> Result { let ty = ent.file_type().map_err(|err| { let err = Error::Io(io::Error::from(err)).with_path(ent.path()); - Error::WithDepth { - depth: depth, - err: Box::new(err), - } + Error::WithDepth { depth: depth, err: Box::new(err) } })?; DirEntryRaw::from_entry_os(depth, ent, ty) } @@ -347,10 +336,7 @@ impl DirEntryRaw { ) -> Result { let md = ent.metadata().map_err(|err| { let err = Error::Io(io::Error::from(err)).with_path(ent.path()); - Error::WithDepth { - depth: depth, - err: Box::new(err), - } + Error::WithDepth { depth: depth, err: Box::new(err) } })?; Ok(DirEntryRaw { path: ent.path(), @@ -392,8 +378,13 @@ impl DirEntryRaw { } #[cfg(windows)] - fn from_path(depth: usize, pb: PathBuf, link: bool) -> Result { - let md = fs::metadata(&pb).map_err(|err| Error::Io(err).with_path(&pb))?; + fn from_path( + depth: usize, + pb: PathBuf, + link: bool, + ) -> Result { + let md = + fs::metadata(&pb).map_err(|err| Error::Io(err).with_path(&pb))?; Ok(DirEntryRaw { path: pb, ty: md.file_type(), @@ -404,10 +395,15 @@ impl DirEntryRaw { } #[cfg(unix)] - fn from_path(depth: usize, pb: PathBuf, link: bool) -> Result { + fn from_path( + depth: usize, + pb: PathBuf, + link: bool, + ) -> Result { use std::os::unix::fs::MetadataExt; - let md = fs::metadata(&pb).map_err(|err| Error::Io(err).with_path(&pb))?; + let md = + fs::metadata(&pb).map_err(|err| Error::Io(err).with_path(&pb))?; Ok(DirEntryRaw { path: pb, ty: md.file_type(), @@ -419,7 +415,11 @@ impl DirEntryRaw { // Placeholder implementation to allow compiling on non-standard platforms (e.g. wasm32). #[cfg(not(any(windows, unix)))] - fn from_path(depth: usize, pb: PathBuf, link: bool) -> Result { + fn from_path( + depth: usize, + pb: PathBuf, + link: bool, + ) -> Result { Err(Error::Io(io::Error::new( io::ErrorKind::Other, "unsupported platform", @@ -490,7 +490,9 @@ pub struct WalkBuilder { #[derive(Clone)] enum Sorter { - ByName(Arc cmp::Ordering + Send + Sync + 'static>), + ByName( + Arc cmp::Ordering + Send + Sync + 'static>, + ), ByPath(Arc cmp::Ordering + Send + Sync + 'static>), } @@ -550,10 +552,14 @@ impl WalkBuilder { if let Some(ref sorter) = sorter { match sorter.clone() { Sorter::ByName(cmp) => { - wd = wd.sort_by(move |a, b| cmp(a.file_name(), b.file_name())); + wd = wd.sort_by(move |a, b| { + cmp(a.file_name(), b.file_name()) + }); } Sorter::ByPath(cmp) => { - wd = wd.sort_by(move |a, b| cmp(a.path(), b.path())); + wd = wd.sort_by(move |a, b| { + cmp(a.path(), b.path()) + }); } } } @@ -1012,11 +1018,7 @@ enum WalkEvent { impl From for WalkEventIter { fn from(it: WalkDir) -> WalkEventIter { - WalkEventIter { - depth: 0, - it: it.into_iter(), - next: None, - } + WalkEventIter { depth: 0, it: it.into_iter(), next: None } } } @@ -1085,8 +1087,8 @@ pub trait ParallelVisitorBuilder<'s> { fn build(&mut self) -> Box; } -impl<'a, 's, P: ParallelVisitorBuilder<'s>> - ParallelVisitorBuilder<'s> for &'a mut P +impl<'a, 's, P: ParallelVisitorBuilder<'s>> ParallelVisitorBuilder<'s> + for &'a mut P { fn build(&mut self) -> Box { (**self).build() @@ -1109,16 +1111,17 @@ struct FnBuilder { builder: F, } -impl<'s, F: FnMut() -> FnVisitor<'s>> ParallelVisitorBuilder<'s> for FnBuilder { +impl<'s, F: FnMut() -> FnVisitor<'s>> ParallelVisitorBuilder<'s> + for FnBuilder +{ fn build(&mut self) -> Box { let visitor = (self.builder)(); Box::new(FnVisitorImp { 
visitor }) } } -type FnVisitor<'s> = Box< - dyn FnMut(Result) -> WalkState + Send + 's ->; +type FnVisitor<'s> = + Box) -> WalkState + Send + 's>; struct FnVisitorImp<'s> { visitor: FnVisitor<'s>, @@ -1263,7 +1266,8 @@ impl WalkParallel { for handle in handles { handle.join().unwrap(); } - }).unwrap(); // Pass along panics from threads + }) + .unwrap(); // Pass along panics from threads } fn threads(&self) -> usize { @@ -1488,7 +1492,9 @@ impl<'s> Worker<'s> { let fs_dent = match result { Ok(fs_dent) => fs_dent, Err(err) => { - return self.visitor.visit(Err(Error::from(err).with_depth(depth))); + return self + .visitor + .visit(Err(Error::from(err).with_depth(depth))); } }; let mut dent = match DirEntryRaw::from_entry(depth, &fs_dent) { @@ -1522,15 +1528,16 @@ impl<'s> Worker<'s> { } } let should_skip_path = should_skip_entry(ig, &dent); - let should_skip_filesize = if self.max_filesize.is_some() && !dent.is_dir() { - skip_filesize( - self.max_filesize.unwrap(), - dent.path(), - &dent.metadata().ok(), - ) - } else { - false - }; + let should_skip_filesize = + if self.max_filesize.is_some() && !dent.is_dir() { + skip_filesize( + self.max_filesize.unwrap(), + dent.path(), + &dent.metadata().ok(), + ) + } else { + false + }; if !should_skip_path && !should_skip_filesize { self.tx @@ -1579,11 +1586,12 @@ impl<'s> Worker<'s> { return None; } // Wait for next `Work` or `Quit` message. - value = Ok(self.rx.recv().expect( - "channel disconnected while worker is alive", - )); + value = Ok(self + .rx + .recv() + .expect("channel disconnected while worker is alive")); self.resume(); - }, + } Err(TryRecvError::Disconnected) => { unreachable!("channel disconnected while worker is alive"); } @@ -1619,18 +1627,11 @@ fn check_symlink_loop( child_depth: usize, ) -> Result<(), Error> { let hchild = Handle::from_path(child_path).map_err(|err| { - Error::from(err) - .with_path(child_path) - .with_depth(child_depth) + Error::from(err).with_path(child_path).with_depth(child_depth) })?; - for ig in ig_parent - .parents() - .take_while(|ig| !ig.is_absolute_parent()) - { + for ig in ig_parent.parents().take_while(|ig| !ig.is_absolute_parent()) { let h = Handle::from_path(ig.path()).map_err(|err| { - Error::from(err) - .with_path(child_path) - .with_depth(child_depth) + Error::from(err).with_path(child_path).with_depth(child_depth) })?; if hchild == h { return Err(Error::Loop { @@ -1645,7 +1646,11 @@ fn check_symlink_loop( // Before calling this function, make sure that you ensure that is really // necessary as the arguments imply a file stat. -fn skip_filesize(max_filesize: u64, path: &Path, ent: &Option) -> bool { +fn skip_filesize( + max_filesize: u64, + path: &Path, + ent: &Option, +) -> bool { let filesize = match *ent { Some(ref md) => Some(md.len()), None => None, @@ -1743,7 +1748,8 @@ fn walkdir_is_dir(dent: &walkdir::DirEntry) -> bool { /// Returns true if and only if the given path is on the same device as the /// given root device. 
fn is_same_file_system(root_device: u64, path: &Path) -> Result { - let dent_device = device_num(path).map_err(|err| Error::Io(err).with_path(path))?; + let dent_device = + device_num(path).map_err(|err| Error::Io(err).with_path(path))?; Ok(root_device == dent_device) } @@ -1825,7 +1831,10 @@ mod tests { paths } - fn walk_collect_parallel(prefix: &Path, builder: &WalkBuilder) -> Vec { + fn walk_collect_parallel( + prefix: &Path, + builder: &WalkBuilder, + ) -> Vec { let mut paths = vec![]; for dent in walk_collect_entries_parallel(builder) { let path = dent.path().strip_prefix(prefix).unwrap(); @@ -2079,7 +2088,9 @@ mod tests { assert_eq!(1, dents.len()); assert!(!dents[0].path_is_symlink()); - let dents = walk_collect_entries_parallel(&WalkBuilder::new(td.path().join("foo"))); + let dents = walk_collect_entries_parallel(&WalkBuilder::new( + td.path().join("foo"), + )); assert_eq!(1, dents.len()); assert!(!dents[0].path_is_symlink()); } diff --git a/ignore/tests/gitignore_matched_path_or_any_parents_tests.rs b/ignore/tests/gitignore_matched_path_or_any_parents_tests.rs index 28d8e2f84..b136b986a 100644 --- a/ignore/tests/gitignore_matched_path_or_any_parents_tests.rs +++ b/ignore/tests/gitignore_matched_path_or_any_parents_tests.rs @@ -55,7 +55,6 @@ fn test_files_in_root() { assert!(m("ROOT/file_root_33").is_none()); } - #[test] fn test_files_in_deep() { let gitignore = get_gitignore(); @@ -88,7 +87,6 @@ fn test_files_in_deep() { assert!(m("ROOT/parent_dir/file_deep_33").is_none()); } - #[test] fn test_dirs_in_root() { let gitignore = get_gitignore(); @@ -193,7 +191,6 @@ fn test_dirs_in_root() { assert!(m("ROOT/dir_root_33/child_dir/file", false).is_ignore()); } - #[test] fn test_dirs_in_deep() { let gitignore = get_gitignore(); @@ -205,17 +202,13 @@ fn test_dirs_in_deep() { assert!(m("ROOT/parent_dir/dir_deep_00", true).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_00/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_00/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_00/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_00/child_dir/file", false).is_ignore()); // 01 assert!(m("ROOT/parent_dir/dir_deep_01", true).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_01/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_01/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_01/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_01/child_dir/file", false).is_ignore()); // 02 assert!(m("ROOT/parent_dir/dir_deep_02", true).is_none()); @@ -257,67 +250,51 @@ fn test_dirs_in_deep() { assert!(m("ROOT/parent_dir/dir_deep_20", true).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_20/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_20/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_20/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_20/child_dir/file", false).is_ignore()); // 21 assert!(m("ROOT/parent_dir/dir_deep_21", true).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_21/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_21/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_21/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_21/child_dir/file", false).is_ignore()); // 22 // dir itself doesn't match assert!(m("ROOT/parent_dir/dir_deep_22", true).is_none()); assert!(m("ROOT/parent_dir/dir_deep_22/file", false).is_ignore()); 
assert!(m("ROOT/parent_dir/dir_deep_22/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_22/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_22/child_dir/file", false).is_ignore()); // 23 // dir itself doesn't match assert!(m("ROOT/parent_dir/dir_deep_23", true).is_none()); assert!(m("ROOT/parent_dir/dir_deep_23/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_23/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_23/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_23/child_dir/file", false).is_ignore()); // 30 assert!(m("ROOT/parent_dir/dir_deep_30", true).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_30/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_30/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_30/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_30/child_dir/file", false).is_ignore()); // 31 assert!(m("ROOT/parent_dir/dir_deep_31", true).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_31/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_31/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_31/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_31/child_dir/file", false).is_ignore()); // 32 // dir itself doesn't match assert!(m("ROOT/parent_dir/dir_deep_32", true).is_none()); assert!(m("ROOT/parent_dir/dir_deep_32/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_32/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_32/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_32/child_dir/file", false).is_ignore()); // 33 // dir itself doesn't match assert!(m("ROOT/parent_dir/dir_deep_33", true).is_none()); assert!(m("ROOT/parent_dir/dir_deep_33/file", false).is_ignore()); assert!(m("ROOT/parent_dir/dir_deep_33/child_dir", true).is_ignore()); - assert!( - m("ROOT/parent_dir/dir_deep_33/child_dir/file", false).is_ignore() - ); + assert!(m("ROOT/parent_dir/dir_deep_33/child_dir/file", false).is_ignore()); } diff --git a/rustfmt.toml b/rustfmt.toml index c7ad93baf..aa37a218b 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1 +1,2 @@ -disable_all_formatting = true +max_width = 79 +use_small_heuristics = "max" diff --git a/src/app.rs b/src/app.rs index dd1e296be..7031d8042 100644 --- a/src/app.rs +++ b/src/app.rs @@ -9,7 +9,7 @@ // is where we read clap's configuration from the end user's arguments and turn // it into a ripgrep-specific configuration type that is not coupled with clap. -use clap::{self, App, AppSettings, crate_authors, crate_version}; +use clap::{self, crate_authors, crate_version, App, AppSettings}; use lazy_static::lazy_static; const ABOUT: &str = " @@ -284,7 +284,7 @@ pub enum RGArgKind { /// any value other than what's in this set, then clap will report an /// error. possible_values: Vec<&'static str>, - } + }, } impl RGArg { @@ -317,8 +317,7 @@ impl RGArg { /// check whether the flag is present or not. Otherwise, consumers may /// inspect the number of times the switch is used. 
fn switch(long_name: &'static str) -> RGArg { - let claparg = Arg::with_name(long_name) - .long(long_name); + let claparg = Arg::with_name(long_name).long(long_name); RGArg { claparg: claparg, name: long_name, @@ -361,7 +360,7 @@ impl RGArg { value_name: value_name, multiple: false, possible_values: vec![], - } + }, } } @@ -370,7 +369,7 @@ impl RGArg { /// This panics if this arg isn't a switch or a flag. fn short(mut self, name: &'static str) -> RGArg { match self.kind { - RGArgKind::Positional{..} => panic!("expected switch or flag"), + RGArgKind::Positional { .. } => panic!("expected switch or flag"), RGArgKind::Switch { ref mut short, .. } => { *short = Some(name); } @@ -450,11 +449,12 @@ impl RGArg { /// appropriate documentation for the choices in the "long" help text. fn possible_values(mut self, values: &[&'static str]) -> RGArg { match self.kind { - RGArgKind::Positional{..} => panic!("expected flag"), - RGArgKind::Switch{..} => panic!("expected flag"), + RGArgKind::Positional { .. } => panic!("expected flag"), + RGArgKind::Switch { .. } => panic!("expected flag"), RGArgKind::Flag { ref mut possible_values, .. } => { *possible_values = values.to_vec(); - self.claparg = self.claparg + self.claparg = self + .claparg .possible_values(values) .hide_possible_values(true); } @@ -475,9 +475,9 @@ impl RGArg { /// This panics if this arg is not a flag. fn allow_leading_hyphen(mut self) -> RGArg { match self.kind { - RGArgKind::Positional{..} => panic!("expected flag"), - RGArgKind::Switch{..} => panic!("expected flag"), - RGArgKind::Flag {..} => { + RGArgKind::Positional { .. } => panic!("expected flag"), + RGArgKind::Switch { .. } => panic!("expected flag"), + RGArgKind::Flag { .. } => { self.claparg = self.claparg.allow_hyphen_values(true); } } @@ -532,7 +532,9 @@ impl RGArg { // We add an extra space to long descriptions so that a blank line is inserted // between flag descriptions in --help output. macro_rules! long { - ($lit:expr) => { concat!($lit, " ") } + ($lit:expr) => { + concat!($lit, " ") + }; } /// Generate a sequence of all positional and flag arguments. @@ -642,7 +644,8 @@ pub fn all_args_and_flags() -> Vec { fn arg_pattern(args: &mut Vec) { const SHORT: &str = "A regular expression used for searching."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ A regular expression used for searching. To match a pattern beginning with a dash, use the -e/--regexp flag. @@ -654,36 +657,49 @@ You can also use the special '--' delimiter to indicate that no more flags will be provided. Namely, the following is equivalent to the above: rg -- -foo -"); +" + ); let arg = RGArg::positional("pattern", "PATTERN") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .required_unless(&[ - "file", "files", "regexp", "type-list", "pcre2-version", + "file", + "files", + "regexp", + "type-list", + "pcre2-version", ]); args.push(arg); } fn arg_path(args: &mut Vec) { const SHORT: &str = "A file or directory to search."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ A file or directory to search. Directories are searched recursively. Paths \ specified on the command line override glob and ignore rules. \ -"); +" + ); let arg = RGArg::positional("path", "PATH") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .multiple(); args.push(arg); } fn flag_after_context(args: &mut Vec) { const SHORT: &str = "Show NUM lines after each match."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Show NUM lines after each match. 
This overrides the --context flag. -"); - let arg = RGArg::flag("after-context", "NUM").short("A") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("after-context", "NUM") + .short("A") + .help(SHORT) + .long_help(LONG) .number() .overrides("context"); args.push(arg); @@ -691,7 +707,8 @@ This overrides the --context flag. fn flag_auto_hybrid_regex(args: &mut Vec) { const SHORT: &str = "Dynamically use PCRE2 if necessary."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When this flag is used, ripgrep will dynamically choose between supported regex engines depending on the features used in a pattern. When ripgrep chooses a regex engine, it applies that choice for every regex provided to ripgrep (e.g., @@ -717,9 +734,11 @@ to transparently support more advanced regex features like look-around and backreferences without explicitly needing to enable them. This flag can be disabled with --no-auto-hybrid-regex. -"); +" + ); let arg = RGArg::switch("auto-hybrid-regex") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-auto-hybrid-regex") .overrides("pcre2") .overrides("no-pcre2"); @@ -735,13 +754,17 @@ This flag can be disabled with --no-auto-hybrid-regex. fn flag_before_context(args: &mut Vec) { const SHORT: &str = "Show NUM lines before each match."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Show NUM lines before each match. This overrides the --context flag. -"); - let arg = RGArg::flag("before-context", "NUM").short("B") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("before-context", "NUM") + .short("B") + .help(SHORT) + .long_help(LONG) .number() .overrides("context"); args.push(arg); @@ -749,7 +772,8 @@ This overrides the --context flag. fn flag_binary(args: &mut Vec) { const SHORT: &str = "Search binary files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Enabling this flag will cause ripgrep to search binary files. By default, ripgrep attempts to automatically skip binary files in order to improve the relevance of results and make the search faster. @@ -780,9 +804,11 @@ this flag is automatically enabled. This flag can be disabled with '--no-binary'. It overrides the '-a/--text' flag. -"); +" + ); let arg = RGArg::switch("binary") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-binary") .overrides("text") .overrides("no-text"); @@ -798,7 +824,8 @@ flag. fn flag_block_buffered(args: &mut Vec) { const SHORT: &str = "Force block buffering."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When enabled, ripgrep will use block buffering. That is, whenever a matching line is found, it will be written to an in-memory buffer and will not be written to stdout until the buffer reaches a certain size. This is the default @@ -810,9 +837,11 @@ Forceful block buffering can be disabled with --no-block-buffered. Note that using --no-block-buffered causes ripgrep to revert to its default behavior of automatically detecting the buffering strategy. To force line buffering, use the --line-buffered flag. -"); +" + ); let arg = RGArg::switch("block-buffered") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-block-buffered") .overrides("line-buffered") .overrides("no-line-buffered"); @@ -829,7 +858,8 @@ the --line-buffered flag. 
fn flag_byte_offset(args: &mut Vec) { const SHORT: &str = "Print the 0-based byte offset for each matching line."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Print the 0-based byte offset within the input file before each line of output. If -o (--only-matching) is specified, print the offset of the matching part itself. @@ -839,21 +869,26 @@ of transcoding and not the original data. This applies similarly to another transformation on the source, such as decompression or a --pre filter. Note that when the PCRE2 regex engine is used, then UTF-8 transcoding is done by default. -"); - let arg = RGArg::switch("byte-offset").short("b") - .help(SHORT).long_help(LONG); +" + ); + let arg = + RGArg::switch("byte-offset").short("b").help(SHORT).long_help(LONG); args.push(arg); } fn flag_case_sensitive(args: &mut Vec) { const SHORT: &str = "Search case sensitively (default)."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Search case sensitively. This overrides the -i/--ignore-case and -S/--smart-case flags. -"); - let arg = RGArg::switch("case-sensitive").short("s") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("case-sensitive") + .short("s") + .help(SHORT) + .long_help(LONG) .overrides("ignore-case") .overrides("smart-case"); args.push(arg); @@ -861,7 +896,8 @@ This overrides the -i/--ignore-case and -S/--smart-case flags. fn flag_color(args: &mut Vec) { const SHORT: &str = "Controls when to use color."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag controls when to use colors. The default setting is 'auto', which means ripgrep will try to guess when to use colors. For example, if ripgrep is printing to a terminal, then it will use colors, but if it is redirected to a @@ -879,9 +915,11 @@ The possible values for this flag are: When the --vimgrep flag is given to ripgrep, then the default value for the --color flag changes to 'never'. -"); +" + ); let arg = RGArg::flag("color", "WHEN") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .possible_values(&["never", "auto", "always", "ansi"]) .default_value_if("never", "vimgrep"); args.push(arg); @@ -889,7 +927,8 @@ When the --vimgrep flag is given to ripgrep, then the default value for the fn flag_colors(args: &mut Vec) { const SHORT: &str = "Configure color settings and styles."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag specifies color settings for use in the output. This flag may be provided multiple times. Settings are applied iteratively. Colors are limited to one of eight choices: red, blue, green, cyan, magenta, yellow, white and @@ -922,43 +961,50 @@ or, equivalently, Note that the the intense and nointense style flags will have no effect when used alongside these extended color codes. -"); +" + ); let arg = RGArg::flag("colors", "COLOR_SPEC") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .multiple(); args.push(arg); } fn flag_column(args: &mut Vec) { const SHORT: &str = "Show column numbers."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Show column numbers (1-based). This only shows the column numbers for the first match on each line. This does not try to account for Unicode. One byte is equal to one column. This implies --line-number. This flag can be disabled with --no-column. 
-"); +" + ); let arg = RGArg::switch("column") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-column"); args.push(arg); - let arg = RGArg::switch("no-column") - .hidden() - .overrides("column"); + let arg = RGArg::switch("no-column").hidden().overrides("column"); args.push(arg); } fn flag_context(args: &mut Vec) { const SHORT: &str = "Show NUM lines before and after each match."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Show NUM lines before and after each match. This is equivalent to providing both the -B/--before-context and -A/--after-context flags with the same value. This overrides both the -B/--before-context and -A/--after-context flags. -"); - let arg = RGArg::flag("context", "NUM").short("C") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("context", "NUM") + .short("C") + .help(SHORT) + .long_help(LONG) .number() .overrides("before-context") .overrides("after-context"); @@ -993,7 +1039,8 @@ is still inserted. To completely disable context separators, use the fn flag_count(args: &mut Vec) { const SHORT: &str = "Only show the count of matching lines for each file."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag suppresses normal output and shows the number of lines that match the given patterns for each file searched. Each file containing a match has its path and count printed on each line. Note that this reports the number of lines @@ -1005,16 +1052,21 @@ path in this case. This overrides the --count-matches flag. Note that when --count is combined with --only-matching, then ripgrep behaves as if --count-matches was given. -"); - let arg = RGArg::switch("count").short("c") - .help(SHORT).long_help(LONG).overrides("count-matches"); +" + ); + let arg = RGArg::switch("count") + .short("c") + .help(SHORT) + .long_help(LONG) + .overrides("count-matches"); args.push(arg); } fn flag_count_matches(args: &mut Vec) { const SHORT: &str = "Only show the count of individual matches for each file."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag suppresses normal output and shows the number of individual matches of the given patterns for each file searched. Each file containing matches has its path and match count printed on each line. @@ -1027,15 +1079,19 @@ path in this case. This overrides the --count flag. Note that when --count is combined with --only-matching, then ripgrep behaves as if --count-matches was given. -"); +" + ); let arg = RGArg::switch("count-matches") - .help(SHORT).long_help(LONG).overrides("count"); + .help(SHORT) + .long_help(LONG) + .overrides("count"); args.push(arg); } fn flag_crlf(args: &mut Vec) { const SHORT: &str = "Support CRLF line terminators (useful on Windows)."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When enabled, ripgrep will treat CRLF ('\\r\\n') as a line terminator instead of just '\\n'. @@ -1046,22 +1102,23 @@ may produce slightly different than desired match offsets. It is intended as a work-around until the regex engine supports this natively. CRLF support can be disabled with --no-crlf. 
-"); +" + ); let arg = RGArg::switch("crlf") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-crlf") .overrides("null-data"); args.push(arg); - let arg = RGArg::switch("no-crlf") - .hidden() - .overrides("crlf"); + let arg = RGArg::switch("no-crlf").hidden().overrides("crlf"); args.push(arg); } fn flag_debug(args: &mut Vec) { const SHORT: &str = "Show debug messages."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Show debug messages. Please use this when filing a bug report. The --debug flag is generally useful for figuring out why ripgrep skipped @@ -1071,35 +1128,37 @@ skipped and why they were skipped. To get even more debug output, use the --trace flag, which implies --debug along with additional trace data. With --trace, the output could be quite large and is generally more useful for development. -"); - let arg = RGArg::switch("debug") - .help(SHORT).long_help(LONG); +" + ); + let arg = RGArg::switch("debug").help(SHORT).long_help(LONG); args.push(arg); - let arg = RGArg::switch("trace") - .hidden() - .overrides("debug"); + let arg = RGArg::switch("trace").hidden().overrides("debug"); args.push(arg); } fn flag_dfa_size_limit(args: &mut Vec) { const SHORT: &str = "The upper size limit of the regex DFA."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ The upper size limit of the regex DFA. The default limit is 10M. This should only be changed on very large regex inputs where the (slower) fallback regex engine may otherwise be used if the limit is reached. The argument accepts the same size suffixes as allowed in with the --max-filesize flag. -"); +" + ); let arg = RGArg::flag("dfa-size-limit", "NUM+SUFFIX?") - .help(SHORT).long_help(LONG); + .help(SHORT) + .long_help(LONG); args.push(arg); } fn flag_encoding(args: &mut Vec) { const SHORT: &str = "Specify the text encoding of files to search."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Specify the text encoding that ripgrep will use on all files searched. The default value is 'auto', which will cause ripgrep to do a best effort automatic detection of encoding on a per-file basis. Automatic detection in this case @@ -1114,29 +1173,34 @@ https://encoding.spec.whatwg.org/#concept-encoding-get For more details on encoding and how ripgrep deals with it, see GUIDE.md. This flag can be disabled with --no-encoding. -"); - let arg = RGArg::flag("encoding", "ENCODING").short("E") - .help(SHORT).long_help(LONG); +" + ); + let arg = RGArg::flag("encoding", "ENCODING") + .short("E") + .help(SHORT) + .long_help(LONG); args.push(arg); - let arg = RGArg::switch("no-encoding") - .hidden() - .overrides("encoding"); + let arg = RGArg::switch("no-encoding").hidden().overrides("encoding"); args.push(arg); } fn flag_file(args: &mut Vec) { const SHORT: &str = "Search for patterns from the given file."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Search for patterns from the given file, with one pattern per line. When this flag is used multiple times or in combination with the -e/--regexp flag, then all patterns provided are searched. Empty pattern lines will match all input lines, and the newline is not counted as part of the pattern. A line is printed if and only if it matches at least one of the patterns. 
-"); - let arg = RGArg::flag("file", "PATTERNFILE").short("f") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("file", "PATTERNFILE") + .short("f") + .help(SHORT) + .long_help(LONG) .multiple() .allow_leading_hyphen(); args.push(arg); @@ -1144,12 +1208,15 @@ A line is printed if and only if it matches at least one of the patterns. fn flag_files(args: &mut Vec) { const SHORT: &str = "Print each file that would be searched."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Print each file that would be searched without actually performing the search. This is useful to determine whether a particular file is being searched or not. -"); +" + ); let arg = RGArg::switch("files") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) // This also technically conflicts with pattern, but the first file // path will actually be in pattern. .conflicts(&["file", "regexp", "type-list"]); @@ -1158,81 +1225,97 @@ This is useful to determine whether a particular file is being searched or not. fn flag_files_with_matches(args: &mut Vec) { const SHORT: &str = "Only print the paths with at least one match."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Only print the paths with at least one match. This overrides --files-without-match. -"); - let arg = RGArg::switch("files-with-matches").short("l") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("files-with-matches") + .short("l") + .help(SHORT) + .long_help(LONG) .overrides("files-without-match"); args.push(arg); } fn flag_files_without_match(args: &mut Vec) { const SHORT: &str = "Only print the paths that contain zero matches."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Only print the paths that contain zero matches. This inverts/negates the --files-with-matches flag. This overrides --files-with-matches. -"); +" + ); let arg = RGArg::switch("files-without-match") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("files-with-matches"); args.push(arg); } fn flag_fixed_strings(args: &mut Vec) { const SHORT: &str = "Treat the pattern as a literal string."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Treat the pattern as a literal string instead of a regular expression. When this flag is used, special regular expression meta characters such as .(){}*+ do not need to be escaped. This flag can be disabled with --no-fixed-strings. -"); - let arg = RGArg::switch("fixed-strings").short("F") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("fixed-strings") + .short("F") + .help(SHORT) + .long_help(LONG) .overrides("no-fixed-strings"); args.push(arg); - let arg = RGArg::switch("no-fixed-strings") - .hidden() - .overrides("fixed-strings"); + let arg = + RGArg::switch("no-fixed-strings").hidden().overrides("fixed-strings"); args.push(arg); } fn flag_follow(args: &mut Vec) { const SHORT: &str = "Follow symbolic links."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When this flag is enabled, ripgrep will follow symbolic links while traversing directories. This is disabled by default. Note that ripgrep will check for symbolic link loops and report errors if it finds one. This flag can be disabled with --no-follow. 
-"); - let arg = RGArg::switch("follow").short("L") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("follow") + .short("L") + .help(SHORT) + .long_help(LONG) .overrides("no-follow"); args.push(arg); - let arg = RGArg::switch("no-follow") - .hidden() - .overrides("follow"); + let arg = RGArg::switch("no-follow").hidden().overrides("follow"); args.push(arg); } fn flag_glob(args: &mut Vec) { const SHORT: &str = "Include or exclude files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Include or exclude files and directories for searching that match the given glob. This always overrides any other ignore logic. Multiple glob flags may be used. Globbing rules match .gitignore globs. Precede a glob with a ! to exclude it. -"); - let arg = RGArg::flag("glob", "GLOB").short("g") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("glob", "GLOB") + .short("g") + .help(SHORT) + .long_help(LONG) .multiple() .allow_leading_hyphen(); args.push(arg); @@ -1240,14 +1323,17 @@ it. fn flag_glob_case_insensitive(args: &mut Vec) { const SHORT: &str = "Process all glob patterns case insensitively."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Process glob patterns given with the -g/--glob flag case insensitively. This effectively treats --glob as --iglob. This flag can be disabled with the --no-glob-case-insensitive flag. -"); +" + ); let arg = RGArg::switch("glob-case-insensitive") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-glob-case-insensitive"); args.push(arg); @@ -1259,63 +1345,72 @@ This flag can be disabled with the --no-glob-case-insensitive flag. fn flag_heading(args: &mut Vec) { const SHORT: &str = "Print matches grouped by each file."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag prints the file path above clusters of matches from each file instead of printing the file path as a prefix for each matched line. This is the default mode when printing to a terminal. This overrides the --no-heading flag. -"); +" + ); let arg = RGArg::switch("heading") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-heading"); args.push(arg); const NO_SHORT: &str = "Don't group matches by each file."; - const NO_LONG: &str = long!("\ + const NO_LONG: &str = long!( + "\ Don't group matches by each file. If --no-heading is provided in addition to the -H/--with-filename flag, then file paths will be printed as a prefix for every matched line. This is the default mode when not printing to a terminal. This overrides the --heading flag. -"); +" + ); let arg = RGArg::switch("no-heading") - .help(NO_SHORT).long_help(NO_LONG) + .help(NO_SHORT) + .long_help(NO_LONG) .overrides("heading"); args.push(arg); } fn flag_hidden(args: &mut Vec) { const SHORT: &str = "Search hidden files and directories."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Search hidden files and directories. By default, hidden files and directories are skipped. Note that if a hidden file or a directory is whitelisted in an ignore file, then it will be searched even if this flag isn't provided. This flag can be disabled with --no-hidden. 
-"); +" + ); let arg = RGArg::switch("hidden") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-hidden"); args.push(arg); - let arg = RGArg::switch("no-hidden") - .hidden() - .overrides("hidden"); + let arg = RGArg::switch("no-hidden").hidden().overrides("hidden"); args.push(arg); } fn flag_iglob(args: &mut Vec) { - const SHORT: &str = - "Include or exclude files case insensitively."; - const LONG: &str = long!("\ + const SHORT: &str = "Include or exclude files case insensitively."; + const LONG: &str = long!( + "\ Include or exclude files and directories for searching that match the given glob. This always overrides any other ignore logic. Multiple glob flags may be used. Globbing rules match .gitignore globs. Precede a glob with a ! to exclude it. Globs are matched case insensitively. -"); +" + ); let arg = RGArg::flag("iglob", "GLOB") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .multiple() .allow_leading_hyphen(); args.push(arg); @@ -1323,15 +1418,19 @@ it. Globs are matched case insensitively. fn flag_ignore_case(args: &mut Vec) { const SHORT: &str = "Case insensitive search."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When this flag is provided, the given patterns will be searched case insensitively. The case insensitivity rules used by ripgrep conform to Unicode's \"simple\" case folding rules. This flag overrides -s/--case-sensitive and -S/--smart-case. -"); - let arg = RGArg::switch("ignore-case").short("i") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("ignore-case") + .short("i") + .help(SHORT) + .long_help(LONG) .overrides("case-sensitive") .overrides("smart-case"); args.push(arg); @@ -1339,7 +1438,8 @@ This flag overrides -s/--case-sensitive and -S/--smart-case. fn flag_ignore_file(args: &mut Vec) { const SHORT: &str = "Specify additional ignore files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Specifies a path to one or more .gitignore format rules files. These patterns are applied after the patterns found in .gitignore and .ignore are applied and are matched relative to the current working directory. Multiple additional @@ -1349,9 +1449,11 @@ than later files. If you are looking for a way to include or exclude files and directories directly on the command line, then used -g instead. -"); +" + ); let arg = RGArg::flag("ignore-file", "PATH") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .multiple() .allow_leading_hyphen(); args.push(arg); @@ -1359,15 +1461,18 @@ directly on the command line, then used -g instead. fn flag_ignore_file_case_insensitive(args: &mut Vec) { const SHORT: &str = "Process ignore files case insensitively."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Process ignore files (.gitignore, .ignore, etc.) case insensitively. Note that this comes with a performance penalty and is most useful on case insensitive file systems (such as Windows). This flag can be disabled with the --no-ignore-file-case-insensitive flag. -"); +" + ); let arg = RGArg::switch("ignore-file-case-insensitive") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-ignore-file-case-insensitive"); args.push(arg); @@ -1379,28 +1484,33 @@ This flag can be disabled with the --no-ignore-file-case-insensitive flag. 
fn flag_include_zero(args: &mut Vec) { const SHORT: &str = "Include files with zero matches in summary"; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When used with --count or --count-matches, print the number of matches for each file even if there were zero matches. This is disabled by default but can be enabled to make ripgrep behave more like grep. -"); +" + ); let arg = RGArg::switch("include-zero").help(SHORT).long_help(LONG); args.push(arg); } fn flag_invert_match(args: &mut Vec) { const SHORT: &str = "Invert matching."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Invert matching. Show lines that do not match the given patterns. -"); - let arg = RGArg::switch("invert-match").short("v") - .help(SHORT).long_help(LONG); +" + ); + let arg = + RGArg::switch("invert-match").short("v").help(SHORT).long_help(LONG); args.push(arg); } fn flag_json(args: &mut Vec) { const SHORT: &str = "Show search results in a JSON Lines format."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Enable printing results in a JSON Lines format. When this flag is provided, ripgrep will emit a sequence of messages, each @@ -1442,25 +1552,29 @@ A more complete description of the JSON format used can be found here: https://docs.rs/grep-printer/*/grep_printer/struct.JSON.html The JSON Lines format can be disabled with --no-json. -"); +" + ); let arg = RGArg::switch("json") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-json") .conflicts(&[ - "count", "count-matches", - "files", "files-with-matches", "files-without-match", + "count", + "count-matches", + "files", + "files-with-matches", + "files-without-match", ]); args.push(arg); - let arg = RGArg::switch("no-json") - .hidden() - .overrides("json"); + let arg = RGArg::switch("no-json").hidden().overrides("json"); args.push(arg); } fn flag_line_buffered(args: &mut Vec) { const SHORT: &str = "Force line buffering."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When enabled, ripgrep will use line buffering. That is, whenever a matching line is found, it will be flushed to stdout immediately. This is the default when ripgrep's stdout is connected to a terminal, but otherwise, ripgrep will @@ -1473,9 +1587,11 @@ Forceful line buffering can be disabled with --no-line-buffered. Note that using --no-line-buffered causes ripgrep to revert to its default behavior of automatically detecting the buffering strategy. To force block buffering, use the --block-buffered flag. -"); +" + ); let arg = RGArg::switch("line-buffered") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-line-buffered") .overrides("block-buffered") .overrides("no-block-buffered"); @@ -1491,58 +1607,75 @@ the --block-buffered flag. fn flag_line_number(args: &mut Vec) { const SHORT: &str = "Show line numbers."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Show line numbers (1-based). This is enabled by default when searching in a terminal. -"); - let arg = RGArg::switch("line-number").short("n") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("line-number") + .short("n") + .help(SHORT) + .long_help(LONG) .overrides("no-line-number"); args.push(arg); const NO_SHORT: &str = "Suppress line numbers."; - const NO_LONG: &str = long!("\ + const NO_LONG: &str = long!( + "\ Suppress line numbers. This is enabled by default when not searching in a terminal. 
-"); - let arg = RGArg::switch("no-line-number").short("N") - .help(NO_SHORT).long_help(NO_LONG) +" + ); + let arg = RGArg::switch("no-line-number") + .short("N") + .help(NO_SHORT) + .long_help(NO_LONG) .overrides("line-number"); args.push(arg); } fn flag_line_regexp(args: &mut Vec) { const SHORT: &str = "Only show matches surrounded by line boundaries."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Only show matches surrounded by line boundaries. This is equivalent to putting ^...$ around all of the search patterns. In other words, this only prints lines where the entire line participates in a match. This overrides the --word-regexp flag. -"); - let arg = RGArg::switch("line-regexp").short("x") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("line-regexp") + .short("x") + .help(SHORT) + .long_help(LONG) .overrides("word-regexp"); args.push(arg); } fn flag_max_columns(args: &mut Vec) { const SHORT: &str = "Don't print lines longer than this limit."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Don't print lines longer than this limit in bytes. Longer lines are omitted, and only the number of matches in that line is printed. When this flag is omitted or is set to 0, then it has no effect. -"); - let arg = RGArg::flag("max-columns", "NUM").short("M") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("max-columns", "NUM") + .short("M") + .help(SHORT) + .long_help(LONG) .number(); args.push(arg); } fn flag_max_columns_preview(args: &mut Vec) { const SHORT: &str = "Print a preview for lines exceeding the limit."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When the '--max-columns' flag is used, ripgrep will by default completely replace any line that is too long with a message indicating that a matching line was removed. When this flag is combined with '--max-columns', a preview @@ -1552,9 +1685,11 @@ of the line exceeding the limit is not shown. If the '--max-columns' flag is not set, then this has no effect. This flag can be disabled with '--no-max-columns-preview'. -"); +" + ); let arg = RGArg::switch("max-columns-preview") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-max-columns-preview"); args.push(arg); @@ -1566,27 +1701,34 @@ This flag can be disabled with '--no-max-columns-preview'. fn flag_max_count(args: &mut Vec) { const SHORT: &str = "Limit the number of matches."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Limit the number of matching lines per file searched to NUM. -"); - let arg = RGArg::flag("max-count", "NUM").short("m") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("max-count", "NUM") + .short("m") + .help(SHORT) + .long_help(LONG) .number(); args.push(arg); } fn flag_max_depth(args: &mut Vec) { const SHORT: &str = "Descend at most NUM directories."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Limit the depth of directory traversal to NUM levels beyond the paths given. A value of zero only searches the explicitly given paths themselves. For example, 'rg --max-depth 0 dir/' is a no-op because dir/ will not be descended into. 'rg --max-depth 1 dir/' will search only the direct children of 'dir'. -"); +" + ); let arg = RGArg::flag("max-depth", "NUM") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .alias("maxdepth") .number(); args.push(arg); @@ -1594,7 +1736,8 @@ descended into. 
'rg --max-depth 1 dir/' will search only the direct children of fn flag_max_filesize(args: &mut Vec) { const SHORT: &str = "Ignore files larger than NUM in size."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Ignore files larger than NUM in size. This does not apply to directories. The input format accepts suffixes of K, M or G which correspond to kilobytes, @@ -1602,15 +1745,17 @@ megabytes and gigabytes, respectively. If no suffix is provided the input is treated as bytes. Examples: --max-filesize 50K or --max-filesize 80M -"); - let arg = RGArg::flag("max-filesize", "NUM+SUFFIX?") - .help(SHORT).long_help(LONG); +" + ); + let arg = + RGArg::flag("max-filesize", "NUM+SUFFIX?").help(SHORT).long_help(LONG); args.push(arg); } fn flag_mmap(args: &mut Vec) { const SHORT: &str = "Search using memory maps when possible."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Search using memory maps when possible. This is enabled by default when ripgrep thinks it will be faster. @@ -1622,27 +1767,31 @@ Note that ripgrep may abort unexpectedly when --mmap if it searches a file that is simultaneously truncated. This flag overrides --no-mmap. -"); - let arg = RGArg::switch("mmap") - .help(SHORT).long_help(LONG) - .overrides("no-mmap"); +" + ); + let arg = + RGArg::switch("mmap").help(SHORT).long_help(LONG).overrides("no-mmap"); args.push(arg); const NO_SHORT: &str = "Never use memory maps."; - const NO_LONG: &str = long!("\ + const NO_LONG: &str = long!( + "\ Never use memory maps, even when they might be faster. This flag overrides --mmap. -"); +" + ); let arg = RGArg::switch("no-mmap") - .help(NO_SHORT).long_help(NO_LONG) + .help(NO_SHORT) + .long_help(NO_LONG) .overrides("mmap"); args.push(arg); } fn flag_multiline(args: &mut Vec) { const SHORT: &str = "Enable matching across multiple lines."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Enable matching across multiple lines. When multiline mode is enabled, ripgrep will lift the restriction that a match @@ -1678,21 +1827,23 @@ Nevertheless, if you only care about matches spanning at most one line, then it is always better to disable multiline mode. This flag can be disabled with --no-multiline. -"); - let arg = RGArg::switch("multiline").short("U") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("multiline") + .short("U") + .help(SHORT) + .long_help(LONG) .overrides("no-multiline"); args.push(arg); - let arg = RGArg::switch("no-multiline") - .hidden() - .overrides("multiline"); + let arg = RGArg::switch("no-multiline").hidden().overrides("multiline"); args.push(arg); } fn flag_multiline_dotall(args: &mut Vec) { const SHORT: &str = "Make '.' match new lines when multiline is enabled."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag enables \"dot all\" in your regex pattern, which causes '.' to match newlines when multiline searching is enabled. This flag has no effect if multiline searching isn't enabled with the --multiline flag. @@ -1709,9 +1860,11 @@ inline flags in the regex pattern itself, e.g., '(?s:.)' always enables \"dot all\" whereas '(?-s:.)' always disables \"dot all\". This flag can be disabled with --no-multiline-dotall. -"); +" + ); let arg = RGArg::switch("multiline-dotall") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-multiline-dotall"); args.push(arg); @@ -1723,65 +1876,71 @@ This flag can be disabled with --no-multiline-dotall. 
fn flag_no_config(args: &mut Vec) { const SHORT: &str = "Never read configuration files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Never read configuration files. When this flag is present, ripgrep will not respect the RIPGREP_CONFIG_PATH environment variable. If ripgrep ever grows a feature to automatically read configuration files in pre-defined locations, then this flag will also disable that behavior as well. -"); - let arg = RGArg::switch("no-config") - .help(SHORT).long_help(LONG); +" + ); + let arg = RGArg::switch("no-config").help(SHORT).long_help(LONG); args.push(arg); } fn flag_no_ignore(args: &mut Vec) { const SHORT: &str = "Don't respect ignore files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Don't respect ignore files (.gitignore, .ignore, etc.). This implies --no-ignore-parent, --no-ignore-dot and --no-ignore-vcs. This flag can be disabled with the --ignore flag. -"); +" + ); let arg = RGArg::switch("no-ignore") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("ignore"); args.push(arg); - let arg = RGArg::switch("ignore") - .hidden() - .overrides("no-ignore"); + let arg = RGArg::switch("ignore").hidden().overrides("no-ignore"); args.push(arg); } fn flag_no_ignore_dot(args: &mut Vec) { const SHORT: &str = "Don't respect .ignore files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Don't respect .ignore files. This flag can be disabled with the --ignore-dot flag. -"); +" + ); let arg = RGArg::switch("no-ignore-dot") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("ignore-dot"); args.push(arg); - let arg = RGArg::switch("ignore-dot") - .hidden() - .overrides("no-ignore-dot"); + let arg = RGArg::switch("ignore-dot").hidden().overrides("no-ignore-dot"); args.push(arg); } fn flag_no_ignore_exclude(args: &mut Vec) { const SHORT: &str = "Don't respect local exclusion files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Don't respect ignore files that are manually configured for the repository such as git's '.git/info/exclude'. This flag can be disabled with the --ignore-exclude flag. -"); +" + ); let arg = RGArg::switch("no-ignore-exclude") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("ignore-exclude"); args.push(arg); @@ -1791,37 +1950,41 @@ This flag can be disabled with the --ignore-exclude flag. args.push(arg); } - fn flag_no_ignore_global(args: &mut Vec) { const SHORT: &str = "Don't respect global ignore files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Don't respect ignore files that come from \"global\" sources such as git's `core.excludesFile` configuration option (which defaults to `$HOME/.config/git/ignore`). This flag can be disabled with the --ignore-global flag. -"); +" + ); let arg = RGArg::switch("no-ignore-global") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("ignore-global"); args.push(arg); - let arg = RGArg::switch("ignore-global") - .hidden() - .overrides("no-ignore-global"); + let arg = + RGArg::switch("ignore-global").hidden().overrides("no-ignore-global"); args.push(arg); } fn flag_no_ignore_messages(args: &mut Vec) { const SHORT: &str = "Suppress gitignore parse error messages."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Suppresses all error messages related to parsing ignore files such as .ignore or .gitignore. This flag can be disabled with the --ignore-messages flag. 
-"); +" + ); let arg = RGArg::switch("no-ignore-messages") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("ignore-messages"); args.push(arg); @@ -1833,71 +1996,78 @@ This flag can be disabled with the --ignore-messages flag. fn flag_no_ignore_parent(args: &mut Vec) { const SHORT: &str = "Don't respect ignore files in parent directories."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Don't respect ignore files (.gitignore, .ignore, etc.) in parent directories. This flag can be disabled with the --ignore-parent flag. -"); +" + ); let arg = RGArg::switch("no-ignore-parent") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("ignore-parent"); args.push(arg); - let arg = RGArg::switch("ignore-parent") - .hidden() - .overrides("no-ignore-parent"); + let arg = + RGArg::switch("ignore-parent").hidden().overrides("no-ignore-parent"); args.push(arg); } fn flag_no_ignore_vcs(args: &mut Vec) { const SHORT: &str = "Don't respect VCS ignore files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Don't respect version control ignore files (.gitignore, etc.). This implies --no-ignore-parent for VCS files. Note that .ignore files will continue to be respected. This flag can be disabled with the --ignore-vcs flag. -"); +" + ); let arg = RGArg::switch("no-ignore-vcs") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("ignore-vcs"); args.push(arg); - let arg = RGArg::switch("ignore-vcs") - .hidden() - .overrides("no-ignore-vcs"); + let arg = RGArg::switch("ignore-vcs").hidden().overrides("no-ignore-vcs"); args.push(arg); } fn flag_no_messages(args: &mut Vec) { const SHORT: &str = "Suppress some error messages."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Suppress all error messages related to opening and reading files. Error messages related to the syntax of the pattern given are still shown. This flag can be disabled with the --messages flag. -"); +" + ); let arg = RGArg::switch("no-messages") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("messages"); args.push(arg); - let arg = RGArg::switch("messages") - .hidden() - .overrides("no-messages"); + let arg = RGArg::switch("messages").hidden().overrides("no-messages"); args.push(arg); } fn flag_no_pcre2_unicode(args: &mut Vec) { const SHORT: &str = "Disable Unicode mode for PCRE2 matching."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ DEPRECATED. Use --no-unicode instead. This flag is now an alias for --no-unicode. And --pcre2-unicode is an alias for --unicode. -"); +" + ); let arg = RGArg::switch("no-pcre2-unicode") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("pcre2-unicode") .overrides("unicode"); args.push(arg); @@ -1911,7 +2081,8 @@ for --unicode. fn flag_no_require_git(args: &mut Vec) { const SHORT: &str = "Do not require a git repository to use gitignores."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ By default, ripgrep will only respect global gitignore rules, .gitignore rules and local exclude rules if ripgrep detects that you are searching inside a git repository. This flag allows you to relax this restriction such that @@ -1919,21 +2090,23 @@ ripgrep will respect all git related ignore rules regardless of whether you're searching in a git repository or not. This flag can be disabled with --require-git. 
-"); +" + ); let arg = RGArg::switch("no-require-git") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("require-git"); args.push(arg); - let arg = RGArg::switch("require-git") - .hidden() - .overrides("no-require-git"); + let arg = + RGArg::switch("require-git").hidden().overrides("no-require-git"); args.push(arg); } fn flag_no_unicode(args: &mut Vec) { const SHORT: &str = "Disable Unicode mode."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ By default, ripgrep will enable \"Unicode mode\" in all of its regexes. This has a number of consequences: @@ -1966,9 +2139,11 @@ is enabled, then pass the --no-encoding flag to disable all transcoding. The --no-unicode flag can be disabled with --unicode. Note that --no-pcre2-unicode and --pcre2-unicode are aliases for --no-unicode and --unicode, respectively. -"); +" + ); let arg = RGArg::switch("no-unicode") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("unicode") .overrides("pcre2-unicode"); args.push(arg); @@ -1982,20 +2157,22 @@ The --no-unicode flag can be disabled with --unicode. Note that fn flag_null(args: &mut Vec) { const SHORT: &str = "Print a NUL byte after file paths."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Whenever a file path is printed, follow it with a NUL byte. This includes printing file paths before matches, and when printing a list of matching files such as with --count, --files-with-matches and --files. This option is useful for use with xargs. -"); - let arg = RGArg::switch("null").short("0") - .help(SHORT).long_help(LONG); +" + ); + let arg = RGArg::switch("null").short("0").help(SHORT).long_help(LONG); args.push(arg); } fn flag_null_data(args: &mut Vec) { const SHORT: &str = "Use NUL as a line terminator instead of \\n."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Enabling this option causes ripgrep to use NUL as a line terminator instead of the default of '\\n'. @@ -2009,9 +2186,11 @@ This is also useful for processing NUL delimited data, such as that emitted when using ripgrep's -0/--null flag or find's --print0 flag. Using this flag implies -a/--text. -"); +" + ); let arg = RGArg::switch("null-data") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("crlf"); args.push(arg); } @@ -2019,7 +2198,8 @@ Using this flag implies -a/--text. fn flag_one_file_system(args: &mut Vec) { const SHORT: &str = "Do not descend into directories on other file systems."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When enabled, ripgrep will not cross file system boundaries relative to where the search started from. @@ -2031,9 +2211,11 @@ not cross a file system boundary when traversing each path's directory tree. This is similar to find's '-xdev' or '-mount' flag. This flag can be disabled with --no-one-file-system. -"); +" + ); let arg = RGArg::switch("one-file-system") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-one-file-system"); args.push(arg); @@ -2045,31 +2227,36 @@ This flag can be disabled with --no-one-file-system. fn flag_only_matching(args: &mut Vec) { const SHORT: &str = "Print only matches parts of a line."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Print only the matched (non-empty) parts of a matching line, with each such part on a separate output line. 
-"); - let arg = RGArg::switch("only-matching").short("o") - .help(SHORT).long_help(LONG); +" + ); + let arg = + RGArg::switch("only-matching").short("o").help(SHORT).long_help(LONG); args.push(arg); } fn flag_path_separator(args: &mut Vec) { const SHORT: &str = "Set the path separator."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Set the path separator to use when printing file paths. This defaults to your platform's path separator, which is / on Unix and \\ on Windows. This flag is intended for overriding the default when the environment demands it (e.g., cygwin). A path separator is limited to a single byte. -"); - let arg = RGArg::flag("path-separator", "SEPARATOR") - .help(SHORT).long_help(LONG); +" + ); + let arg = + RGArg::flag("path-separator", "SEPARATOR").help(SHORT).long_help(LONG); args.push(arg); } fn flag_passthru(args: &mut Vec) { const SHORT: &str = "Print both matching and non-matching lines."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Print both matching and non-matching lines. Another way to achieve a similar effect is by modifying your pattern to match @@ -2077,16 +2264,19 @@ the empty string. For example, if you are searching using 'rg foo' then using 'rg \"^|foo\"' instead will emit every line in every file searched, but only occurrences of 'foo' will be highlighted. This flag enables the same behavior without needing to modify the pattern. -"); +" + ); let arg = RGArg::switch("passthru") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .alias("passthrough"); args.push(arg); } fn flag_pcre2(args: &mut Vec) { const SHORT: &str = "Enable PCRE2 matching."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When this flag is present, ripgrep will use the PCRE2 regex engine instead of its default regex engine. @@ -2105,9 +2295,12 @@ regex engine). Related flags: --no-pcre2-unicode This flag can be disabled with --no-pcre2. -"); - let arg = RGArg::switch("pcre2").short("P") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("pcre2") + .short("P") + .help(SHORT) + .long_help(LONG) .overrides("no-pcre2") .overrides("auto-hybrid-regex") .overrides("no-auto-hybrid-regex"); @@ -2123,19 +2316,21 @@ This flag can be disabled with --no-pcre2. fn flag_pcre2_version(args: &mut Vec) { const SHORT: &str = "Print the version of PCRE2 that ripgrep uses."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When this flag is present, ripgrep will print the version of PCRE2 in use, along with other information, and then exit. If PCRE2 is not available, then ripgrep will print an error message and exit with an error code. -"); - let arg = RGArg::switch("pcre2-version") - .help(SHORT).long_help(LONG); +" + ); + let arg = RGArg::switch("pcre2-version").help(SHORT).long_help(LONG); args.push(arg); } fn flag_pre(args: &mut Vec) { const SHORT: &str = "search outputs of COMMAND FILE for each FILE"; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ For each input FILE, search the standard output of COMMAND FILE rather than the contents of FILE. This option expects the COMMAND program to either be an absolute path or to be available in your PATH. Either an empty string COMMAND @@ -2179,23 +2374,24 @@ file based on its contents. If it is a compressed file in the Zstandard format, then `pzstd` is used to decompress the contents to stdout. This overrides the -z/--search-zip flag. 
-"); +" + ); let arg = RGArg::flag("pre", "COMMAND") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-pre") .overrides("search-zip"); args.push(arg); - let arg = RGArg::switch("no-pre") - .hidden() - .overrides("pre"); + let arg = RGArg::switch("no-pre").hidden().overrides("pre"); args.push(arg); } fn flag_pre_glob(args: &mut Vec) { const SHORT: &str = "Include or exclude files from a preprocessing command."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag works in conjunction with the --pre flag. Namely, when one or more --pre-glob flags are given, then only files that match the given set of globs will be handed to the command specified by the --pre flag. Any non-matching @@ -2217,9 +2413,11 @@ Multiple --pre-glob flags may be used. Globbing rules match .gitignore globs. Precede a glob with a ! to exclude it. This flag has no effect if the --pre flag is not used. -"); +" + ); let arg = RGArg::flag("pre-glob", "GLOB") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .multiple() .allow_leading_hyphen(); args.push(arg); @@ -2227,47 +2425,53 @@ This flag has no effect if the --pre flag is not used. fn flag_pretty(args: &mut Vec) { const SHORT: &str = "Alias for --color always --heading --line-number."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This is a convenience alias for '--color always --heading --line-number'. This flag is useful when you still want pretty output even if you're piping ripgrep to another program or file. For example: 'rg -p foo | less -R'. -"); - let arg = RGArg::switch("pretty").short("p") - .help(SHORT).long_help(LONG); +" + ); + let arg = RGArg::switch("pretty").short("p").help(SHORT).long_help(LONG); args.push(arg); } fn flag_quiet(args: &mut Vec) { const SHORT: &str = "Do not print anything to stdout."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Do not print anything to stdout. If a match is found in a file, then ripgrep will stop searching. This is useful when ripgrep is used only for its exit code (which will be an error if no matches are found). When --files is used, then ripgrep will stop finding files after finding the first file that matches all ignore rules. -"); - let arg = RGArg::switch("quiet").short("q") - .help(SHORT).long_help(LONG); +" + ); + let arg = RGArg::switch("quiet").short("q").help(SHORT).long_help(LONG); args.push(arg); } fn flag_regex_size_limit(args: &mut Vec) { const SHORT: &str = "The upper size limit of the compiled regex."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ The upper size limit of the compiled regex. The default limit is 10M. The argument accepts the same size suffixes as allowed in the --max-filesize flag. -"); +" + ); let arg = RGArg::flag("regex-size-limit", "NUM+SUFFIX?") - .help(SHORT).long_help(LONG); + .help(SHORT) + .long_help(LONG); args.push(arg); } fn flag_regexp(args: &mut Vec) { const SHORT: &str = "A pattern to search for."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ A pattern to search for. This option can be provided multiple times, where all patterns given are searched. Lines matching at least one of the provided patterns are printed. This flag can also be used when searching for patterns @@ -2281,9 +2485,12 @@ You can also use the special '--' delimiter to indicate that no more flags will be provided. 
Namely, the following is equivalent to the above: rg -- -foo -"); - let arg = RGArg::flag("regexp", "PATTERN").short("e") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("regexp", "PATTERN") + .short("e") + .help(SHORT) + .long_help(LONG) .multiple() .allow_leading_hyphen(); args.push(arg); @@ -2291,7 +2498,8 @@ will be provided. Namely, the following is equivalent to the above: fn flag_replace(args: &mut Vec) { const SHORT: &str = "Replace matches with the given text."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Replace every match with the text given when printing results. Neither this flag nor any other ripgrep flag will modify your files. @@ -2305,44 +2513,53 @@ Note that the replacement by default replaces each match, and NOT the entire line. To replace the entire line, you should match the entire line. This flag can be used with the -o/--only-matching flag. -"); - let arg = RGArg::flag("replace", "REPLACEMENT_TEXT").short("r") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("replace", "REPLACEMENT_TEXT") + .short("r") + .help(SHORT) + .long_help(LONG) .allow_leading_hyphen(); args.push(arg); } fn flag_search_zip(args: &mut Vec) { const SHORT: &str = "Search in compressed files."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Search in compressed files. Currently gzip, bzip2, xz, LZ4, LZMA, Brotli and Zstd files are supported. This option expects the decompression binaries to be available in your PATH. This flag can be disabled with --no-search-zip. -"); - let arg = RGArg::switch("search-zip").short("z") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("search-zip") + .short("z") + .help(SHORT) + .long_help(LONG) .overrides("no-search-zip") .overrides("pre"); args.push(arg); - let arg = RGArg::switch("no-search-zip") - .hidden() - .overrides("search-zip"); + let arg = RGArg::switch("no-search-zip").hidden().overrides("search-zip"); args.push(arg); } fn flag_smart_case(args: &mut Vec) { const SHORT: &str = "Smart case search."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Searches case insensitively if the pattern is all lowercase. Search case sensitively otherwise. This overrides the -s/--case-sensitive and -i/--ignore-case flags. -"); - let arg = RGArg::switch("smart-case").short("S") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("smart-case") + .short("S") + .help(SHORT) + .long_help(LONG) .overrides("case-sensitive") .overrides("ignore-case"); args.push(arg); @@ -2350,16 +2567,19 @@ This overrides the -s/--case-sensitive and -i/--ignore-case flags. fn flag_sort_files(args: &mut Vec) { const SHORT: &str = "DEPRECATED"; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ DEPRECATED: Use --sort or --sortr instead. Sort results by file path. Note that this currently disables all parallelism and runs search in a single thread. This flag can be disabled with --no-sort-files. -"); +" + ); let arg = RGArg::switch("sort-files") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .hidden() .overrides("no-sort-files") .overrides("sort") @@ -2377,7 +2597,8 @@ This flag can be disabled with --no-sort-files. fn flag_sort(args: &mut Vec) { const SHORT: &str = "Sort results in ascending order. Implies --threads=1."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag enables sorting of results in ascending order. The possible values for this flag are: @@ -2396,9 +2617,11 @@ this flag overrides --sortr. 
Note that sorting results currently always forces ripgrep to abandon parallelism and run in a single thread. -"); +" + ); let arg = RGArg::flag("sort", "SORTBY") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .possible_values(&["path", "modified", "accessed", "created", "none"]) .overrides("sortr") .overrides("sort-files") @@ -2409,7 +2632,8 @@ parallelism and run in a single thread. fn flag_sortr(args: &mut Vec) { const SHORT: &str = "Sort results in descending order. Implies --threads=1."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ This flag enables sorting of results in descending order. The possible values for this flag are: @@ -2428,9 +2652,11 @@ overrides --sort. Note that sorting results currently always forces ripgrep to abandon parallelism and run in a single thread. -"); +" + ); let arg = RGArg::flag("sortr", "SORTBY") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .possible_values(&["path", "modified", "accessed", "created", "none"]) .overrides("sort") .overrides("sort-files") @@ -2440,7 +2666,8 @@ parallelism and run in a single thread. fn flag_stats(args: &mut Vec) { const SHORT: &str = "Print statistics about this ripgrep search."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Print aggregate statistics about this ripgrep search. When this flag is present, ripgrep will print the following stats to stdout at the end of the search: number of matched lines, number of files with matches, number of files @@ -2452,21 +2679,22 @@ Note that this flag has no effect if --files, --files-with-matches or --files-without-match is passed. This flag can be disabled with --no-stats. -"); +" + ); let arg = RGArg::switch("stats") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .overrides("no-stats"); args.push(arg); - let arg = RGArg::switch("no-stats") - .hidden() - .overrides("stats"); + let arg = RGArg::switch("no-stats").hidden().overrides("stats"); args.push(arg); } fn flag_text(args: &mut Vec) { const SHORT: &str = "Search binary files as if they were text."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Search binary files as if they were text. When this flag is present, ripgrep's binary file detection is disabled. This means that when a binary file is searched, its contents may be printed if there is a match. This may cause @@ -2479,9 +2707,12 @@ Alternatively, if the '--binary' flag is used, then ripgrep will only quit when it sees a NUL byte after it sees a match (or searches the entire file). This flag can be disabled with '--no-text'. It overrides the '--binary' flag. -"); - let arg = RGArg::switch("text").short("a") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("text") + .short("a") + .help(SHORT) + .long_help(LONG) .overrides("no-text") .overrides("binary") .overrides("no-binary"); @@ -2497,37 +2728,39 @@ This flag can be disabled with '--no-text'. It overrides the '--binary' flag. fn flag_threads(args: &mut Vec) { const SHORT: &str = "The approximate number of threads to use."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ The approximate number of threads to use. A value of 0 (which is the default) causes ripgrep to choose the thread count using heuristics. 
-"); - let arg = RGArg::flag("threads", "NUM").short("j") - .help(SHORT).long_help(LONG); +" + ); + let arg = + RGArg::flag("threads", "NUM").short("j").help(SHORT).long_help(LONG); args.push(arg); } fn flag_trim(args: &mut Vec) { const SHORT: &str = "Trim prefixed whitespace from matches."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ When set, all ASCII whitespace at the beginning of each line printed will be trimmed. This flag can be disabled with --no-trim. -"); - let arg = RGArg::switch("trim") - .help(SHORT).long_help(LONG) - .overrides("no-trim"); +" + ); + let arg = + RGArg::switch("trim").help(SHORT).long_help(LONG).overrides("no-trim"); args.push(arg); - let arg = RGArg::switch("no-trim") - .hidden() - .overrides("trim"); + let arg = RGArg::switch("no-trim").hidden().overrides("trim"); args.push(arg); } fn flag_type(args: &mut Vec) { const SHORT: &str = "Only search files matching TYPE."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Only search files matching TYPE. Multiple type flags may be provided. Use the --type-list flag to list all available types. @@ -2536,16 +2769,20 @@ was provided for every file type supported by ripgrep (including any custom file types). The end result is that '--type all' causes ripgrep to search in \"whitelist\" mode, where it will only search files it recognizes via its type definitions. -"); - let arg = RGArg::flag("type", "TYPE").short("t") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("type", "TYPE") + .short("t") + .help(SHORT) + .long_help(LONG) .multiple(); args.push(arg); } fn flag_type_add(args: &mut Vec) { const SHORT: &str = "Add a new glob for a file type."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Add a new glob for a particular file type. Only one glob can be added at a time. Multiple --type-add flags can be provided. Unless --type-clear is used, globs are added to any existing globs defined inside of ripgrep. @@ -2572,47 +2809,59 @@ Additional glob rules can still be added to the src type by using the Note that type names must consist only of Unicode letters or numbers. Punctuation characters are not allowed. -"); +" + ); let arg = RGArg::flag("type-add", "TYPE_SPEC") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .multiple(); args.push(arg); } fn flag_type_clear(args: &mut Vec) { const SHORT: &str = "Clear globs for a file type."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Clear the file type globs previously defined for TYPE. This only clears the default type definitions that are found inside of ripgrep. Note that this MUST be passed to every invocation of ripgrep. Type settings are NOT persisted. -"); +" + ); let arg = RGArg::flag("type-clear", "TYPE") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) .multiple(); args.push(arg); } fn flag_type_not(args: &mut Vec) { const SHORT: &str = "Do not search files matching TYPE."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Do not search files matching TYPE. Multiple type-not flags may be provided. Use the --type-list flag to list all available types. 
-"); - let arg = RGArg::flag("type-not", "TYPE").short("T") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::flag("type-not", "TYPE") + .short("T") + .help(SHORT) + .long_help(LONG) .multiple(); args.push(arg); } fn flag_type_list(args: &mut Vec) { const SHORT: &str = "Show all supported file types."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Show all supported file types and their corresponding globs. -"); +" + ); let arg = RGArg::switch("type-list") - .help(SHORT).long_help(LONG) + .help(SHORT) + .long_help(LONG) // This also technically conflicts with PATTERN, but the first file // path will actually be in PATTERN. .conflicts(&["file", "files", "pattern", "regexp"]); @@ -2621,69 +2870,86 @@ Show all supported file types and their corresponding globs. fn flag_unrestricted(args: &mut Vec) { const SHORT: &str = "Reduce the level of \"smart\" searching."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Reduce the level of \"smart\" searching. A single -u won't respect .gitignore (etc.) files. Two -u flags will additionally search hidden files and directories. Three -u flags will additionally search binary files. 'rg -uuu' is roughly equivalent to 'grep -r'. -"); - let arg = RGArg::switch("unrestricted").short("u") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("unrestricted") + .short("u") + .help(SHORT) + .long_help(LONG) .multiple(); args.push(arg); } fn flag_vimgrep(args: &mut Vec) { const SHORT: &str = "Show results in vim compatible format."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Show results with every match on its own line, including line numbers and column numbers. With this option, a line with more than one match will be printed more than once. -"); - let arg = RGArg::switch("vimgrep") - .help(SHORT).long_help(LONG); +" + ); + let arg = RGArg::switch("vimgrep").help(SHORT).long_help(LONG); args.push(arg); } fn flag_with_filename(args: &mut Vec) { const SHORT: &str = "Print the file path with the matched lines."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Display the file path for matches. This is the default when more than one file is searched. If --heading is enabled (the default when printing to a terminal), the file path will be shown above clusters of matches from each file; otherwise, the file name will be shown as a prefix for each matched line. This flag overrides --no-filename. -"); - let arg = RGArg::switch("with-filename").short("H") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("with-filename") + .short("H") + .help(SHORT) + .long_help(LONG) .overrides("no-filename"); args.push(arg); const NO_SHORT: &str = "Never print the file path with the matched lines."; - const NO_LONG: &str = long!("\ + const NO_LONG: &str = long!( + "\ Never print the file path with the matched lines. This is the default when ripgrep is explicitly instructed to search one file or stdin. This flag overrides --with-filename. -"); - let arg = RGArg::switch("no-filename").short("I") - .help(NO_SHORT).long_help(NO_LONG) +" + ); + let arg = RGArg::switch("no-filename") + .short("I") + .help(NO_SHORT) + .long_help(NO_LONG) .overrides("with-filename"); args.push(arg); } fn flag_word_regexp(args: &mut Vec) { const SHORT: &str = "Only show matches surrounded by word boundaries."; - const LONG: &str = long!("\ + const LONG: &str = long!( + "\ Only show matches surrounded by word boundaries. This is roughly equivalent to putting \\b before and after all of the search patterns. 
This overrides the --line-regexp flag. -"); - let arg = RGArg::switch("word-regexp").short("w") - .help(SHORT).long_help(LONG) +" + ); + let arg = RGArg::switch("word-regexp") + .short("w") + .help(SHORT) + .long_help(LONG) .overrides("line-regexp"); args.push(arg); } diff --git a/src/args.rs b/src/args.rs index 4989f14d8..989f95c2f 100644 --- a/src/args.rs +++ b/src/args.rs @@ -17,11 +17,8 @@ use grep::pcre2::{ RegexMatcherBuilder as PCRE2RegexMatcherBuilder, }; use grep::printer::{ - ColorSpecs, Stats, - JSON, JSONBuilder, - Standard, StandardBuilder, - Summary, SummaryBuilder, SummaryKind, - default_color_specs, + default_color_specs, ColorSpecs, JSONBuilder, Standard, StandardBuilder, + Stats, Summary, SummaryBuilder, SummaryKind, JSON, }; use grep::regex::{ RegexMatcher as RustRegexMatcher, @@ -36,15 +33,12 @@ use ignore::{Walk, WalkBuilder, WalkParallel}; use log; use num_cpus; use regex; -use termcolor::{ - WriteColor, - BufferWriter, ColorChoice, -}; +use termcolor::{BufferWriter, ColorChoice, WriteColor}; use crate::app; use crate::config; use crate::logger::Logger; -use crate::messages::{set_messages, set_ignore_messages}; +use crate::messages::{set_ignore_messages, set_messages}; use crate::path_printer::{PathPrinter, PathPrinterBuilder}; use crate::search::{ PatternMatcher, Printer, SearchWorker, SearchWorkerBuilder, @@ -84,11 +78,9 @@ impl Command { match *self { Search | SearchParallel => true, - | SearchNever - | Files - | FilesParallel - | Types - | PCRE2Version => false, + SearchNever | Files | FilesParallel | Types | PCRE2Version => { + false + } } } } @@ -210,15 +202,12 @@ impl Args { .printer_standard(self.paths(), wtr, separator_search) .map(Printer::Standard) } - OutputKind::Summary => { - self.matches() - .printer_summary(self.paths(), wtr) - .map(Printer::Summary) - } + OutputKind::Summary => self + .matches() + .printer_summary(self.paths(), wtr) + .map(Printer::Summary), OutputKind::JSON => { - self.matches() - .printer_json(wtr) - .map(Printer::JSON) + self.matches().printer_json(wtr).map(Printer::JSON) } } } @@ -452,29 +441,23 @@ impl SortBy { } SortByKind::LastModified => { builder.sort_by_file_path(move |a, b| { - sort_by_metadata_time( - a, b, - self.reverse, - |md| md.modified(), - ) + sort_by_metadata_time(a, b, self.reverse, |md| { + md.modified() + }) }); } SortByKind::LastAccessed => { builder.sort_by_file_path(move |a, b| { - sort_by_metadata_time( - a, b, - self.reverse, - |md| md.accessed(), - ) + sort_by_metadata_time(a, b, self.reverse, |md| { + md.accessed() + }) }); } SortByKind::Created => { builder.sort_by_file_path(move |a, b| { - sort_by_metadata_time( - a, b, - self.reverse, - |md| md.created(), - ) + sort_by_metadata_time(a, b, self.reverse, |md| { + md.created() + }) }); } } @@ -520,7 +503,7 @@ impl EncodingMode { fn has_explicit_encoding(&self) -> bool { match self { EncodingMode::Some(_) => true, - _ => false + _ => false, } } } @@ -568,13 +551,12 @@ impl ArgMatches { let patterns = self.patterns()?; let matcher = self.matcher(&patterns)?; let mut paths = self.paths(); - let using_default_path = - if paths.is_empty() { - paths.push(self.path_default()); - true - } else { - false - }; + let using_default_path = if paths.is_empty() { + paths.push(self.path_default()); + true + } else { + false + }; Ok(Args(Arc::new(ArgsImp { matches: self, patterns: patterns, @@ -605,7 +587,8 @@ impl ArgMatches { Err(err) => err, }; log::debug!( - "error building Rust regex in hybrid mode:\n{}", rust_err, + "error building Rust regex in hybrid mode:\n{}", + 
rust_err, ); let pcre_err = match self.matcher_pcre2(patterns) { Ok(matcher) => return Ok(PatternMatcher::PCRE2(matcher)), @@ -616,7 +599,10 @@ impl ArgMatches { engine or with PCRE2.\n\n\ default regex engine error:\n{}\n{}\n{}\n\n\ PCRE2 regex engine error:\n{}", - "~".repeat(79), rust_err, "~".repeat(79), pcre_err, + "~".repeat(79), + rust_err, + "~".repeat(79), + pcre_err, ))) } else { let matcher = match self.matcher_rust(patterns) { @@ -660,14 +646,10 @@ impl ArgMatches { if self.is_present("multiline") { builder.dot_matches_new_line(self.is_present("multiline-dotall")); if self.is_present("crlf") { - builder - .crlf(true) - .line_terminator(None); + builder.crlf(true).line_terminator(None); } } else { - builder - .line_terminator(Some(b'\n')) - .dot_matches_new_line(false); + builder.line_terminator(Some(b'\n')).dot_matches_new_line(false); if self.is_present("crlf") { builder.crlf(true); } @@ -686,12 +668,11 @@ impl ArgMatches { if let Some(limit) = self.dfa_size_limit()? { builder.dfa_size_limit(limit); } - let res = - if self.is_present("fixed-strings") { - builder.build_literals(patterns) - } else { - builder.build(&patterns.join("|")) - }; + let res = if self.is_present("fixed-strings") { + builder.build_literals(patterns) + } else { + builder.build(&patterns.join("|")) + }; match res { Ok(m) => Ok(m), Err(err) => Err(From::from(suggest_multiline(err.to_string()))), @@ -718,7 +699,7 @@ impl ArgMatches { // The PCRE2 docs say that 32KB is the default, and that 1MB // should be big enough for anything. But let's crank it to // 10MB. - .max_jit_stack_size(Some(10 * (1<<20))); + .max_jit_stack_size(Some(10 * (1 << 20))); } if self.unicode() { builder.utf(true).ucp(true); @@ -822,14 +803,13 @@ impl ArgMatches { /// Build a searcher from the command line parameters. fn searcher(&self, paths: &[PathBuf]) -> Result { let (ctx_before, ctx_after) = self.contexts()?; - let line_term = - if self.is_present("crlf") { - LineTerminator::crlf() - } else if self.is_present("null-data") { - LineTerminator::byte(b'\x00') - } else { - LineTerminator::byte(b'\n') - }; + let line_term = if self.is_present("crlf") { + LineTerminator::crlf() + } else if self.is_present("null-data") { + LineTerminator::byte(b'\x00') + } else { + LineTerminator::byte(b'\n') + }; let mut builder = SearcherBuilder::new(); builder .line_terminator(line_term) @@ -902,12 +882,9 @@ impl ArgMatches { /// Returns the form of binary detection to perform on files that are /// implicitly searched via recursive directory traversal. fn binary_detection_implicit(&self) -> BinaryDetection { - let none = - self.is_present("text") - || self.is_present("null-data"); + let none = self.is_present("text") || self.is_present("null-data"); let convert = - self.is_present("binary") - || self.unrestricted_count() >= 3; + self.is_present("binary") || self.unrestricted_count() >= 3; if none { BinaryDetection::none() } else if convert { @@ -925,9 +902,7 @@ impl ArgMatches { /// as a filter (but quitting immediately once a NUL byte is seen), and we /// should never filter out files that the user wants to explicitly search. fn binary_detection_explicit(&self) -> BinaryDetection { - let none = - self.is_present("text") - || self.is_present("null-data"); + let none = self.is_present("text") || self.is_present("null-data"); if none { BinaryDetection::none() } else { @@ -955,8 +930,8 @@ impl ArgMatches { /// case is disabled. 
fn case_smart(&self) -> bool { self.is_present("smart-case") - && !self.is_present("ignore-case") - && !self.is_present("case-sensitive") + && !self.is_present("ignore-case") + && !self.is_present("case-sensitive") } /// Returns the user's color choice based on command line parameters and @@ -1012,11 +987,7 @@ impl ArgMatches { let after = self.usize_of("after-context")?.unwrap_or(0); let before = self.usize_of("before-context")?.unwrap_or(0); let both = self.usize_of("context")?.unwrap_or(0); - Ok(if both > 0 { - (both, both) - } else { - (before, after) - }) + Ok(if both > 0 { (both, both) } else { (before, after) }) } /// Returns the unescaped context separator in UTF-8 bytes. @@ -1111,8 +1082,8 @@ impl ArgMatches { false } else { cli::is_tty_stdout() - || self.is_present("heading") - || self.is_present("pretty") + || self.is_present("heading") + || self.is_present("pretty") } } @@ -1168,10 +1139,10 @@ impl ArgMatches { // tty for human consumption, except for one interesting case: when // we're only searching stdin. This makes pipelines work as expected. (cli::is_tty_stdout() && !self.is_only_stdin(paths)) - || self.is_present("line-number") - || self.is_present("column") - || self.is_present("pretty") - || self.is_present("vimgrep") + || self.is_present("line-number") + || self.is_present("column") + || self.is_present("pretty") + || self.is_present("vimgrep") } /// The maximum number of columns allowed on each line. @@ -1264,8 +1235,7 @@ impl ArgMatches { } let (count, count_matches) = self.counts(); - let summary = - count + let summary = count || count_matches || self.is_present("files-with-matches") || self.is_present("files-without-match"); @@ -1325,10 +1295,10 @@ impl ArgMatches { /// be used when ripgrep is not otherwise given at least one file path /// as a positional argument. fn path_default(&self) -> PathBuf { - let file_is_stdin = self.values_of_os("file") + let file_is_stdin = self + .values_of_os("file") .map_or(false, |mut files| files.any(|f| f == "-")); - let search_cwd = - !cli::is_readable_stdin() + let search_cwd = !cli::is_readable_stdin() || (self.is_present("file") && file_is_stdin) || self.is_present("files") || self.is_present("type-list") @@ -1357,8 +1327,8 @@ impl ArgMatches { the given separator is {} bytes: {}\n\ In some shells on Windows '/' is automatically \ expanded. Use '//' instead.", - sep.len(), - cli::escape(&sep), + sep.len(), + cli::escape(&sep), ))) } else { Ok(Some(sep[0])) @@ -1408,14 +1378,16 @@ impl ArgMatches { if let Some(paths) = self.values_of_os("file") { for path in paths { if path == "-" { - pats.extend(cli::patterns_from_stdin()? - .into_iter() - .map(|p| self.pattern_from_string(p)) + pats.extend( + cli::patterns_from_stdin()? + .into_iter() + .map(|p| self.pattern_from_string(p)), ); } else { - pats.extend(cli::patterns_from_path(path)? - .into_iter() - .map(|p| self.pattern_from_string(p)) + pats.extend( + cli::patterns_from_path(path)? 
+ .into_iter() + .map(|p| self.pattern_from_string(p)), ); } } @@ -1528,7 +1500,7 @@ impl ArgMatches { None => match self.value_of_lossy("sortr") { None => return Ok(SortBy::none()), Some(choice) => SortBy::desc(SortByKind::new(&choice)), - } + }, Some(choice) => SortBy::asc(SortByKind::new(&choice)), }; Ok(sortby) @@ -1571,11 +1543,7 @@ impl ArgMatches { return Ok(1); } let threads = self.usize_of("threads")?.unwrap_or(0); - Ok(if threads == 0 { - cmp::min(12, num_cpus::get()) - } else { - threads - }) + Ok(if threads == 0 { cmp::min(12, num_cpus::get()) } else { threads }) } /// Builds a file type matcher from the command line flags. @@ -1623,9 +1591,11 @@ impl ArgMatches { } else { let path_stdin = Path::new("-"); self.is_present("with-filename") - || self.is_present("vimgrep") - || paths.len() > 1 - || paths.get(0).map_or(false, |p| p != path_stdin && p.is_dir()) + || self.is_present("vimgrep") + || paths.len() > 1 + || paths + .get(0) + .map_or(false, |p| p != path_stdin && p.is_dir()) } } } @@ -1648,11 +1618,7 @@ impl ArgMatches { None => return Ok(None), Some(n) => n, }; - Ok(if n == 0 { - None - } else { - Some(n) - }) + Ok(if n == 0 { None } else { Some(n) }) } /// Safely reads an arg value with the given name, and if it's present, @@ -1718,19 +1684,25 @@ fn suggest_pcre2(msg: String) -> String { if !msg.contains("backreferences") && !msg.contains("look-around") { msg } else { - format!("{} + format!( + "{} Consider enabling PCRE2 with the --pcre2 flag, which can handle backreferences -and look-around.", msg) +and look-around.", + msg + ) } } fn suggest_multiline(msg: String) -> String { if msg.contains("the literal") && msg.contains("not allowed") { - format!("{} + format!( + "{} Consider enabling multiline mode with the --multiline flag (or -U for short). -When multiline mode is enabled, new line characters can be matched.", msg) +When multiline mode is enabled, new line characters can be matched.", + msg + ) } else { msg } @@ -1738,10 +1710,7 @@ When multiline mode is enabled, new line characters can be matched.", msg) /// Convert the result of parsing a human readable file size to a `usize`, /// failing if the type does not fit. -fn u64_to_usize( - arg_name: &str, - value: Option, -) -> Result> { +fn u64_to_usize(arg_name: &str, value: Option) -> Result> { use std::usize; let value = match value { @@ -1766,7 +1735,8 @@ fn sort_by_metadata_time( reverse: bool, get_time: G, ) -> cmp::Ordering -where G: Fn(&fs::Metadata) -> io::Result +where + G: Fn(&fs::Metadata) -> io::Result, { let t1 = match p1.metadata().and_then(|md| get_time(&md)) { Ok(t) => t, @@ -1789,11 +1759,10 @@ where G: Fn(&fs::Metadata) -> io::Result /// corresponds to a `--help` or `--version` request. In which case, the /// corresponding output is printed and the current process is exited /// successfully. 
-fn clap_matches( - args: I, -) -> Result> -where I: IntoIterator, - T: Into + Clone +fn clap_matches(args: I) -> Result> +where + I: IntoIterator, + T: Into + Clone, { let err = match app::app().get_matches_from_safe(args) { Ok(matches) => return Ok(matches), @@ -1831,5 +1800,6 @@ fn current_dir() -> Result { "failed to get current working directory: {} \ --- did your CWD get deleted?", err, - ).into()) + ) + .into()) } diff --git a/src/config.rs b/src/config.rs index 3deade9af..5426554da 100644 --- a/src/config.rs +++ b/src/config.rs @@ -4,9 +4,9 @@ use std::env; use std::error::Error; +use std::ffi::OsString; use std::fs::File; use std::io; -use std::ffi::OsString; use std::path::{Path, PathBuf}; use bstr::{io::BufReadExt, ByteSlice}; @@ -102,12 +102,13 @@ fn parse_reader( #[cfg(test)] mod tests { - use std::ffi::OsString; use super::parse_reader; + use std::ffi::OsString; #[test] fn basic() { - let (args, errs) = parse_reader(&b"\ + let (args, errs) = parse_reader( + &b"\ # Test --context=0 --smart-case @@ -116,13 +117,13 @@ mod tests { # --bar --foo -"[..]).unwrap(); +"[..], + ) + .unwrap(); assert!(errs.is_empty()); let args: Vec = args.into_iter().map(|s| s.into_string().unwrap()).collect(); - assert_eq!(args, vec![ - "--context=0", "--smart-case", "-u", "--foo", - ]); + assert_eq!(args, vec!["--context=0", "--smart-case", "-u", "--foo",]); } // We test that we can handle invalid UTF-8 on Unix-like systems. @@ -131,32 +132,38 @@ mod tests { fn error() { use std::os::unix::ffi::OsStringExt; - let (args, errs) = parse_reader(&b"\ + let (args, errs) = parse_reader( + &b"\ quux foo\xFFbar baz -"[..]).unwrap(); +"[..], + ) + .unwrap(); assert!(errs.is_empty()); - assert_eq!(args, vec![ - OsString::from("quux"), - OsString::from_vec(b"foo\xFFbar".to_vec()), - OsString::from("baz"), - ]); + assert_eq!( + args, + vec![ + OsString::from("quux"), + OsString::from_vec(b"foo\xFFbar".to_vec()), + OsString::from("baz"), + ] + ); } // ... but test that invalid UTF-8 fails on Windows. #[test] #[cfg(not(unix))] fn error() { - let (args, errs) = parse_reader(&b"\ + let (args, errs) = parse_reader( + &b"\ quux foo\xFFbar baz -"[..]).unwrap(); +"[..], + ) + .unwrap(); assert_eq!(errs.len(), 1); - assert_eq!(args, vec![ - OsString::from("quux"), - OsString::from("baz"), - ]); + assert_eq!(args, vec![OsString::from("quux"), OsString::from("baz"),]); } } diff --git a/src/path_printer.rs b/src/path_printer.rs index 324a27c48..59cadbc87 100644 --- a/src/path_printer.rs +++ b/src/path_printer.rs @@ -37,10 +37,7 @@ impl PathPrinterBuilder { /// Create a new path printer with the current configuration that writes /// paths to the given writer. pub fn build(&self, wtr: W) -> PathPrinter { - PathPrinter { - config: self.config.clone(), - wtr: wtr, - } + PathPrinter { config: self.config.clone(), wtr: wtr } } /// Set the color specification for this printer. 
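Aside (illustrative, not part of the patch): the bulk of the hunks above in src/app.rs and src/args.rs are rustfmt splitting long builder chains such as `.help(SHORT).long_help(LONG).overrides(..)` onto one method call per line, while chains that still fit within rustfmt's line-width limit stay on a single line. The sketch below shows that shape with a hypothetical `Flag` builder — a stand-in type, not ripgrep's actual `RGArg` — so it compiles on its own:

// Hypothetical stand-in for an argument builder. It only exists to show the
// chained-call layout this patch applies: short chains stay on one line,
// longer chains are broken into one call per line.
#[derive(Default)]
struct Flag {
    help: &'static str,
    long_help: &'static str,
    overrides: Vec<&'static str>,
}

impl Flag {
    fn help(mut self, s: &'static str) -> Flag {
        self.help = s;
        self
    }
    fn long_help(mut self, s: &'static str) -> Flag {
        self.long_help = s;
        self
    }
    fn overrides(mut self, name: &'static str) -> Flag {
        self.overrides.push(name);
        self
    }
}

fn main() {
    // A short chain fits within the width limit, so it stays on one line.
    let quiet = Flag::default().help("Do not print anything to stdout.");

    // A longer chain is split with one call per line, which is the shape of
    // most of the rewrites in the app.rs hunks above.
    let smart_case = Flag::default()
        .help("Smart case search.")
        .long_help("Searches case insensitively if the pattern is all lowercase.")
        .overrides("case-sensitive")
        .overrides("ignore-case");

    println!("{}: {}", smart_case.help, smart_case.long_help);
    assert_eq!(quiet.overrides.len(), 0);
    assert_eq!(smart_case.overrides.len(), 2);
}

The same convention explains the `where`-clause and closure rewrites nearby (e.g. `sort_by_metadata_time` and `clap_matches`): bounds move onto their own indented lines once the signature no longer fits on one line.
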
diff --git a/src/search.rs b/src/search.rs index 8597e80aa..af398da28 100644 --- a/src/search.rs +++ b/src/search.rs @@ -7,9 +7,9 @@ use std::time::Duration; use grep::cli; use grep::matcher::Matcher; #[cfg(feature = "pcre2")] -use grep::pcre2::{RegexMatcher as PCRE2RegexMatcher}; -use grep::printer::{JSON, Standard, Summary, Stats}; -use grep::regex::{RegexMatcher as RustRegexMatcher}; +use grep::pcre2::RegexMatcher as PCRE2RegexMatcher; +use grep::printer::{Standard, Stats, Summary, JSON}; +use grep::regex::RegexMatcher as RustRegexMatcher; use grep::searcher::{BinaryDetection, Searcher}; use ignore::overrides::Override; use serde_json as json; @@ -86,8 +86,12 @@ impl SearchWorkerBuilder { let command_builder = self.command_builder.clone(); let decomp_builder = self.decomp_builder.clone(); SearchWorker { - config, command_builder, decomp_builder, - matcher, searcher, printer, + config, + command_builder, + decomp_builder, + matcher, + searcher, + printer, } } @@ -227,9 +231,7 @@ impl Printer { stats: &Stats, ) -> io::Result<()> { match *self { - Printer::JSON(_) => { - self.print_stats_json(total_duration, stats) - } + Printer::JSON(_) => self.print_stats_json(total_duration, stats), Printer::Standard(_) | Printer::Summary(_) => { self.print_stats_human(total_duration, stats) } @@ -273,17 +275,20 @@ impl Printer { // the grep-printer crate. We simply "extend" it with the 'summary' // message type. let fractional = fractional_seconds(total_duration); - json::to_writer(self.get_mut(), &json!({ - "type": "summary", - "data": { - "stats": stats, - "elapsed_total": { - "secs": total_duration.as_secs(), - "nanos": total_duration.subsec_nanos(), - "human": format!("{:0.6}s", fractional), - }, - } - }))?; + json::to_writer( + self.get_mut(), + &json!({ + "type": "summary", + "data": { + "stats": stats, + "elapsed_total": { + "secs": total_duration.as_secs(), + "nanos": total_duration.subsec_nanos(), + "human": format!("{:0.6}s", fractional), + }, + } + }), + )?; write!(self.get_mut(), "\n") } @@ -315,12 +320,11 @@ pub struct SearchWorker { impl SearchWorker { /// Execute a search over the given subject. pub fn search(&mut self, subject: &Subject) -> io::Result { - let bin = - if subject.is_explicit() { - self.config.binary_explicit.clone() - } else { - self.config.binary_implicit.clone() - }; + let bin = if subject.is_explicit() { + self.config.binary_explicit.clone() + } else { + self.config.binary_implicit.clone() + }; self.searcher.set_binary_detection(bin); let path = subject.path(); @@ -389,19 +393,15 @@ impl SearchWorker { let mut cmd = Command::new(bin); cmd.arg(path).stdin(Stdio::from(File::open(path)?)); - let rdr = self - .command_builder - .build(&mut cmd) - .map_err(|err| { - io::Error::new( - io::ErrorKind::Other, - format!( - "preprocessor command could not start: '{:?}': {}", - cmd, - err, - ), - ) - })?; + let rdr = self.command_builder.build(&mut cmd).map_err(|err| { + io::Error::new( + io::ErrorKind::Other, + format!( + "preprocessor command could not start: '{:?}': {}", + cmd, err, + ), + ) + })?; self.search_reader(path, rdr).map_err(|err| { io::Error::new( io::ErrorKind::Other, @@ -413,10 +413,7 @@ impl SearchWorker { /// Attempt to decompress the data at the given file path and search the /// result. If the given file path isn't recognized as a compressed file, /// then search it without doing any decompression. 
- fn search_decompress( - &mut self, - path: &Path, - ) -> io::Result { + fn search_decompress(&mut self, path: &Path) -> io::Result { let rdr = self.decomp_builder.build(path)?; self.search_reader(path, rdr) } diff --git a/src/subject.rs b/src/subject.rs index 38e923597..d70c1a6cd 100644 --- a/src/subject.rs +++ b/src/subject.rs @@ -11,9 +11,7 @@ struct Config { impl Default for Config { fn default() -> Config { - Config { - strip_dot_prefix: false, - } + Config { strip_dot_prefix: false } } } @@ -78,9 +76,9 @@ impl SubjectBuilder { log::debug!( "ignoring {}: failed to pass subject filter: \ file type: {:?}, metadata: {:?}", - subj.dent.path().display(), - subj.dent.file_type(), - subj.dent.metadata() + subj.dent.path().display(), + subj.dent.file_type(), + subj.dent.metadata() ); } None diff --git a/tests/binary.rs b/tests/binary.rs index d2640988d..b16dcf9d5 100644 --- a/tests/binary.rs +++ b/tests/binary.rs @@ -36,9 +36,7 @@ const HAY: &'static [u8] = include_bytes!("./data/sherlock-nul.txt"); // that matches our file. rgtest!(after_match1_implicit, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "Project Gutenberg EBook", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "Project Gutenberg EBook", "-g", "hay"]); let expected = "\ hay:1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle @@ -51,9 +49,7 @@ WARNING: stopped searching binary file hay after match (found \"\\u{0}\" byte ar // explicitly. This results in identical behavior, but a different message. rgtest!(after_match1_explicit, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "Project Gutenberg EBook", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "Project Gutenberg EBook", "hay"]); let expected = "\ 1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle @@ -64,9 +60,7 @@ Binary file matches (found \"\\u{0}\" byte around offset 9741) // Like after_match1_explicit, except we feed our content on stdin. rgtest!(after_match1_stdin, |_: Dir, mut cmd: TestCommand| { - cmd.args(&[ - "--no-mmap", "-n", "Project Gutenberg EBook", - ]); + cmd.args(&["--no-mmap", "-n", "Project Gutenberg EBook"]); let expected = "\ 1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle @@ -81,7 +75,12 @@ Binary file matches (found \"\\u{0}\" byte around offset 9741) rgtest!(after_match1_implicit_binary, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); cmd.args(&[ - "--no-mmap", "-n", "--binary", "Project Gutenberg EBook", "-g", "hay", + "--no-mmap", + "-n", + "--binary", + "Project Gutenberg EBook", + "-g", + "hay", ]); let expected = "\ @@ -96,7 +95,12 @@ Binary file hay matches (found \"\\u{0}\" byte around offset 9741) rgtest!(after_match1_implicit_text, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); cmd.args(&[ - "--no-mmap", "-n", "--text", "Project Gutenberg EBook", "-g", "hay", + "--no-mmap", + "-n", + "--text", + "Project Gutenberg EBook", + "-g", + "hay", ]); let expected = "\ @@ -109,9 +113,7 @@ hay:1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle // detection should be performed. 
rgtest!(after_match1_explicit_text, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "--text", "Project Gutenberg EBook", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "--text", "Project Gutenberg EBook", "hay"]); let expected = "\ 1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle @@ -134,9 +136,7 @@ rgtest!(after_match1_explicit_text, |dir: Dir, mut cmd: TestCommand| { // --quiet flag is set. See the next test.) rgtest!(after_match1_implicit_path, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-l", "Project Gutenberg EBook", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-l", "Project Gutenberg EBook", "-g", "hay"]); eqnice!("hay\n", cmd.stdout()); }); @@ -145,9 +145,7 @@ rgtest!(after_match1_implicit_path, |dir: Dir, mut cmd: TestCommand| { // manifest as an exit code with no output.) rgtest!(after_match1_implicit_quiet, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-q", "Project Gutenberg EBook", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-q", "Project Gutenberg EBook", "-g", "hay"]); eqnice!("", cmd.stdout()); }); @@ -157,32 +155,34 @@ rgtest!(after_match1_implicit_quiet, |dir: Dir, mut cmd: TestCommand| { // detects the binary data and suppresses output. rgtest!(after_match1_implicit_count, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-c", "Project Gutenberg EBook", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-c", "Project Gutenberg EBook", "-g", "hay"]); cmd.assert_err(); }); // Like after_match1_implicit_count, except the --binary flag is provided, // which makes ripgrep disable binary data filtering even for implicit files. -rgtest!(after_match1_implicit_count_binary, |dir: Dir, mut cmd: TestCommand| { - dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-c", "--binary", - "Project Gutenberg EBook", - "-g", "hay", - ]); - eqnice!("hay:1\n", cmd.stdout()); -}); +rgtest!( + after_match1_implicit_count_binary, + |dir: Dir, mut cmd: TestCommand| { + dir.create_bytes("hay", HAY); + cmd.args(&[ + "--no-mmap", + "-c", + "--binary", + "Project Gutenberg EBook", + "-g", + "hay", + ]); + eqnice!("hay:1\n", cmd.stdout()); + } +); // Like after_match1_implicit_count, except the file path is provided // explicitly, so binary filtering is disabled and a count is correctly // reported. rgtest!(after_match1_explicit_count, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-c", "Project Gutenberg EBook", "hay", - ]); + cmd.args(&["--no-mmap", "-c", "Project Gutenberg EBook", "hay"]); eqnice!("1\n", cmd.stdout()); }); @@ -191,9 +191,11 @@ rgtest!(after_match1_explicit_count, |dir: Dir, mut cmd: TestCommand| { rgtest!(after_match2_implicit, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); cmd.args(&[ - "--no-mmap", "-n", + "--no-mmap", + "-n", "Project Gutenberg EBook|a medical student", - "-g", "hay", + "-g", + "hay", ]); let expected = "\ @@ -208,9 +210,12 @@ WARNING: stopped searching binary file hay after match (found \"\\u{0}\" byte ar rgtest!(after_match2_implicit_text, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); cmd.args(&[ - "--no-mmap", "-n", "--text", + "--no-mmap", + "-n", + "--text", "Project Gutenberg EBook|a medical student", - "-g", "hay", + "-g", + "hay", ]); let expected = "\ @@ -224,9 +229,7 @@ hay:236:\"And yet you say he is not a medical student?\" // after a NUL byte. 
rgtest!(before_match1_implicit, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "Heaven", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "Heaven", "-g", "hay"]); cmd.assert_err(); }); @@ -234,9 +237,7 @@ rgtest!(before_match1_implicit, |dir: Dir, mut cmd: TestCommand| { // occurs after a NUL byte when a file is explicitly searched. rgtest!(before_match1_explicit, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "Heaven", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "Heaven", "hay"]); let expected = "\ Binary file matches (found \"\\u{0}\" byte around offset 9741) @@ -249,9 +250,7 @@ Binary file matches (found \"\\u{0}\" byte around offset 9741) // the file were given explicitly. rgtest!(before_match1_implicit_binary, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "--binary", "Heaven", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "--binary", "Heaven", "-g", "hay"]); let expected = "\ Binary file hay matches (found \"\\u{0}\" byte around offset 9741) @@ -263,9 +262,7 @@ Binary file hay matches (found \"\\u{0}\" byte around offset 9741) // detection should be performed. rgtest!(before_match1_implicit_text, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "--text", "Heaven", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "--text", "Heaven", "-g", "hay"]); let expected = "\ hay:238:\"No. Heaven knows what the objects of his studies are. But here we @@ -277,9 +274,7 @@ hay:238:\"No. Heaven knows what the objects of his studies are. But here we // before a NUL byte, but within the same buffer as the NUL byte. rgtest!(before_match2_implicit, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "a medical student", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "a medical student", "-g", "hay"]); cmd.assert_err(); }); @@ -290,9 +285,7 @@ rgtest!(before_match2_implicit, |dir: Dir, mut cmd: TestCommand| { // the behavior of GNU grep.) rgtest!(before_match2_explicit, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "a medical student", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "a medical student", "hay"]); let expected = "\ Binary file matches (found \"\\u{0}\" byte around offset 9741) @@ -304,9 +297,7 @@ Binary file matches (found \"\\u{0}\" byte around offset 9741) // detection should be performed. 
rgtest!(before_match2_implicit_text, |dir: Dir, mut cmd: TestCommand| { dir.create_bytes("hay", HAY); - cmd.args(&[ - "--no-mmap", "-n", "--text", "a medical student", "-g", "hay", - ]); + cmd.args(&["--no-mmap", "-n", "--text", "a medical student", "-g", "hay"]); let expected = "\ hay:236:\"And yet you say he is not a medical student?\" diff --git a/tests/feature.rs b/tests/feature.rs index f3cf8463a..0ae21c1ac 100644 --- a/tests/feature.rs +++ b/tests/feature.rs @@ -1,5 +1,5 @@ use crate::hay::{SHERLOCK, SHERLOCK_CRLF}; -use crate::util::{Dir, TestCommand, sort_lines}; +use crate::util::{sort_lines, Dir, TestCommand}; // See: https://github.com/BurntSushi/ripgrep/issues/1 rgtest!(f1_sjis, |dir: Dir, mut cmd: TestCommand| { @@ -181,8 +181,10 @@ rgtest!(f45_precedence_internal, |dir: Dir, mut cmd: TestCommand| { dir.create("wat.log", "test"); cmd.args(&[ - "--ignore-file", ".not-an-ignore1", - "--ignore-file", ".not-an-ignore2", + "--ignore-file", + ".not-an-ignore1", + "--ignore-file", + ".not-an-ignore2", "test", ]); eqnice!("imp.log:test\n", cmd.stdout()); @@ -388,28 +390,34 @@ rgtest!(f362_exceeds_regex_size_limit, |dir: Dir, mut cmd: TestCommand| { // See: https://github.com/BurntSushi/ripgrep/issues/362 #[cfg(target_pointer_width = "32")] -rgtest!(f362_u64_to_narrow_usize_overflow, |dir: Dir, mut cmd: TestCommand| { - // --dfa-size-limit doesn't apply to PCRE2. - if dir.is_pcre2() { - return; +rgtest!( + f362_u64_to_narrow_usize_overflow, + |dir: Dir, mut cmd: TestCommand| { + // --dfa-size-limit doesn't apply to PCRE2. + if dir.is_pcre2() { + return; + } + dir.create_size("foo", 1000000); + + // 2^35 * 2^20 is ok for u64, but not for usize + cmd.arg("--dfa-size-limit").arg("34359738368M").arg("--files"); + cmd.assert_err(); } - dir.create_size("foo", 1000000); - - // 2^35 * 2^20 is ok for u64, but not for usize - cmd.arg("--dfa-size-limit").arg("34359738368M").arg("--files"); - cmd.assert_err(); -}); +); // See: https://github.com/BurntSushi/ripgrep/issues/411 -rgtest!(f411_single_threaded_search_stats, |dir: Dir, mut cmd: TestCommand| { - dir.create("sherlock", SHERLOCK); - - let lines = cmd.arg("--stats").arg("Sherlock").stdout(); - assert!(lines.contains("2 matched lines")); - assert!(lines.contains("1 files contained matches")); - assert!(lines.contains("1 files searched")); - assert!(lines.contains("seconds")); -}); +rgtest!( + f411_single_threaded_search_stats, + |dir: Dir, mut cmd: TestCommand| { + dir.create("sherlock", SHERLOCK); + + let lines = cmd.arg("--stats").arg("Sherlock").stdout(); + assert!(lines.contains("2 matched lines")); + assert!(lines.contains("1 files contained matches")); + assert!(lines.contains("1 files searched")); + assert!(lines.contains("seconds")); + } +); rgtest!(f411_parallel_search_stats, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock_1", SHERLOCK); @@ -568,7 +576,7 @@ rgtest!(f948_exit_code_error, |dir: Dir, mut cmd: TestCommand| { // See: https://github.com/BurntSushi/ripgrep/issues/917 rgtest!(f917_trim, |dir: Dir, mut cmd: TestCommand| { -const SHERLOCK: &'static str = "\ + const SHERLOCK: &'static str = "\ zzz For the Doctor Watsons of this world, as opposed to the Sherlock Holmeses, success in the province of detective work must always @@ -578,9 +586,7 @@ but Doctor Watson has to have it taken out for him and dusted, and exhibited clearly, with a label attached. 
"; dir.create("sherlock", SHERLOCK); - cmd.args(&[ - "-n", "-B1", "-A2", "--trim", "Holmeses", "sherlock", - ]); + cmd.args(&["-n", "-B1", "-A2", "--trim", "Holmeses", "sherlock"]); let expected = "\ 2-For the Doctor Watsons of this world, as opposed to the Sherlock @@ -596,7 +602,7 @@ but Doctor Watson has to have it taken out for him and dusted, // This is like f917_trim, except this tests that trimming occurs even when the // whitespace is part of a match. rgtest!(f917_trim_match, |dir: Dir, mut cmd: TestCommand| { -const SHERLOCK: &'static str = "\ + const SHERLOCK: &'static str = "\ zzz For the Doctor Watsons of this world, as opposed to the Sherlock Holmeses, success in the province of detective work must always @@ -606,9 +612,7 @@ but Doctor Watson has to have it taken out for him and dusted, and exhibited clearly, with a label attached. "; dir.create("sherlock", SHERLOCK); - cmd.args(&[ - "-n", "-B1", "-A2", "--trim", r"\s+Holmeses", "sherlock", - ]); + cmd.args(&["-n", "-B1", "-A2", "--trim", r"\s+Holmeses", "sherlock"]); let expected = "\ 2-For the Doctor Watsons of this world, as opposed to the Sherlock @@ -636,7 +640,8 @@ rgtest!(f993_null_data, |dir: Dir, mut cmd: TestCommand| { rgtest!(f1078_max_columns_preview1, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); cmd.args(&[ - "-M46", "--max-columns-preview", + "-M46", + "--max-columns-preview", "exhibited|dusted|has to have it", ]); @@ -650,7 +655,8 @@ sherlock:and exhibited clearly, with a label attached. rgtest!(f1078_max_columns_preview2, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); cmd.args(&[ - "-M43", "--max-columns-preview", + "-M43", + "--max-columns-preview", // Doing a replacement forces ripgrep to show the number of remaining // matches. Normally, this happens by default when printing a tty with // colors. @@ -702,10 +708,7 @@ sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock // Tests if without encoding 'none' flag null bytes are consumed by automatic // encoding detection. 
rgtest!(f1207_auto_encoding, |dir: Dir, mut cmd: TestCommand| { - dir.create_bytes( - "foo", - b"\xFF\xFE\x00\x62" - ); + dir.create_bytes("foo", b"\xFF\xFE\x00\x62"); cmd.arg("-a").arg("\\x00").arg("foo"); cmd.assert_exit_code(1); }); @@ -720,10 +723,7 @@ rgtest!(f1207_ignore_encoding, |dir: Dir, mut cmd: TestCommand| { return; } - dir.create_bytes( - "foo", - b"\xFF\xFE\x00\x62" - ); + dir.create_bytes("foo", b"\xFF\xFE\x00\x62"); cmd.arg("--encoding").arg("none").arg("-a").arg("\\x00").arg("foo"); eqnice!("\u{FFFD}\u{FFFD}\x00b\n", cmd.stdout()); }); @@ -734,25 +734,22 @@ rgtest!(f1414_no_require_git, |dir: Dir, mut cmd: TestCommand| { dir.create("foo", ""); dir.create("bar", ""); - let stdout = cmd.args(&[ - "--sort", "path", - "--files", - ]).stdout(); + let stdout = cmd.args(&["--sort", "path", "--files"]).stdout(); eqnice!("bar\nfoo\n", stdout); - let stdout = cmd.args(&[ - "--sort", "path", - "--files", - "--no-require-git", - ]).stdout(); + let stdout = + cmd.args(&["--sort", "path", "--files", "--no-require-git"]).stdout(); eqnice!("bar\n", stdout); - let stdout = cmd.args(&[ - "--sort", "path", - "--files", - "--no-require-git", - "--require-git", - ]).stdout(); + let stdout = cmd + .args(&[ + "--sort", + "path", + "--files", + "--no-require-git", + "--require-git", + ]) + .stdout(); eqnice!("bar\nfoo\n", stdout); }); @@ -770,12 +767,7 @@ rgtest!(f1420_no_ignore_dot, |dir: Dir, mut cmd: TestCommand| { rgtest!(no_context_sep, |dir: Dir, mut cmd: TestCommand| { dir.create("test", "foo\nctx\nbar\nctx\nfoo\nctx"); - cmd.args(&[ - "-A1", - "--no-context-separator", - "foo", - "test", - ]); + cmd.args(&["-A1", "--no-context-separator", "foo", "test"]); eqnice!("foo\nctx\nfoo\nctx\n", cmd.stdout()); }); @@ -783,7 +775,8 @@ rgtest!(no_context_sep_overrides, |dir: Dir, mut cmd: TestCommand| { dir.create("test", "foo\nctx\nbar\nctx\nfoo\nctx"); cmd.args(&[ "-A1", - "--context-separator", "AAA", + "--context-separator", + "AAA", "--no-context-separator", "foo", "test", @@ -796,7 +789,8 @@ rgtest!(no_context_sep_overridden, |dir: Dir, mut cmd: TestCommand| { cmd.args(&[ "-A1", "--no-context-separator", - "--context-separator", "AAA", + "--context-separator", + "AAA", "foo", "test", ]); @@ -805,33 +799,19 @@ rgtest!(no_context_sep_overridden, |dir: Dir, mut cmd: TestCommand| { rgtest!(context_sep, |dir: Dir, mut cmd: TestCommand| { dir.create("test", "foo\nctx\nbar\nctx\nfoo\nctx"); - cmd.args(&[ - "-A1", - "--context-separator", "AAA", - "foo", - "test", - ]); + cmd.args(&["-A1", "--context-separator", "AAA", "foo", "test"]); eqnice!("foo\nctx\nAAA\nfoo\nctx\n", cmd.stdout()); }); rgtest!(context_sep_default, |dir: Dir, mut cmd: TestCommand| { dir.create("test", "foo\nctx\nbar\nctx\nfoo\nctx"); - cmd.args(&[ - "-A1", - "foo", - "test", - ]); + cmd.args(&["-A1", "foo", "test"]); eqnice!("foo\nctx\n--\nfoo\nctx\n", cmd.stdout()); }); rgtest!(context_sep_empty, |dir: Dir, mut cmd: TestCommand| { dir.create("test", "foo\nctx\nbar\nctx\nfoo\nctx"); - cmd.args(&[ - "-A1", - "--context-separator", "", - "foo", - "test", - ]); + cmd.args(&["-A1", "--context-separator", "", "foo", "test"]); eqnice!("foo\nctx\n\nfoo\nctx\n", cmd.stdout()); }); diff --git a/tests/json.rs b/tests/json.rs index f49c90616..477b36dfa 100644 --- a/tests/json.rs +++ b/tests/json.rs @@ -108,8 +108,12 @@ enum Data { } impl Data { - fn text(s: &str) -> Data { Data::Text { text: s.to_string() } } - fn bytes(s: &str) -> Data { Data::Bytes { bytes: s.to_string() } } + fn text(s: &str) -> Data { + Data::Text { text: s.to_string() 
} + } + fn bytes(s: &str) -> Data { + Data::Bytes { bytes: s.to_string() } + } } #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] @@ -172,31 +176,17 @@ rgtest!(basic, |dir: Dir, mut cmd: TestCommand| { ), line_number: Some(3), absolute_offset: 129, - submatches: vec![ - SubMatch { - m: Data::text("Sherlock Holmes"), - start: 48, - end: 63, - }, - ], + submatches: vec![SubMatch { + m: Data::text("Sherlock Holmes"), + start: 48, + end: 63, + },], } ); - assert_eq!( - msgs[3].unwrap_end().path, - Some(Data::text("sherlock")) - ); - assert_eq!( - msgs[3].unwrap_end().binary_offset, - None - ); - assert_eq!( - msgs[4].unwrap_summary().stats.searches_with_match, - 1 - ); - assert_eq!( - msgs[4].unwrap_summary().stats.bytes_printed, - 494 - ); + assert_eq!(msgs[3].unwrap_end().path, Some(Data::text("sherlock"))); + assert_eq!(msgs[3].unwrap_end().binary_offset, None); + assert_eq!(msgs[4].unwrap_summary().stats.searches_with_match, 1); + assert_eq!(msgs[4].unwrap_summary().stats.bytes_printed, 494); }); #[cfg(unix)] @@ -239,13 +229,11 @@ rgtest!(notutf8, |dir: Dir, mut cmd: TestCommand| { lines: Data::bytes("cXV1eP9iYXo="), line_number: Some(1), absolute_offset: 0, - submatches: vec![ - SubMatch { - m: Data::bytes("/w=="), - start: 4, - end: 5, - }, - ], + submatches: vec![SubMatch { + m: Data::bytes("/w=="), + start: 4, + end: 5, + },], } ); }); @@ -282,13 +270,11 @@ rgtest!(notutf8_file, |dir: Dir, mut cmd: TestCommand| { lines: Data::bytes("cXV1eP9iYXo="), line_number: Some(1), absolute_offset: 0, - submatches: vec![ - SubMatch { - m: Data::bytes("/w=="), - start: 4, - end: 5, - }, - ], + submatches: vec![SubMatch { + m: Data::bytes("/w=="), + start: 4, + end: 5, + },], } ); }); @@ -306,11 +292,7 @@ rgtest!(crlf, |dir: Dir, mut cmd: TestCommand| { assert_eq!( msgs[1].unwrap_match().submatches[0].clone(), - SubMatch { - m: Data::text("Sherlock"), - start: 56, - end: 64, - }, + SubMatch { m: Data::text("Sherlock"), start: 56, end: 64 }, ); }); diff --git a/tests/macros.rs b/tests/macros.rs index 28b799d92..7e5958c37 100644 --- a/tests/macros.rs +++ b/tests/macros.rs @@ -11,7 +11,7 @@ macro_rules! rgtest { $fun(dir, cmd); } } - } + }; } #[macro_export] diff --git a/tests/misc.rs b/tests/misc.rs index b40267023..3f8c3ea95 100644 --- a/tests/misc.rs +++ b/tests/misc.rs @@ -1,5 +1,5 @@ use crate::hay::SHERLOCK; -use crate::util::{Dir, TestCommand, cmd_exists, sort_lines}; +use crate::util::{cmd_exists, sort_lines, Dir, TestCommand}; // This file contains "miscellaneous" tests that were either written before // features were tracked more explicitly, or were simply written without @@ -65,8 +65,10 @@ rgtest!(with_heading, |dir: Dir, mut cmd: TestCommand| { cmd.args(&[ // This forces the issue since --with-filename is disabled by default // when searching one file. - "--with-filename", "--heading", - "Sherlock", "sherlock", + "--with-filename", + "--heading", + "Sherlock", + "sherlock", ]); let expected = "\ @@ -184,9 +186,7 @@ be, to a very large extent, the result of luck. 
FooBar Holmes rgtest!(replace_groups, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); - cmd.args(&[ - "-r", "$2, $1", "([A-Z][a-z]+) ([A-Z][a-z]+)", "sherlock", - ]); + cmd.args(&["-r", "$2, $1", "([A-Z][a-z]+) ([A-Z][a-z]+)", "sherlock"]); let expected = "\ For the Watsons, Doctor of this world, as opposed to the Sherlock @@ -199,7 +199,8 @@ but Watson, Doctor has to have it taken out for him and dusted, rgtest!(replace_named_groups, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); cmd.args(&[ - "-r", "$last, $first", + "-r", + "$last, $first", "(?P[A-Z][a-z]+) (?P[A-Z][a-z]+)", "sherlock", ]); @@ -279,9 +280,7 @@ rgtest!(file_type_add, |dir: Dir, mut cmd: TestCommand| { dir.create("file.py", "Sherlock"); dir.create("file.rs", "Sherlock"); dir.create("file.wat", "Sherlock"); - cmd.args(&[ - "--type-add", "wat:*.wat", "-t", "wat", "Sherlock", - ]); + cmd.args(&["--type-add", "wat:*.wat", "-t", "wat", "Sherlock"]); eqnice!("file.wat:Sherlock\n", cmd.stdout()); }); @@ -292,9 +291,12 @@ rgtest!(file_type_add_compose, |dir: Dir, mut cmd: TestCommand| { dir.create("file.rs", "Sherlock"); dir.create("file.wat", "Sherlock"); cmd.args(&[ - "--type-add", "wat:*.wat", - "--type-add", "combo:include:wat,py", - "-t", "combo", + "--type-add", + "wat:*.wat", + "--type-add", + "combo:include:wat,py", + "-t", + "combo", "Sherlock", ]); @@ -394,11 +396,7 @@ rgtest!(count_matches_via_only, |dir: Dir, mut cmd: TestCommand| { rgtest!(include_zero, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); - cmd.args(&[ - "--count", - "--include-zero", - "nada", - ]); + cmd.args(&["--count", "--include-zero", "nada"]); cmd.assert_err(); let output = cmd.cmd().output().unwrap(); @@ -410,12 +408,7 @@ rgtest!(include_zero, |dir: Dir, mut cmd: TestCommand| { rgtest!(include_zero_override, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); - cmd.args(&[ - "--count", - "--include-zero", - "--no-include-zero", - "nada", - ]); + cmd.args(&["--count", "--include-zero", "--no-include-zero", "nada"]); cmd.assert_err(); let output = cmd.cmd().output().unwrap(); diff --git a/tests/multiline.rs b/tests/multiline.rs index 64065f721..67fa650d3 100644 --- a/tests/multiline.rs +++ b/tests/multiline.rs @@ -20,9 +20,7 @@ rgtest!(overlap2, |dir: Dir, mut cmd: TestCommand| { // Tests that even in a multiline search, a '.' does not match a newline. 
rgtest!(dot_no_newline, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); - cmd.args(&[ - "-n", "-U", "of this world.+detective work", "sherlock", - ]); + cmd.args(&["-n", "-U", "of this world.+detective work", "sherlock"]); cmd.assert_err(); }); @@ -30,8 +28,11 @@ rgtest!(dot_no_newline, |dir: Dir, mut cmd: TestCommand| { rgtest!(dot_all, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); cmd.args(&[ - "-n", "-U", "--multiline-dotall", - "of this world.+detective work", "sherlock", + "-n", + "-U", + "--multiline-dotall", + "of this world.+detective work", + "sherlock", ]); let expected = "\ @@ -45,8 +46,11 @@ rgtest!(dot_all, |dir: Dir, mut cmd: TestCommand| { rgtest!(only_matching, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); cmd.args(&[ - "-n", "-U", "--only-matching", - r"Watson|Sherlock\p{Any}+?Holmes", "sherlock", + "-n", + "-U", + "--only-matching", + r"Watson|Sherlock\p{Any}+?Holmes", + "sherlock", ]); let expected = "\ @@ -63,8 +67,11 @@ rgtest!(only_matching, |dir: Dir, mut cmd: TestCommand| { rgtest!(vimgrep, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); cmd.args(&[ - "-n", "-U", "--vimgrep", - r"Watson|Sherlock\p{Any}+?Holmes", "sherlock", + "-n", + "-U", + "--vimgrep", + r"Watson|Sherlock\p{Any}+?Holmes", + "sherlock", ]); let expected = "\ @@ -81,9 +88,7 @@ sherlock:5:12:but Doctor Watson has to have it taken out for him and dusted, // important test because multiline search must read the entire contents of // what it is searching into memory before executing the search. rgtest!(stdin, |_: Dir, mut cmd: TestCommand| { - cmd.args(&[ - "-n", "-U", r"of this world\p{Any}+?detective work", - ]); + cmd.args(&["-n", "-U", r"of this world\p{Any}+?detective work"]); let expected = "\ 1:For the Doctor Watsons of this world, as opposed to the Sherlock 2:Holmeses, success in the province of detective work must always @@ -95,8 +100,11 @@ rgtest!(stdin, |_: Dir, mut cmd: TestCommand| { rgtest!(context, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", SHERLOCK); cmd.args(&[ - "-n", "-U", "-C1", - r"detective work\p{Any}+?result of luck", "sherlock", + "-n", + "-U", + "-C1", + r"detective work\p{Any}+?result of luck", + "sherlock", ]); let expected = "\ diff --git a/tests/regression.rs b/tests/regression.rs index 29f15a275..89297b9c4 100644 --- a/tests/regression.rs +++ b/tests/regression.rs @@ -1,5 +1,5 @@ use crate::hay::SHERLOCK; -use crate::util::{Dir, TestCommand, sort_lines}; +use crate::util::{sort_lines, Dir, TestCommand}; // See: https://github.com/BurntSushi/ripgrep/issues/16 rgtest!(r16, |dir: Dir, mut cmd: TestCommand| { @@ -346,7 +346,10 @@ rgtest!(r391, |dir: Dir, mut cmd: TestCommand| { dir.create(".git/description", ""); cmd.args(&[ - "--no-ignore", "--hidden", "--follow", "--files", + "--no-ignore", + "--hidden", + "--follow", + "--files", "--glob", "!{.git,node_modules,plugged}/**", "--glob", @@ -371,14 +374,18 @@ rgtest!(r405, |dir: Dir, mut cmd: TestCommand| { rgtest!(r428_color_context_path, |dir: Dir, mut cmd: TestCommand| { dir.create("sherlock", "foo\nbar"); cmd.args(&[ - "-A1", "-H", "--no-heading", "-N", - "--colors=match:none", "--color=always", + "-A1", + "-H", + "--no-heading", + "-N", + "--colors=match:none", + "--color=always", "foo", ]); let expected = format!( "{colored_path}:foo\n{colored_path}-bar\n", - colored_path= + colored_path = "\x1b\x5b\x30\x6d\x1b\x5b\x33\x35\x6dsherlock\x1b\x5b\x30\x6d" ); eqnice!(expected, cmd.stdout()); @@ -414,9 +421,7 @@ 
rgtest!(r451_only_matching_as_in_issue, |dir: Dir, mut cmd: TestCommand| { // See: https://github.com/BurntSushi/ripgrep/issues/451 rgtest!(r451_only_matching, |dir: Dir, mut cmd: TestCommand| { dir.create("digits.txt", "1 2 3\n123\n"); - cmd.args(&[ - "--only-matching", "--column", r"[0-9]", "digits.txt", - ]); + cmd.args(&["--only-matching", "--column", r"[0-9]", "digits.txt"]); let expected = "\ 1:1:1 @@ -517,11 +522,16 @@ rgtest!(r568_leading_hyphen_option_args, |dir: Dir, mut cmd: TestCommand| { rgtest!(r599, |dir: Dir, mut cmd: TestCommand| { dir.create("input.txt", "\n\ntest\n"); cmd.args(&[ - "--color", "ansi", - "--colors", "path:none", - "--colors", "line:none", - "--colors", "match:fg:red", - "--colors", "match:style:nobold", + "--color", + "ansi", + "--colors", + "path:none", + "--colors", + "line:none", + "--colors", + "match:fg:red", + "--colors", + "match:style:nobold", "--line-number", r"^$", "input.txt", @@ -707,16 +717,19 @@ rgtest!(r1203_reverse_suffix_literal, |dir: Dir, _: TestCommand| { }); // See: https://github.com/BurntSushi/ripgrep/issues/1223 -rgtest!(r1223_no_dir_check_for_default_path, |dir: Dir, mut cmd: TestCommand| { - dir.create_dir("-"); - dir.create("a.json", "{}"); - dir.create("a.txt", "some text"); - - eqnice!( - "a.json\na.txt\n", - sort_lines(&cmd.arg("a").pipe(b"a.json\na.txt")) - ); -}); +rgtest!( + r1223_no_dir_check_for_default_path, + |dir: Dir, mut cmd: TestCommand| { + dir.create_dir("-"); + dir.create("a.json", "{}"); + dir.create("a.txt", "some text"); + + eqnice!( + "a.json\na.txt\n", + sort_lines(&cmd.arg("a").pipe(b"a.json\na.txt")) + ); + } +); // See: https://github.com/BurntSushi/ripgrep/issues/1259 rgtest!(r1259_drop_last_byte_nonl, |dir: Dir, mut cmd: TestCommand| { @@ -734,7 +747,8 @@ rgtest!(r1319, |dir: Dir, mut cmd: TestCommand| { dir.create("input", "CCAGCTACTCGGGAGGCTGAGGCTGGAGGATCGCTTGAGTCCAGGAGTTC"); eqnice!( "input:CCAGCTACTCGGGAGGCTGAGGCTGGAGGATCGCTTGAGTCCAGGAGTTC\n", - cmd.arg("TTGAGTCCAGGAG[ATCG]{2}C").stdout()); + cmd.arg("TTGAGTCCAGGAG[ATCG]{2}C").stdout() + ); }); // See: https://github.com/BurntSushi/ripgrep/issues/1334 @@ -753,27 +767,27 @@ rgtest!(r1389_bad_symlinks_no_biscuit, |dir: Dir, mut cmd: TestCommand| { dir.create("mydir/file.txt", "test"); dir.link_dir("mydir", "mylink"); - let stdout = cmd.args(&[ - "test", - "--no-ignore", - "--sort", "path", - "mylink", - ]).stdout(); + let stdout = cmd + .args(&["test", "--no-ignore", "--sort", "path", "mylink"]) + .stdout(); eqnice!("mylink/file.txt:test\n", stdout); }); // See: https://github.com/BurntSushi/ripgrep/pull/1446 -rgtest!(r1446_respect_excludes_in_worktree, |dir: Dir, mut cmd: TestCommand| { - dir.create_dir("repo/.git/info"); - dir.create("repo/.git/info/exclude", "ignored"); - dir.create_dir("repo/.git/worktrees/repotree"); - dir.create("repo/.git/worktrees/repotree/commondir", "../.."); - - dir.create_dir("repotree"); - dir.create("repotree/.git", "gitdir: repo/.git/worktrees/repotree"); - dir.create("repotree/ignored", ""); - dir.create("repotree/not-ignored", ""); - - cmd.arg("--sort").arg("path").arg("--files").arg("repotree"); - eqnice!("repotree/not-ignored\n", cmd.stdout()); -}); +rgtest!( + r1446_respect_excludes_in_worktree, + |dir: Dir, mut cmd: TestCommand| { + dir.create_dir("repo/.git/info"); + dir.create("repo/.git/info/exclude", "ignored"); + dir.create_dir("repo/.git/worktrees/repotree"); + dir.create("repo/.git/worktrees/repotree/commondir", "../.."); + + dir.create_dir("repotree"); + dir.create("repotree/.git", "gitdir: 
repo/.git/worktrees/repotree"); + dir.create("repotree/ignored", ""); + dir.create("repotree/not-ignored", ""); + + cmd.arg("--sort").arg("path").arg("--files").arg("repotree"); + eqnice!("repotree/not-ignored\n", cmd.stdout()); + } +); diff --git a/tests/util.rs b/tests/util.rs index b529ca55c..b6f6d3c83 100644 --- a/tests/util.rs +++ b/tests/util.rs @@ -72,19 +72,13 @@ impl Dir { .parent() .expect("executable's directory") .to_path_buf(); - let dir = env::temp_dir() - .join(TEST_DIR) - .join(name) - .join(&format!("{}", id)); + let dir = + env::temp_dir().join(TEST_DIR).join(name).join(&format!("{}", id)); if dir.exists() { nice_err(&dir, fs::remove_dir_all(&dir)); } nice_err(&dir, repeat(|| fs::create_dir_all(&dir))); - Dir { - root: root, - dir: dir, - pcre2: false, - } + Dir { root: root, dir: dir, pcre2: false } } /// Use PCRE2 for this test. @@ -262,12 +256,10 @@ impl TestCommand { } /// Add any number of arguments to the command. - pub fn args( - &mut self, - args: I, - ) -> &mut TestCommand - where I: IntoIterator, - A: AsRef + pub fn args(&mut self, args: I) -> &mut TestCommand + where + I: IntoIterator, + A: AsRef, { self.cmd.args(args); self @@ -292,8 +284,7 @@ impl TestCommand { Err(err) => { panic!( "could not convert from string: {:?}\n\n{}", - err, - stdout + err, stdout ); } } @@ -311,9 +302,7 @@ impl TestCommand { // risk of deadlock between parent and child process. let mut stdin = child.stdin.take().expect("expected standard input"); let input = input.to_owned(); - let worker = thread::spawn(move || { - stdin.write_all(&input) - }); + let worker = thread::spawn(move || stdin.write_all(&input)); let output = self.expect_success(child.wait_with_output().unwrap()); worker.join().unwrap().unwrap(); @@ -324,8 +313,7 @@ impl TestCommand { Err(err) => { panic!( "could not convert from string: {:?}\n\n{}", - err, - stdout + err, stdout ); } } @@ -368,9 +356,7 @@ impl TestCommand { \n\nexpected: {}\ \n\nfound: {}\ \n\n=====\n", - self.cmd, - expected_code, - code + self.cmd, expected_code, code ); } @@ -396,14 +382,14 @@ impl TestCommand { fn expect_success(&self, o: process::Output) -> process::Output { if !o.status.success() { - let suggest = - if o.stderr.is_empty() { - "\n\nDid your search end up with no results?".to_string() - } else { - "".to_string() - }; - - panic!("\n\n==========\n\ + let suggest = if o.stderr.is_empty() { + "\n\nDid your search end up with no results?".to_string() + } else { + "".to_string() + }; + + panic!( + "\n\n==========\n\ command failed but expected success!\ {}\ \n\ncommand: {:?}\ @@ -412,18 +398,19 @@ impl TestCommand { \n\nstdout: {}\ \n\nstderr: {}\ \n\n==========\n", - suggest, self.cmd, self.dir.dir.display(), o.status, - String::from_utf8_lossy(&o.stdout), - String::from_utf8_lossy(&o.stderr)); + suggest, + self.cmd, + self.dir.dir.display(), + o.status, + String::from_utf8_lossy(&o.stdout), + String::from_utf8_lossy(&o.stderr) + ); } o } } -fn nice_err( - path: &Path, - res: Result, -) -> T { +fn nice_err(path: &Path, res: Result) -> T { match res { Ok(t) => t, Err(err) => panic!("{}: {:?}", path.display(), err),