-
Notifications
You must be signed in to change notification settings - Fork 415
/
regex.rs
3649 lines (3520 loc) · 138 KB
/
regex.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
use core::{
borrow::Borrow,
panic::{RefUnwindSafe, UnwindSafe},
};
use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};
use regex_syntax::{
ast,
hir::{self, Hir},
};
use crate::{
meta::{
error::BuildError,
strategy::{self, Strategy},
wrappers,
},
nfa::thompson::WhichCaptures,
util::{
captures::{Captures, GroupInfo},
iter,
pool::{Pool, PoolGuard},
prefilter::Prefilter,
primitives::{NonMaxUsize, PatternID},
search::{HalfMatch, Input, Match, MatchKind, PatternSet, Span},
},
};
/// A type alias for our pool of meta::Cache that fixes the type parameters to
/// what we use for the meta regex below.
type CachePool = Pool<Cache, CachePoolFn>;

/// Same as above, but for the guard returned by a pool. The guard hands a
/// `Cache` back to the pool when dropped (or when explicitly put back).
type CachePoolGuard<'a> = PoolGuard<'a, Cache, CachePoolFn>;

/// The type of the closure we use to create new caches. We need to spell out
/// all of the marker traits or else we risk leaking !MARKER impls.
type CachePoolFn =
    Box<dyn Fn() -> Cache + Send + Sync + UnwindSafe + RefUnwindSafe>;
/// A regex matcher that works by composing several other regex matchers
/// automatically.
///
/// In effect, a meta regex papers over a lot of the quirks or performance
/// problems in each of the regex engines in this crate. Its goal is to provide
/// an infallible and simple API that "just does the right thing" in the common
/// case.
///
/// A meta regex is the implementation of a `Regex` in the `regex` crate.
/// Indeed, the `regex` crate API is essentially just a light wrapper over
/// this type. This includes the `regex` crate's `RegexSet` API!
///
/// # Composition
///
/// This is called a "meta" matcher precisely because it uses other regex
/// matchers to provide a convenient high level regex API. Here are some
/// examples of how other regex matchers are composed:
///
/// * When calling [`Regex::captures`], instead of immediately
/// running a slower but more capable regex engine like the
/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM), the meta regex engine
/// will usually first look for the bounds of a match with a higher throughput
/// regex engine like a [lazy DFA](crate::hybrid). Only when a match is found
/// is a slower engine like `PikeVM` used to find the matching span for each
/// capture group.
/// * While higher throughout engines like the lazy DFA cannot handle
/// Unicode word boundaries in general, they can still be used on pure ASCII
/// haystacks by pretending that Unicode word boundaries are just plain ASCII
/// word boundaries. However, if a haystack is not ASCII, the meta regex engine
/// will automatically switch to a (possibly slower) regex engine that supports
/// Unicode word boundaries in general.
/// * In some cases where a regex pattern is just a simple literal or a small
/// set of literals, an actual regex engine won't be used at all. Instead,
/// substring or multi-substring search algorithms will be employed.
///
/// There are many other forms of composition happening too, but the above
/// should give a general idea. In particular, it may perhaps be surprising
/// that *multiple* regex engines might get executed for a single search. That
/// is, the decision of what regex engine to use is not _just_ based on the
/// pattern, but also based on the dynamic execution of the search itself.
///
/// The primary reason for this composition is performance. The fundamental
/// tension is that the faster engines tend to be less capable, and the more
/// capable engines tend to be slower.
///
/// Note that the forms of composition that are allowed are determined by
/// compile time crate features and configuration. For example, if the `hybrid`
/// feature isn't enabled, or if [`Config::hybrid`] has been disabled, then the
/// meta regex engine will never use a lazy DFA.
///
/// # Synchronization and cloning
///
/// Most of the regex engines in this crate require some kind of mutable
/// "scratch" space to read and write from while performing a search. Since
/// a meta regex composes these regex engines, a meta regex also requires
/// mutable scratch space. This scratch space is called a [`Cache`].
///
/// Most regex engines _also_ usually have a read-only component, typically
/// a [Thompson `NFA`](crate::nfa::thompson::NFA).
///
/// In order to make the `Regex` API convenient, most of the routines hide
/// the fact that a `Cache` is needed at all. To achieve this, a [memory
/// pool](crate::util::pool::Pool) is used internally to retrieve `Cache`
/// values in a thread safe way that also permits reuse. This in turn implies
/// that every such search call requires some form of synchronization. Usually
/// this synchronization is fast enough to not notice, but in some cases, it
/// can be a bottleneck. This typically occurs when all of the following are
/// true:
///
/// * The same `Regex` is shared across multiple threads simultaneously,
/// usually via a [`util::lazy::Lazy`](crate::util::lazy::Lazy) or something
/// similar from the `once_cell` or `lazy_static` crates.
/// * The primary unit of work in each thread is a regex search.
/// * Searches are run on very short haystacks.
///
/// This particular case can lead to high contention on the pool used by a
/// `Regex` internally, which can in turn increase latency to a noticeable
/// effect. This cost can be mitigated in one of the following ways:
///
/// * Use a distinct copy of a `Regex` in each thread, usually by cloning it.
/// Cloning a `Regex` _does not_ do a deep copy of its read-only component.
/// But it does lead to each `Regex` having its own memory pool, which in
/// turn eliminates the problem of contention. In general, this technique should
/// not result in any additional memory usage when compared to sharing the same
/// `Regex` across multiple threads simultaneously.
/// * Use lower level APIs, like [`Regex::search_with`], which permit passing
/// a `Cache` explicitly. In this case, it is up to you to determine how best
/// to provide a `Cache`. For example, you might put a `Cache` in thread-local
/// storage if your use case allows for it.
///
/// Overall, this is an issue that happens rarely in practice, but it can
/// happen.
///
/// # Warning: spin-locks may be used in alloc-only mode
///
/// When this crate is built without the `std` feature and the high level APIs
/// on a `Regex` are used, then a spin-lock will be used to synchronize access
/// to an internal pool of `Cache` values. This may be undesirable because
/// a spin-lock is [effectively impossible to implement correctly in user
/// space][spinlocks-are-bad]. That is, more concretely, the spin-lock could
/// result in a deadlock.
///
/// [spinlocks-are-bad]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
///
/// If one wants to avoid the use of spin-locks when the `std` feature is
/// disabled, then you must use APIs that accept a `Cache` value explicitly.
/// For example, [`Regex::search_with`].
///
/// # Example
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$")?;
/// assert!(re.is_match("2010-03-14"));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// # Example: anchored search
///
/// This example shows how to use [`Input::anchored`] to run an anchored
/// search, even when the regex pattern itself isn't anchored. An anchored
/// search guarantees that if a match is found, then the start offset of the
/// match corresponds to the offset at which the search was started.
///
/// ```
/// use regex_automata::{meta::Regex, Anchored, Input, Match};
///
/// let re = Regex::new(r"\bfoo\b")?;
/// let input = Input::new("xx foo xx").range(3..).anchored(Anchored::Yes);
/// // The offsets are in terms of the original haystack.
/// assert_eq!(Some(Match::must(0, 3..6)), re.find(input));
///
/// // Notice that no match occurs here, because \b still takes the
/// // surrounding context into account, even if it means looking back
/// // before the start of your search.
/// let hay = "xxfoo xx";
/// let input = Input::new(hay).range(2..).anchored(Anchored::Yes);
/// assert_eq!(None, re.find(input));
/// // Indeed, you cannot achieve the above by simply slicing the
/// // haystack itself, since the regex engine can't see the
/// // surrounding context. This is why 'Input' permits setting
/// // the bounds of a search!
/// let input = Input::new(&hay[2..]).anchored(Anchored::Yes);
/// // WRONG!
/// assert_eq!(Some(Match::must(0, 0..3)), re.find(input));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// # Example: earliest search
///
/// This example shows how to use [`Input::earliest`] to run a search that
/// might stop before finding the typical leftmost match.
///
/// ```
/// use regex_automata::{meta::Regex, Anchored, Input, Match};
///
/// let re = Regex::new(r"[a-z]{3}|b")?;
/// let input = Input::new("abc").earliest(true);
/// assert_eq!(Some(Match::must(0, 1..2)), re.find(input));
///
/// // Note that "earliest" isn't really a match semantic unto itself.
/// // Instead, it is merely an instruction to whatever regex engine
/// // gets used internally to quit as soon as it can. For example,
/// // this regex uses a different search technique, and winds up
/// // producing a different (but valid) match!
/// let re = Regex::new(r"abc|b")?;
/// let input = Input::new("abc").earliest(true);
/// assert_eq!(Some(Match::must(0, 0..3)), re.find(input));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// # Example: change the line terminator
///
/// This example shows how to enable multi-line mode by default and change
/// the line terminator to the NUL byte:
///
/// ```
/// use regex_automata::{meta::Regex, util::syntax, Match};
///
/// let re = Regex::builder()
/// .syntax(syntax::Config::new().multi_line(true))
/// .configure(Regex::config().line_terminator(b'\x00'))
/// .build(r"^foo$")?;
/// let hay = "\x00foo\x00";
/// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug)]
pub struct Regex {
    /// The actual regex implementation. Reference counted so that it can be
    /// cheaply shared with the cache-creation closure held by `pool`.
    imp: Arc<RegexI>,
    /// A thread safe pool of caches.
    ///
    /// For the higher level search APIs, a `Cache` is automatically plucked
    /// from this pool before running a search. The lower level `with` methods
    /// permit the caller to provide their own cache, thereby bypassing
    /// accesses to this pool.
    ///
    /// Note that we put this outside the `Arc` so that cloning a `Regex`
    /// results in creating a fresh `CachePool`. This in turn permits callers
    /// to clone regexes into separate threads where each such regex gets
    /// the pool's "thread owner" optimization. Otherwise, if one shares the
    /// `Regex` directly, then the pool will go through a slower mutex path for
    /// all threads except for the "owner."
    pool: CachePool,
}
/// The internal implementation of `Regex`, split out so that it can be wrapped
/// in an `Arc` and shared cheaply between a `Regex` and its cache pool.
#[derive(Debug)]
struct RegexI {
    /// The core matching engine.
    ///
    /// Why is this reference counted when RegexI is already wrapped in an Arc?
    /// Well, we need to capture this in a closure to our `Pool` below in order
    /// to create new `Cache` values when needed. So since it needs to be in
    /// two places, we make it reference counted.
    ///
    /// We make `RegexI` itself reference counted too so that `Regex` itself
    /// stays extremely small and very cheap to clone.
    strat: Arc<dyn Strategy>,
    /// Metadata about the regexes driving the strategy. The metadata is also
    /// usually stored inside the strategy too, but we put it here as well
    /// so that we can get quick access to it (without virtual calls) before
    /// executing the regex engine. For example, we use this metadata to
    /// detect a subset of cases where we know a match is impossible, and can
    /// thus avoid calling into the strategy at all.
    ///
    /// Since `RegexInfo` is stored in multiple places, it is also reference
    /// counted.
    info: RegexInfo,
}
/// Convenience constructors for a `Regex` using the default configuration.
impl Regex {
    /// Builds a `Regex` from a single pattern string using the default
    /// configuration.
    ///
    /// If there was a problem parsing the pattern or a problem turning it into
    /// a regex matcher, then an error is returned.
    ///
    /// If you want to change the configuration of a `Regex`, use a [`Builder`]
    /// with a [`Config`].
    ///
    /// # Example
    ///
    /// ```
    /// use regex_automata::{meta::Regex, Match};
    ///
    /// let re = Regex::new(r"(?Rm)^foo$")?;
    /// let hay = "\r\nfoo\r\n";
    /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn new(pattern: &str) -> Result<Regex, BuildError> {
        // Equivalent to building with an entirely default `Builder`.
        Regex::builder().build(pattern)
    }

    /// Builds a `Regex` from many pattern strings using the default
    /// configuration.
    ///
    /// If there was a problem parsing any of the patterns or a problem turning
    /// them into a regex matcher, then an error is returned.
    ///
    /// If you want to change the configuration of a `Regex`, use a [`Builder`]
    /// with a [`Config`].
    ///
    /// # Example: simple lexer
    ///
    /// This simplistic example leverages the multi-pattern support to build a
    /// simple little lexer. The pattern ID in the match tells you which regex
    /// matched, which in turn might be used to map back to the "type" of the
    /// token returned by the lexer.
    ///
    /// ```
    /// use regex_automata::{meta::Regex, Match};
    ///
    /// let re = Regex::new_many(&[
    ///     r"[[:space:]]",
    ///     r"[A-Za-z0-9][A-Za-z0-9_]+",
    ///     r"->",
    ///     r".",
    /// ])?;
    /// let haystack = "fn is_boss(bruce: i32, springsteen: String) -> bool;";
    /// let matches: Vec<Match> = re.find_iter(haystack).collect();
    /// assert_eq!(matches, vec![
    ///     Match::must(1, 0..2),   // 'fn'
    ///     Match::must(0, 2..3),   // ' '
    ///     Match::must(1, 3..10),  // 'is_boss'
    ///     Match::must(3, 10..11), // '('
    ///     Match::must(1, 11..16), // 'bruce'
    ///     Match::must(3, 16..17), // ':'
    ///     Match::must(0, 17..18), // ' '
    ///     Match::must(1, 18..21), // 'i32'
    ///     Match::must(3, 21..22), // ','
    ///     Match::must(0, 22..23), // ' '
    ///     Match::must(1, 23..34), // 'springsteen'
    ///     Match::must(3, 34..35), // ':'
    ///     Match::must(0, 35..36), // ' '
    ///     Match::must(1, 36..42), // 'String'
    ///     Match::must(3, 42..43), // ')'
    ///     Match::must(0, 43..44), // ' '
    ///     Match::must(2, 44..46), // '->'
    ///     Match::must(0, 46..47), // ' '
    ///     Match::must(1, 47..51), // 'bool'
    ///     Match::must(3, 51..52), // ';'
    /// ]);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// One can write a lexer like the above using a regex like
    /// `(?P<space>[[:space:]])|(?P<ident>[A-Za-z0-9][A-Za-z0-9_]+)|...`,
    /// but then you need to ask whether capture group matched to determine
    /// which branch in the regex matched, and thus, which token the match
    /// corresponds to. In contrast, the above example includes the pattern ID
    /// in the match. There's no need to use capture groups at all.
    ///
    /// # Example: finding the pattern that caused an error
    ///
    /// When a syntax error occurs, it is possible to ask which pattern
    /// caused the syntax error.
    ///
    /// ```
    /// use regex_automata::{meta::Regex, PatternID};
    ///
    /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err();
    /// assert_eq!(Some(PatternID::must(2)), err.pattern());
    /// ```
    ///
    /// # Example: zero patterns is valid
    ///
    /// Building a regex with zero patterns results in a regex that never
    /// matches anything. Because this routine is generic, passing an empty
    /// slice usually requires a turbo-fish (or something else to help type
    /// inference).
    ///
    /// ```
    /// use regex_automata::{meta::Regex, util::syntax, Match};
    ///
    /// let re = Regex::new_many::<&str>(&[])?;
    /// assert_eq!(None, re.find(""));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn new_many<P: AsRef<str>>(
        patterns: &[P],
    ) -> Result<Regex, BuildError> {
        // Equivalent to building with an entirely default `Builder`.
        Regex::builder().build_many(patterns)
    }

    /// Return a default configuration for a `Regex`.
    ///
    /// This is a convenience routine to avoid needing to import the [`Config`]
    /// type when customizing the construction of a `Regex`.
    ///
    /// # Example: lower the NFA size limit
    ///
    /// In some cases, the default size limit might be too big. The size limit
    /// can be lowered, which will prevent large regex patterns from compiling.
    ///
    /// ```
    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
    /// use regex_automata::meta::Regex;
    ///
    /// let result = Regex::builder()
    ///     .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10))))
    ///     // Not even 20KB is enough to build a single large Unicode class!
    ///     .build(r"\pL");
    /// assert!(result.is_err());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn config() -> Config {
        Config::new()
    }

    /// Return a builder for configuring the construction of a `Regex`.
    ///
    /// This is a convenience routine to avoid needing to import the
    /// [`Builder`] type in common cases.
    ///
    /// # Example: change the line terminator
    ///
    /// This example shows how to enable multi-line mode by default and change
    /// the line terminator to the NUL byte:
    ///
    /// ```
    /// use regex_automata::{meta::Regex, util::syntax, Match};
    ///
    /// let re = Regex::builder()
    ///     .syntax(syntax::Config::new().multi_line(true))
    ///     .configure(Regex::config().line_terminator(b'\x00'))
    ///     .build(r"^foo$")?;
    /// let hay = "\x00foo\x00";
    /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay));
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub fn builder() -> Builder {
        Builder::new()
    }
}
/// High level convenience routines for using a regex to search a haystack.
impl Regex {
/// Returns true if and only if this regex matches the given haystack.
///
/// This routine may short circuit if it knows that scanning future input
/// will never lead to a different result. (Consider how this might make
/// a difference given the regex `a+` on the haystack `aaaaaaaaaaaaaaa`.
/// This routine _may_ stop after it sees the first `a`, but routines like
/// `find` need to continue searching because `+` is greedy by default.)
///
/// # Example
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new("foo[0-9]+bar")?;
///
/// assert!(re.is_match("foo12345bar"));
/// assert!(!re.is_match("foobar"));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// # Example: consistency with search APIs
///
/// `is_match` is guaranteed to return `true` whenever `find` returns a
/// match. This includes searches that are executed entirely within a
/// codepoint:
///
/// ```
/// use regex_automata::{meta::Regex, Input};
///
/// let re = Regex::new("a*")?;
///
/// // This doesn't match because the default configuration bans empty
/// // matches from splitting a codepoint.
/// assert!(!re.is_match(Input::new("☃").span(1..2)));
/// assert_eq!(None, re.find(Input::new("☃").span(1..2)));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// Notice that when UTF-8 mode is disabled, then the above reports a
/// match because the restriction against zero-width matches that split a
/// codepoint has been lifted:
///
/// ```
/// use regex_automata::{meta::Regex, Input, Match};
///
/// let re = Regex::builder()
/// .configure(Regex::config().utf8_empty(false))
/// .build("a*")?;
///
/// assert!(re.is_match(Input::new("☃").span(1..2)));
/// assert_eq!(
/// Some(Match::must(0, 1..1)),
/// re.find(Input::new("☃").span(1..2)),
/// );
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// A similar idea applies when using line anchors with CRLF mode enabled,
/// which prevents them from matching between a `\r` and a `\n`.
///
/// ```
/// use regex_automata::{meta::Regex, Input, Match};
///
/// let re = Regex::new(r"(?Rm:$)")?;
/// assert!(!re.is_match(Input::new("\r\n").span(1..1)));
/// // A regular line anchor, which only considers \n as a
/// // line terminator, will match.
/// let re = Regex::new(r"(?m:$)")?;
/// assert!(re.is_match(Input::new("\r\n").span(1..1)));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[inline]
pub fn is_match<'h, I: Into<Input<'h>>>(&self, input: I) -> bool {
    // Since we only need a yes/no answer, ask the underlying engines to
    // quit as soon as any match is witnessed.
    let input = input.into().earliest(true);
    // A cheap metadata check that can rule out a match without running
    // any regex engine at all.
    if self.imp.info.is_impossible(&input) {
        return false;
    }
    let mut cache = self.pool.get();
    let matched = self.imp.strat.is_match(&mut cache, &input);
    // See 'Regex::search' for why we put the guard back explicitly
    // instead of relying on its Drop impl.
    PoolGuard::put(cache);
    matched
}
/// Executes a leftmost search and returns the first match that is found,
/// if one exists.
///
/// # Example
///
/// ```
/// use regex_automata::{meta::Regex, Match};
///
/// let re = Regex::new("foo[0-9]+")?;
/// assert_eq!(Some(Match::must(0, 0..8)), re.find("foo12345"));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[inline]
pub fn find<'h, I: Into<Input<'h>>>(&self, input: I) -> Option<Match> {
    // A thin shim over the lower level 'search' routine.
    let input = input.into();
    self.search(&input)
}
/// Executes a leftmost forward search and writes the spans of capturing
/// groups that participated in a match into the provided [`Captures`]
/// value. If no match was found, then [`Captures::is_match`] is guaranteed
/// to return `false`.
///
/// # Example
///
/// ```
/// use regex_automata::{meta::Regex, Span};
///
/// let re = Regex::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?;
/// let mut caps = re.create_captures();
///
/// re.captures("2010-03-14", &mut caps);
/// assert!(caps.is_match());
/// assert_eq!(Some(Span::from(0..4)), caps.get_group(1));
/// assert_eq!(Some(Span::from(5..7)), caps.get_group(2));
/// assert_eq!(Some(Span::from(8..10)), caps.get_group(3));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[inline]
pub fn captures<'h, I: Into<Input<'h>>>(
    &self,
    input: I,
    caps: &mut Captures,
) {
    // A thin shim over the lower level 'search_captures' routine.
    let input = input.into();
    self.search_captures(&input, caps);
}
/// Returns an iterator over all non-overlapping leftmost matches in
/// the given haystack. If no match exists, then the iterator yields no
/// elements.
///
/// # Example
///
/// ```
/// use regex_automata::{meta::Regex, Match};
///
/// let re = Regex::new("foo[0-9]+")?;
/// let haystack = "foo1 foo12 foo123";
/// let matches: Vec<Match> = re.find_iter(haystack).collect();
/// assert_eq!(matches, vec![
/// Match::must(0, 0..4),
/// Match::must(0, 5..10),
/// Match::must(0, 11..17),
/// ]);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[inline]
pub fn find_iter<'r, 'h, I: Into<Input<'h>>>(
    &'r self,
    input: I,
) -> FindMatches<'r, 'h> {
    // The searcher drives the iteration protocol (handling things like
    // empty matches), while the cache is leased from the pool for the
    // iterator's entire lifetime.
    let searcher = iter::Searcher::new(input.into());
    FindMatches { re: self, cache: self.pool.get(), it: searcher }
}
/// Returns an iterator over all non-overlapping `Captures` values. If no
/// match exists, then the iterator yields no elements.
///
/// This yields the same matches as [`Regex::find_iter`], but it includes
/// the spans of all capturing groups that participate in each match.
///
/// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for
/// how to correctly iterate over all matches in a haystack while avoiding
/// the creation of a new `Captures` value for every match. (Which you are
/// forced to do with an `Iterator`.)
///
/// # Example
///
/// ```
/// use regex_automata::{meta::Regex, Span};
///
/// let re = Regex::new("foo(?P<numbers>[0-9]+)")?;
///
/// let haystack = "foo1 foo12 foo123";
/// let matches: Vec<Span> = re
/// .captures_iter(haystack)
/// // The unwrap is OK since 'numbers' matches if the pattern matches.
/// .map(|caps| caps.get_group_by_name("numbers").unwrap())
/// .collect();
/// assert_eq!(matches, vec![
/// Span::from(3..4),
/// Span::from(8..10),
/// Span::from(14..17),
/// ]);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[inline]
pub fn captures_iter<'r, 'h, I: Into<Input<'h>>>(
    &'r self,
    input: I,
) -> CapturesMatches<'r, 'h> {
    // Like 'find_iter', but the iterator also carries a 'Captures' value
    // that is refreshed for each match it yields.
    let searcher = iter::Searcher::new(input.into());
    CapturesMatches {
        re: self,
        cache: self.pool.get(),
        caps: self.create_captures(),
        it: searcher,
    }
}
/// Returns an iterator of spans of the haystack given, delimited by a
/// match of the regex. Namely, each element of the iterator corresponds to
/// a part of the haystack that *isn't* matched by the regular expression.
///
/// # Example
///
/// To split a string delimited by arbitrary amounts of spaces or tabs:
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r"[ \t]+")?;
/// let hay = "a b \t c\td e";
/// let fields: Vec<&str> = re.split(hay).map(|span| &hay[span]).collect();
/// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// # Example: more cases
///
/// Basic usage:
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r" ")?;
/// let hay = "Mary had a little lamb";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]);
///
/// let re = Regex::new(r"X")?;
/// let hay = "";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec![""]);
///
/// let re = Regex::new(r"X")?;
/// let hay = "lionXXtigerXleopard";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]);
///
/// let re = Regex::new(r"::")?;
/// let hay = "lion::tiger::leopard";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["lion", "tiger", "leopard"]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// If a haystack contains multiple contiguous matches, you will end up
/// with empty spans yielded by the iterator:
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r"X")?;
/// let hay = "XXXXaXXbXc";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]);
///
/// let re = Regex::new(r"/")?;
/// let hay = "(///)";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["(", "", "", ")"]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// Separators at the start or end of a haystack are neighbored by empty
/// spans.
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r"0")?;
/// let hay = "010";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["", "1", ""]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// When the empty string is used as a regex, it splits at every valid
/// UTF-8 boundary by default (which includes the beginning and end of the
/// haystack):
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r"")?;
/// let hay = "rust";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]);
///
/// // Splitting by an empty string is UTF-8 aware by default!
/// let re = Regex::new(r"")?;
/// let hay = "☃";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["", "☃", ""]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// But note that UTF-8 mode for empty strings can be disabled, which will
/// then result in a match at every byte offset in the haystack,
/// including between every UTF-8 code unit.
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::builder()
/// .configure(Regex::config().utf8_empty(false))
/// .build(r"")?;
/// let hay = "☃".as_bytes();
/// let got: Vec<&[u8]> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec![
/// // Writing byte string slices is just brutal. The problem is that
/// // b"foo" has type &[u8; 3] instead of &[u8].
/// &[][..], &[b'\xE2'][..], &[b'\x98'][..], &[b'\x83'][..], &[][..],
/// ]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// Contiguous separators (commonly shows up with whitespace), can lead to
/// possibly surprising behavior. For example, this code is correct:
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r" ")?;
/// let hay = " a b c";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want
/// to match contiguous space characters:
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r" +")?;
/// let hay = " a b c";
/// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
/// // N.B. This does still include a leading empty span because ' +'
/// // matches at the beginning of the haystack.
/// assert_eq!(got, vec!["", "a", "b", "c"]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[inline]
pub fn split<'r, 'h, I: Into<Input<'h>>>(
    &'r self,
    input: I,
) -> Split<'r, 'h> {
    // A split iterator is a thin wrapper around a match iterator: it
    // yields the gaps between matches. `last` tracks the end offset of
    // the previous match, and starts at the beginning of the haystack.
    let finder = self.find_iter(input);
    Split { finder, last: 0 }
}
/// Returns an iterator of at most `limit` spans of the haystack given,
/// delimited by a match of the regex. (A `limit` of `0` will return no
/// spans.) Namely, each element of the iterator corresponds to a part
/// of the haystack that *isn't* matched by the regular expression. The
/// remainder of the haystack that is not split will be the last element in
/// the iterator.
///
/// # Example
///
/// Get the first two words in some haystack:
///
/// ```
/// # if cfg!(miri) { return Ok(()); } // miri takes too long
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r"\W+").unwrap();
/// let hay = "Hey! How are you?";
/// let fields: Vec<&str> =
///     re.splitn(hay, 3).map(|span| &hay[span]).collect();
/// assert_eq!(fields, vec!["Hey", "How", "are you?"]);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// # Examples: more cases
///
/// ```
/// use regex_automata::meta::Regex;
///
/// let re = Regex::new(r" ")?;
/// let hay = "Mary had a little lamb";
/// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["Mary", "had", "a little lamb"]);
///
/// let re = Regex::new(r"X")?;
/// let hay = "";
/// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec![""]);
///
/// let re = Regex::new(r"X")?;
/// let hay = "lionXXtigerXleopard";
/// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["lion", "", "tigerXleopard"]);
///
/// let re = Regex::new(r"::")?;
/// let hay = "lion::tiger::leopard";
/// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["lion", "tiger::leopard"]);
///
/// let re = Regex::new(r"X")?;
/// let hay = "abcXdef";
/// let got: Vec<&str> = re.splitn(hay, 1).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["abcXdef"]);
///
/// let re = Regex::new(r"X")?;
/// let hay = "abcdef";
/// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect();
/// assert_eq!(got, vec!["abcdef"]);
///
/// let re = Regex::new(r"X")?;
/// let hay = "abcXdef";
/// let got: Vec<&str> = re.splitn(hay, 0).map(|sp| &hay[sp]).collect();
/// assert!(got.is_empty());
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
// `#[inline]` added for consistency with the sibling `split` and
// `search` routines, which both carry it. Like `split`, this is a
// trivial constructor that benefits from cross-crate inlining.
#[inline]
pub fn splitn<'r, 'h, I: Into<Input<'h>>>(
    &'r self,
    input: I,
    limit: usize,
) -> SplitN<'r, 'h> {
    // A limited split is just an unlimited split plus a countdown.
    SplitN { splits: self.split(input), limit }
}
}
/// Lower level search routines that give more control.
impl Regex {
/// Returns the start and end offset of the leftmost match. If no match
/// exists, then `None` is returned.
///
/// This is like [`Regex::find`], but it accepts a concrete `&Input`
/// instead of an `Into<Input>`.
///
/// # Example
///
/// ```
/// use regex_automata::{meta::Regex, Input, Match};
///
/// let re = Regex::new(r"Samwise|Sam")?;
/// let input = Input::new(
///     "one of the chief characters, Samwise the Brave",
/// );
/// assert_eq!(Some(Match::must(0, 29..36)), re.search(&input));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[inline]
pub fn search(&self, input: &Input<'_>) -> Option<Match> {
    if self.imp.info.is_impossible(input) {
        return None;
    }
    let mut guard = self.pool.get();
    let result = self.imp.strat.search(&mut guard, input);
    // We do this dance with the guard and explicitly put it back in the
    // pool because it seems to result in better codegen. If we let the
    // guard's Drop impl put it back in the pool, then functions like
    // ptr::drop_in_place get called and they *don't* get inlined. This
    // isn't usually a big deal, but in latency sensitive benchmarks the
    // extra function call can matter.
    //
    // I used `rebar measure -f '^grep/every-line$' -e meta` to measure
    // the effects here.
    //
    // Note that this doesn't eliminate the latency effects of using the
    // pool. There is still some (minor) cost for the "thread owner" of the
    // pool. (i.e., The thread that first calls a regex search routine.)
    // However, for other threads using the regex, the pool access can be
    // quite expensive as it goes through a mutex. Callers can avoid this
    // by either cloning the Regex (which creates a distinct copy of the
    // pool), or callers can use the lower level APIs that accept a 'Cache'
    // directly and do their own handling.
    PoolGuard::put(guard);
    result
}
/// Returns the end offset of the leftmost match. If no match exists, then
/// `None` is returned.
///
/// This is distinct from [`Regex::search`] in that it only returns the end
/// of a match and not the start of the match. Depending on a variety of
/// implementation details, this _may_ permit the regex engine to do less
/// overall work. For example, if a DFA is being used to execute a search,
/// then the start of a match usually requires running a separate DFA in
/// reverse to find the start of a match. If one only needs the end of
/// a match, then the separate reverse scan to find the start of a match
/// can be skipped. (Note that the reverse scan is avoided even when using
/// `Regex::search` when possible, for example, in the case of an anchored
/// search.)
///
/// # Example
///
/// ```
/// use regex_automata::{meta::Regex, Input, HalfMatch};
///
/// let re = Regex::new(r"Samwise|Sam")?;
/// let input = Input::new(
///     "one of the chief characters, Samwise the Brave",
/// );
/// assert_eq!(Some(HalfMatch::must(0, 36)), re.search_half(&input));
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[inline]
pub fn search_half(&self, input: &Input<'_>) -> Option<HalfMatch> {
    if self.imp.info.is_impossible(input) {
        return None;
    }
    let mut guard = self.pool.get();
    let result = self.imp.strat.search_half(&mut guard, input);
    // See 'Regex::search' for why we put the guard back explicitly.
    PoolGuard::put(guard);
    result
}
/// Executes a leftmost forward search and writes the spans of capturing
/// groups that participated in a match into the provided [`Captures`]
/// value. If no match was found, then [`Captures::is_match`] is guaranteed
/// to return `false`.
///
/// This is like [`Regex::captures`], but it accepts a concrete `&Input`
/// instead of an `Into<Input>`.
///
/// # Example: specific pattern search
///
/// This example shows how to build a multi-pattern `Regex` that permits
/// searching for specific patterns.
///
/// ```
/// use regex_automata::{