/
dfa.cc
2132 lines (1904 loc) · 73.3 KB
/
dfa.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2008 The RE2 Authors. All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A DFA (deterministic finite automaton)-based regular expression search.
//
// The DFA search has two main parts: the construction of the automaton,
// which is represented by a graph of State structures, and the execution
// of the automaton over a given input string.
//
// The basic idea is that the State graph is constructed so that the
// execution can simply start with a state s, and then for each byte c in
// the input string, execute "s = s->next[c]", checking at each point whether
// the current s represents a matching state.
//
// The simple explanation just given does convey the essence of this code,
// but it omits the details of how the State graph gets constructed as well
// as some performance-driven optimizations to the execution of the automaton.
// All these details are explained in the comments for the code following
// the definition of class DFA.
//
// See http://swtch.com/~rsc/regexp/ for a very bare-bones equivalent.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include <atomic>
#include <deque>
#include <new>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "util/logging.h"
#include "util/strutil.h"
#include "re2/pod_array.h"
#include "re2/prog.h"
#include "re2/re2.h"
#include "re2/sparse_set.h"
// Silence "zero-sized array in struct/union" warning for DFA::State::next_.
#ifdef _MSC_VER
#pragma warning(disable: 4200)
#endif
namespace re2 {
// Controls whether the DFA should bail out early if the NFA would be faster.
static bool dfa_should_bail_when_slow = true;
// Test-only hook: overrides the file-static dfa_should_bail_when_slow flag,
// which controls whether the DFA gives up early when the NFA would be faster.
// Not thread-safe; intended to be called before any searches run.
void Prog::TESTING_ONLY_set_dfa_should_bail_when_slow(bool b) {
  dfa_should_bail_when_slow = b;
}
// Changing this to true compiles in prints that trace execution of the DFA.
// Generates a lot of output -- only useful for debugging.
static const bool ExtraDebug = false;
// A DFA implementation of a regular expression program.
// Since this is entirely a forward declaration mandated by C++,
// some of the comments here are better understood after reading
// the comments in the sections that follow the DFA definition.
class DFA {
 public:
  DFA(Prog* prog, Prog::MatchKind kind, int64_t max_mem);
  ~DFA();

  // False iff the constructor ran out of its memory budget (init_failed_).
  bool ok() const { return !init_failed_; }
  Prog::MatchKind kind() { return kind_; }

  // Searches for the regular expression in text, which is considered
  // as a subsection of context for the purposes of interpreting flags
  // like ^ and $ and \A and \z.
  // Returns whether a match was found.
  // If a match is found, sets *ep to the end point of the best match in text.
  // If "anchored", the match must begin at the start of text.
  // If "want_earliest_match", the match that ends first is used, not
  //   necessarily the best one.
  // If "run_forward" is true, the DFA runs from text.begin() to text.end().
  //   If it is false, the DFA runs from text.end() to text.begin(),
  //   returning the leftmost end of the match instead of the rightmost one.
  // If the DFA cannot complete the search (for example, if it is out of
  //   memory), it sets *failed and returns false.
  bool Search(absl::string_view text, absl::string_view context, bool anchored,
              bool want_earliest_match, bool run_forward, bool* failed,
              const char** ep, SparseSet* matches);

  // Builds out all states for the entire DFA.
  // If cb is not empty, it receives one callback per state built.
  // Returns the number of states built.
  // FOR TESTING OR EXPERIMENTAL PURPOSES ONLY.
  int BuildAllStates(const Prog::DFAStateCallback& cb);

  // Computes min and max for matching strings.  Won't return strings
  // bigger than maxlen.
  bool PossibleMatchRange(std::string* min, std::string* max, int maxlen);

  // These data structures are logically private, but C++ makes it too
  // difficult to mark them as such.
  class RWLocker;
  class StateSaver;
  class Workq;

  // A single DFA state.  The DFA is represented as a graph of these
  // States, linked by the next_ pointers.  If in state s and reading
  // byte c, the next state should be s->next_[c].
  struct State {
    inline bool IsMatch() const { return (flag_ & kFlagMatch) != 0; }

    // Hash/equality look only at flag_ and the inst_ array, which together
    // fully determine a state; next_ is derived and excluded on purpose.
    template <typename H>
    friend H AbslHashValue(H h, const State& a) {
      const absl::Span<const int> ainst(a.inst_, a.ninst_);
      return H::combine(std::move(h), a.flag_, ainst);
    }

    friend bool operator==(const State& a, const State& b) {
      const absl::Span<const int> ainst(a.inst_, a.ninst_);
      const absl::Span<const int> binst(b.inst_, b.ninst_);
      return &a == &b || (a.flag_ == b.flag_ && ainst == binst);
    }

    int* inst_;         // Instruction pointers in the state.
    int ninst_;         // # of inst_ pointers.
    uint32_t flag_;     // Empty string bitfield flags in effect on the way
                        // into this state, along with kFlagMatch if this
                        // is a matching state.

    std::atomic<State*> next_[];  // Outgoing arrows from State,
                                  // one per input byte class
                                  // (flexible array member; storage is
                                  // co-allocated in CachedState).
  };

  enum {
    kByteEndText = 256,         // imaginary byte at end of text

    kFlagEmptyMask = 0xFF,      // State.flag_: bits holding kEmptyXXX flags
    kFlagMatch = 0x0100,        // State.flag_: this is a matching state
    kFlagLastWord = 0x0200,     // State.flag_: last byte was a word char
    kFlagNeedShift = 16,        // needed kEmpty bits are or'ed in shifted left
  };

  // Hash and equality functors so StateSet can deduplicate by State value
  // (not pointer identity).
  struct StateHash {
    size_t operator()(const State* a) const {
      DCHECK(a != NULL);
      return absl::Hash<State>()(*a);
    }
  };

  struct StateEqual {
    bool operator()(const State* a, const State* b) const {
      DCHECK(a != NULL);
      DCHECK(b != NULL);
      return *a == *b;
    }
  };

  typedef absl::flat_hash_set<State*, StateHash, StateEqual> StateSet;

 private:
  // Make it easier to swap in a scalable reader-writer mutex.
  using CacheMutex = absl::Mutex;

  enum {
    // Indices into start_ for unanchored searches.
    // Add kStartAnchored for anchored searches.
    kStartBeginText = 0,          // text at beginning of context
    kStartBeginLine = 2,          // text at beginning of line
    kStartAfterWordChar = 4,      // text follows a word character
    kStartAfterNonWordChar = 6,   // text follows non-word character
    kMaxStart = 8,

    kStartAnchored = 1,
  };

  // Resets the DFA State cache, flushing all saved State* information.
  // Releases and reacquires cache_mutex_ via cache_lock, so any
  // State* existing before the call are not valid after the call.
  // Use a StateSaver to preserve important states across the call.
  // cache_mutex_.r <= L < mutex_
  // After: cache_mutex_.w <= L < mutex_
  void ResetCache(RWLocker* cache_lock);

  // Looks up and returns the State corresponding to a Workq.
  // L >= mutex_
  State* WorkqToCachedState(Workq* q, Workq* mq, uint32_t flag);

  // Looks up and returns a State matching the inst, ninst, and flag.
  // L >= mutex_
  State* CachedState(int* inst, int ninst, uint32_t flag);

  // Clear the cache entirely.
  // Must hold cache_mutex_.w or be in destructor.
  void ClearCache();

  // Converts a State into a Workq: the opposite of WorkqToCachedState.
  // L >= mutex_
  void StateToWorkq(State* s, Workq* q);

  // Runs a State on a given byte, returning the next state.
  State* RunStateOnByteUnlocked(State*, int);  // cache_mutex_.r <= L < mutex_
  State* RunStateOnByte(State*, int);          // L >= mutex_

  // Runs a Workq on a given byte followed by a set of empty-string flags,
  // producing a new Workq in nq.  If a match instruction is encountered,
  // sets *ismatch to true.
  // L >= mutex_
  void RunWorkqOnByte(Workq* q, Workq* nq,
                      int c, uint32_t flag, bool* ismatch);

  // Runs a Workq on a set of empty-string flags, producing a new Workq in nq.
  // L >= mutex_
  void RunWorkqOnEmptyString(Workq* q, Workq* nq, uint32_t flag);

  // Adds the instruction id to the Workq, following empty arrows
  // according to flag.
  // L >= mutex_
  void AddToQueue(Workq* q, int id, uint32_t flag);

  // For debugging, returns a text representation of State.
  static std::string DumpState(State* state);

  // For debugging, returns a text representation of a Workq.
  static std::string DumpWorkq(Workq* q);

  // Search parameters: bundles the inputs and "out" results of one Search
  // call so the specialized search loops share a single signature.
  struct SearchParams {
    SearchParams(absl::string_view text, absl::string_view context,
                 RWLocker* cache_lock)
      : text(text),
        context(context),
        anchored(false),
        can_prefix_accel(false),
        want_earliest_match(false),
        run_forward(false),
        start(NULL),
        cache_lock(cache_lock),
        failed(false),
        ep(NULL),
        matches(NULL) {}

    absl::string_view text;
    absl::string_view context;
    bool anchored;
    bool can_prefix_accel;
    bool want_earliest_match;
    bool run_forward;
    State* start;
    RWLocker* cache_lock;
    bool failed;     // "out" parameter: whether search gave up
    const char* ep;  // "out" parameter: end pointer for match
    SparseSet* matches;

   private:
    SearchParams(const SearchParams&) = delete;
    SearchParams& operator=(const SearchParams&) = delete;
  };

  // Before each search, the parameters to Search are analyzed by
  // AnalyzeSearch to determine the state in which to start.
  struct StartInfo {
    StartInfo() : start(NULL) {}
    std::atomic<State*> start;  // lazily computed; shared across searches
  };

  // Fills in params->start and params->can_prefix_accel using
  // the other search parameters.  Returns true on success,
  // false on failure.
  // cache_mutex_.r <= L < mutex_
  bool AnalyzeSearch(SearchParams* params);
  bool AnalyzeSearchHelper(SearchParams* params, StartInfo* info,
                           uint32_t flags);

  // The generic search loop, inlined to create specialized versions.
  // cache_mutex_.r <= L < mutex_
  // Might unlock and relock cache_mutex_ via params->cache_lock.
  template <bool can_prefix_accel,
            bool want_earliest_match,
            bool run_forward>
  inline bool InlinedSearchLoop(SearchParams* params);

  // The specialized versions of InlinedSearchLoop.  The three letters
  // at the ends of the name denote the true/false values used as the
  // last three parameters of InlinedSearchLoop.
  // cache_mutex_.r <= L < mutex_
  // Might unlock and relock cache_mutex_ via params->cache_lock.
  bool SearchFFF(SearchParams* params);
  bool SearchFFT(SearchParams* params);
  bool SearchFTF(SearchParams* params);
  bool SearchFTT(SearchParams* params);
  bool SearchTFF(SearchParams* params);
  bool SearchTFT(SearchParams* params);
  bool SearchTTF(SearchParams* params);
  bool SearchTTT(SearchParams* params);

  // The main search loop: calls an appropriate specialized version of
  // InlinedSearchLoop.
  // cache_mutex_.r <= L < mutex_
  // Might unlock and relock cache_mutex_ via params->cache_lock.
  bool FastSearchLoop(SearchParams* params);

  // Looks up bytes in bytemap_ but handles case c == kByteEndText too.
  int ByteMap(int c) {
    if (c == kByteEndText)
      return prog_->bytemap_range();
    return prog_->bytemap()[c];
  }

  // Constant after initialization.
  Prog* prog_;              // The regular expression program to run.
  Prog::MatchKind kind_;    // The kind of DFA.
  bool init_failed_;        // initialization failed (out of memory)

  absl::Mutex mutex_;  // mutex_ >= cache_mutex_.r

  // Scratch areas, protected by mutex_.
  Workq* q0_;             // Two pre-allocated work queues.
  Workq* q1_;
  PODArray<int> stack_;   // Pre-allocated stack for AddToQueue

  // State* cache.  Many threads use and add to the cache simultaneously,
  // holding cache_mutex_ for reading and mutex_ (above) when adding.
  // If the cache fills and needs to be discarded, the discarding is done
  // while holding cache_mutex_ for writing, to avoid interrupting other
  // readers.  Any State* pointers are only valid while cache_mutex_
  // is held.
  CacheMutex cache_mutex_;
  int64_t mem_budget_;     // Total memory budget for all States.
  int64_t state_budget_;   // Amount of memory remaining for new States.
  StateSet state_cache_;   // All States computed so far.
  StartInfo start_[kMaxStart];

  DFA(const DFA&) = delete;
  DFA& operator=(const DFA&) = delete;
};
// Shorthand for casting to uint8_t*.
static inline const uint8_t* BytePtr(const void* v) {
  // static_cast is the idiomatic named cast for void* -> object pointer
  // conversions; reinterpret_cast was stronger than needed here.
  return static_cast<const uint8_t*>(v);
}
// Work queues

// Marks separate thread groups of different priority
// in the work queue when in leftmost-longest matching mode.
// Chosen negative so it can never collide with a real instruction id.
#define Mark (-1)

// Separates the match IDs from the instructions in inst_.
// Used only for "many match" DFA states.
// Also negative, and distinct from Mark.
#define MatchSep (-2)
// Internally, the DFA uses a sparse array of
// program instruction pointers as a work queue.
// In leftmost longest mode, marks separate sections
// of workq that started executing at different
// locations in the string (earlier locations first).
class DFA::Workq : public SparseSet {
 public:
  // Constructor: n is number of normal slots, maxmark number of mark slots.
  // Marks live in the id range [n_, n_+maxmark_), so is_mark() is a simple
  // comparison against n_.
  Workq(int n, int maxmark) :
    SparseSet(n+maxmark),
    n_(n),
    maxmark_(maxmark),
    nextmark_(n),
    last_was_mark_(true) {
  }

  bool is_mark(int i) { return i >= n_; }

  int maxmark() { return maxmark_; }

  void clear() {
    SparseSet::clear();
    nextmark_ = n_;
  }

  // Appends a mark separator, coalescing consecutive marks: if the last
  // insertion was already a mark, this is a no-op.
  void mark() {
    if (last_was_mark_)
      return;
    // BUG FIX: this must record that a mark was just inserted (true, not
    // false); otherwise every mark() call would insert a fresh mark and
    // nextmark_ could run past n_ + maxmark_, exceeding the SparseSet size.
    last_was_mark_ = true;
    SparseSet::insert_new(nextmark_++);
  }

  int size() {
    return n_ + maxmark_;
  }

  void insert(int id) {
    if (contains(id))
      return;
    insert_new(id);
  }

  // Inserts without the contains() check; any real insertion clears
  // last_was_mark_ so a subsequent mark() will take effect.
  void insert_new(int id) {
    last_was_mark_ = false;
    SparseSet::insert_new(id);
  }

 private:
  int n_;                // size excluding marks
  int maxmark_;          // maximum number of marks
  int nextmark_;         // id of next mark
  bool last_was_mark_;   // last inserted was mark

  Workq(const Workq&) = delete;
  Workq& operator=(const Workq&) = delete;
};
// Constructor: charges fixed overheads against max_mem and pre-allocates
// the two scratch work queues and the AddToQueue stack.  Sets init_failed_
// (reported via ok()) if the budget cannot cover even a minimal working set.
DFA::DFA(Prog* prog, Prog::MatchKind kind, int64_t max_mem)
  : prog_(prog),
    kind_(kind),
    init_failed_(false),
    q0_(NULL),
    q1_(NULL),
    mem_budget_(max_mem) {
  if (ExtraDebug)
    absl::FPrintF(stderr, "\nkind %d\n%s\n", kind_, prog_->DumpUnanchored());
  int nmark = 0;
  // Longest-match mode needs one potential Mark slot per program inst.
  if (kind_ == Prog::kLongestMatch)
    nmark = prog_->size();
  // See DFA::AddToQueue() for why this is so.
  int nstack = prog_->inst_count(kInstCapture) +
               prog_->inst_count(kInstEmptyWidth) +
               prog_->inst_count(kInstNop) +
               nmark + 1;  // + 1 for start inst

  // Account for space needed for DFA, q0, q1, stack.
  mem_budget_ -= sizeof(DFA);
  // Each Workq is a SparseSet: dense + sparse arrays of ints.
  mem_budget_ -= (prog_->size() + nmark) *
                 (sizeof(int)+sizeof(int)) * 2;  // q0, q1
  mem_budget_ -= nstack * sizeof(int);  // stack
  if (mem_budget_ < 0) {
    init_failed_ = true;
    return;
  }

  state_budget_ = mem_budget_;

  // Make sure there is a reasonable amount of working room left.
  // At minimum, the search requires room for two states in order
  // to limp along, restarting frequently.  We'll get better performance
  // if there is room for a larger number of states, say 20.
  // Note that a state stores list heads only, so we use the program
  // list count for the upper bound, not the program size.
  int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
  int64_t one_state = sizeof(State) + nnext*sizeof(std::atomic<State*>) +
                      (prog_->list_count()+nmark)*sizeof(int);
  if (state_budget_ < 20*one_state) {
    init_failed_ = true;
    return;
  }

  q0_ = new Workq(prog_->size(), nmark);
  q1_ = new Workq(prog_->size(), nmark);
  stack_ = PODArray<int>(nstack);
}
// Destructor: releases everything the constructor and the running DFA
// allocated.  ClearCache() frees all cached States (legal here without
// cache_mutex_.w because no other thread may touch a DFA being destroyed);
// the two scratch work queues are independent of the cache.
DFA::~DFA() {
  ClearCache();
  delete q1_;
  delete q0_;
}
// In the DFA state graph, s->next[c] == NULL means that the
// state has not yet been computed and needs to be.  We need
// a different special value to signal that s->next[c] is a
// state that can never lead to a match (and thus the search
// can be called off).  Hence DeadState.
#define DeadState reinterpret_cast<State*>(1)

// Signals that the rest of the string matches no matter what it is.
#define FullMatchState reinterpret_cast<State*>(2)

// Sentinels occupy addresses 1..2; real State* pointers compare greater,
// so "s <= SpecialStateMax" tests for any special state.
#define SpecialStateMax FullMatchState
// Debugging printouts

// For debugging, returns a string representation of the work queue:
// comma-separated instruction ids, with "|" wherever a Mark separates
// priority groups.
std::string DFA::DumpWorkq(Workq* q) {
  std::string out;
  const char* delim = "";
  for (Workq::iterator it = q->begin(); it != q->end(); ++it) {
    int id = *it;
    if (q->is_mark(id)) {
      out += "|";
      delim = "";  // no comma immediately after a separator
      continue;
    }
    out += absl::StrFormat("%s%d", delim, id);
    delim = ",";
  }
  return out;
}
// For debugging, returns a string representation of the state:
// "_" for not-yet-computed, "X" for DeadState, "*" for FullMatchState,
// otherwise the address, instruction list ("|" for Mark, "||" for
// MatchSep) and flag word.
std::string DFA::DumpState(State* state) {
  // Handle the sentinel pointers before dereferencing anything.
  if (state == NULL)
    return "_";
  if (state == DeadState)
    return "X";
  if (state == FullMatchState)
    return "*";

  std::string out = absl::StrFormat("(%p)", state);
  const char* delim = "";
  for (int i = 0; i < state->ninst_; i++) {
    int id = state->inst_[i];
    if (id == Mark) {
      out += "|";
      delim = "";
    } else if (id == MatchSep) {
      out += "||";
      delim = "";
    } else {
      out += absl::StrFormat("%s%d", delim, id);
      delim = ",";
    }
  }
  out += absl::StrFormat(" flag=%#x", state->flag_);
  return out;
}
//////////////////////////////////////////////////////////////////////
//
// DFA state graph construction.
//
// The DFA state graph is a heavily-linked collection of State* structures.
// The state_cache_ is a set of all the State structures ever allocated,
// so that if the same state is reached by two different paths,
// the same State structure can be used. This reduces allocation
// requirements and also avoids duplication of effort across the two
// identical states.
//
// A State is defined by an ordered list of instruction ids and a flag word.
//
// The choice of an ordered list of instructions differs from a typical
// textbook DFA implementation, which would use an unordered set.
// Textbook descriptions, however, only care about whether
// the DFA matches, not where it matches in the text. To decide where the
// DFA matches, we need to mimic the behavior of the dominant backtracking
// implementations like PCRE, which try one possible regular expression
// execution, then another, then another, stopping when one of them succeeds.
// The DFA execution tries these many executions in parallel, representing
// each by an instruction id. These pointers are ordered in the State.inst_
// list in the same order that the executions would happen in a backtracking
// search: if a match is found during execution of inst_[2], inst_[i] for i>=3
// can be discarded.
//
// Textbooks also typically do not consider context-aware empty string operators
// like ^ or $. These are handled by the flag word, which specifies the set
// of empty-string operators that should be matched when executing at the
// current text position. These flag bits are defined in prog.h.
// The flag word also contains two DFA-specific bits: kFlagMatch if the state
// is a matching state (one that reached a kInstMatch in the program)
// and kFlagLastWord if the last processed byte was a word character, for the
// implementation of \B and \b.
//
// The flag word also contains, shifted up 16 bits, the bits looked for by
// any kInstEmptyWidth instructions in the state. These provide a useful
// summary indicating when new flags might be useful.
//
// The permanent representation of a State's instruction ids is just an array,
// but while a state is being analyzed, these instruction ids are represented
// as a Workq, which is an array that allows iteration in insertion order.
// NOTE(rsc): The choice of State construction determines whether the DFA
// mimics backtracking implementations (so-called leftmost first matching) or
// traditional DFA implementations (so-called leftmost longest matching as
// prescribed by POSIX). This implementation chooses to mimic the
// backtracking implementations, because we want to replace PCRE. To get
// POSIX behavior, the states would need to be considered not as a simple
// ordered list of instruction ids, but as a list of unordered sets of instruction
// ids. A match by a state in one set would inhibit the running of sets
// farther down the list but not other instruction ids in the same set. Each
// set would correspond to matches beginning at a given point in the string.
// This is implemented by separating different sets with Mark pointers.
// Looks in the State cache for a State matching q, flag.
// If one is found, returns it.  If one is not found, allocates one,
// inserts it in the cache, and returns it.
// If mq is not null, MatchSep and the match IDs in mq will be appended
// to the State.
DFA::State* DFA::WorkqToCachedState(Workq* q, Workq* mq, uint32_t flag) {
  //mutex_.AssertHeld();

  // Construct array of instruction ids for the new state.
  // In some cases, kInstAltMatch may trigger an upgrade to FullMatchState.
  // Otherwise, "compress" q down to list heads for storage; StateToWorkq()
  // will "decompress" it for computation by exploring from each list head.
  //
  // Historically, only kInstByteRange, kInstEmptyWidth and kInstMatch were
  // useful to keep, but it turned out that kInstAlt was necessary to keep:
  //
  // > [*] kInstAlt would seem useless to record in a state, since
  // > we've already followed both its arrows and saved all the
  // > interesting states we can reach from there.  The problem
  // > is that one of the empty-width instructions might lead
  // > back to the same kInstAlt (if an empty-width operator is starred),
  // > producing a different evaluation order depending on whether
  // > we keep the kInstAlt to begin with.  Sigh.
  // > A specific case that this affects is /(^|a)+/ matching "a".
  // > If we don't save the kInstAlt, we will match the whole "a" (0,1)
  // > but in fact the correct leftmost-first match is the leading "" (0,0).
  //
  // Recall that flattening transformed the Prog from "tree" form to "list"
  // form: in the former, kInstAlt existed explicitly... and abundantly; in
  // the latter, it's implied between the instructions that compose a list.
  // Thus, because the information wasn't lost, the bug doesn't remanifest.
  PODArray<int> inst(q->size());
  int n = 0;
  uint32_t needflags = 0;  // flags needed by kInstEmptyWidth instructions
  bool sawmatch = false;   // whether queue contains guaranteed kInstMatch
  bool sawmark = false;    // whether queue contains a Mark
  if (ExtraDebug)
    absl::FPrintF(stderr, "WorkqToCachedState %s [%#x]", DumpWorkq(q), flag);
  for (Workq::iterator it = q->begin(); it != q->end(); ++it) {
    int id = *it;
    // In leftmost-first mode, anything after a guaranteed match is dead
    // code; in longest mode, only lower-priority sections (past a Mark)
    // can be dropped.
    if (sawmatch && (kind_ == Prog::kFirstMatch || q->is_mark(id)))
      break;
    if (q->is_mark(id)) {
      // Record at most one Mark in a row.
      if (n > 0 && inst[n-1] != Mark) {
        sawmark = true;
        inst[n++] = Mark;
      }
      continue;
    }
    Prog::Inst* ip = prog_->inst(id);
    switch (ip->opcode()) {
      case kInstAltMatch:
        // This state will continue to a match no matter what
        // the rest of the input is.  If it is the highest priority match
        // being considered, return the special FullMatchState
        // to indicate that it's all matches from here out.
        if (kind_ != Prog::kManyMatch &&
            (kind_ != Prog::kFirstMatch ||
             (it == q->begin() && ip->greedy(prog_))) &&
            (kind_ != Prog::kLongestMatch || !sawmark) &&
            (flag & kFlagMatch)) {
          if (ExtraDebug)
            absl::FPrintF(stderr, " -> FullMatchState\n");
          return FullMatchState;
        }
        ABSL_FALLTHROUGH_INTENDED;
      default:
        // Record iff id is the head of its list, which must
        // be the case if id-1 is the last of *its* list. :)
        if (prog_->inst(id-1)->last())
          inst[n++] = *it;
        if (ip->opcode() == kInstEmptyWidth)
          needflags |= ip->empty();
        if (ip->opcode() == kInstMatch && !prog_->anchor_end())
          sawmatch = true;
        break;
    }
  }
  DCHECK_LE(n, q->size());
  // Drop a trailing Mark; it separates nothing.
  if (n > 0 && inst[n-1] == Mark)
    n--;

  // If there are no empty-width instructions waiting to execute,
  // then the extra flag bits will not be used, so there is no
  // point in saving them.  (Discarding them reduces the number
  // of distinct states.)
  if (needflags == 0)
    flag &= kFlagMatch;

  // NOTE(rsc): The code above cannot do flag &= needflags,
  // because if the right flags were present to pass the current
  // kInstEmptyWidth instructions, new kInstEmptyWidth instructions
  // might be reached that in turn need different flags.
  // The only sure thing is that if there are no kInstEmptyWidth
  // instructions at all, no flags will be needed.
  // We could do the extra work to figure out the full set of
  // possibly needed flags by exploring past the kInstEmptyWidth
  // instructions, but the check above -- are any flags needed
  // at all? -- handles the most common case.  More fine-grained
  // analysis can only be justified by measurements showing that
  // too many redundant states are being allocated.

  // If there are no Insts in the list, it's a dead state,
  // which is useful to signal with a special pointer so that
  // the execution loop can stop early.  This is only okay
  // if the state is *not* a matching state.
  if (n == 0 && flag == 0) {
    if (ExtraDebug)
      absl::FPrintF(stderr, " -> DeadState\n");
    return DeadState;
  }

  // If we're in longest match mode, the state is a sequence of
  // unordered state sets separated by Marks.  Sort each set
  // to canonicalize, to reduce the number of distinct sets stored.
  if (kind_ == Prog::kLongestMatch) {
    int* ip = inst.data();
    int* ep = ip + n;
    while (ip < ep) {
      int* markp = ip;
      while (markp < ep && *markp != Mark)
        markp++;
      std::sort(ip, markp);
      if (markp < ep)
        markp++;
      ip = markp;
    }
  }

  // If we're in many match mode, canonicalize for similar reasons:
  // we have an unordered set of states (i.e. we don't have Marks)
  // and sorting will reduce the number of distinct sets stored.
  if (kind_ == Prog::kManyMatch) {
    int* ip = inst.data();
    int* ep = ip + n;
    std::sort(ip, ep);
  }

  // Append MatchSep and the match IDs in mq if necessary.
  if (mq != NULL) {
    inst[n++] = MatchSep;
    for (Workq::iterator i = mq->begin(); i != mq->end(); ++i) {
      int id = *i;
      Prog::Inst* ip = prog_->inst(id);
      if (ip->opcode() == kInstMatch)
        inst[n++] = ip->match_id();
    }
  }

  // Save the needed empty-width flags in the top bits for use later.
  flag |= needflags << kFlagNeedShift;

  State* state = CachedState(inst.data(), n, flag);
  return state;
}
// Looks in the State cache for a State matching inst, ninst, flag.
// If one is found, returns it.  If one is not found, allocates one,
// inserts it in the cache, and returns it.
// Returns NULL (after poisoning mem_budget_) if the memory budget
// is exhausted; callers treat that as "reset the cache and retry".
DFA::State* DFA::CachedState(int* inst, int ninst, uint32_t flag) {
  //mutex_.AssertHeld();

  // Look in the cache for a pre-existing state.
  // We have to initialise the struct like this because otherwise
  // MSVC will complain about the flexible array member. :(
  // This stack-local State borrows inst; it is used only as a lookup key.
  State state;
  state.inst_ = inst;
  state.ninst_ = ninst;
  state.flag_ = flag;
  StateSet::iterator it = state_cache_.find(&state);
  if (it != state_cache_.end()) {
    if (ExtraDebug)
      absl::FPrintF(stderr, " -cached-> %s\n", DumpState(*it));
    return *it;
  }

  // Must have enough memory for new state.
  // In addition to what we're going to allocate,
  // the state cache hash table seems to incur about 18 bytes per
  // State*.  Worst case for non-small sets is it being half full, where each
  // value present takes up 1 byte hash sample plus the pointer itself.
  const int kStateCacheOverhead = 18;
  int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
  int mem = sizeof(State) + nnext*sizeof(std::atomic<State*>);
  int instmem = ninst*sizeof(int);
  if (mem_budget_ < mem + instmem + kStateCacheOverhead) {
    mem_budget_ = -1;
    return NULL;
  }
  mem_budget_ -= mem + instmem + kStateCacheOverhead;

  // Allocate new state along with room for next_ and inst_.
  // inst_ is stored separately since it's colder; this also
  // means that the States for a given DFA are the same size
  // class, so the allocator can hopefully pack them better.
  char* space = std::allocator<char>().allocate(mem);
  State* s = new (space) State;
  (void) new (s->next_) std::atomic<State*>[nnext];
  // Work around a unfortunate bug in older versions of libstdc++.
  // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64658)
  for (int i = 0; i < nnext; i++)
    (void) new (s->next_ + i) std::atomic<State*>(NULL);
  s->inst_ = std::allocator<int>().allocate(ninst);
  (void) new (s->inst_) int[ninst];
  memmove(s->inst_, inst, instmem);
  s->ninst_ = ninst;
  s->flag_ = flag;
  if (ExtraDebug)
    absl::FPrintF(stderr, " -> %s\n", DumpState(s));

  // Put state in cache and return it.
  state_cache_.insert(s);
  return s;
}
// Clear the cache. Must hold cache_mutex_.w or be in destructor.
void DFA::ClearCache() {
  // Every State in this DFA was allocated with the same sizes in
  // DFA::CachedState() (they depend only on prog_->bytemap_range()),
  // so compute them once here rather than once per State. Passing the
  // exact size to deallocate() lets the allocator benefit from sized
  // delete where possible.
  int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
  int mem = sizeof(State) + nnext*sizeof(std::atomic<State*>);
  StateSet::iterator begin = state_cache_.begin();
  StateSet::iterator end = state_cache_.end();
  while (begin != end) {
    // Advance before freeing: freeing the pointee does not invalidate
    // set iterators, but we must not dereference *tmp after deallocation.
    StateSet::iterator tmp = begin;
    ++begin;
    // Deallocate the instruction array, which is stored separately as above.
    std::allocator<int>().deallocate((*tmp)->inst_, (*tmp)->ninst_);
    // Deallocate the blob of memory that we allocated in DFA::CachedState().
    std::allocator<char>().deallocate(reinterpret_cast<char*>(*tmp), mem);
  }
  state_cache_.clear();
}
// Copies insts in state s to the work queue q.
void DFA::StateToWorkq(State* s, Workq* q) {
  q->clear();
  // The empty-width flag bits are the same for every inst in s.
  uint32_t empty_flags = s->flag_ & kFlagEmptyMask;
  for (int j = 0; j < s->ninst_; j++) {
    int inst = s->inst_[j];
    if (inst == MatchSep)
      break;  // Nothing after this is an instruction!
    if (inst == Mark)
      q->mark();
    else
      AddToQueue(q, inst, empty_flags);  // Explore from the head of the list.
  }
}
// Adds ip to the work queue, following empty arrows according to flag.
// Expansion is iterative, driven by the preallocated stack_ array; the
// relative order of pushes determines the priority order of the queue.
void DFA::AddToQueue(Workq* q, int id, uint32_t flag) {
  // Use stack_ to hold our stack of instructions yet to process.
  // It was preallocated as follows:
  //   one entry per Capture;
  //   one entry per EmptyWidth; and
  //   one entry per Nop.
  // This reflects the maximum number of stack pushes that each can
  // perform. (Each instruction can be processed at most once.)
  // When using marks, we also added nmark == prog_->size().
  // (Otherwise, nmark == 0.)
  int* stk = stack_.data();
  int nstk = 0;
  stk[nstk++] = id;
  while (nstk > 0) {
    DCHECK_LE(nstk, stack_.size());
    id = stk[--nstk];
  // "goto Loop" below re-enters here with a new id, avoiding the cost
  // of pushing it onto stk and immediately popping it back off.
  Loop:
    if (id == Mark) {
      q->mark();
      continue;
    }
    if (id == 0)
      continue;
    // If ip is already on the queue, nothing to do.
    // Otherwise add it. We don't actually keep all the
    // ones that get added, but adding all of them here
    // increases the likelihood of q->contains(id),
    // reducing the amount of duplicated work.
    if (q->contains(id))
      continue;
    q->insert_new(id);
    // Process instruction.
    Prog::Inst* ip = prog_->inst(id);
    switch (ip->opcode()) {
      default:
        LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
        break;
      case kInstByteRange: // just save these on the queue
      case kInstMatch:
        if (ip->last())
          break;
        // Not the last inst in its list: continue with the next one.
        id = id+1;
        goto Loop;
      case kInstCapture: // DFA treats captures as no-ops.
      case kInstNop:
        // Queue the rest of the list (if any) before following out(),
        // so that out() and its successors keep higher priority.
        if (!ip->last())
          stk[nstk++] = id+1;
        // If this instruction is the [00-FF]* loop at the beginning of
        // a leftmost-longest unanchored search, separate with a Mark so
        // that future threads (which will start farther to the right in
        // the input string) are lower priority than current threads.
        if (ip->opcode() == kInstNop && q->maxmark() > 0 &&
            id == prog_->start_unanchored() && id != prog_->start())
          stk[nstk++] = Mark;
        id = ip->out();
        goto Loop;
      case kInstAltMatch:
        // Never last in its list; continue with the next instruction.
        DCHECK(!ip->last());
        id = id+1;
        goto Loop;
      case kInstEmptyWidth:
        if (!ip->last())
          stk[nstk++] = id+1;
        // Continue on if we have all the right flag bits.
        if (ip->empty() & ~flag)
          break;
        id = ip->out();
        goto Loop;
    }
  }
}
// Running of work queues. In the work queue, order matters:
// the queue is sorted in priority order. If instruction i comes before j,
// then the instructions that i produces during the run must come before
// the ones that j produces. In order to keep this invariant, all the
// work queue runners have to take an old queue to process and then
// also a new queue to fill in. It's not acceptable to add to the end of
// an existing queue, because new instructions will not end up in the
// correct position.
// Runs the work queue, processing the empty strings indicated by flag.
// For example, flag == kEmptyBeginLine|kEmptyEndLine means to match
// both ^ and $.  Callers must pass all applicable flags in a single
// call: expanding ^ and then $ in two separate passes is not the same
// as expanding them together (e.g. "^$^$^$" only matches when ^ and $
// are processed simultaneously, which is the behavior exhibited by
// existing implementations).
void DFA::RunWorkqOnEmptyString(Workq* oldq, Workq* newq, uint32_t flag) {
  newq->clear();
  Workq::iterator it = oldq->begin();
  for (; it != oldq->end(); ++it) {
    // Marks are forwarded as the sentinel id; real insts are expanded.
    int id = oldq->is_mark(*it) ? Mark : *it;
    AddToQueue(newq, id, flag);
  }
}
// Runs the work queue, processing the single byte c followed by any empty
// strings indicated by flag. For example, c == 'a' and flag == kEmptyEndLine,
// means to match c$. Sets the bool *ismatch to true if the end of the
// regular expression program has been reached (the regexp has matched).
void DFA::RunWorkqOnByte(Workq* oldq, Workq* newq,
int c, uint32_t flag, bool* ismatch) {
//mutex_.AssertHeld();
newq->clear();
for (Workq::iterator i = oldq->begin(); i != oldq->end(); ++i) {
if (oldq->is_mark(*i)) {
if (*ismatch)
return;
newq->mark();
continue;
}
int id = *i;
Prog::Inst* ip = prog_->inst(id);
switch (ip->opcode()) {
default:
LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
break;
case kInstFail: // never succeeds
case kInstCapture: // already followed
case kInstNop: // already followed
case kInstAltMatch: // already followed
case kInstEmptyWidth: // already followed
break;
case kInstByteRange: // can follow if c is in range
if (!ip->Matches(c))
break;
AddToQueue(newq, ip->out(), flag);
if (ip->hint() != 0) {
// We have a hint, but we must cancel out the
// increment that will occur after the break.
i += ip->hint() - 1;
} else {
// We have no hint, so we must find the end
// of the current list and then skip to it.
Prog::Inst* ip0 = ip;
while (!ip->last())
++ip;
i += ip - ip0;
}
break;
case kInstMatch:
if (prog_->anchor_end() && c != kByteEndText &&
kind_ != Prog::kManyMatch)
break;
*ismatch = true;
if (kind_ == Prog::kFirstMatch) {
// Can stop processing work queue since we found a match.
return;
}