//+---------------------------------------------------------------------------
//
// Copyright ( C ) Microsoft, 1994 - 2002.
//
// File: restack.h
//
// Functions: a quick-'n'-dirty, type-unsafe stack used by the iterative
// regular expression algorithm
//
// Notes: Care must be taken when using this stack. You must pop off
// the correct type of object, otherwise you get garbage. Also,
// if you push anything that has a non-trivial destructor, then
//              be sure to explicitly pop everything off the stack and don't
// use the unsafe_long_jump method.
//
// Author: Eric Niebler ( ericne@microsoft.com )
//
// History: 11/15/2001 ericne Created
//
//----------------------------------------------------------------------------
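//
// Example (illustrative sketch): if a type with a non-trivial destructor
// has been pushed, pop it (or use safe_long_jump) before tearing the stack
// down; unsafe_long_jump would skip its destructor:
//
//     regex::hetero_stack<> s;
//     s.push( std::string("scratch") );
//     s.pop< std::string >( std::nothrow ); // runs the string's destructor
//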
#ifndef HETERO_STACK_H
#define HETERO_STACK_H
#include <string>
#include <utility>
#include <typeinfo>
#include <stdexcept>
#include <functional>
#ifndef REGEX_CDECL
#ifdef _MSC_VER
#define REGEX_CDECL __cdecl
#else
#define REGEX_CDECL
#endif
#endif
#define COMMA ,
#if !defined(_MSC_VER) || 1200 < _MSC_VER
# define REGEX_VC6(x)
# define REGEX_NVC6(x) x
#else
# define REGEX_VC6(x) x
# define REGEX_NVC6(x)
#endif
namespace regex
{
namespace detail
{
// For compile-time assertions that generate
// no run-time overhead.
template< bool f > struct static_assert;
template<> struct static_assert<true> { static_assert() {} };
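// e.g. (illustrative): the following line compiles only when the condition
// holds, because only static_assert<true> has a definition:
//     detail::static_assert< (sizeof(int) <= sizeof(long)) > const check;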
// Work-around for a template-template parameter problem on VC7.0
template< typename T > struct type2type { typedef T type; };
template< bool F > struct bool2type { enum { value = F }; };
typedef bool2type<true> true_t;
typedef bool2type<false> false_t;
#ifdef _MSC_VER
// warning C4127: conditional expression is constant
// warning C4189: local variable is initialized but not referenced
// warning C4244: conversion from 'T' to 'int', possible loss of data
// warning C4510: default constructor could not be generated
// warning C4610: struct can never be instantiated - user defined constructor required
// warning C4800: forcing value to bool 'true' or 'false' (performance warning)
#pragma warning( push )
#pragma warning( disable : 4127 4189 4244 4510 4610 4800 )
// Make sure nobody has tampered with the packing before defining the
// alignof structure
#pragma pack( push )
#pragma pack() // use the default packing
#endif
template< typename T >
class alignof
{
struct helper
{
helper();
char m_c;
T m_t;
};
public:
enum { value = sizeof(helper)-sizeof(T) < sizeof(T) ? sizeof(helper)-sizeof(T) : sizeof(T) };
};
#ifdef _MSC_VER
#pragma pack( pop )
#endif
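// e.g. (illustrative): alignof<T>::value deduces T's alignment from the
// padding that a leading char forces before a T member:
//     enum { align_d = detail::alignof<double>::value }; // typically 8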
//
// Type traits
//
typedef char (&yes_type)[1];
typedef char (&no_type)[2];
template< bool >
struct select_helper
{
template< typename T, typename U >
struct nested
{
typedef T type;
};
};
template<>
struct select_helper<false>
{
template< typename T, typename U >
struct nested
{
typedef U type;
};
};
// For use in conditional typedefs
template< bool F, typename T, typename U >
struct select
{
typedef typename select_helper<F>::template nested<T,U>::type type;
};
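// e.g. (illustrative): picks one of two types at compile time:
//     typedef detail::select< (sizeof(long) > sizeof(int)), long, int >::type widest;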
template< typename U >
struct convertible_helper
{
static yes_type check( U );
static no_type REGEX_CDECL check(...);
};
template< typename T >
struct factory
{
static T& make();
};
template< typename T, typename U >
struct is_convertible
{
enum { value = (sizeof(convertible_helper<U>::check(factory<T>::make()))==sizeof(yes_type)) };
};
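// e.g. (illustrative):
//     enum { a = detail::is_convertible< int, double >::value }; // true
//     enum { b = detail::is_convertible< int, char* >::value };  // false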
template< size_t N >
struct is_power_of_two
{
enum { value = 1==N || ( 0==(N%2) && is_power_of_two<N/2>::value ) };
};
template<>
struct is_power_of_two<0>
{
enum { value = false };
};
// Very primitive implementation of is_scalar. This doesn't work
// for void, reference types, array types or function types, but
// we don't use those types from hetero_stack.
struct bool_convertible { bool_convertible(bool); };
template< typename T >
struct is_scalar
{
enum { value = is_convertible<T,bool_convertible>::value };
};
template< typename T >
struct has_trivial_copy
{
enum { value = is_scalar<T>::value };
};
template< typename T >
struct has_trivial_assignment
{
enum { value = is_scalar<T>::value };
};
template< typename T >
struct has_trivial_destructor
{
enum { value = is_scalar<T>::value };
};
template< bool > struct destroyer_helper
{
template< typename T >
static void destroy( T const * pT )
{
pT, pT->~T(); // the leading "pT," quiets unused-parameter warnings
}
};
template<> struct destroyer_helper<true>
{
template< typename T >
static void destroy( T const * )
{
}
};
template< typename T >
void destroy( T const * pT )
{
destroyer_helper<has_trivial_destructor<T>::value>::destroy( pT );
}
struct type_vtable
{
std::type_info const * typeinfo_ptr;
size_t size;
size_t aligned_size;
void (*destroy)( void * );
void (*copy)( void *, void const * );
};
template< typename T, size_t AlignmentT >
class type_info_ex
{
static void destroy( void * pv )
{
T const * pT = static_cast<T const*>( pv );
regex::detail::destroy( pT );
(void)pv; // quiet unused-variable warnings when ~T() is trivial
(void)pT;
}
static void copy( void * dst, void const * src )
{
new ( dst ) T( *static_cast<T const *>( src ) );
}
public:
static type_vtable const vtable;
static bool equals( type_vtable const * ptm )
{
return ptm == & vtable || *ptm->typeinfo_ptr == typeid(T);
}
};
template< typename T,size_t AlignmentT >
type_vtable const type_info_ex<T,AlignmentT>::vtable =
{
&typeid(T),
sizeof(T),
( sizeof(T) + AlignmentT - 1 ) & ~( AlignmentT - 1 ),
has_trivial_destructor<T>::value ? 0 : &type_info_ex<T,AlignmentT>::destroy,
&type_info_ex<T,AlignmentT>::copy
};
template< typename T >
inline T & to_type( void * pv )
{
return *static_cast<T*>( pv );
}
} // namespace detail
// --------------------------------------------------------------------------
//
// Class: hetero_stack
//
// Description: Fast, heterogeneous stack.
//
// Methods: allocate - reserve space on stack
// unwind - unwind the stack
// hetero_stack - c'tor
// ~hetero_stack - d'tor, release all dynamic memory
// push - push an object on the stack
// pop - pop an object from the stack
//
// Members: m_first_node - initial block, allocated in-place within the stack
// m_current_node - the block currently being allocated from
//
// Typedefs: byte_t - unsigned char
//
// History: 10/19/2001 - ericne - Created
//
// --------------------------------------------------------------------------
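//
// A minimal usage sketch (illustrative only):
//
// regex::hetero_stack<> s;
// s.push( int(1) );       // could throw std::bad_alloc
// s.push( double(3.14) );
// double d; s.pop( d );   // pop in the reverse order of pushing
// int i;    s.pop( i );
//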
template
<
size_t AlignmentT = sizeof(void*),
bool RuntimeTypeCheckT = true, // should we perform run-time type checking?
bool AssumePodT = false, // assume non-throwing copy/assign/destroy for better perf
size_t DynamicBlockSizeT = 4096, // blocks allocated from heap are this size
size_t StaticBlockSizeT = 1024 // initial block on stack is this size
>
class hetero_stack
{
typedef unsigned char byte_t;
typedef detail::type_vtable const* vtable_ptr;
public:
typedef hetero_stack<AlignmentT,RuntimeTypeCheckT,AssumePodT,DynamicBlockSizeT,StaticBlockSizeT> stack_type;
template< typename T >
struct aligned_sizeof
{
enum
{
// round up sizeof(T) to the nearest multiple of AlignmentT
no_rtti = ( sizeof( T ) + AlignmentT - 1 ) & ~( AlignmentT - 1 ),
with_rtti = RuntimeTypeCheckT ?
no_rtti + aligned_sizeof<vtable_ptr>::no_rtti :
no_rtti
};
};
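// e.g. (illustrative), with AlignmentT == 8: a 5-byte T has
// aligned_sizeof<T>::no_rtti == 8, and with run-time type checking on,
// with_rtti == 16 (8 more bytes for the trailing vtable pointer).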
private:
struct stack_node
{
struct header
{
stack_node * m_back;
stack_node * m_next;
byte_t * m_current; // ptr into m_mem. alloc from here
byte_t * m_end; // ptr to last+1 byte_t in m_mem
};
union
{
header m_head;
byte_t m_align[ aligned_sizeof<header>::no_rtti ];
};
// This is the buffer into which values will be pushed and popped.
// It is guaranteed to meet the AlignmentT requirements because of
// the union above.
byte_t m_mem[1];
size_t size() const // throw()
{
return static_cast<size_t>( m_head.m_end - m_mem );
}
};
enum
{
DYNAMIC_BLOCK_SIZE =
DynamicBlockSizeT > sizeof( stack_node ) ?
DynamicBlockSizeT : sizeof( stack_node )
};
union
{
stack_node m_node;
byte_t m_buf[ aligned_sizeof<stack_node::header>::no_rtti + StaticBlockSizeT ];
} m_first_node;
stack_node * m_current_node;
// Cache these for faster access
byte_t * m_begin;
byte_t * m_current;
byte_t * m_end;
byte_t * grow( size_t size ) // throw(std::bad_alloc)
{
// write the cached value of current into the node.
// OK to do this even if later statements throw.
m_current_node->m_head.m_current = m_current;
// Do we have a node with available memory already?
if( m_current_node->m_head.m_next )
{
// Does this node have enough room?
if( size <= m_current_node->m_head.m_next->size() )
{
m_current_node = m_current_node->m_head.m_next;
m_current = m_current_node->m_head.m_current = m_current_node->m_mem + size;
m_end = m_current_node->m_head.m_end;
return m_begin = m_current_node->m_mem;
}
// Create a new node and insert it into the list
stack_node * new_node = static_cast<stack_node*>(
::operator new( size + offsetof( stack_node, m_mem ) ) );
new_node->m_head.m_back = m_current_node;
new_node->m_head.m_next = m_current_node->m_head.m_next;
m_current = m_end = new_node->m_head.m_current =
new_node->m_head.m_end = new_node->m_mem + size;
m_current_node->m_head.m_next->m_head.m_back = new_node;
m_current_node->m_head.m_next = new_node;
m_current_node = new_node;
return m_begin = m_current_node->m_mem;
}
// We need to create a new node from scratch
// Allocate at least a full dynamic block (minus the node header).
size_t const block_payload =
static_cast<size_t>(DYNAMIC_BLOCK_SIZE) - offsetof( stack_node, m_mem );
size_t new_size = size < block_payload ? block_payload : size;
stack_node * new_node = static_cast<stack_node*>(
::operator new( new_size + offsetof( stack_node, m_mem ) ) );
new_node->m_head.m_back = m_current_node;
new_node->m_head.m_next = 0;
m_current = new_node->m_head.m_current = new_node->m_mem + size;
m_end = new_node->m_head.m_end = new_node->m_mem + new_size;
m_current_node->m_head.m_next = new_node;
m_current_node = new_node;
return m_begin = m_current_node->m_mem;
}
byte_t * allocate( size_t size ) // throw(std::bad_alloc)
{
// This is the ptr to return
byte_t * mem = m_current;
// Advance the high-water mark
m_current += size;
// Check to see if we have overflowed this buffer
if( std::less<void*>()( m_end, m_current ) ) // if( m_end < m_current )
{
// oops, back this out.
m_current = mem;
// allocate a new block and return a ptr to the new memory
return grow( size );
}
return mem;
}
byte_t * unwind( byte_t * pb ) // throw()
{
// roll back the stack
m_current = pb;
// If we've unwound this whole block, then make the
// previous node the current node
if( m_current == m_begin )
{
// write the cached value of m_current into m_current_node
m_current_node->m_head.m_current = m_current;
m_current_node = m_current_node->m_head.m_back;
// update the cache
m_begin = m_current_node->m_mem;
m_current = m_current_node->m_head.m_current;
m_end = m_current_node->m_head.m_end;
}
return pb;
}
byte_t * unwind( size_t size ) // throw()
{
return unwind( m_current - size );
}
void long_jump_impl( void * jump_ptr, detail::bool2type<true> ) // throw()
{
safe_long_jump( jump_ptr );
}
void long_jump_impl( void * jump_ptr, detail::bool2type<false> ) // throw()
{
unsafe_long_jump( jump_ptr );
}
struct real_unwinder;
friend struct real_unwinder;
struct real_unwinder
{
real_unwinder( stack_type * pstack, size_t size ) // throw()
: m_pstack(pstack), m_size(size) {}
~real_unwinder() // throw()
{
if( m_pstack )
m_pstack->unwind( m_size );
}
void dismiss() // throw()
{
m_pstack = 0;
}
private:
real_unwinder( real_unwinder const & );
real_unwinder & operator=( real_unwinder const & );
stack_type * m_pstack;
size_t m_size;
};
struct dummy_unwinder
{
dummy_unwinder( stack_type *, size_t ) {} // throw()
void dismiss() {} // throw()
};
// Disallow these for now. Might implement them later.
hetero_stack( hetero_stack const & );
hetero_stack & operator=( hetero_stack const & );
public:
class type_error : public std::logic_error
{
std::type_info const * m_prequested_type;
std::type_info const * m_pactual_type;
public:
type_error
(
std::type_info const & requested_type,
std::type_info const & actual_type,
std::string const & s = "type error in hetero_stack"
) // throw()
: std::logic_error( s + " (requested type: " + requested_type.name()
+ ", actual type: " + actual_type.name() + ")" )
, m_prequested_type( &requested_type )
, m_pactual_type( &actual_type )
{
}
std::type_info const & requested_type() const // throw()
{
return *m_prequested_type;
}
std::type_info const & actual_type() const // throw()
{
return *m_pactual_type;
}
};
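// e.g. (illustrative): a mismatched pop reports both types:
//
// regex::hetero_stack<> s;
// s.push( int(1) );
// try { std::string str; s.pop( str ); } // top is an int, not a string
// catch( regex::hetero_stack<>::type_error const & e )
// { /* e.requested_type() and e.actual_type() identify the mismatch */ }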
hetero_stack() // throw()
: m_current_node( &m_first_node.m_node )
{
m_first_node.m_node.m_head.m_back = & m_first_node.m_node;
m_first_node.m_node.m_head.m_next = 0;
m_begin = m_current = m_first_node.m_node.m_head.m_current = m_first_node.m_node.m_mem;
m_end = m_first_node.m_node.m_head.m_end = m_first_node.m_buf + sizeof( m_first_node );
}
~hetero_stack() // throw()
{
// AlignmentT must be a power of two
detail::static_assert< detail::is_power_of_two<AlignmentT>::value > const align_test;
// Call any destructors for objects still on the stack
if( RuntimeTypeCheckT && ! AssumePodT )
{
long_jump( m_first_node.m_node.m_mem );
}
// delete all the memory blocks
m_current_node = m_first_node.m_node.m_head.m_next;
for( stack_node * next_node; m_current_node; m_current_node = next_node )
{
next_node = m_current_node->m_head.m_next;
::operator delete( static_cast<void*>( m_current_node ) );
}
}
template< typename T >
inline void push( T const & t ) // throw(std::bad_alloc,...)
{
// Make sure that the alignment for type T is not worse
// than our declared alignment.
detail::static_assert<( AlignmentT >= detail::alignof<T>::value )> const align_test;
static_cast<void>(align_test);
// If T won't throw in copy c'tor then we don't need to use an unwinder object.
typedef typename detail::select< AssumePodT || detail::has_trivial_copy<T>::value,
dummy_unwinder, real_unwinder >::type unwinder;
// If this throws, it doesn't change state,
// so there is nothing to roll back.
byte_t * pb = allocate( aligned_sizeof<T>::with_rtti );
// Rolls back the allocate if later steps throw
// BUGBUG: we could do the alloc but defer updating m_current until after
// the copy c'tor, avoiding the need for an unwinder object
unwinder guard( this, aligned_sizeof<T>::with_rtti );
new ( pb ) T( t ); // Could throw if ! has_trivial_copy<T>::value
// If we are debugging the stack, then push a pointer to the type_info
// for this type T. It will be checked in pop().
if( RuntimeTypeCheckT )
{
detail::to_type<vtable_ptr>( pb + aligned_sizeof<T>::no_rtti ) = & detail::type_info_ex<T,AlignmentT>::vtable;
}
// ok, everything succeeded -- dismiss the guard
guard.dismiss();
}
template< typename T >
inline void pop( T & t ) // throw(...)
{
detail::static_assert<( AlignmentT >= detail::alignof<T>::value )> const align_test;
static_cast<void>(align_test);
// If we are debugging the stack, then in push() we pushed a pointer
// to the type_info struct for this type T. Check it now.
if( RuntimeTypeCheckT )
{
byte_t * pti = m_current - aligned_sizeof<vtable_ptr>::no_rtti;
if( ! detail::type_info_ex<T,AlignmentT>::equals( detail::to_type<vtable_ptr>( pti ) ) )
throw type_error( typeid( T ), *detail::to_type<vtable_ptr>( pti )->typeinfo_ptr );
}
// Don't change state yet because assignment op could throw!
byte_t * pT = m_current - aligned_sizeof<T>::with_rtti;
t = detail::to_type<T const>( pT ); // could throw
T const & ref = detail::to_type<T const>( pT );
regex::detail::destroy( &ref );
unwind( pT );
}
// Call this version of pop when you don't need the popped value
template< typename T >
inline void pop( REGEX_VC6(detail::type2type<T> COMMA int) ) // throw(type_error,...)
{
detail::static_assert<( AlignmentT >= detail::alignof<T>::value )> const align_test;
static_cast<void>(align_test);
// If we are debugging the stack, then in push() we pushed a pointer
// to the type_info struct for this type T. Check it now.
if( RuntimeTypeCheckT )
{
byte_t * pti = m_current - aligned_sizeof<vtable_ptr>::no_rtti;
if( ! detail::type_info_ex<T,AlignmentT>::equals( detail::to_type<vtable_ptr>( pti ) ) )
throw type_error( typeid( T ), *detail::to_type<vtable_ptr>( pti )->typeinfo_ptr );
}
byte_t * pv = unwind( aligned_sizeof<T>::with_rtti );
T const & ref = detail::to_type<T const>( pv );
regex::detail::destroy( &ref );
}
// Call this version of pop when you don't need the popped value and
// throwing an exception isn't an option
template< typename T >
inline bool pop( std::nothrow_t const & ) // throw()
{
detail::static_assert<( AlignmentT >= detail::alignof<T>::value )> const align_test;
static_cast<void>(align_test);
// If we are debugging the stack, then in push() we pushed a pointer
// to the type_info struct for this type T. Check it now.
if( RuntimeTypeCheckT )
{
byte_t * pti = m_current - aligned_sizeof<vtable_ptr>::no_rtti;
if( ! detail::type_info_ex<T,AlignmentT>::equals( detail::to_type<vtable_ptr>( pti ) ) )
return false; // type error, can't throw so bail.
}
byte_t * pv = unwind( aligned_sizeof<T>::with_rtti );
T const & ref = detail::to_type<T const>( pv );
regex::detail::destroy( &ref );
return true;
}
template< typename T >
inline T & top( REGEX_VC6(detail::type2type<T>) ) const // throw(type_error,...)
{
detail::static_assert<( AlignmentT >= detail::alignof<T>::value )> const align_test;
static_cast<void>(align_test);
if( RuntimeTypeCheckT )
{
// If we are debugging the stack, then the top of the stack is a
// pointer to a type_info struct. Assert that we have the correct type.
byte_t * pti = m_current - aligned_sizeof<vtable_ptr>::no_rtti;
if( ! detail::type_info_ex<T,AlignmentT>::equals( detail::to_type<vtable_ptr>( pti ) ) )
throw type_error( typeid( T ), *detail::to_type<vtable_ptr>( pti )->typeinfo_ptr );
}
byte_t * pT = m_current - aligned_sizeof<T>::with_rtti;
return detail::to_type<T>( pT );
}
// Fetch the type_info for the element at the top of the stack
std::type_info const & top_type() const // throw()
{
detail::static_assert< RuntimeTypeCheckT > const type_check;
static_cast<void>(type_check);
byte_t * pti = m_current - aligned_sizeof<vtable_ptr>::no_rtti;
return *detail::to_type<vtable_ptr>( pti )->typeinfo_ptr;
}
// Get a pointer to the top of the stack
void * set_jump() const // throw()
{
return m_current;
}
// Quick and dirty stack unwind. Does not call destructors.
void unsafe_long_jump( void *const jump_ptr ) // throw()
{
for( ;; )
{
if( std::less<void*>()( jump_ptr, m_current_node->m_mem ) ||
std::less<void*>()( m_current_node->m_head.m_end, jump_ptr ) )
{
m_current_node->m_head.m_current = m_current_node->m_mem;
m_current_node = m_current_node->m_head.m_back;
}
else
{
m_begin = m_current_node->m_mem;
m_current = m_current_node->m_head.m_current = static_cast<byte_t*>( jump_ptr );
m_end = m_current_node->m_head.m_end;
return;
}
}
}
// Safe long jump; does call destructors if RuntimeTypeCheckT is true.
void safe_long_jump( void *const jump_ptr ) // throw()
{
detail::static_assert< RuntimeTypeCheckT > const type_check;
static_cast<void>(type_check);
while( m_current != jump_ptr )
{
// The top of the stack is a pointer to a type_vtable struct.
m_current -= aligned_sizeof<vtable_ptr>::no_rtti;
vtable_ptr pvtable = detail::to_type<vtable_ptr>( m_current );
// find the start of the object
m_current -= pvtable->aligned_size;
// call the destructor for T
if( pvtable->destroy )
{
pvtable->destroy( m_current );
}
// move to the previous buffer if necessary
if( m_current == m_begin && m_current != jump_ptr )
{
m_current_node->m_head.m_current = m_current;
m_current_node = m_current_node->m_head.m_back;
m_begin = m_current_node->m_mem;
m_current = m_current_node->m_head.m_current;
m_end = m_current_node->m_head.m_end;
}
}
}
// Stack unwind. If RuntimeTypeCheckT && !AssumePodT, then destructors
// are called. Otherwise they are not.
void long_jump( void * jump_ptr ) // throw()
{
long_jump_impl( jump_ptr, detail::bool2type<RuntimeTypeCheckT && !AssumePodT>() );
}
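// e.g. (illustrative): mark the stack, push freely, unwind in one shot:
//
// void * mark = s.set_jump();
// s.push( std::string("scratch") );
// s.long_jump( mark ); // destructors run when RuntimeTypeCheckT && !AssumePodT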
struct stack_guard
{
stack_type * m_ps;
void * m_jump_ptr;
explicit stack_guard( stack_type * ps )
: m_ps( ps )
, m_jump_ptr( ps->set_jump() )
{
}
~stack_guard()
{
m_ps->long_jump( m_jump_ptr );
}
};
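// e.g. (illustrative): stack_guard automates the set_jump/long_jump pairing:
//
// {
//     regex::hetero_stack<>::stack_guard guard( &s );
//     s.push( int(42) ); // unwound automatically when guard leaves scope
// }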
bool empty() const // throw()
{
return m_current == m_first_node.m_node.m_mem;
}
// Use scoped_push for automatically pushing/popping
// things to and from the stack. This is especially useful
// if you want to push a bunch of things "atomically". For
// instance:
//
// typedef hetero_stack<>::scoped_pop scoped_pop;
// scoped_pop p1 = stack.scoped_push( int(1) ); // could throw
// scoped_pop p2 = stack.scoped_push( std::string("foo") ); // could throw
// stack.push( float(3.14159) ); // could throw
// p2.dismiss(); // ok, nothing threw, so ...
// p1.dismiss(); // ... dismiss the scoped_pops
//
// If p2 and p1 are not dismissed, as in the case when an
// exception gets thrown, then they automatically pop their
// arguments from the stack.
class scoped_pop_base
{
scoped_pop_base & operator=( scoped_pop_base const & ); // disallow assignment
protected:
mutable stack_type * m_pstack;
explicit scoped_pop_base( stack_type * pstack ) // throw(std::bad_alloc,...)
: m_pstack( pstack )
{
}
scoped_pop_base( scoped_pop_base const & right ) // throw() // destructive copy
: m_pstack( right.m_pstack )
{
right.dismiss();
}
public:
void dismiss() const // throw()
{
m_pstack = 0;
}
};
template< typename T >
class scoped_pop_t : public scoped_pop_base
{
scoped_pop_t & operator=( scoped_pop_t const & ); // disallow assignment
public:
scoped_pop_t( stack_type * pstack, T const & t ) // throw(std::bad_alloc,...)
: scoped_pop_base( pstack )
{
// Note that if this throws an exception the destructor
// will not get called, which is what we want.
m_pstack->push( t );
}
~scoped_pop_t() // throw()
{
// If we own this stack space, pop it.
if( m_pstack )
m_pstack->template pop<T>( std::nothrow );
}
};
template< typename T >
scoped_pop_t<T> scoped_push( T const & t ) // throw(...)
{
return scoped_pop_t<T>( this, t );
}
typedef scoped_pop_base const & scoped_pop;
};
#ifdef _MSC_VER
#pragma warning( pop )
#endif
} // namespace regex
#endif