#ifndef BOOST_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
#define BOOST_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED

// MS compatible compilers support #pragma once

#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif

//
//  detail/quick_allocator.hpp
//
//  Copyright (c) 2003 David Abrahams
//  Copyright (c) 2003 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//

#include <boost/config.hpp>

#include <boost/detail/lightweight_mutex.hpp>
#include <boost/type_traits/type_with_alignment.hpp>
#include <boost/type_traits/alignment_of.hpp>

#include <new>              // ::operator new, ::operator delete
#include <cstddef>          // std::size_t

namespace boost
{

namespace detail
{

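// Free-list node: the same storage either holds an object's raw bytes or,
// while the block sits on the free list, the `next` pointer. The aligner
// member guarantees storage suitably aligned for `align_`.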
template<unsigned size, unsigned align_> union freeblock
{
    typedef typename boost::type_with_alignment<align_>::type aligner_type;
    aligner_type aligner;
    char bytes[size];
    freeblock * next;
};

template<unsigned size, unsigned align_> struct allocator_impl
{
    typedef freeblock<size, align_> block;

    // It may seem odd to use such small pages.
    //
    // However, on a typical Windows implementation that uses
    // the OS allocator, "normal size" pages interact with the
    // "ordinary" operator new, slowing it down dramatically.
    //
    // 512 byte pages are handled by the small object allocator,
    // and don't interfere with ::new.
    //
    // The other alternative is to use much bigger pages (1M.)
    //
    // It is surprisingly easy to hit pathological behavior by
    // varying the page size. g++ 2.96 on Red Hat Linux 7.2,
    // for example, passionately dislikes 496. 512 seems OK.

#if defined(BOOST_QA_PAGE_SIZE)

    enum { items_per_page = BOOST_QA_PAGE_SIZE / size };

#else

    enum { items_per_page = 512 / size }; // 1048560 / size

#endif
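
    // For example: with the default 512-byte page and a 16-byte block,
    // items_per_page == 512 / 16 == 32 blocks per page. A project can pick
    // a different page size by defining BOOST_QA_PAGE_SIZE before this
    // header is included (e.g. -DBOOST_QA_PAGE_SIZE=4096); the 16-byte
    // figure here is purely illustrative.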

#ifdef BOOST_HAS_THREADS
    static lightweight_mutex mutex;
#endif

    static block * free;
    static block * page;
    static unsigned last;

    static inline void * alloc()
    {
#ifdef BOOST_HAS_THREADS
        lightweight_mutex::scoped_lock lock(mutex);
#endif
        if(block * x = free)
        {
            free = x->next;
            return x;
        }
        else
        {
            if(last == items_per_page)
            {
                // "Listen to me carefully: there is no memory leak"
                // -- Scott Meyers, Eff C++ 2nd Ed Item 10
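                // The pages allocated below are never returned to the
                // heap; every block in them is either a live object or a
                // free-list entry, so the footprint grows but nothing is
                // lost.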
                page = ::new block[items_per_page];
                last = 0;
            }

            return &page[last++];
        }
    }

    static inline void * alloc(std::size_t n)
    {
        if(n != size) // class-specific new called for a derived object
        {
            return ::operator new(n);
        }
        else
        {
#ifdef BOOST_HAS_THREADS
            lightweight_mutex::scoped_lock lock(mutex);
#endif
            if(block * x = free)
            {
                free = x->next;
                return x;
            }
            else
            {
                if(last == items_per_page)
                {
                    page = ::new block[items_per_page];
                    last = 0;
                }

                return &page[last++];
            }
        }
    }

    static inline void dealloc(void * pv)
    {
        if(pv != 0) // 18.4.1.1/13
        {
#ifdef BOOST_HAS_THREADS
            lightweight_mutex::scoped_lock lock(mutex);
#endif
            block * pb = static_cast<block *>(pv);
            pb->next = free;
            free = pb;
        }
    }

    static inline void dealloc(void * pv, std::size_t n)
    {
        if(n != size) // class-specific delete called for a derived object
        {
            ::operator delete(pv);
        }
        else if(pv != 0) // 18.4.1.1/13
        {
#ifdef BOOST_HAS_THREADS
            lightweight_mutex::scoped_lock lock(mutex);
#endif
            block * pb = static_cast<block *>(pv);
            pb->next = free;
            free = pb;
        }
    }
};

#ifdef BOOST_HAS_THREADS
template<unsigned size, unsigned align_>
  lightweight_mutex allocator_impl<size, align_>::mutex;
#endif

template<unsigned size, unsigned align_>
  freeblock<size, align_> * allocator_impl<size, align_>::free = 0;

template<unsigned size, unsigned align_>
  freeblock<size, align_> * allocator_impl<size, align_>::page = 0;

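// `last` is initialized to items_per_page so that the very first call to
// alloc() sees a "full" page and allocates a fresh one.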
template<unsigned size, unsigned align_>
  unsigned allocator_impl<size, align_>::last = allocator_impl<size, align_>::items_per_page;

template<class T>
struct quick_allocator: public allocator_impl< sizeof(T), boost::alignment_of<T>::value >
{
};
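
// Usage sketch (illustrative only; the class name X is hypothetical): a type
// whose instances should be carved out of quick_allocator pages forwards its
// class-specific operator new/delete to alloc/dealloc. The sized overloads
// fall back to ::operator new/delete when a derived class of a different
// size is allocated, matching the n != size checks above.
//
//   class X
//   {
//   public:
//
//       void * operator new(std::size_t n)
//       {
//           return boost::detail::quick_allocator<X>::alloc(n);
//       }
//
//       void operator delete(void * p, std::size_t n)
//       {
//           boost::detail::quick_allocator<X>::dealloc(p, n);
//       }
//   };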

} // namespace detail

} // namespace boost

#endif  // #ifndef BOOST_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED