-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathinternal.h
183 lines (148 loc) · 5.36 KB
/
internal.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
/******************************************************************************
* internal.h
*
* Internal "global state" shared between garbage collector and friends.
*
* Copyright (c) 2001-2003, K A Fraser
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution. Neither the name of the Keir Fraser
* nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific
* prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Build-time configuration toggles (disabled by default). */
/*#define MINIMAL_GC*/
/*#define YIELD_TO_HELP_PROGRESS*/
/* Collect allocation statistics (see total_size/allocations below). */
#define PROFILE_GC

/* Recycled nodes are filled with this value if WEAK_MEM_ORDER. */
#define INVALID_BYTE 0
/* NOTE(review): expansion carries a trailing ';', so this macro is only
 * safe as a full statement (e.g. it breaks an un-braced if/else arm);
 * a do { ... } while (0) wrapper would be more robust -- confirm callers
 * before changing. */
#define INITIALISE_NODES(_p,_c) memset((_p), INVALID_BYTE, (_c));

/* Number of unique block sizes we can deal with. Equivalently, the
 * number of unique object caches which can be created. */
#define MAX_SIZES 60

/* Maximum number of epoch hooks that can be registered
 * (sizes hook_fns[] in gc_global_st and hook[][] in gc_st). */
#define MAX_HOOKS 4

/*
 * The initial number of allocation chunks for each per-blocksize list.
 * Popular allocation lists will steadily increase the allocation unit
 * in line with demand.
 */
#define ALLOC_CHUNKS_PER_LIST 10

/*
 * How many times should a thread call gc_enter(), seeing the same epoch
 * each time, before it makes a reclaim attempt?
 */
#define ENTRIES_PER_RECLAIM_ATTEMPT 100

/*
 * Number of epochs a garbage node passes through before reuse:
 *  0: current epoch -- threads are moving to this;
 * -1: some threads may still throw garbage into this epoch;
 * -2: no threads can see this epoch => we can zero garbage lists;
 * -3: all threads see zeros in these garbage lists => move to alloc lists.
 * (The extra -3 stage is only needed under WEAK_MEM_ORDER, hence 4 vs 3.)
 */
#ifdef WEAK_MEM_ORDER
#define NR_EPOCHS 4
#else
#define NR_EPOCHS 3
#endif

/*
 * A chunk amortises the cost of allocation from shared lists. It also
 * helps when zeroing nodes, as it increases per-cacheline pointer density
 * and means that node locations don't need to be brought into the cache
 * (most architectures have a non-temporal store instruction).
 */
#define BLKS_PER_CHUNK 100
/*
 * A chunk: a fixed-size batch of node pointers. Batching amortises the
 * cost of moving nodes between shared and per-thread lists (see the
 * BLKS_PER_CHUNK comment above).
 */
typedef struct chunk_st chunk_t;
struct chunk_st
{
    chunk_t      *next;                /* chunk chaining */
    unsigned int  i;                   /* the next entry in blk[] to use */
    void         *blk[BLKS_PER_CHUNK]; /* cached node pointers; blk[0..i-1] in use */
};
/*
 * Global garbage-collector state shared by all threads.
 *
 * The CACHE_PAD() markers keep the frequently-written fields (current,
 * inreclaim) on separate cache lines to avoid false sharing between
 * threads hammering different counters.
 */
struct gc_global_st
{
    CACHE_PAD(0);

    /* The current epoch. */
    VOLATILE unsigned int current;
    CACHE_PAD(1);

    /* Exclusive access to gc_reclaim(). */
    VOLATILE unsigned int inreclaim;
    CACHE_PAD(2);

    /* Allocator caches currently defined. */
    long n_allocators;

    /*
     * RUN-TIME CONSTANTS (to first approximation)
     */

    /* Memory page size, in bytes. */
    unsigned int page_size;

    /* Node sizes (run-time constants); nr_sizes entries of blk_sizes[] valid. */
    int nr_sizes;
    int blk_sizes[MAX_SIZES];

    /* Tags (trace support), parallel to blk_sizes[]. */
    const char *tags[MAX_SIZES];

    /* Registered epoch hooks; nr_hooks entries of hook_fns[] valid. */
    int nr_hooks;
    hook_fn_t hook_fns[MAX_HOOKS];
    CACHE_PAD(3);

    /*
     * DATA WE MAY HIT HARD
     */

    /* Chain of free, empty chunks. */
    chunk_t * VOLATILE free_chunks;

    /* Main allocation lists, one per registered block size. */
    chunk_t * VOLATILE alloc[MAX_SIZES];
    VOLATILE unsigned int alloc_size[MAX_SIZES];

    /* Per-thread state: TSD key plus the list of all ptst_t records. */
    pthread_key_t ptst_key;
    ptst_t *ptst_list;

#ifdef NEED_ID
    /*
     * BUG FIX: this member was declared 'static', which is not a legal
     * storage class for a struct member (C11 6.7.2.1) and made the
     * header fail to compile whenever NEED_ID was defined. It is now a
     * plain member; next thread id to hand out.
     */
    unsigned int next_id;
#endif

#ifdef PROFILE_GC
    /* Allocation statistics, enabled by PROFILE_GC above. */
    VOLATILE unsigned int total_size;
    VOLATILE unsigned int allocations;
#endif

    /* Skiplist specifics. Need better way to store per-global stuff. */
    int *gc_id;
};
/* Internal iterator for ptst_list: yields the head of the per-thread
 * state list hanging off a gc_global struct (walk via each ptst's link).
 * NOTE(review): the leading-underscore name skirts the identifiers
 * reserved for the implementation (C11 7.1.3) -- confirm before renaming,
 * as external callers may use it. */
#define _ptst_first(gc_global) (gc_global->ptst_list)
/* Per-thread garbage-collector state. */
struct gc_st
{
    /* Epoch that this thread sees. */
    unsigned int epoch;

    /* Back-pointer to the owning global GC state. */
    gc_global_t *global;

    /* Number of calls to gc_enter() since last gc_reclaim() attempt
     * (compared against ENTRIES_PER_RECLAIM_ATTEMPT). */
    unsigned int entries_since_reclaim;

#ifdef YIELD_TO_HELP_PROGRESS
    /* Number of calls to gc_reclaim() since we last yielded. */
    unsigned int reclaim_attempts_since_yield;
#endif

    /* Used by gc_async_barrier(). */
    void *async_page;
    int   async_page_state;

    /* Garbage lists, one head/tail pair per (epoch, block size);
     * chunk_cache holds a spare chunk for quick reuse. */
    chunk_t *garbage[NR_EPOCHS][MAX_SIZES];
    chunk_t *garbage_tail[NR_EPOCHS][MAX_SIZES];
    chunk_t *chunk_cache;

    /* Local allocation lists, one per block size; alloc_chunks[] tracks
     * the per-list allocation unit (grown in line with demand). */
    chunk_t *alloc[MAX_SIZES];
    unsigned int alloc_chunks[MAX_SIZES];

    /* Hook pointer lists, one per (epoch, hook slot). */
    chunk_t *hook[NR_EPOCHS][MAX_HOOKS];
};