0007-malloc-Enable-merging-of-remainders-in-memalign-bug-.patch
From 98c293c61f770b6b7a22f89a6ea81b711ecb1952 Mon Sep 17 00:00:00 2001
From: Florian Weimer <fweimer@redhat.com>
Date: Fri, 11 Aug 2023 11:18:17 +0200
Subject: [PATCH] malloc: Enable merging of remainders in memalign (bug 30723)

Previously, calling _int_free from _int_memalign could put remainders
into the tcache or into fastbins, where they are invisible to the
low-level allocator.  This results in missed merge opportunities
because once these freed chunks become available to the low-level
allocator, further memalign allocations (even of the same size) are
likely obstructing merges.

Furthermore, during forwards merging in _int_memalign, do not
completely give up when the remainder is too small to serve as a
chunk on its own.  We can still give it back if it can be merged
with the following unused chunk.  This makes it more likely that
memalign calls in a loop achieve a compact memory layout,
independently of initial heap layout.

Drop some useless (unsigned long) casts along the way, and tweak
the style to more closely match GNU on changed lines.

Reviewed-by: DJ Delorie <dj@redhat.com>
(cherry picked from commit 542b1105852568c3ebc712225ae78b8c8ba31a78)
---
malloc/malloc.c | 197 +++++++++++++++++++++++++++++-------------------
1 file changed, 121 insertions(+), 76 deletions(-)
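
As an aside, not part of the change itself: the allocation pattern the
commit message has in mind can be exercised with a small standalone
program along the lines of the sketch below.  The alignment, block
size, and iteration count are arbitrary example values, and
malloc_stats is a glibc-specific call used here only to get a rough
picture of arena usage.

/* Illustration only.  A loop of same-size aligned allocations is the
   pattern whose heap layout this change improves: each posix_memalign
   call may split a larger chunk, and previously the carved-off
   remainders could land in the tcache or fastbins, where the
   low-level allocator cannot merge them.  */
#define _GNU_SOURCE
#include <malloc.h>   /* malloc_stats (glibc-specific) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  enum { N = 1000 };
  static void *blocks[N];

  /* Request N same-size, 4 KiB-aligned blocks.  */
  for (int i = 0; i < N; i++)
    {
      int rc = posix_memalign (&blocks[i], 4096, 4096);
      if (rc != 0)
        {
          fprintf (stderr, "posix_memalign: %s\n", strerror (rc));
          return 1;
        }
    }

  /* Free every other block.  With remainder merging enabled, the
     freed blocks and the split remainders are more likely to
     coalesce into larger free chunks instead of lingering as
     unusable slivers.  */
  for (int i = 0; i < N; i += 2)
    free (blocks[i]);

  /* Print arena statistics to stderr for a rough view of how
     compact the resulting layout is.  */
  malloc_stats ();

  for (int i = 1; i < N; i += 2)
    free (blocks[i]);
  return 0;
}

Comparing the malloc_stats output from glibc builds with and without
this patch gives an informal sense of the effect; it is a sketch, not
a pass/fail test.
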
diff --git a/malloc/malloc.c b/malloc/malloc.c
index e2f1a615a4..948f9759af 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1086,6 +1086,11 @@ typedef struct malloc_chunk* mchunkptr;
static void* _int_malloc(mstate, size_t);
static void _int_free(mstate, mchunkptr, int);
+static void _int_free_merge_chunk (mstate, mchunkptr, INTERNAL_SIZE_T);
+static INTERNAL_SIZE_T _int_free_create_chunk (mstate,
+ mchunkptr, INTERNAL_SIZE_T,
+ mchunkptr, INTERNAL_SIZE_T);
+static void _int_free_maybe_consolidate (mstate, INTERNAL_SIZE_T);
static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
INTERNAL_SIZE_T);
static void* _int_memalign(mstate, size_t, size_t);
@@ -4637,31 +4642,52 @@ _int_free (mstate av, mchunkptr p, int have_lock)
if (!have_lock)
__libc_lock_lock (av->mutex);
- nextchunk = chunk_at_offset(p, size);
-
- /* Lightweight tests: check whether the block is already the
- top block. */
- if (__glibc_unlikely (p == av->top))
- malloc_printerr ("double free or corruption (top)");
- /* Or whether the next chunk is beyond the boundaries of the arena. */
- if (__builtin_expect (contiguous (av)
- && (char *) nextchunk
- >= ((char *) av->top + chunksize(av->top)), 0))
- malloc_printerr ("double free or corruption (out)");
- /* Or whether the block is actually not marked used. */
- if (__glibc_unlikely (!prev_inuse(nextchunk)))
- malloc_printerr ("double free or corruption (!prev)");
-
- nextsize = chunksize(nextchunk);
- if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
- || __builtin_expect (nextsize >= av->system_mem, 0))
- malloc_printerr ("free(): invalid next size (normal)");
+ _int_free_merge_chunk (av, p, size);
- free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
+ if (!have_lock)
+ __libc_lock_unlock (av->mutex);
+ }
+ /*
+ If the chunk was allocated via mmap, release via munmap().
+ */
+
+ else {
+ munmap_chunk (p);
+ }
+}
+
+/* Try to merge chunk P of SIZE bytes with its neighbors. Put the
+ resulting chunk on the appropriate bin list. P must not be on a
+ bin list yet, and it can be in use. */
+static void
+_int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
+{
+ mchunkptr nextchunk = chunk_at_offset(p, size);
+
+ /* Lightweight tests: check whether the block is already the
+ top block. */
+ if (__glibc_unlikely (p == av->top))
+ malloc_printerr ("double free or corruption (top)");
+ /* Or whether the next chunk is beyond the boundaries of the arena. */
+ if (__builtin_expect (contiguous (av)
+ && (char *) nextchunk
+ >= ((char *) av->top + chunksize(av->top)), 0))
+ malloc_printerr ("double free or corruption (out)");
+ /* Or whether the block is actually not marked used. */
+ if (__glibc_unlikely (!prev_inuse(nextchunk)))
+ malloc_printerr ("double free or corruption (!prev)");
+
+ INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
+ if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
+ || __builtin_expect (nextsize >= av->system_mem, 0))
+ malloc_printerr ("free(): invalid next size (normal)");
+
+ free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
- /* consolidate backward */
- if (!prev_inuse(p)) {
- prevsize = prev_size (p);
+ /* Consolidate backward. */
+ if (!prev_inuse(p))
+ {
+ INTERNAL_SIZE_T prevsize = prev_size (p);
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
if (__glibc_unlikely (chunksize(p) != prevsize))
@@ -4669,9 +4695,25 @@ _int_free (mstate av, mchunkptr p, int have_lock)
unlink_chunk (av, p);
}
- if (nextchunk != av->top) {
+ /* Write the chunk header, maybe after merging with the following chunk. */
+ size = _int_free_create_chunk (av, p, size, nextchunk, nextsize);
+ _int_free_maybe_consolidate (av, size);
+}
+
+/* Create a chunk at P of SIZE bytes, with SIZE potentially increased
+ to cover the immediately following chunk NEXTCHUNK of NEXTSIZE
+ bytes (if NEXTCHUNK is unused). The chunk at P is not actually
+ read and does not have to be initialized. After creation, it is
+ placed on the appropriate bin list. The function returns the size
+ of the new chunk. */
+static INTERNAL_SIZE_T
+_int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
+ mchunkptr nextchunk, INTERNAL_SIZE_T nextsize)
+{
+ if (nextchunk != av->top)
+ {
/* get and clear inuse bit */
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+ bool nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
/* consolidate forward */
if (!nextinuse) {
@@ -4686,8 +4728,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
been given one chance to be used in malloc.
*/
- bck = unsorted_chunks(av);
- fwd = bck->fd;
+ mchunkptr bck = unsorted_chunks (av);
+ mchunkptr fwd = bck->fd;
if (__glibc_unlikely (fwd->bk != bck))
malloc_printerr ("free(): corrupted unsorted chunks");
p->fd = fwd;
@@ -4706,61 +4748,52 @@ _int_free (mstate av, mchunkptr p, int have_lock)
check_free_chunk(av, p);
}
- /*
- If the chunk borders the current high end of memory,
- consolidate into top
- */
-
- else {
+ else
+ {
+ /* If the chunk borders the current high end of memory,
+ consolidate into top. */
size += nextsize;
set_head(p, size | PREV_INUSE);
av->top = p;
check_chunk(av, p);
}
- /*
- If freeing a large space, consolidate possibly-surrounding
- chunks. Then, if the total unused topmost memory exceeds trim
- threshold, ask malloc_trim to reduce top.
-
- Unless max_fast is 0, we don't know if there are fastbins
- bordering top, so we cannot tell for sure whether threshold
- has been reached unless fastbins are consolidated. But we
- don't want to consolidate on each free. As a compromise,
- consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
- is reached.
- */
+ return size;
+}
- if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
+/* If freeing a large space, consolidate possibly-surrounding
+ chunks. Then, if the total unused topmost memory exceeds trim
+ threshold, ask malloc_trim to reduce top. */
+static void
+_int_free_maybe_consolidate (mstate av, INTERNAL_SIZE_T size)
+{
+ /* Unless max_fast is 0, we don't know if there are fastbins
+ bordering top, so we cannot tell for sure whether threshold has
+ been reached unless fastbins are consolidated. But we don't want
+ to consolidate on each free. As a compromise, consolidation is
+ performed if FASTBIN_CONSOLIDATION_THRESHOLD is reached. */
+ if (size >= FASTBIN_CONSOLIDATION_THRESHOLD)
+ {
if (atomic_load_relaxed (&av->have_fastchunks))
malloc_consolidate(av);
- if (av == &main_arena) {
+ if (av == &main_arena)
+ {
#ifndef MORECORE_CANNOT_TRIM
- if ((unsigned long)(chunksize(av->top)) >=
- (unsigned long)(mp_.trim_threshold))
- systrim(mp_.top_pad, av);
+ if (chunksize (av->top) >= mp_.trim_threshold)
+ systrim (mp_.top_pad, av);
#endif
- } else {
- /* Always try heap_trim(), even if the top chunk is not
- large, because the corresponding heap might go away. */
- heap_info *heap = heap_for_ptr(top(av));
+ }
+ else
+ {
+ /* Always try heap_trim, even if the top chunk is not large,
+ because the corresponding heap might go away. */
+ heap_info *heap = heap_for_ptr (top (av));
- assert(heap->ar_ptr == av);
- heap_trim(heap, mp_.top_pad);
- }
+ assert (heap->ar_ptr == av);
+ heap_trim (heap, mp_.top_pad);
+ }
}
-
- if (!have_lock)
- __libc_lock_unlock (av->mutex);
- }
- /*
- If the chunk was allocated via mmap, release via munmap().
- */
-
- else {
- munmap_chunk (p);
- }
}
/*
@@ -5221,7 +5254,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
(av != &main_arena ? NON_MAIN_ARENA : 0));
set_inuse_bit_at_offset (newp, newsize);
set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
- _int_free (av, p, 1);
+ _int_free_merge_chunk (av, p, leadsize);
p = newp;
assert (newsize >= nb &&
@@ -5232,15 +5265,27 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
if (!chunk_is_mmapped (p))
{
size = chunksize (p);
- if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
+ mchunkptr nextchunk = chunk_at_offset(p, size);
+ INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
+ if (size > nb)
{
remainder_size = size - nb;
- remainder = chunk_at_offset (p, nb);
- set_head (remainder, remainder_size | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head_size (p, nb);
- _int_free (av, remainder, 1);
- }
+ if (remainder_size >= MINSIZE
+ || nextchunk == av->top
+ || !inuse_bit_at_offset (nextchunk, nextsize))
+ {
+ /* We can only give back the tail if it is larger than
+ MINSIZE, or if the following chunk is unused (top
+ chunk or unused in-heap chunk). Otherwise we would
+ create a chunk that is smaller than MINSIZE. */
+ remainder = chunk_at_offset (p, nb);
+ set_head_size (p, nb);
+ remainder_size = _int_free_create_chunk (av, remainder,
+ remainder_size,
+ nextchunk, nextsize);
+ _int_free_maybe_consolidate (av, remainder_size);
+ }
+ }
}
check_inuse_chunk (av, p);
--
2.40.1