/*-------------------------------------------------------------------------
*
* generation.c
* Generational allocator definitions.
*
* Generation is a custom MemoryContext implementation designed for cases of
* chunks with similar lifespan.
*
* Portions Copyright (c) 2017-2023, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/backend/utils/mmgr/generation.c
*
*
* This memory context is based on the assumption that the chunks are freed
* roughly in the same order as they were allocated (FIFO), or in groups with
* similar lifespan (generations - hence the name of the context). This is
* typical for various queue-like use cases, i.e. when tuples are constructed,
* processed and then thrown away.
*
* The memory context uses a very simple approach to free space management.
 * Instead of a complex global freelist, each block tracks the number
* of allocated and freed chunks. The block is classed as empty when the
* number of free chunks is equal to the number of allocated chunks. When
* this occurs, instead of freeing the block, we try to "recycle" it, i.e.
* reuse it for new allocations. This is done by setting the block in the
* context's 'freeblock' field. If the freeblock field is already occupied
* by another free block we simply return the newly empty block to malloc.
*
* This approach to free blocks requires fewer malloc/free calls for truly
* first allocated, first free'd allocation patterns.
*
*-------------------------------------------------------------------------
*/
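/*
 * Illustrative usage sketch (not part of this file): callers treat a
 * Generation context like any other MemoryContext.  The context name and
 * sizes below are made-up examples; GenerationContextCreate,
 * MemoryContextSwitchTo, palloc, pfree and MemoryContextDelete are the
 * standard APIs involved.
 *
 *	MemoryContext gen_cxt;
 *	MemoryContext oldcxt;
 *	void	   *tuple;
 *
 *	gen_cxt = GenerationContextCreate(CurrentMemoryContext,
 *									  "example generation context",
 *									  ALLOCSET_DEFAULT_SIZES);
 *	oldcxt = MemoryContextSwitchTo(gen_cxt);
 *	tuple = palloc(64);				(served from the current block)
 *	pfree(tuple);					(only bumps the block's nfree counter)
 *	MemoryContextSwitchTo(oldcxt);
 *	MemoryContextDelete(gen_cxt);	(releases all blocks at once)
 */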
#include "postgres.h"
#include "lib/ilist.h"
#include "port/pg_bitutils.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/memutils_memorychunk.h"
#include "utils/memutils_internal.h"
#define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock))
#define Generation_CHUNKHDRSZ sizeof(MemoryChunk)
#define Generation_CHUNK_FRACTION 8
typedef struct GenerationBlock GenerationBlock; /* forward reference */
typedef void *GenerationPointer;
/*
 * GenerationContext is a simple memory context that does not reuse freed
 * chunks, and that frees a block once all chunks on it have been freed.
*/
typedef struct GenerationContext
{
MemoryContextData header; /* Standard memory-context fields */
/* Generational context parameters */
Size initBlockSize; /* initial block size */
Size maxBlockSize; /* maximum block size */
Size nextBlockSize; /* next block size to allocate */
Size allocChunkLimit; /* effective chunk size limit */
GenerationBlock *block; /* current (most recently allocated) block, or
* NULL if we've just freed the most recent
* block */
GenerationBlock *freeblock; /* pointer to a block that's being recycled,
* or NULL if there's no such block. */
GenerationBlock *keeper; /* keep this block over resets */
dlist_head blocks; /* list of blocks */
} GenerationContext;
/*
* GenerationBlock
* GenerationBlock is the unit of memory that is obtained by generation.c
* from malloc(). It contains zero or more MemoryChunks, which are the
* units requested by palloc() and freed by pfree(). MemoryChunks cannot
* be returned to malloc() individually, instead pfree() updates the free
* counter of the block and when all chunks in a block are free the whole
* block can be returned to malloc().
*
* GenerationBlock is the header data for a block --- the usable space
* within the block begins at the next alignment boundary.
*/
struct GenerationBlock
{
dlist_node node; /* doubly-linked list of blocks */
GenerationContext *context; /* pointer back to the owning context */
Size blksize; /* allocated size of this block */
int nchunks; /* number of chunks in the block */
int nfree; /* number of free chunks */
char *freeptr; /* start of free space in this block */
char *endptr; /* end of space in this block */
};
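/*
 * Rough layout of a normal (non-dedicated) block, inferred from the struct
 * above and from GenerationBlockInit/GenerationAlloc below (a sketch, not to
 * scale):
 *
 *	+-------------------------+  <- block (start of malloc'd area)
 *	| GenerationBlock header  |
 *	+-------------------------+  <- block + Generation_BLOCKHDRSZ
 *	| MemoryChunk header      |
 *	| chunk data ...          |
 *	+-------------------------+
 *	| MemoryChunk header      |
 *	| chunk data ...          |
 *	+-------------------------+  <- freeptr (next allocation starts here)
 *	| unused space            |
 *	+-------------------------+  <- endptr (block + blksize)
 *
 * Dedicated blocks for oversized chunks hold exactly one chunk, with
 * freeptr == endptr.
 */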
/*
* GenerationIsValid
* True iff set is valid generation set.
*/
#define GenerationIsValid(set) \
(PointerIsValid(set) && IsA(set, GenerationContext))
/*
* GenerationBlockIsValid
* True iff block is valid block of generation set.
*/
#define GenerationBlockIsValid(block) \
(PointerIsValid(block) && GenerationIsValid((block)->context))
/*
* We always store external chunks on a dedicated block. This makes fetching
* the block from an external chunk easy since it's always the first and only
* chunk on the block.
*/
#define ExternalChunkGetBlock(chunk) \
(GenerationBlock *) ((char *) chunk - Generation_BLOCKHDRSZ)
/* Inlined helper functions */
static inline void GenerationBlockInit(GenerationContext *context,
GenerationBlock *block,
Size blksize);
static inline bool GenerationBlockIsEmpty(GenerationBlock *block);
static inline void GenerationBlockMarkEmpty(GenerationBlock *block);
static inline Size GenerationBlockFreeBytes(GenerationBlock *block);
static inline void GenerationBlockFree(GenerationContext *set,
GenerationBlock *block);
/*
* Public routines
*/
/*
* GenerationContextCreate
* Create a new Generation context.
*
* parent: parent context, or NULL if top-level context
* name: name of context (must be statically allocated)
* minContextSize: minimum context size
* initBlockSize: initial allocation block size
* maxBlockSize: maximum allocation block size
*/
MemoryContext
GenerationContextCreate(MemoryContext parent,
const char *name,
Size minContextSize,
Size initBlockSize,
Size maxBlockSize)
{
Size firstBlockSize;
Size allocSize;
GenerationContext *set;
GenerationBlock *block;
/* ensure MemoryChunk's size is properly maxaligned */
StaticAssertDecl(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ),
"sizeof(MemoryChunk) is not maxaligned");
/*
* First, validate allocation parameters. Asserts seem sufficient because
* nobody varies their parameters at runtime. We somewhat arbitrarily
* enforce a minimum 1K block size. We restrict the maximum block size to
* MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
* regards to addressing the offset between the chunk and the block that
* the chunk is stored on. We would be unable to store the offset between
* the chunk and block for any chunks that were beyond
* MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
* larger than this.
*/
Assert(initBlockSize == MAXALIGN(initBlockSize) &&
initBlockSize >= 1024);
Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
maxBlockSize >= initBlockSize &&
AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
Assert(minContextSize == 0 ||
(minContextSize == MAXALIGN(minContextSize) &&
minContextSize >= 1024 &&
minContextSize <= maxBlockSize));
Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
/* Determine size of initial block */
allocSize = MAXALIGN(sizeof(GenerationContext)) +
Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ;
if (minContextSize != 0)
allocSize = Max(allocSize, minContextSize);
else
allocSize = Max(allocSize, initBlockSize);
/*
* Allocate the initial block. Unlike other generation.c blocks, it
* starts with the context header and its block header follows that.
*/
set = (GenerationContext *) malloc(allocSize);
if (set == NULL)
{
MemoryContextStats(TopMemoryContext);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
errdetail("Failed while creating memory context \"%s\".",
name)));
}
/*
* Avoid writing code that can fail between here and MemoryContextCreate;
* we'd leak the header if we ereport in this stretch.
*/
dlist_init(&set->blocks);
/* Fill in the initial block's block header */
block = (GenerationBlock *) (((char *) set) + MAXALIGN(sizeof(GenerationContext)));
/* determine the block size and initialize it */
firstBlockSize = allocSize - MAXALIGN(sizeof(GenerationContext));
GenerationBlockInit(set, block, firstBlockSize);
/* add it to the doubly-linked list of blocks */
dlist_push_head(&set->blocks, &block->node);
/* use it as the current allocation block */
set->block = block;
/* No free block, yet */
set->freeblock = NULL;
/* Mark block as not to be released at reset time */
set->keeper = block;
/* Fill in GenerationContext-specific header fields */
set->initBlockSize = initBlockSize;
set->maxBlockSize = maxBlockSize;
set->nextBlockSize = initBlockSize;
/*
* Compute the allocation chunk size limit for this context.
*
* Limit the maximum size a non-dedicated chunk can be so that we can fit
* at least Generation_CHUNK_FRACTION of chunks this big onto the maximum
* sized block. We must further limit this value so that it's no more
* than MEMORYCHUNK_MAX_VALUE. We're unable to have non-external chunks
* larger than that value as we store the chunk size in the MemoryChunk
* 'value' field in the call to MemoryChunkSetHdrMask().
*/
set->allocChunkLimit = Min(maxBlockSize, MEMORYCHUNK_MAX_VALUE);
while ((Size) (set->allocChunkLimit + Generation_CHUNKHDRSZ) >
(Size) ((Size) (maxBlockSize - Generation_BLOCKHDRSZ) / Generation_CHUNK_FRACTION))
set->allocChunkLimit >>= 1;
/* Finally, do the type-independent part of context creation */
MemoryContextCreate((MemoryContext) set,
T_GenerationContext,
MCTX_GENERATION_ID,
parent,
name);
((MemoryContext) set)->mem_allocated = firstBlockSize;
return (MemoryContext) set;
}
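/*
 * Worked example of the allocChunkLimit computation above (an illustrative
 * sketch; exact header sizes depend on the platform and build options).
 * With maxBlockSize = 8MB, and assuming MEMORYCHUNK_MAX_VALUE is at least
 * 8MB, the limit starts at 8MB and is halved (8MB -> 4MB -> 2MB -> 1MB ->
 * 512kB) until allocChunkLimit + Generation_CHUNKHDRSZ no longer exceeds
 * (8MB - Generation_BLOCKHDRSZ) / Generation_CHUNK_FRACTION, i.e. a bit
 * under 1MB.  That leaves allocChunkLimit at 512kB, so any request whose
 * maxaligned size exceeds 512kB is served from a dedicated single-chunk
 * block in GenerationAlloc.
 */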
/*
* GenerationReset
* Frees all memory which is allocated in the given set.
*
 * The code frees all blocks in the context except the keeper block, which is
 * merely marked empty and kept around so that new allocations can reuse it.
*/
void
GenerationReset(MemoryContext context)
{
GenerationContext *set = (GenerationContext *) context;
dlist_mutable_iter miter;
Assert(GenerationIsValid(set));
#ifdef MEMORY_CONTEXT_CHECKING
/* Check for corruption and leaks before freeing */
GenerationCheck(context);
#endif
/*
* NULLify the free block pointer. We must do this before calling
* GenerationBlockFree as that function never expects to free the
* freeblock.
*/
set->freeblock = NULL;
dlist_foreach_modify(miter, &set->blocks)
{
GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
if (block == set->keeper)
GenerationBlockMarkEmpty(block);
else
GenerationBlockFree(set, block);
}
 /* set it so new allocations make use of the keeper block */
set->block = set->keeper;
/* Reset block size allocation sequence, too */
set->nextBlockSize = set->initBlockSize;
/* Ensure there is only 1 item in the dlist */
Assert(!dlist_is_empty(&set->blocks));
Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
}
/*
* GenerationDelete
* Free all memory which is allocated in the given context.
*/
void
GenerationDelete(MemoryContext context)
{
/* Reset to release all releasable GenerationBlocks */
GenerationReset(context);
/* And free the context header and keeper block */
free(context);
}
/*
* GenerationAlloc
* Returns pointer to allocated memory of given size or NULL if
* request could not be completed; memory is added to the set.
*
* No request may exceed:
* MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ
* All callers use a much-lower limit.
*
* Note: when using valgrind, it doesn't matter how the returned allocation
* is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
* return space that is marked NOACCESS - GenerationRealloc has to beware!
*/
void *
GenerationAlloc(MemoryContext context, Size size)
{
GenerationContext *set = (GenerationContext *) context;
GenerationBlock *block;
MemoryChunk *chunk;
Size chunk_size;
Size required_size;
Assert(GenerationIsValid(set));
#ifdef MEMORY_CONTEXT_CHECKING
/* ensure there's always space for the sentinel byte */
chunk_size = MAXALIGN(size + 1);
#else
chunk_size = MAXALIGN(size);
#endif
required_size = chunk_size + Generation_CHUNKHDRSZ;
/* is it an over-sized chunk? if yes, allocate special block */
if (chunk_size > set->allocChunkLimit)
{
Size blksize = required_size + Generation_BLOCKHDRSZ;
block = (GenerationBlock *) malloc(blksize);
if (block == NULL)
return NULL;
context->mem_allocated += blksize;
/* block with a single (used) chunk */
block->context = set;
block->blksize = blksize;
block->nchunks = 1;
block->nfree = 0;
/* the block is completely full */
block->freeptr = block->endptr = ((char *) block) + blksize;
chunk = (MemoryChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
/* mark the MemoryChunk as externally managed */
MemoryChunkSetHdrMaskExternal(chunk, MCTX_GENERATION_ID);
#ifdef MEMORY_CONTEXT_CHECKING
chunk->requested_size = size;
/* set mark to catch clobber of "unused" space */
Assert(size < chunk_size);
set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
/* fill the allocated space with junk */
randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif
/* add the block to the list of allocated blocks */
dlist_push_head(&set->blocks, &block->node);
/* Ensure any padding bytes are marked NOACCESS. */
VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
chunk_size - size);
/* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
return MemoryChunkGetPointer(chunk);
}
/*
* Not an oversized chunk. We try to first make use of the current block,
* but if there's not enough space in it, instead of allocating a new
* block, we look to see if the freeblock is empty and has enough space.
* If not, we'll also try the same using the keeper block. The keeper
* block may have become empty and we have no other way to reuse it again
* if we don't try to use it explicitly here.
*
* We don't want to start filling the freeblock before the current block
* is full, otherwise we may cause fragmentation in FIFO type workloads.
* We only switch to using the freeblock or keeper block if those blocks
* are completely empty. If we didn't do that we could end up fragmenting
* consecutive allocations over multiple blocks which would be a problem
* that would compound over time.
*/
block = set->block;
if (block == NULL ||
GenerationBlockFreeBytes(block) < required_size)
{
Size blksize;
GenerationBlock *freeblock = set->freeblock;
if (freeblock != NULL &&
GenerationBlockIsEmpty(freeblock) &&
GenerationBlockFreeBytes(freeblock) >= required_size)
{
block = freeblock;
/*
* Zero out the freeblock as we'll set this to the current block
* below
*/
set->freeblock = NULL;
}
else if (GenerationBlockIsEmpty(set->keeper) &&
GenerationBlockFreeBytes(set->keeper) >= required_size)
{
block = set->keeper;
}
else
{
/*
* The first such block has size initBlockSize, and we double the
* space in each succeeding block, but not more than maxBlockSize.
*/
blksize = set->nextBlockSize;
set->nextBlockSize <<= 1;
if (set->nextBlockSize > set->maxBlockSize)
set->nextBlockSize = set->maxBlockSize;
/* we'll need a block hdr too, so add that to the required size */
required_size += Generation_BLOCKHDRSZ;
/* round the size up to the next power of 2 */
if (blksize < required_size)
blksize = pg_nextpower2_size_t(required_size);
block = (GenerationBlock *) malloc(blksize);
if (block == NULL)
return NULL;
context->mem_allocated += blksize;
/* initialize the new block */
GenerationBlockInit(set, block, blksize);
/* add it to the doubly-linked list of blocks */
dlist_push_head(&set->blocks, &block->node);
/* Zero out the freeblock in case it's become full */
set->freeblock = NULL;
}
/* and also use it as the current allocation block */
set->block = block;
}
/* we're supposed to have a block with enough free space now */
Assert(block != NULL);
Assert((block->endptr - block->freeptr) >= Generation_CHUNKHDRSZ + chunk_size);
chunk = (MemoryChunk *) block->freeptr;
/* Prepare to initialize the chunk header. */
VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);
block->nchunks += 1;
block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);
Assert(block->freeptr <= block->endptr);
MemoryChunkSetHdrMask(chunk, block, chunk_size, MCTX_GENERATION_ID);
#ifdef MEMORY_CONTEXT_CHECKING
chunk->requested_size = size;
/* set mark to catch clobber of "unused" space */
Assert(size < chunk_size);
set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
/* fill the allocated space with junk */
randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif
/* Ensure any padding bytes are marked NOACCESS. */
VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
chunk_size - size);
/* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
return MemoryChunkGetPointer(chunk);
}
/*
* GenerationBlockInit
* Initializes 'block' assuming 'blksize'. Does not update the context's
* mem_allocated field.
*/
static inline void
GenerationBlockInit(GenerationContext *context, GenerationBlock *block,
Size blksize)
{
block->context = context;
block->blksize = blksize;
block->nchunks = 0;
block->nfree = 0;
block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
block->endptr = ((char *) block) + blksize;
/* Mark unallocated space NOACCESS. */
VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
blksize - Generation_BLOCKHDRSZ);
}
/*
* GenerationBlockIsEmpty
* Returns true iff 'block' contains no chunks
*/
static inline bool
GenerationBlockIsEmpty(GenerationBlock *block)
{
return (block->nchunks == 0);
}
/*
* GenerationBlockMarkEmpty
* Set a block as empty. Does not free the block.
*/
static inline void
GenerationBlockMarkEmpty(GenerationBlock *block)
{
#if defined(USE_VALGRIND) || defined(CLOBBER_FREED_MEMORY)
char *datastart = ((char *) block) + Generation_BLOCKHDRSZ;
#endif
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(datastart, block->freeptr - datastart);
#elif defined(USE_VALGRIND)
/* wipe_mem() would have done this */
VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
/* Reset the block, but don't return it to malloc */
block->nchunks = 0;
block->nfree = 0;
block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
}
/*
* GenerationBlockFreeBytes
* Returns the number of bytes free in 'block'
*/
static inline Size
GenerationBlockFreeBytes(GenerationBlock *block)
{
return (block->endptr - block->freeptr);
}
/*
* GenerationBlockFree
* Remove 'block' from 'set' and release the memory consumed by it.
*/
static inline void
GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
{
/* Make sure nobody tries to free the keeper block */
Assert(block != set->keeper);
/* We shouldn't be freeing the freeblock either */
Assert(block != set->freeblock);
/* release the block from the list of blocks */
dlist_delete(&block->node);
((MemoryContext) set)->mem_allocated -= block->blksize;
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->blksize);
#endif
free(block);
}
/*
* GenerationFree
* Update number of chunks in the block, and if all chunks in the block
* are now free then discard the block.
*/
void
GenerationFree(void *pointer)
{
MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
GenerationBlock *block;
GenerationContext *set;
#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
|| defined(CLOBBER_FREED_MEMORY)
Size chunksize;
#endif
/* Allow access to the chunk header. */
VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
if (MemoryChunkIsExternal(chunk))
{
block = ExternalChunkGetBlock(chunk);
/*
* Try to verify that we have a sane block pointer: the block header
* should reference a generation context.
*/
if (!GenerationBlockIsValid(block))
elog(ERROR, "could not find block containing chunk %p", chunk);
#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
|| defined(CLOBBER_FREED_MEMORY)
chunksize = block->endptr - (char *) pointer;
#endif
}
else
{
block = MemoryChunkGetBlock(chunk);
/*
* In this path, for speed reasons we just Assert that the referenced
* block is good. Future field experience may show that this Assert
* had better become a regular runtime test-and-elog check.
*/
Assert(GenerationBlockIsValid(block));
#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
|| defined(CLOBBER_FREED_MEMORY)
chunksize = MemoryChunkGetValue(chunk);
#endif
}
#ifdef MEMORY_CONTEXT_CHECKING
/* Test for someone scribbling on unused space in chunk */
Assert(chunk->requested_size < chunksize);
if (!sentinel_ok(pointer, chunk->requested_size))
elog(WARNING, "detected write past chunk end in %s %p",
((MemoryContext) block->context)->name, chunk);
#endif
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(pointer, chunksize);
#endif
#ifdef MEMORY_CONTEXT_CHECKING
/* Reset requested_size to InvalidAllocSize in freed chunks */
chunk->requested_size = InvalidAllocSize;
#endif
block->nfree += 1;
Assert(block->nchunks > 0);
Assert(block->nfree <= block->nchunks);
/* If there are still allocated chunks in the block, we're done. */
if (block->nfree < block->nchunks)
return;
set = block->context;
/* Don't try to free the keeper block, just mark it empty */
if (block == set->keeper)
{
GenerationBlockMarkEmpty(block);
return;
}
/*
* If there is no freeblock set or if this is the freeblock then instead
* of freeing this memory, we keep it around so that new allocations have
* the option of recycling it.
*/
if (set->freeblock == NULL || set->freeblock == block)
{
/* XXX should we only recycle maxBlockSize sized blocks? */
set->freeblock = block;
GenerationBlockMarkEmpty(block);
return;
}
/* Also make sure the block is not marked as the current block. */
if (set->block == block)
set->block = NULL;
/*
* The block is empty, so let's get rid of it. First remove it from the
* list of blocks, then return it to malloc().
*/
dlist_delete(&block->node);
set->header.mem_allocated -= block->blksize;
free(block);
}
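/*
 * Illustrative recycling scenario (not from the original comments): in a
 * FIFO workload, chunks are palloc'd into block A until it fills, new
 * allocations then move on to a fresh block B, and A's chunks are pfree'd
 * in roughly the order they were allocated.  When A's nfree count reaches
 * its nchunks, GenerationFree above stashes A in set->freeblock (marked
 * empty) rather than returning it to malloc, and GenerationAlloc reuses it
 * once the current block fills up.  In steady state this keeps malloc/free
 * traffic to a minimum, as promised in the header comment.
 */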
/*
* GenerationRealloc
* When handling repalloc, we simply allocate a new chunk, copy the data
* and discard the old one. The only exception is when the new size fits
* into the old chunk - in that case we just update chunk header.
*/
void *
GenerationRealloc(void *pointer, Size size)
{
MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
GenerationContext *set;
GenerationBlock *block;
GenerationPointer newPointer;
Size oldsize;
/* Allow access to the chunk header. */
VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
if (MemoryChunkIsExternal(chunk))
{
block = ExternalChunkGetBlock(chunk);
/*
* Try to verify that we have a sane block pointer: the block header
* should reference a generation context.
*/
if (!GenerationBlockIsValid(block))
elog(ERROR, "could not find block containing chunk %p", chunk);
oldsize = block->endptr - (char *) pointer;
}
else
{
block = MemoryChunkGetBlock(chunk);
/*
* In this path, for speed reasons we just Assert that the referenced
* block is good. Future field experience may show that this Assert
* had better become a regular runtime test-and-elog check.
*/
Assert(GenerationBlockIsValid(block));
oldsize = MemoryChunkGetValue(chunk);
}
set = block->context;
#ifdef MEMORY_CONTEXT_CHECKING
/* Test for someone scribbling on unused space in chunk */
Assert(chunk->requested_size < oldsize);
if (!sentinel_ok(pointer, chunk->requested_size))
elog(WARNING, "detected write past chunk end in %s %p",
((MemoryContext) set)->name, chunk);
#endif
/*
* Maybe the allocated area already is >= the new size. (In particular,
* we always fall out here if the requested size is a decrease.)
*
* This memory context does not use power-of-2 chunk sizing and instead
* carves the chunks to be as small as possible, so most repalloc() calls
* will end up in the palloc/memcpy/pfree branch.
*
* XXX Perhaps we should annotate this condition with unlikely()?
*/
if (oldsize >= size)
{
#ifdef MEMORY_CONTEXT_CHECKING
Size oldrequest = chunk->requested_size;
#ifdef RANDOMIZE_ALLOCATED_MEMORY
/* We can only fill the extra space if we know the prior request */
if (size > oldrequest)
randomize_mem((char *) pointer + oldrequest,
size - oldrequest);
#endif
chunk->requested_size = size;
/*
* If this is an increase, mark any newly-available part UNDEFINED.
* Otherwise, mark the obsolete part NOACCESS.
*/
if (size > oldrequest)
VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
size - oldrequest);
else
VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
oldsize - size);
/* set mark to catch clobber of "unused" space */
set_sentinel(pointer, size);
#else /* !MEMORY_CONTEXT_CHECKING */
/*
* We don't have the information to determine whether we're growing
* the old request or shrinking it, so we conservatively mark the
* entire new allocation DEFINED.
*/
VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif
/* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
return pointer;
}
/* allocate new chunk */
newPointer = GenerationAlloc((MemoryContext) set, size);
/* leave immediately if request was not completed */
if (newPointer == NULL)
{
/* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
return NULL;
}
/*
* GenerationAlloc() may have returned a region that is still NOACCESS.
* Change it to UNDEFINED for the moment; memcpy() will then transfer
* definedness from the old allocation to the new. If we know the old
* allocation, copy just that much. Otherwise, make the entire old chunk
* defined to avoid errors as we copy the currently-NOACCESS trailing
* bytes.
*/
VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
oldsize = chunk->requested_size;
#else
VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif
/* transfer existing data (certain to fit) */
memcpy(newPointer, pointer, oldsize);
/* free old chunk */
GenerationFree(pointer);
return newPointer;
}
/*
* GenerationGetChunkContext
* Return the MemoryContext that 'pointer' belongs to.
*/
MemoryContext
GenerationGetChunkContext(void *pointer)
{
MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
GenerationBlock *block;
/* Allow access to the chunk header. */
VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
if (MemoryChunkIsExternal(chunk))
block = ExternalChunkGetBlock(chunk);
else
block = (GenerationBlock *) MemoryChunkGetBlock(chunk);
/* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
Assert(GenerationBlockIsValid(block));
return &block->context->header;
}
/*
* GenerationGetChunkSpace
* Given a currently-allocated chunk, determine the total space
* it occupies (including all memory-allocation overhead).
*/
Size
GenerationGetChunkSpace(void *pointer)
{
MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
Size chunksize;
/* Allow access to the chunk header. */
VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
if (MemoryChunkIsExternal(chunk))
{
GenerationBlock *block = ExternalChunkGetBlock(chunk);
Assert(GenerationBlockIsValid(block));
chunksize = block->endptr - (char *) pointer;
}
else
chunksize = MemoryChunkGetValue(chunk);
/* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
return Generation_CHUNKHDRSZ + chunksize;
}
/*
* GenerationIsEmpty
* Is a GenerationContext empty of any allocated space?
*/
bool
GenerationIsEmpty(MemoryContext context)
{
GenerationContext *set = (GenerationContext *) context;
dlist_iter iter;
Assert(GenerationIsValid(set));
dlist_foreach(iter, &set->blocks)
{
GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
if (block->nchunks > 0)
return false;
}
return true;
}
/*
* GenerationStats
* Compute stats about memory consumption of a Generation context.
*
* printfunc: if not NULL, pass a human-readable stats string to this.
* passthru: pass this pointer through to printfunc.
* totals: if not NULL, add stats about this context into *totals.
* print_to_stderr: print stats to stderr if true, elog otherwise.
*
* XXX freespace only accounts for empty space at the end of the block, not
* space of freed chunks (which is unknown).
*/
void
GenerationStats(MemoryContext context,
MemoryStatsPrintFunc printfunc, void *passthru,
MemoryContextCounters *totals, bool print_to_stderr)
{
GenerationContext *set = (GenerationContext *) context;
Size nblocks = 0;
Size nchunks = 0;
Size nfreechunks = 0;
Size totalspace;
Size freespace = 0;
dlist_iter iter;
Assert(GenerationIsValid(set));
/* Include context header in totalspace */
totalspace = MAXALIGN(sizeof(GenerationContext));
dlist_foreach(iter, &set->blocks)
{
GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
nblocks++;
nchunks += block->nchunks;
nfreechunks += block->nfree;
totalspace += block->blksize;
freespace += (block->endptr - block->freeptr);
}
if (printfunc)
{
char stats_string[200];
snprintf(stats_string, sizeof(stats_string),
"%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
totalspace, nblocks, nchunks, freespace,
nfreechunks, totalspace - freespace);
printfunc(context, passthru, stats_string, print_to_stderr);
}
if (totals)
{
totals->nblocks += nblocks;
totals->freechunks += nfreechunks;
totals->totalspace += totalspace;
totals->freespace += freespace;
}
}
#ifdef MEMORY_CONTEXT_CHECKING
/*
* GenerationCheck
* Walk through chunks and check consistency of memory.
*
* NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
* find yourself in an infinite loop when trouble occurs, because this
* routine will be entered again when elog cleanup tries to release memory!
*/
void
GenerationCheck(MemoryContext context)
{
GenerationContext *gen = (GenerationContext *) context;
const char *name = context->name;
dlist_iter iter;
Size total_allocated = 0;
/* walk all blocks in this context */
dlist_foreach(iter, &gen->blocks)
{
GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
int nfree,
nchunks;
char *ptr;
bool has_external_chunk = false;
total_allocated += block->blksize;
/*
 * nfree > nchunks is surely wrong.  Equality is allowed as the block
 * might be completely empty if it's the freeblock.
*/
if (block->nfree > block->nchunks)
elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated",
name, block->nfree, block, block->nchunks);
/* check block belongs to the correct context */
if (block->context != gen)
elog(WARNING, "problem in Generation %s: bogus context link in block %p",
name, block);
/* Now walk through the chunks and count them. */
nfree = 0;
nchunks = 0;
ptr = ((char *) block) + Generation_BLOCKHDRSZ;
while (ptr < block->freeptr)
{
MemoryChunk *chunk = (MemoryChunk *) ptr;
GenerationBlock *chunkblock;
Size chunksize;
/* Allow access to the chunk header. */
VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
if (MemoryChunkIsExternal(chunk))
{
chunkblock = ExternalChunkGetBlock(chunk);
chunksize = block->endptr - (char *) MemoryChunkGetPointer(chunk);
has_external_chunk = true;
}
else
{
chunkblock = MemoryChunkGetBlock(chunk);
chunksize = MemoryChunkGetValue(chunk);
}
/* move to the next chunk */
ptr += (chunksize + Generation_CHUNKHDRSZ);
nchunks += 1;
/* chunks have both block and context pointers, so check both */
if (chunkblock != block)
elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p",
name, block, chunk);
/* is chunk allocated? */
if (chunk->requested_size != InvalidAllocSize)
{
/* now make sure the chunk size is correct */
if (chunksize < chunk->requested_size ||
chunksize != MAXALIGN(chunksize))
elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p",
name, block, chunk);
/* check sentinel */
Assert(chunk->requested_size < chunksize);
if (!sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size))
elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p",
name, block, chunk);
}
else
nfree += 1;
/* if chunk is allocated, disallow access to the chunk header */
if (chunk->requested_size != InvalidAllocSize)
VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
}
/*
* Make sure we got the expected number of allocated and free chunks
* (as tracked in the block header).
*/
if (nchunks != block->nchunks)
elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d",
name, nchunks, block, block->nchunks);
if (nfree != block->nfree)
elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d",
name, nfree, block, block->nfree);
if (has_external_chunk && nchunks > 1)
elog(WARNING, "problem in Generation %s: external chunk on non-dedicated block %p",
name, block);
}
Assert(total_allocated == context->mem_allocated);
}
#endif /* MEMORY_CONTEXT_CHECKING */