Changeset ae75080a in mainline
- Timestamp: 2009-06-30T15:50:04Z (15 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 6db6fd1
- Parents: 497ae7b
- Files: 1 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
uspace/lib/libc/include/malloc.h (r497ae7b → rae75080a)

/*
-  Default header file for malloc-2.8.x, written by Doug Lea
-  and released to the public domain, as explained at
-  http://creativecommons.org/licenses/publicdomain.
-
-  last update: Mon Aug 15 08:55:52 2005  Doug Lea  (dl at gee)
+ * Copyright (c) 2009 Martin Decky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */

-  This header is for ANSI C/C++ only.  You can set any of
-  the following #defines before including:
+/** @addtogroup libc
+ * @{
+ */
+/** @file
+ */

-  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
-    was also compiled with this option, so all routines
-    have names starting with "dl".
+#ifndef LIBC_MALLOC_H_
+#define LIBC_MALLOC_H_

-  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
-    file will be #included AFTER <malloc.h>. This is needed only if
-    your system defines a struct mallinfo that is incompatible with the
-    standard one declared here.  Otherwise, you can include this file
-    INSTEAD of your system system <malloc.h>.  At least on ANSI, all
-    declarations should be compatible with system versions
+#include <sys/types.h>

-  * If MSPACES is defined, declarations for mspace versions are included.
-*/
+extern void __heap_init(void);
+extern uintptr_t get_max_heap_addr(void);

-#ifndef MALLOC_280_H
-#define MALLOC_280_H
+extern void *malloc(const size_t size);
+extern void *memalign(const size_t align, const size_t size);
+extern void *realloc(const void *addr, const size_t size);
+extern void free(const void *addr);

-#ifdef __cplusplus
-extern "C" {
#endif

-#include <stddef.h>   /* for size_t */
-
-#if !ONLY_MSPACES
-
-#ifndef USE_DL_PREFIX
-#define dlcalloc calloc
-#define dlfree free
-#define dlmalloc malloc
-#define dlmemalign memalign
-#define dlrealloc realloc
-#define dlvalloc valloc
-#define dlpvalloc pvalloc
-#define dlmallinfo mallinfo
-#define dlmallopt mallopt
-#define dlmalloc_trim malloc_trim
-#define dlmalloc_stats malloc_stats
-#define dlmalloc_usable_size malloc_usable_size
-#define dlmalloc_footprint malloc_footprint
-#define dlmalloc_max_footprint malloc_max_footprint
-#define dlindependent_calloc independent_calloc
-#define dlindependent_comalloc independent_comalloc
-#endif /* USE_DL_PREFIX */
-
-
-/*
-  malloc(size_t n)
-  Returns a pointer to a newly allocated chunk of at least n bytes, or
-  null if no space is available, in which case errno is set to ENOMEM
-  on ANSI C systems.
-
-  If n is zero, malloc returns a minimum-sized chunk. (The minimum
-  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
-  systems.)  Note that size_t is an unsigned type, so calls with
-  arguments that would be negative if signed are interpreted as
-  requests for huge amounts of space, which will often fail. The
-  maximum supported value of n differs across systems, but is in all
-  cases less than the maximum representable value of a size_t.
-*/
-void* dlmalloc(size_t);
-
-/*
-  free(void* p)
-  Releases the chunk of memory pointed to by p, that had been previously
-  allocated using malloc or a related routine such as realloc.
-  It has no effect if p is null. If p was not malloced or already
-  freed, free(p) will by default cuase the current program to abort.
-*/
-void dlfree(void*);
-
-/*
-  calloc(size_t n_elements, size_t element_size);
-  Returns a pointer to n_elements * element_size bytes, with all locations
-  set to zero.
-*/
-void* dlcalloc(size_t, size_t);
-
-/*
-  realloc(void* p, size_t n)
-  Returns a pointer to a chunk of size n that contains the same data
-  as does chunk p up to the minimum of (n, p's size) bytes, or null
-  if no space is available.
-
-  The returned pointer may or may not be the same as p. The algorithm
-  prefers extending p in most cases when possible, otherwise it
-  employs the equivalent of a malloc-copy-free sequence.
-
-  If p is null, realloc is equivalent to malloc.
-
-  If space is not available, realloc returns null, errno is set (if on
-  ANSI) and p is NOT freed.
-
-  if n is for fewer bytes than already held by p, the newly unused
-  space is lopped off and freed if possible.  realloc with a size
-  argument of zero (re)allocates a minimum-sized chunk.
-
-  The old unix realloc convention of allowing the last-free'd chunk
-  to be used as an argument to realloc is not supported.
-*/
-
-void* dlrealloc(void*, size_t);
-
-/*
-  memalign(size_t alignment, size_t n);
-  Returns a pointer to a newly allocated chunk of n bytes, aligned
-  in accord with the alignment argument.
-
-  The alignment argument should be a power of two. If the argument is
-  not a power of two, the nearest greater power is used.
-  8-byte alignment is guaranteed by normal malloc calls, so don't
-  bother calling memalign with an argument of 8 or less.
-
-  Overreliance on memalign is a sure way to fragment space.
-*/
-void* dlmemalign(size_t, size_t);
-
-/*
-  valloc(size_t n);
-  Equivalent to memalign(pagesize, n), where pagesize is the page
-  size of the system. If the pagesize is unknown, 4096 is used.
-*/
-void* dlvalloc(size_t);
-
-/*
-  mallopt(int parameter_number, int parameter_value)
-  Sets tunable parameters The format is to provide a
-  (parameter-number, parameter-value) pair.  mallopt then sets the
-  corresponding parameter to the argument value if it can (i.e., so
-  long as the value is meaningful), and returns 1 if successful else
-  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
-  normally defined in malloc.h.  None of these are use in this malloc,
-  so setting them has no effect. But this malloc also supports other
-  options in mallopt:
-
-  Symbol            param #  default       allowed param values
-  M_TRIM_THRESHOLD     -1    2*1024*1024   any   (-1U disables trimming)
-  M_GRANULARITY        -2    page size     any power of 2 >= page size
-  M_MMAP_THRESHOLD     -3    256*1024      any   (or 0 if no MMAP support)
-*/
-int dlmallopt(int, int);
-
-#define M_TRIM_THRESHOLD     (-1)
-#define M_GRANULARITY        (-2)
-#define M_MMAP_THRESHOLD     (-3)
-
-
-/*
-  malloc_footprint();
-  Returns the number of bytes obtained from the system.  The total
-  number of bytes allocated by malloc, realloc etc., is less than this
-  value. Unlike mallinfo, this function returns only a precomputed
-  result, so can be called frequently to monitor memory consumption.
-  Even if locks are otherwise defined, this function does not use them,
-  so results might not be up to date.
-*/
-size_t dlmalloc_footprint(void);
-size_t dlmalloc_max_footprint(void);
-
-#if !NO_MALLINFO
-/*
-  mallinfo()
-  Returns (by copy) a struct containing various summary statistics:
-
-  arena:     current total non-mmapped bytes allocated from system
-  ordblks:   the number of free chunks
-  smblks:    always zero.
-  hblks:     current number of mmapped regions
-  hblkhd:    total bytes held in mmapped regions
-  usmblks:   the maximum total allocated space. This will be greater
-               than current total if trimming has occurred.
-  fsmblks:   always zero
-  uordblks:  current total allocated space (normal or mmapped)
-  fordblks:  total free space
-  keepcost:  the maximum number of bytes that could ideally be released
-               back to system via malloc_trim. ("ideally" means that
-               it ignores page restrictions etc.)
-
-  Because these fields are ints, but internal bookkeeping may
-  be kept as longs, the reported values may wrap around zero and
-  thus be inaccurate.
-*/
-#ifndef HAVE_USR_INCLUDE_MALLOC_H
-#ifndef _MALLOC_H
-#ifndef MALLINFO_FIELD_TYPE
-#define MALLINFO_FIELD_TYPE size_t
-#endif /* MALLINFO_FIELD_TYPE */
-struct mallinfo {
-  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
-  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
-  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
-  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
-  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
-  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
-  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
-  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
-  MALLINFO_FIELD_TYPE fordblks; /* total free space */
-  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
-};
-#endif /* _MALLOC_H */
-#endif /* HAVE_USR_INCLUDE_MALLOC_H */
-
-struct mallinfo dlmallinfo(void);
-#endif /* NO_MALLINFO */
-
-/*
-  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
-
-  independent_calloc is similar to calloc, but instead of returning a
-  single cleared space, it returns an array of pointers to n_elements
-  independent elements that can hold contents of size elem_size, each
-  of which starts out cleared, and can be independently freed,
-  realloc'ed etc. The elements are guaranteed to be adjacently
-  allocated (this is not guaranteed to occur with multiple callocs or
-  mallocs), which may also improve cache locality in some
-  applications.
-
-  The "chunks" argument is optional (i.e., may be null, which is
-  probably the most typical usage). If it is null, the returned array
-  is itself dynamically allocated and should also be freed when it is
-  no longer needed. Otherwise, the chunks array must be of at least
-  n_elements in length. It is filled in with the pointers to the
-  chunks.
-
-  In either case, independent_calloc returns this pointer array, or
-  null if the allocation failed.  If n_elements is zero and "chunks"
-  is null, it returns a chunk representing an array with zero elements
-  (which should be freed if not wanted).
-
-  Each element must be individually freed when it is no longer
-  needed. If you'd like to instead be able to free all at once, you
-  should instead use regular calloc and assign pointers into this
-  space to represent elements.  (In this case though, you cannot
-  independently free elements.)
-
-  independent_calloc simplifies and speeds up implementations of many
-  kinds of pools.  It may also be useful when constructing large data
-  structures that initially have a fixed number of fixed-sized nodes,
-  but the number is not known at compile time, and some of the nodes
-  may later need to be freed. For example:
-
-  struct Node { int item; struct Node* next; };
-
-  struct Node* build_list() {
-    struct Node** pool;
-    int n = read_number_of_nodes_needed();
-    if (n <= 0) return 0;
-    pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
-    if (pool == 0) die();
-    // organize into a linked list...
-    struct Node* first = pool[0];
-    for (i = 0; i < n-1; ++i)
-      pool[i]->next = pool[i+1];
-    free(pool);     // Can now free the array (or not, if it is needed later)
-    return first;
-  }
-*/
-void** dlindependent_calloc(size_t, size_t, void**);
-
-/*
-  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
-
-  independent_comalloc allocates, all at once, a set of n_elements
-  chunks with sizes indicated in the "sizes" array.    It returns
-  an array of pointers to these elements, each of which can be
-  independently freed, realloc'ed etc. The elements are guaranteed to
-  be adjacently allocated (this is not guaranteed to occur with
-  multiple callocs or mallocs), which may also improve cache locality
-  in some applications.
-
-  The "chunks" argument is optional (i.e., may be null). If it is null
-  the returned array is itself dynamically allocated and should also
-  be freed when it is no longer needed. Otherwise, the chunks array
-  must be of at least n_elements in length. It is filled in with the
-  pointers to the chunks.
-
-  In either case, independent_comalloc returns this pointer array, or
-  null if the allocation failed.  If n_elements is zero and chunks is
-  null, it returns a chunk representing an array with zero elements
-  (which should be freed if not wanted).
-
-  Each element must be individually freed when it is no longer
-  needed. If you'd like to instead be able to free all at once, you
-  should instead use a single regular malloc, and assign pointers at
-  particular offsets in the aggregate space. (In this case though, you
-  cannot independently free elements.)
-
-  independent_comallac differs from independent_calloc in that each
-  element may have a different size, and also that it does not
-  automatically clear elements.
-
-  independent_comalloc can be used to speed up allocation in cases
-  where several structs or objects must always be allocated at the
-  same time.  For example:
-
-  struct Head { ... }
-  struct Foot { ... }
-
-  void send_message(char* msg) {
-    int msglen = strlen(msg);
-    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
-    void* chunks[3];
-    if (independent_comalloc(3, sizes, chunks) == 0)
-      die();
-    struct Head* head = (struct Head*)(chunks[0]);
-    char* body = (char*)(chunks[1]);
-    struct Foot* foot = (struct Foot*)(chunks[2]);
-    // ...
-  }
-
-  In general though, independent_comalloc is worth using only for
-  larger values of n_elements. For small values, you probably won't
-  detect enough difference from series of malloc calls to bother.
-
-  Overuse of independent_comalloc can increase overall memory usage,
-  since it cannot reuse existing noncontiguous small chunks that
-  might be available for some of the elements.
-*/
-void** dlindependent_comalloc(size_t, size_t*, void**);
-
-
-/*
-  pvalloc(size_t n);
-  Equivalent to valloc(minimum-page-that-holds(n)), that is,
-  round up n to nearest pagesize.
+/** @}
 */
-void* dlpvalloc(size_t);
-
-/*
-  malloc_trim(size_t pad);
-
-  If possible, gives memory back to the system (via negative arguments
-  to sbrk) if there is unused memory at the `high' end of the malloc
-  pool or in unused MMAP segments. You can call this after freeing
-  large blocks of memory to potentially reduce the system-level memory
-  requirements of a program. However, it cannot guarantee to reduce
-  memory. Under some allocation patterns, some large free blocks of
-  memory will be locked between two used chunks, so they cannot be
-  given back to the system.
-
-  The `pad' argument to malloc_trim represents the amount of free
-  trailing space to leave untrimmed. If this argument is zero, only
-  the minimum amount of memory to maintain internal data structures
-  will be left.  Non-zero arguments can be supplied to maintain enough
-  trailing space to service future expected allocations without having
-  to re-obtain memory from the system.
-
-  Malloc_trim returns 1 if it actually released any memory, else 0.
-*/
-int dlmalloc_trim(size_t);
-
-/*
-  malloc_usable_size(void* p);
-
-  Returns the number of bytes you can actually use in
-  an allocated chunk, which may be more than you requested (although
-  often not) due to alignment and minimum size constraints.
-  You can use this many bytes without worrying about
-  overwriting other allocated objects. This is not a particularly great
-  programming practice. malloc_usable_size can be more useful in
-  debugging and assertions, for example:
-
-  p = malloc(n);
-  assert(malloc_usable_size(p) >= 256);
-*/
-size_t dlmalloc_usable_size(void*);
-
-/*
-  malloc_stats();
-  Prints on stderr the amount of space obtained from the system (both
-  via sbrk and mmap), the maximum amount (which may be more than
-  current if malloc_trim and/or munmap got called), and the current
-  number of bytes allocated via malloc (or realloc, etc) but not yet
-  freed. Note that this is the number of bytes allocated, not the
-  number requested. It will be larger than the number requested
-  because of alignment and bookkeeping overhead. Because it includes
-  alignment wastage as being in use, this figure may be greater than
-  zero even when no user-level chunks are allocated.
-
-  The reported current and maximum system memory can be inaccurate if
-  a program makes other calls to system memory allocation functions
-  (normally sbrk) outside of malloc.
-
-  malloc_stats prints only the most commonly interesting statistics.
-  More information can be obtained by calling mallinfo.
-*/
-void dlmalloc_stats(void);
-
-#endif /* !ONLY_MSPACES */
-
-#if MSPACES
-
-/*
-  mspace is an opaque type representing an independent
-  region of space that supports mspace_malloc, etc.
-*/
-typedef void* mspace;
-
-/*
-  create_mspace creates and returns a new independent space with the
-  given initial capacity, or, if 0, the default granularity size.  It
-  returns null if there is no system memory available to create the
-  space.  If argument locked is non-zero, the space uses a separate
-  lock to control access. The capacity of the space will grow
-  dynamically as needed to service mspace_malloc requests.  You can
-  control the sizes of incremental increases of this space by
-  compiling with a different DEFAULT_GRANULARITY or dynamically
-  setting with mallopt(M_GRANULARITY, value).
-*/
-mspace create_mspace(size_t capacity, int locked);
-
-/*
-  destroy_mspace destroys the given space, and attempts to return all
-  of its memory back to the system, returning the total number of
-  bytes freed. After destruction, the results of access to all memory
-  used by the space become undefined.
-*/
-size_t destroy_mspace(mspace msp);
-
-/*
-  create_mspace_with_base uses the memory supplied as the initial base
-  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
-  space is used for bookkeeping, so the capacity must be at least this
-  large. (Otherwise 0 is returned.) When this initial space is
-  exhausted, additional memory will be obtained from the system.
-  Destroying this space will deallocate all additionally allocated
-  space (if possible) but not the initial base.
-*/
-mspace create_mspace_with_base(void* base, size_t capacity, int locked);
-
-/*
-  mspace_malloc behaves as malloc, but operates within
-  the given space.
-*/
-void* mspace_malloc(mspace msp, size_t bytes);
-
-/*
-  mspace_free behaves as free, but operates within
-  the given space.
-
-  If compiled with FOOTERS==1, mspace_free is not actually needed.
-  free may be called instead of mspace_free because freed chunks from
-  any space are handled by their originating spaces.
-*/
-void mspace_free(mspace msp, void* mem);
-
-/*
-  mspace_realloc behaves as realloc, but operates within
-  the given space.
-
-  If compiled with FOOTERS==1, mspace_realloc is not actually
-  needed. realloc may be called instead of mspace_realloc because
-  realloced chunks from any space are handled by their originating
-  spaces.
-*/
-void* mspace_realloc(mspace msp, void* mem, size_t newsize);
-
-/*
-  mspace_calloc behaves as calloc, but operates within
-  the given space.
-*/
-void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
-
-/*
-  mspace_memalign behaves as memalign, but operates within
-  the given space.
-*/
-void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
-
-/*
-  mspace_independent_calloc behaves as independent_calloc, but
-  operates within the given space.
-*/
-void** mspace_independent_calloc(mspace msp, size_t n_elements,
-                                 size_t elem_size, void* chunks[]);
-
-/*
-  mspace_independent_comalloc behaves as independent_comalloc, but
-  operates within the given space.
-*/
-void** mspace_independent_comalloc(mspace msp, size_t n_elements,
-                                   size_t sizes[], void* chunks[]);
-
-/*
-  mspace_footprint() returns the number of bytes obtained from the
-  system for this space.
-*/
-size_t mspace_footprint(mspace msp);
-
-
-#if !NO_MALLINFO
-/*
-  mspace_mallinfo behaves as mallinfo, but reports properties of
-  the given space.
-*/
-struct mallinfo mspace_mallinfo(mspace msp);
-#endif /* NO_MALLINFO */
-
-/*
-  mspace_malloc_stats behaves as malloc_stats, but reports
-  properties of the given space.
-*/
-void mspace_malloc_stats(mspace msp);
-
-/*
-  mspace_trim behaves as malloc_trim, but
-  operates within the given space.
-*/
-int mspace_trim(mspace msp, size_t pad);
-
-/*
-  An alias for mallopt.
-*/
-int mspace_mallopt(int, int);
-
-#endif /* MSPACES */
-
-#ifdef __cplusplus
-};  /* end of extern "C" */
-#endif
-
-#endif /* MALLOC_280_H */
-
-
-/** @}
- */
-
-
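
For orientation, a minimal caller of the interface declared by the new header might look like the sketch below. This is not part of the changeset; it relies only on the declarations shown above and assumes the conventional C semantics they suggest (malloc and realloc return a null pointer on failure, memalign takes the alignment before the size). The remaining declarations, __heap_init() and get_max_heap_addr(), appear to be runtime hooks and are not exercised here.

#include <malloc.h>  /* the header added by this changeset */

int main(void)
{
	int i;
	int *numbers;
	int *grown;
	void *aligned;

	/* void *malloc(const size_t size): plain allocation. */
	numbers = malloc(10 * sizeof(int));
	if (!numbers)
		return 1;

	for (i = 0; i < 10; i++)
		numbers[i] = i;

	/* void *realloc(const void *addr, const size_t size): the block may
	   move, so keep the returned pointer and free the old one only if
	   the call fails. */
	grown = realloc(numbers, 20 * sizeof(int));
	if (!grown) {
		free(numbers);
		return 1;
	}
	numbers = grown;

	/* void *memalign(const size_t align, const size_t size):
	   the alignment comes first. */
	aligned = memalign(16, 256);
	if (aligned)
		free(aligned);

	free(numbers);
	return 0;
}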
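
The removed Doug Lea header also documented the optional mspace API (independent allocation arenas, available when malloc.c is built with MSPACES defined). The following sketch is based only on the declarations quoted above and merely illustrates how that removed interface was meant to be used; it is not code from the repository.

#include <malloc.h>  /* the pre-change dlmalloc-2.8.x header, built with MSPACES */

int use_private_heap(void)
{
	int i;
	int *values;
	size_t released;

	/* An independent arena with a 1 MiB initial capacity and its own lock. */
	mspace msp = create_mspace(1024 * 1024, 1);
	if (msp == 0)
		return -1;

	/* Allocate and release within that arena only. */
	values = (int *) mspace_malloc(msp, 100 * sizeof(int));
	if (values != 0) {
		for (i = 0; i < 100; i++)
			values[i] = i;
		mspace_free(msp, values);
	}

	/* Tearing down the arena returns the number of bytes given back. */
	released = destroy_mspace(msp);
	(void) released;
	return 0;
}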