Line data Source code
1 : /*
2 : * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 : * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 : * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
5 : * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
6 : *
7 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 : *
10 : * Permission is hereby granted to use or copy this program
11 : * for any purpose, provided the above notices are retained on all copies.
12 : * Permission to modify the code and to distribute modified code is granted,
13 : * provided the above notices are retained, and a notice that the code was
14 : * modified is included with the above copyright notice.
15 : */
16 :
17 : #include "private/gc_priv.h"
18 :
19 : /*
20 : * These are extra allocation routines which are likely to be less
21 : * frequently used than those in malloc.c. They are separate in the
22 : * hope that the .o file will be excluded from statically linked
23 : * executables. We should probably break this up further.
24 : */
25 :
26 : #include <stdio.h>
27 : #include <string.h>
28 :
29 : #ifdef MSWINCE
30 : # ifndef WIN32_LEAN_AND_MEAN
31 : # define WIN32_LEAN_AND_MEAN 1
32 : # endif
33 : # define NOSERVICE
34 : # include <windows.h>
35 : #else
36 : # include <errno.h>
37 : #endif
38 :
39 : /* Some externally visible but unadvertised variables to allow access to */
40 : /* free lists from inlined allocators without including gc_priv.h */
41 : /* or introducing dependencies on internal data structure layouts. */
42 : void ** const GC_objfreelist_ptr = GC_objfreelist;
43 : void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
44 : void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
45 : # ifdef ATOMIC_UNCOLLECTABLE
46 : void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
47 : # endif
48 :
49 :
50 0 : STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
51 : {
52 0 : switch(knd) {
53 : # ifdef STUBBORN_ALLOC
54 : case STUBBORN:
55 : return(GC_malloc_stubborn((size_t)lb));
56 : # endif
57 : case PTRFREE:
58 0 : return(GC_malloc_atomic((size_t)lb));
59 : case NORMAL:
60 0 : return(GC_malloc((size_t)lb));
61 : case UNCOLLECTABLE:
62 0 : return(GC_malloc_uncollectable((size_t)lb));
63 : # ifdef ATOMIC_UNCOLLECTABLE
64 : case AUNCOLLECTABLE:
65 0 : return(GC_malloc_atomic_uncollectable((size_t)lb));
66 : # endif /* ATOMIC_UNCOLLECTABLE */
67 : default:
68 0 : return(GC_generic_malloc(lb,knd));
69 : }
70 : }
71 :
72 : /* Change the size of the block pointed to by p to contain at least */
73 : /* lb bytes. The object may be (and quite likely will be) moved. */
74 : * The kind (e.g. atomic) is the same as that of the old object. */
75 : /* Shrinking of large blocks is not implemented well. */
76 0 : GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
77 : {
78 : struct hblk * h;
79 : hdr * hhdr;
80 : size_t sz; /* Current size in bytes */
81 : size_t orig_sz; /* Original sz in bytes */
82 : int obj_kind;
83 :
84 0 : if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
85 0 : h = HBLKPTR(p);
86 0 : hhdr = HDR(h);
87 0 : sz = hhdr -> hb_sz;
88 0 : obj_kind = hhdr -> hb_obj_kind;
89 0 : orig_sz = sz;
90 :
91 0 : if (sz > MAXOBJBYTES) {
92 : /* Round it up to the next whole heap block */
93 : word descr;
94 :
95 0 : sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
96 0 : hhdr -> hb_sz = sz;
97 0 : descr = GC_obj_kinds[obj_kind].ok_descriptor;
98 0 : if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
99 0 : hhdr -> hb_descr = descr;
100 : # ifdef MARK_BIT_PER_OBJ
101 : GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
102 : # else
103 : GC_ASSERT(hhdr -> hb_large_block &&
104 : hhdr -> hb_map[ANY_INDEX] == 1);
105 : # endif
106 0 : if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
107 : /* Extra area is already cleared by GC_alloc_large_and_clear. */
108 : }
109 0 : if (ADD_SLOP(lb) <= sz) {
110 0 : if (lb >= (sz >> 1)) {
111 : # ifdef STUBBORN_ALLOC
112 : if (obj_kind == STUBBORN) GC_change_stubborn(p);
113 : # endif
114 0 : if (orig_sz > lb) {
115 : /* Clear unneeded part of object to avoid bogus pointer */
116 : /* tracing. */
117 : /* Safe for stubborn objects. */
118 0 : BZERO(((ptr_t)p) + lb, orig_sz - lb);
119 : }
120 0 : return(p);
121 : } else {
122 : /* shrink */
123 : void * result =
124 0 : GC_generic_or_special_malloc((word)lb, obj_kind);
125 :
126 0 : if (result == 0) return(0);
127 : /* Could also return original object. But this */
128 : /* gives the client warning of imminent disaster. */
129 0 : BCOPY(p, result, lb);
130 : # ifndef IGNORE_FREE
131 0 : GC_free(p);
132 : # endif
133 0 : return(result);
134 : }
135 : } else {
136 : /* grow */
137 : void * result =
138 0 : GC_generic_or_special_malloc((word)lb, obj_kind);
139 :
140 0 : if (result == 0) return(0);
141 0 : BCOPY(p, result, sz);
142 : # ifndef IGNORE_FREE
143 0 : GC_free(p);
144 : # endif
145 0 : return(result);
146 : }
147 : }
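/* Illustrative sketch (hypothetical client code, not part of the covered   */
/* file): typical use of GC_realloc.  The returned pointer must replace the */
/* old one, since the object may have been moved, and GC_realloc(NULL, n)   */
/* behaves like GC_malloc(n).                                               */
#include <stddef.h>
#include "gc.h"

static int *grow_int_array(int *arr, size_t new_count)
{
    int *p = (int *)GC_realloc(arr, new_count * sizeof(int));

    /* A NULL result means the request could not be satisfied; the old  */
    /* object, if any, remains valid in that case.                      */
    return p;
}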
148 :
149 : # if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
150 : # define REDIRECT_REALLOC GC_realloc
151 : # endif
152 :
153 : # ifdef REDIRECT_REALLOC
154 :
155 : /* As with malloc, avoid two levels of extra calls here. */
156 :
157 : # define GC_debug_realloc_replacement(p, lb) \
158 : GC_debug_realloc(p, lb, GC_DBG_RA "unknown", 0)
159 :
160 : void * realloc(void * p, size_t lb)
161 : {
162 : return(REDIRECT_REALLOC(p, lb));
163 : }
164 :
165 : # undef GC_debug_realloc_replacement
166 : # endif /* REDIRECT_REALLOC */
167 :
168 :
169 : /* Allocate memory such that only pointers to near the */
170 : /* beginning of the object are considered. */
171 : /* We avoid holding the allocation lock while we clear memory. */
172 0 : GC_INNER void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
173 : {
174 : void *result;
175 : size_t lg;
176 : size_t lb_rounded;
177 : word n_blocks;
178 : GC_bool init;
179 : DCL_LOCK_STATE;
180 :
181 0 : if (SMALL_OBJ(lb))
182 0 : return(GC_generic_malloc((word)lb, k));
183 0 : lg = ROUNDED_UP_GRANULES(lb);
184 0 : lb_rounded = GRANULES_TO_BYTES(lg);
185 0 : if (lb_rounded < lb)
186 0 : return((*GC_get_oom_fn())(lb));
187 0 : n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
188 0 : init = GC_obj_kinds[k].ok_init;
189 0 : if (GC_have_errors) GC_print_all_errors();
190 0 : GC_INVOKE_FINALIZERS();
191 0 : LOCK();
192 0 : result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
193 0 : if (0 != result) {
194 0 : if (GC_debugging_started) {
195 0 : BZERO(result, n_blocks * HBLKSIZE);
196 : } else {
197 : # ifdef THREADS
198 : /* Clear any memory that might be used for GC descriptors */
199 : /* before we release the lock. */
200 0 : ((word *)result)[0] = 0;
201 0 : ((word *)result)[1] = 0;
202 0 : ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
203 0 : ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
204 : # endif
205 : }
206 : }
207 0 : GC_bytes_allocd += lb_rounded;
208 0 : if (0 == result) {
209 0 : GC_oom_func oom_fn = GC_oom_fn;
210 0 : UNLOCK();
211 0 : return((*oom_fn)(lb));
212 : } else {
213 0 : UNLOCK();
214 0 : if (init && !GC_debugging_started) {
215 0 : BZERO(result, n_blocks * HBLKSIZE);
216 : }
217 0 : return(result);
218 : }
219 : }
220 :
221 0 : GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
222 : {
223 0 : return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
224 : }
225 :
226 0 : GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
227 : {
228 0 : return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
229 : }
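/* Illustrative sketch (hypothetical client code): with the                 */
/* _ignore_off_page allocators the caller promises to keep a pointer to,    */
/* or near, the beginning of the object reachable for its whole lifetime;   */
/* an interior pointer far from the start is not enough to keep it alive.   */
#include <stddef.h>
#include "gc.h"

struct big_scan_buffer {
    char *base;    /* points at the beginning; this keeps the object live */
    char *cursor;  /* interior pointer used while scanning                */
};

static void big_scan_buffer_init(struct big_scan_buffer *b, size_t len)
{
    b->base = (char *)GC_malloc_atomic_ignore_off_page(len); /* pointer-free data */
    b->cursor = b->base;
}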
230 :
231 : /* Increment GC_bytes_allocd from code that doesn't have direct access */
232 : /* to GC_arrays. */
233 0 : GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
234 : {
235 0 : GC_bytes_allocd += n;
236 0 : }
237 :
238 : /* The same for GC_bytes_freed. */
239 0 : GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
240 : {
241 0 : GC_bytes_freed += n;
242 0 : }
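/* Illustrative sketch (hypothetical helper): client code that needs to     */
/* adjust the collector's byte counts, e.g. around a privately managed      */
/* object cache, can use the two entry points above without touching GC     */
/* internals.                                                               */
#include <stddef.h>
#include "gc.h"

static void note_cache_traffic(size_t bytes_handed_out, size_t bytes_taken_back)
{
    if (bytes_handed_out != 0) GC_incr_bytes_allocd(bytes_handed_out);
    if (bytes_taken_back != 0) GC_incr_bytes_freed(bytes_taken_back);
}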
243 :
244 : # ifdef PARALLEL_MARK
245 : STATIC volatile signed_word GC_bytes_allocd_tmp = 0;
246 : /* Number of bytes of memory allocated since */
247 : /* we released the GC lock. Instead of */
248 : /* reacquiring the GC lock just to add this in, */
249 : /* we add it in the next time we reacquire */
250 : /* the lock. (Atomically adding it doesn't */
251 : /* work, since we would have to atomically */
252 : /* update it in GC_malloc, which is too */
253 : /* expensive.) */
254 : # endif /* PARALLEL_MARK */
255 :
256 : /* Return a list of 1 or more objects of the indicated size, linked */
257 : /* through the first word in the object. This has the advantage that */
258 : /* it acquires the allocation lock only once, and may greatly reduce */
259 : /* time wasted contending for the allocation lock. Typical usage would */
260 : /* be in a thread that requires many items of the same size. It would */
261 : /* keep its own free list in thread-local storage, and call */
262 : /* GC_malloc_many or friends to replenish it. (We do not round up */
263 : /* object sizes, since a call indicates the intention to consume many */
264 : /* objects of exactly this size.) */
265 : /* We assume that the size is a multiple of GRANULE_BYTES. */
266 : /* We return the free list by assigning it to *result, since it is */
267 : /* not safe to return, e.g., a linked list of pointer-free objects */
268 : /* directly: the collector would not retain the entire list if it */
269 : /* were invoked just as we were returning. */
270 : /* Note that the client should usually clear the link field. */
271 40389 : GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
272 : {
273 : void *op;
274 : void *p;
275 : void **opp;
276 : size_t lw; /* Length in words. */
277 : size_t lg; /* Length in granules. */
278 40389 : signed_word my_bytes_allocd = 0;
279 40389 : struct obj_kind * ok = &(GC_obj_kinds[k]);
280 : DCL_LOCK_STATE;
281 :
282 : GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
283 40389 : if (!SMALL_OBJ(lb)) {
284 0 : op = GC_generic_malloc(lb, k);
285 0 : if(0 != op) obj_link(op) = 0;
286 0 : *result = op;
287 0 : return;
288 : }
289 40389 : lw = BYTES_TO_WORDS(lb);
290 40389 : lg = BYTES_TO_GRANULES(lb);
291 40389 : if (GC_have_errors) GC_print_all_errors();
292 40389 : GC_INVOKE_FINALIZERS();
293 40389 : LOCK();
294 40389 : if (!GC_is_initialized) GC_init();
295 : /* Do our share of marking work */
296 40389 : if (GC_incremental && !GC_dont_gc) {
297 0 : ENTER_GC();
298 0 : GC_collect_a_little_inner(1);
299 0 : EXIT_GC();
300 : }
301 : /* First see if we can reclaim a page of objects waiting to be */
302 : /* reclaimed. */
303 : {
304 40389 : struct hblk ** rlh = ok -> ok_reclaim_list;
305 : struct hblk * hbp;
306 : hdr * hhdr;
307 :
308 40389 : rlh += lg;
309 80778 : while ((hbp = *rlh) != 0) {
310 4771 : hhdr = HDR(hbp);
311 4771 : *rlh = hhdr -> hb_next;
312 : GC_ASSERT(hhdr -> hb_sz == lb);
313 4771 : hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
314 : # ifdef PARALLEL_MARK
315 : if (GC_parallel) {
316 : signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;
317 :
318 : GC_ASSERT(my_bytes_allocd_tmp >= 0);
319 : /* We only decrement it while holding the GC lock. */
320 : /* Thus we can't accidentally adjust it down in more */
321 : /* than one thread simultaneously. */
322 : if (my_bytes_allocd_tmp != 0) {
323 : (void)AO_fetch_and_add(
324 : (volatile void *)(&GC_bytes_allocd_tmp),
325 : (AO_t)(-my_bytes_allocd_tmp));
326 : GC_bytes_allocd += my_bytes_allocd_tmp;
327 : }
328 : GC_acquire_mark_lock();
329 : ++ GC_fl_builder_count;
330 : UNLOCK();
331 : GC_release_mark_lock();
332 : }
333 : # endif
334 4771 : op = GC_reclaim_generic(hbp, hhdr, lb,
335 : ok -> ok_init, 0, &my_bytes_allocd);
336 4771 : if (op != 0) {
337 : /* We also reclaimed memory, so we need to adjust */
338 : /* that count. */
339 : /* This update ought to be atomic; since it is not, the */
340 : /* resulting count may be slightly inaccurate. */
341 4771 : GC_bytes_found += my_bytes_allocd;
342 : # ifdef PARALLEL_MARK
343 : if (GC_parallel) {
344 : *result = op;
345 : (void)AO_fetch_and_add(
346 : (volatile AO_t *)(&GC_bytes_allocd_tmp),
347 : (AO_t)(my_bytes_allocd));
348 : GC_acquire_mark_lock();
349 : -- GC_fl_builder_count;
350 : if (GC_fl_builder_count == 0) GC_notify_all_builder();
351 : GC_release_mark_lock();
352 : (void) GC_clear_stack(0);
353 : return;
354 : }
355 : # endif
356 4771 : GC_bytes_allocd += my_bytes_allocd;
357 4771 : goto out;
358 : }
359 : # ifdef PARALLEL_MARK
360 : if (GC_parallel) {
361 : GC_acquire_mark_lock();
362 : -- GC_fl_builder_count;
363 : if (GC_fl_builder_count == 0) GC_notify_all_builder();
364 : GC_release_mark_lock();
365 : LOCK();
366 : /* GC lock is needed for reclaim list access. We */
367 : /* must decrement fl_builder_count before reacquiring the */
368 : /* GC lock. Hopefully this path is rare. */
369 : }
370 : # endif
371 : }
372 : }
373 : /* Next try to use a prefix of the global free list if there is one. */
374 : /* We don't refill it, but we need to use it up before allocating */
375 : /* a new block ourselves. */
376 35618 : opp = &(GC_obj_kinds[k].ok_freelist[lg]);
377 35618 : if ( (op = *opp) != 0 ) {
378 2012 : *opp = 0;
379 2012 : my_bytes_allocd = 0;
380 78611 : for (p = op; p != 0; p = obj_link(p)) {
381 76599 : my_bytes_allocd += lb;
382 76599 : if ((word)my_bytes_allocd >= HBLKSIZE) {
383 0 : *opp = obj_link(p);
384 0 : obj_link(p) = 0;
385 0 : break;
386 : }
387 : }
388 2012 : GC_bytes_allocd += my_bytes_allocd;
389 2012 : goto out;
390 : }
391 : /* Next try to allocate a new block worth of objects of this size. */
392 : {
393 33606 : struct hblk *h = GC_allochblk(lb, k, 0);
394 33606 : if (h != 0) {
395 33543 : if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
396 33543 : GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
397 : # ifdef PARALLEL_MARK
398 : if (GC_parallel) {
399 : GC_acquire_mark_lock();
400 : ++ GC_fl_builder_count;
401 : UNLOCK();
402 : GC_release_mark_lock();
403 :
404 : op = GC_build_fl(h, lw,
405 : (ok -> ok_init || GC_debugging_started), 0);
406 :
407 : *result = op;
408 : GC_acquire_mark_lock();
409 : -- GC_fl_builder_count;
410 : if (GC_fl_builder_count == 0) GC_notify_all_builder();
411 : GC_release_mark_lock();
412 : (void) GC_clear_stack(0);
413 : return;
414 : }
415 : # endif
416 33543 : op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
417 33543 : goto out;
418 : }
419 : }
420 :
421 : /* As a last attempt, try allocating a single object. Note that */
422 : /* this may trigger a collection or expand the heap. */
423 63 : op = GC_generic_malloc_inner(lb, k);
424 63 : if (0 != op) obj_link(op) = 0;
425 :
426 : out:
427 40389 : *result = op;
428 40389 : UNLOCK();
429 40389 : (void) GC_clear_stack(0);
430 : }
431 :
432 : /* Note that the "atomic" version of this would be unsafe, since the */
433 : /* links would not be seen by the collector. */
434 0 : GC_API void * GC_CALL GC_malloc_many(size_t lb)
435 : {
436 : void *result;
437 0 : GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1)
438 : & ~(GRANULE_BYTES-1),
439 : NORMAL, &result);
440 0 : return result;
441 : }
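/* Illustrative sketch of the usage pattern described above: a thread-local */
/* free list replenished with GC_malloc_many() and consumed through the     */
/* GC_NEXT() link macro from gc.h.  The names OBJ_BYTES, local_free_list    */
/* and local_alloc are hypothetical.                                        */
#include "gc.h"

#define OBJ_BYTES 32

static void *local_free_list = 0;  /* one such list per thread in real code */

static void *local_alloc(void)
{
    void *p;

    if (local_free_list == 0) {
        local_free_list = GC_malloc_many(OBJ_BYTES);
        if (local_free_list == 0) return 0;    /* out of memory */
    }
    p = local_free_list;
    local_free_list = GC_NEXT(p);
    GC_NEXT(p) = 0;     /* clear the link field, as recommended above */
    return p;
}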
442 :
443 : /* Not well tested or integrated. */
444 : /* Debug version is tricky and currently missing. */
445 : #include <limits.h>
446 :
447 0 : GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
448 : {
449 : size_t new_lb;
450 : size_t offset;
451 : ptr_t result;
452 :
453 0 : if (align <= GRANULE_BYTES) return GC_malloc(lb);
454 0 : if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
455 0 : if (align > HBLKSIZE) {
456 0 : return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
457 : }
458 0 : return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
459 : /* Will be HBLKSIZE aligned. */
460 : }
461 : /* We could also try to make sure that the real rounded-up object size */
462 : /* is a multiple of align. That would be correct up to HBLKSIZE. */
463 0 : new_lb = lb + align - 1;
464 0 : result = GC_malloc(new_lb);
465 0 : offset = (word)result % align;
466 0 : if (offset != 0) {
467 0 : offset = align - offset;
468 0 : if (!GC_all_interior_pointers) {
469 0 : if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
470 0 : GC_register_displacement(offset);
471 : }
472 : }
473 0 : result = (void *) ((ptr_t)result + offset);
474 : GC_ASSERT((word)result % align == 0);
475 0 : return result;
476 : }
477 :
478 : /* This one exists largely to redirect posix_memalign for leak detection. */
479 0 : GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
480 : {
481 : /* Check alignment properly. */
482 0 : if (((align - 1) & align) != 0 || align < sizeof(void *)) {
483 : # ifdef MSWINCE
484 : return ERROR_INVALID_PARAMETER;
485 : # else
486 0 : return EINVAL;
487 : # endif
488 : }
489 :
490 0 : if ((*memptr = GC_memalign(align, lb)) == NULL) {
491 : # ifdef MSWINCE
492 : return ERROR_NOT_ENOUGH_MEMORY;
493 : # else
494 0 : return ENOMEM;
495 : # endif
496 : }
497 0 : return 0;
498 : }
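/* Illustrative sketch (hypothetical helper): GC_posix_memalign() follows   */
/* the posix_memalign() contract -- align must be a power of two and a      */
/* multiple of sizeof(void *) -- and returns 0 on success with the result   */
/* stored through memptr.                                                   */
#include <stddef.h>
#include "gc.h"

static void *alloc_aligned_64(size_t len)
{
    void *p = 0;

    return GC_posix_memalign(&p, 64, len) == 0 ? p : 0;
}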
499 :
500 : #ifdef ATOMIC_UNCOLLECTABLE
501 : /* Allocate lb bytes of pointer-free, untraced, uncollectable data. */
502 : /* This is normally roughly equivalent to the system malloc. */
503 : /* But it may be useful if malloc is redefined. */
504 0 : GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
505 : {
506 : void *op;
507 : void **opp;
508 : size_t lg;
509 : DCL_LOCK_STATE;
510 :
511 0 : if( SMALL_OBJ(lb) ) {
512 0 : if (EXTRA_BYTES != 0 && lb != 0) lb--;
513 : /* We don't need the extra byte, since this won't be */
514 : /* collected anyway. */
515 0 : lg = GC_size_map[lb];
516 0 : opp = &(GC_auobjfreelist[lg]);
517 0 : LOCK();
518 0 : if( (op = *opp) != 0 ) {
519 0 : *opp = obj_link(op);
520 0 : obj_link(op) = 0;
521 0 : GC_bytes_allocd += GRANULES_TO_BYTES(lg);
522 : /* Mark bit was already set while object was on free list. */
523 0 : GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
524 0 : UNLOCK();
525 : } else {
526 0 : UNLOCK();
527 0 : op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
528 : }
529 : GC_ASSERT(0 == op || GC_is_marked(op));
530 0 : return((void *) op);
531 : } else {
532 : hdr * hhdr;
533 :
534 0 : op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
535 0 : if (0 == op) return(0);
536 :
537 : GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
538 0 : hhdr = HDR(op);
539 :
540 0 : LOCK();
541 0 : set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
542 : # ifndef THREADS
543 : GC_ASSERT(hhdr -> hb_n_marks == 0);
544 : # endif
545 0 : hhdr -> hb_n_marks = 1;
546 0 : UNLOCK();
547 0 : return((void *) op);
548 : }
549 : }
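/* Illustrative sketch (hypothetical helpers): atomic-uncollectable objects */
/* are neither traced nor reclaimed automatically, so the client releases   */
/* them explicitly with GC_free(), much as with malloc()/free().            */
#include <stddef.h>
#include "gc.h"

static char *io_buffer_new(size_t len)
{
    return (char *)GC_malloc_atomic_uncollectable(len);
}

static void io_buffer_delete(char *buf)
{
    GC_free(buf);
}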
550 : #endif /* ATOMIC_UNCOLLECTABLE */
551 :
552 : /* Provide a version of strdup() that uses the collector to allocate */
553 : /* the copy of the string. */
554 0 : GC_API char * GC_CALL GC_strdup(const char *s)
555 : {
556 : char *copy;
557 : size_t lb;
558 0 : if (s == NULL) return NULL;
559 0 : lb = strlen(s) + 1;
560 0 : if ((copy = GC_malloc_atomic(lb)) == NULL) {
561 : # ifndef MSWINCE
562 0 : errno = ENOMEM;
563 : # endif
564 0 : return NULL;
565 : }
566 : # ifndef MSWINCE
567 0 : strcpy(copy, s);
568 : # else
569 : /* strcpy() is deprecated in WinCE */
570 : memcpy(copy, s, lb);
571 : # endif
572 0 : return copy;
573 : }
574 :
575 0 : GC_API char * GC_CALL GC_strndup(const char *str, size_t size)
576 : {
577 : char *copy;
578 0 : size_t len = strlen(str); /* str is expected to be non-NULL */
579 0 : if (len > size)
580 0 : len = size;
581 0 : copy = GC_malloc_atomic(len + 1);
582 0 : if (copy == NULL) {
583 : # ifndef MSWINCE
584 0 : errno = ENOMEM;
585 : # endif
586 0 : return NULL;
587 : }
588 0 : BCOPY(str, copy, len);
589 0 : copy[len] = '\0';
590 0 : return copy;
591 : }
592 :
593 : #ifdef GC_REQUIRE_WCSDUP
594 : # include <wchar.h> /* for wcslen() */
595 :
596 : GC_API wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
597 : {
598 : size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
599 : wchar_t *copy = GC_malloc_atomic(lb);
600 : if (copy == NULL) {
601 : # ifndef MSWINCE
602 : errno = ENOMEM;
603 : # endif
604 : return NULL;
605 : }
606 : BCOPY(str, copy, lb);
607 : return copy;
608 : }
609 : #endif /* GC_REQUIRE_WCSDUP */