/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"
#include "gc_inline.h" /* for GC_malloc_kind */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include <string.h>

#ifdef MSWINCE
# ifndef WIN32_LEAN_AND_MEAN
#   define WIN32_LEAN_AND_MEAN 1
# endif
# define NOSERVICE
# include <windows.h>
#else
# include <errno.h>
#endif

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef GC_ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

GC_API int GC_CALL GC_get_kind_and_size(const void * p, size_t * psize)
{
    hdr * hhdr = HDR(p);

    if (psize != NULL) {
        *psize = hhdr -> hb_sz;
    }
    return hhdr -> hb_obj_kind;
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_or_special_malloc(size_t lb,
                                                                  int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN:
            return GC_malloc_stubborn(lb);
#     endif
        case PTRFREE:
        case NORMAL:
            return GC_malloc_kind(lb, knd);
        case UNCOLLECTABLE:
#       ifdef GC_ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
#       endif
            return GC_generic_malloc_uncollectable(lb, knd);
        default:
            return GC_generic_malloc(lb, knd);
    }
}
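
/* Usage sketch (disabled; illustrative only): a client can combine    */
/* GC_get_kind_and_size() with GC_generic_or_special_malloc() to       */
/* duplicate an object with the same kind.  The clone_object helper    */
/* is hypothetical, not part of the GC API.                            */
#if 0
  static void * clone_object(const void * p)
  {
    size_t sz;
    int knd = GC_get_kind_and_size(p, &sz);
    void * q = GC_generic_or_special_malloc(sz, knd);

    if (q != NULL) BCOPY(p, q, sz);     /* copy the old contents */
    return q;
  }
#endif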

/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    void * result;
    size_t sz;      /* Current size in bytes    */
    size_t orig_sz; /* Original sz in bytes     */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    if (0 == lb) /* and p != NULL */ {
#     ifndef IGNORE_FREE
        GC_free(p);
#     endif
      return NULL;
    }
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = sz;
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
#         ifdef MARK_BIT_PER_OBJ
            GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#         else
            GC_ASSERT((hhdr -> hb_flags & LARGE_BLOCK) != 0
                        && hhdr -> hb_map[ANY_INDEX] == 1);
#         endif
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
              /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        }
        /* shrink */
        sz = lb;
    }
    result = GC_generic_or_special_malloc((word)lb, obj_kind);
    if (result != NULL) {
      /* On shrink, we could return the original object instead, but    */
      /* allocating anew gives the client earlier warning of imminent   */
      /* heap exhaustion.                                               */
      BCOPY(p, result, sz);
#     ifndef IGNORE_FREE
        GC_free(p);
#     endif
    }
    return result;
}
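
/* Usage sketch (disabled; illustrative only): a typical GC_realloc    */
/* call.  The returned pointer must be used instead of the old one,    */
/* since the object may have moved; grow_words is hypothetical.        */
#if 0
  static GC_word * grow_words(GC_word * arr, size_t new_count)
  {
    /* Old contents are preserved; the kind (e.g. atomic) is kept. */
    return (GC_word *)GC_realloc(arr, new_count * sizeof(GC_word));
  }
#endif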

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.        */
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, GC_DBG_EXTRAS)

# if !defined(REDIRECT_MALLOC_IN_HEADER)
    void * realloc(void * p, size_t lb)
    {
      return(REDIRECT_REALLOC(p, lb));
    }
# endif

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */

/* Allocate memory such that only pointers to near the          */
/* beginning of the object are considered.                      */
/* We avoid holding the allocation lock while we clear the memory.     */
GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lg;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return GC_generic_malloc(lb, k);
    GC_ASSERT(k < MAXOBJKINDS);
    lg = ROUNDED_UP_GRANULES(lb);
    lb_rounded = GRANULES_TO_BYTES(lg);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (NULL == result) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return (*oom_fn)(lb);
    }

    if (GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    } else {
#       ifdef THREADS
            /* Clear any memory that might be used for GC descriptors   */
            /* before we release the lock.                              */
            ((word *)result)[0] = 0;
            ((word *)result)[1] = 0;
            ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
            ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#       endif
    }
    GC_bytes_allocd += lb_rounded;
    UNLOCK();
    if (init && !GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return(result);
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_ignore_off_page(lb, NORMAL);
}

GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_ignore_off_page(lb, PTRFREE);
}
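
/* Usage sketch (disabled; illustrative only): ignore-off-page         */
/* allocation suits large objects for which the client keeps a         */
/* pointer near the beginning alive; demo_ignore_off_page is           */
/* hypothetical.                                                       */
#if 0
  static void demo_ignore_off_page(void)
  {
    /* Only a pointer to (near) the start keeps the buffer alive;  */
    /* an interior pointer far into the object would not.          */
    char * base = (char *)GC_malloc_ignore_off_page((size_t)1 << 20);

    if (base != NULL) base[0] = 'x';
  }
#endif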

/* Increment GC_bytes_allocd from code that doesn't have direct access  */
/* to GC_arrays.                                                        */
GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed.                         */
GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}
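
/* Usage sketch (disabled; illustrative only): a client allocator      */
/* that hands out or takes back objects without calling GC_malloc      */
/* or GC_free should keep the collector's byte counts accurate.        */
#if 0
  {
    size_t n = 512;             /* bytes moved via a private free list */

    GC_incr_bytes_allocd(n);    /* after handing objects out           */
    GC_incr_bytes_freed(n);     /* after taking objects back           */
  }
#endif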

# ifdef PARALLEL_MARK
    STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
# endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We assume that the size is a multiple of GRANULE_BYTES.              */
/* We return the free-list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;      /* Length in words.     */
    size_t lg;      /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    struct hblk ** rlh;
    DCL_LOCK_STATE;

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (EXPECT(0 != op, TRUE))
            obj_link(op) = 0;
        *result = op;
        return;
    }
    GC_ASSERT(k < MAXOBJKINDS);
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    rlh = ok -> ok_reclaim_list;
    if (rlh != NULL) {
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lg;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                  signed_word my_bytes_allocd_tmp =
                                (signed_word)AO_load(&GC_bytes_allocd_tmp);
                  GC_ASSERT(my_bytes_allocd_tmp >= 0);
                  /* We only decrement it while holding the GC lock.    */
                  /* Thus we can't accidentally adjust it down in more  */
                  /* than one thread simultaneously.                    */

                  if (my_bytes_allocd_tmp != 0) {
                    (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                           (AO_t)(-my_bytes_allocd_tmp));
                    GC_bytes_allocd += my_bytes_allocd_tmp;
                  }
                  GC_acquire_mark_lock();
                  ++ GC_fl_builder_count;
                  UNLOCK();
                  GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
              /* We also reclaimed memory, so we need to adjust that    */
              /* count.  This update ought to be atomic; since it is    */
              /* not, the count may be slightly inaccurate.             */
              GC_bytes_found += my_bytes_allocd;
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  *result = op;
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)my_bytes_allocd);
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
                  GC_release_mark_lock();
                  (void) GC_clear_stack(0);
                  return;
                }
#             endif
              GC_bytes_allocd += my_bytes_allocd;
              goto out;
            }
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                LOCK();
                /* GC lock is needed for reclaim list access.   We      */
                /* must decrement fl_builder_count before reacquiring   */
                /* the lock.  Hopefully this path is rare.              */
              }
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.      */
    /* We don't refill it, but we need to use it up before allocating   */
    /* a new block ourselves.                                           */
      opp = &(GC_obj_kinds[k].ok_freelist[lg]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_bytes_allocd += lb;
          if ((word)my_bytes_allocd >= HBLKSIZE) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size.  */
    {
        struct hblk *h = GC_allochblk(lb, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#         ifdef PARALLEL_MARK
            if (GC_parallel) {
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              GC_release_mark_lock();

              op = GC_build_fl(h, lw,
                        (ok -> ok_init || GC_debugging_started), 0);

              *result = op;
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
            }
#         endif
          op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
          goto out;
        }
    }

    /* As a last attempt, try allocating a single object.  Note that    */
    /* this may trigger a collection or expand the heap.                */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}
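
/* Usage sketch (disabled; illustrative only): the per-thread cache    */
/* described above.  Objects are linked through their first word       */
/* (see GC_NEXT() in gc.h); my_free_list and my_alloc16 are            */
/* hypothetical client code.                                           */
#if 0
  static void * my_free_list = NULL;  /* e.g. in thread-local storage */

  static void * my_alloc16(void)
  {
    void * p = my_free_list;

    if (NULL == p) {
      p = GC_malloc_many(16);       /* refill: a whole list per call */
      if (NULL == p) return NULL;   /* out of memory                 */
    }
    my_free_list = *(void **)p;     /* unlink the first object       */
    *(void **)p = NULL;             /* clear the link field          */
    return p;
  }
#endif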

/* Note that the "atomic" version of this would be unsafe, since the    */
/* links would not be seen by the collector.                            */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;

    /* Add EXTRA_BYTES and round up to a multiple of a granule. */
    lb = SIZET_SAT_ADD(lb, EXTRA_BYTES + GRANULE_BYTES - 1)
            & ~(GRANULE_BYTES - 1);

    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

#include <limits.h>

/* Debug version is tricky and currently missing.       */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) {
          return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
        }
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned.        */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = SIZET_SAT_ADD(lb, align - 1);
    result = GC_malloc(new_lb);
            /* It is OK not to check result for NULL as in that case    */
            /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            GC_STATIC_ASSERT(VALID_OFFSET_SZ <= HBLKSIZE);
            GC_ASSERT(offset < VALID_OFFSET_SZ);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}

/* This one exists largely to redirect posix_memalign for leak finding. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
  /* Check alignment properly.  */
  if (((align - 1) & align) != 0 || align < sizeof(void *)) {
#   ifdef MSWINCE
      return ERROR_INVALID_PARAMETER;
#   else
      return EINVAL;
#   endif
  }

  if ((*memptr = GC_memalign(align, lb)) == NULL) {
#   ifdef MSWINCE
      return ERROR_NOT_ENOUGH_MEMORY;
#   else
      return ENOMEM;
#   endif
  }
  return 0;
}
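
/* Usage sketch (disabled; illustrative only): both alignment          */
/* interfaces.  GC_posix_memalign() reports failure via its result     */
/* code (ERROR_* values on WinCE) rather than via errno.               */
#if 0
  {
    void * p = GC_memalign(64, 1000);   /* 64-byte aligned, or NULL */
    void * q;

    if (GC_posix_memalign(&q, 64, 1000) != 0) {
      /* EINVAL (bad alignment) or ENOMEM, as for posix_memalign(). */
    }
  }
#endif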

/* Provide a version of strdup() that uses the collector to allocate   */
/* the copy of the string.                                             */
GC_API GC_ATTR_MALLOC char * GC_CALL GC_strdup(const char *s)
{
  char *copy;
  size_t lb;
  if (s == NULL) return NULL;
  lb = strlen(s) + 1;
  if ((copy = GC_malloc_atomic(lb)) == NULL) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(s, copy, lb);
  return copy;
}

GC_API GC_ATTR_MALLOC char * GC_CALL GC_strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str); /* str is expected to be non-NULL  */
  if (len > size)
    len = size;
  copy = GC_malloc_atomic(len + 1);
  if (copy == NULL) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}
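
/* Usage sketch (disabled; illustrative only): the duplicates are      */
/* allocated as pointer-free (atomic) objects, so they must hold       */
/* character data only, never pointers.                                */
#if 0
  {
    char * s = GC_strdup("hello");      /* NULL with errno = ENOMEM   */
                                        /* on failure (non-WinCE)     */
    char * t = GC_strndup("world!", 5); /* copies at most 5 chars     */
  }
#endif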

#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */

  GC_API GC_ATTR_MALLOC wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
  {
    size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
    wchar_t *copy = GC_malloc_atomic(lb);
    if (copy == NULL) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    BCOPY(str, copy, lb);
    return copy;
  }
#endif /* GC_REQUIRE_WCSDUP */