The source is still being tidied up, but it is usable.

Dependencies:   EthernetInterface HttpServer TextLCD mbed-rpc mbed-rtos mbed Socket lwip-eth lwip-sys lwip

Committer: yueee_yt
Date: Wed Mar 12 04:19:54 2014 +0000
Revision: 0:7766f6712673
/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */

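/* A minimal lwipopts.h sketch for the pool-based variant described above.
 * This is an illustrative assumption, not a verified configuration: the
 * comment above spells the second option MEM_USE_CUSTOM_POOLS, while stock
 * lwIP's opt.h spells it MEMP_USE_CUSTOM_POOLS, so check the opt.h shipped
 * with this tree before copying it.
 *
 *   #define MEM_LIBC_MALLOC        0
 *   #define MEM_USE_POOLS          1
 *   #define MEMP_USE_CUSTOM_POOLS  1
 */
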
/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"

#if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */

#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_USE_POOLS
/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  struct memp_malloc_helper *element;
  memp_t poolnr;
  mem_size_t required_size = size + sizeof(struct memp_malloc_helper);

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
again:
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_sizes[poolnr]) {
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    return NULL;
  }
  element = (struct memp_malloc_helper*)memp_malloc(poolnr);
  if (element == NULL) {
    /* No need to DEBUGF or ASSERT: This error is already
       taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
    /** Try a bigger pool if this one is empty! */
    if (poolnr < MEMP_POOL_LAST) {
      poolnr++;
      goto again;
    }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  element++;

  return element;
}

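/* Sketch of one pool element as laid out by the mem_malloc() above (derived
 * from the code in this file, not from lwIP documentation):
 *
 *   |<- struct memp_malloc_helper ->|<----- user data, 'size' bytes ----->|
 *   ^                               ^
 *   element, as returned by         element + 1, the pointer handed back
 *   memp_malloc(poolnr)             to the caller
 *
 * mem_free() below undoes the 'element++' to recover the helper and, with
 * it, the pool number the element must be returned to.
 */
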
/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem = (struct memp_malloc_helper*)rmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  hmem--;

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}

#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};

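/* Illustration of the heap layout this struct produces (schematic, derived
 * from the code below; widths not to scale):
 *
 *   ram                                                    ram_end
 *   |hdr|user data ...|hdr|user data ...|  ...  |hdr (sentinel)|
 *
 * Each 'hdr' is a struct mem; 'next' and 'prev' are byte offsets into ram[],
 * not pointers, so the links never need fixing up when the heap base moves.
 */
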
/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values help keep very small blocks from fragmenting the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

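/* Worked example for the macros above, assuming MEM_ALIGNMENT == 4 and the
 * stock lwIP LWIP_MEM_ALIGN_SIZE(), which rounds its argument up to the next
 * multiple of MEM_ALIGNMENT:
 *
 *   LWIP_MEM_ALIGN_SIZE(5)  -> 8
 *   LWIP_MEM_ALIGN_SIZE(12) -> 12, so MIN_SIZE_ALIGNED == 12 here
 *
 * On top of that, every allocation consumes SIZEOF_STRUCT_MEM bytes for the
 * block header defined above.
 */
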
/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER

#if defined(TARGET_LPC4088)
#  if defined (__ICCARM__)
#    define ETHMEM_SECTION
#  elif defined(TOOLCHAIN_GCC_CR)
#    define ETHMEM_SECTION __attribute__((section(".data.$RamPeriph32")))
#  else
#    define ETHMEM_SECTION __attribute__((section("AHBSRAM1"),aligned))
#  endif
#else
#  define ETHMEM_SECTION __attribute__((section("AHBSRAM0")))
#endif

/** the heap. we need one struct mem at the end and some room for alignment */
u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT] ETHMEM_SECTION;
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */

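/* To relocate the heap as the comment above describes, it should be enough
 * to define LWIP_RAM_HEAP_POINTER in lwipopts.h; the address below is a
 * hypothetical example, not a real board mapping:
 *
 *   #define LWIP_RAM_HEAP_POINTER ((void *)0xA0000000)
 *
 * The region pointed to must hold at least
 * MEM_SIZE_ALIGNED + 2*SIZEOF_STRUCT_MEM + MEM_ALIGNMENT bytes, matching the
 * ram_heap definition above.
 */
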
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
static sys_mutex_t mem_mutex;

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a mutex */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using the mutex AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

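/* The first scheme above is selected from lwipopts.h; the option name is
 * taken from this file (stock lwIP defaults it to 0), and it is needed only
 * when frees may happen from interrupt context:
 *
 *   #define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 1
 */
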
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}

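/* Worked example for plug_holes(): suppose block B was just freed while its
 * neighbours A (free) and C (free) surround it and D is in use:
 *
 *   before:        [A free][B freed][C free][D used]
 *   forward plug:  [A free][B+C free       ][D used]
 *   backward plug: [A+B+C free             ][D used]
 *
 * After one call no two adjacent free blocks remain, and lfree is pulled
 * back to the merged block when it pointed into it.
 */
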
/**
 * Zero the heap and initialize start, end and lowest-free
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
    (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if(sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}

/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *        call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}

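/* Note: with LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT == 0, the mem_free()
 * above takes mem_mutex via LWIP_MEM_FREE_PROTECT(), so it must only be
 * called from thread context; the SYS_ARCH_PROTECT-based variant of the
 * macros is what permits calls from other (e.g. interrupt) contexts. */
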
/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}

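/* Usage sketch for mem_trim() (sizes are arbitrary example values; this
 * mirrors how lwIP itself shrinks PBUF_RAM buffers once the final length is
 * known):
 *
 *   void *p = mem_malloc(100);
 *   if (p != NULL) {
 *     p = mem_trim(p, 40);  // shrinking always returns the same pointer
 *   }
 *
 * The tail that is given back becomes a new free block, or is merged into a
 * free successor, exactly as implemented above.
 */
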
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if(size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
      mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        local_mem_free_count = mem_free_count;
      }
      mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
          /* create mem2 struct */
          mem2 = (struct mem *)(void *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
           * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
           * taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
        }

        if (mem == lfree) {
          /* Find next free block after mem and update lowest free pointer */
          while (lfree->used && lfree != ram_end) {
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            lfree = (struct mem *)(void *)&ram[lfree->next];
          }
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
          (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
          ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
          (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while(local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  return NULL;
}

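/* Minimal usage sketch for the heap allocator above (example values only):
 *
 *   void *p = mem_malloc(128);  // first fit, scanning up from lfree
 *   if (p != NULL) {
 *     // ... use the 128-byte (aligned) buffer ...
 *     mem_free(p);              // return the block; adjacent holes get plugged
 *   }
 */
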
#endif /* MEM_USE_POOLS */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc(count * size);
  if (p) {
    /* zero the memory */
    memset(p, 0, count * size);
  }
  return p;
}

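/* Note that 'count * size' in mem_calloc() above can wrap around for large
 * arguments, as in many small calloc implementations. A checked wrapper
 * along these lines is one possible guard (a sketch, not part of lwIP):
 *
 *   void *mem_calloc_checked(mem_size_t count, mem_size_t size)
 *   {
 *     if ((size != 0) && (count > ((mem_size_t)-1) / size)) {
 *       return NULL; // multiplication would overflow
 *     }
 *     return mem_calloc(count, size);
 *   }
 */
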
#endif /* !MEM_LIBC_MALLOC */