Official mbed lwIP library (version 1.4.0)

Dependents:   LwIPNetworking NetServicesMin EthernetInterface EthernetInterface_RSF ... more

Legacy Networking Libraries

This is an mbed 2 networking library. For mbed OS 5, lwIP has been integrated with built-in networking interfaces; the networking libraries have been revised to better support additional network stacks and thread safety.

This library is based on the code of lwIP v1.4.0

Copyright (c) 2001, 2002 Swedish Institute of Computer Science.
All rights reserved. 

Redistribution and use in source and binary forms, with or without modification, 
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
   derived from this software without specific prior written permission. 

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 
SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 
OF SUCH DAMAGE.
Committer:
emilmont
Date:
Thu May 30 17:11:58 2013 +0100
Revision:
10:42a34d63b218
Parent:
2:fcd6ac34b3f8
Child:
16:092c37b63ee8
Add LPC4088 target

Who changed what in which revision?

User | Revision | Line number | New contents of line
mbed_official 0:51ac1d130fd4 1 /**
mbed_official 0:51ac1d130fd4 2 * @file
mbed_official 0:51ac1d130fd4 3 * Dynamic memory manager
mbed_official 0:51ac1d130fd4 4 *
mbed_official 0:51ac1d130fd4 5 * This is a lightweight replacement for the standard C library malloc().
mbed_official 0:51ac1d130fd4 6 *
mbed_official 0:51ac1d130fd4 7 * If you want to use the standard C library malloc() instead, define
mbed_official 0:51ac1d130fd4 8 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
mbed_official 0:51ac1d130fd4 9 *
mbed_official 0:51ac1d130fd4 10 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
mbed_official 0:51ac1d130fd4 11 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
mbed_official 0:51ac1d130fd4 12 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
mbed_official 0:51ac1d130fd4 13 * of pools like this (more pools can be added between _START and _END):
mbed_official 0:51ac1d130fd4 14 *
mbed_official 0:51ac1d130fd4 15 * Define three pools with sizes 256, 512, and 1512 bytes
mbed_official 0:51ac1d130fd4 16 * LWIP_MALLOC_MEMPOOL_START
mbed_official 0:51ac1d130fd4 17 * LWIP_MALLOC_MEMPOOL(20, 256)
mbed_official 0:51ac1d130fd4 18 * LWIP_MALLOC_MEMPOOL(10, 512)
mbed_official 0:51ac1d130fd4 19 * LWIP_MALLOC_MEMPOOL(5, 1512)
mbed_official 0:51ac1d130fd4 20 * LWIP_MALLOC_MEMPOOL_END
mbed_official 0:51ac1d130fd4 21 */
mbed_official 0:51ac1d130fd4 22
mbed_official 0:51ac1d130fd4 23 /*
mbed_official 0:51ac1d130fd4 24 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
mbed_official 0:51ac1d130fd4 25 * All rights reserved.
mbed_official 0:51ac1d130fd4 26 *
mbed_official 0:51ac1d130fd4 27 * Redistribution and use in source and binary forms, with or without modification,
mbed_official 0:51ac1d130fd4 28 * are permitted provided that the following conditions are met:
mbed_official 0:51ac1d130fd4 29 *
mbed_official 0:51ac1d130fd4 30 * 1. Redistributions of source code must retain the above copyright notice,
mbed_official 0:51ac1d130fd4 31 * this list of conditions and the following disclaimer.
mbed_official 0:51ac1d130fd4 32 * 2. Redistributions in binary form must reproduce the above copyright notice,
mbed_official 0:51ac1d130fd4 33 * this list of conditions and the following disclaimer in the documentation
mbed_official 0:51ac1d130fd4 34 * and/or other materials provided with the distribution.
mbed_official 0:51ac1d130fd4 35 * 3. The name of the author may not be used to endorse or promote products
mbed_official 0:51ac1d130fd4 36 * derived from this software without specific prior written permission.
mbed_official 0:51ac1d130fd4 37 *
mbed_official 0:51ac1d130fd4 38 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
mbed_official 0:51ac1d130fd4 39 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
mbed_official 0:51ac1d130fd4 40 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
mbed_official 0:51ac1d130fd4 41 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
mbed_official 0:51ac1d130fd4 42 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
mbed_official 0:51ac1d130fd4 43 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
mbed_official 0:51ac1d130fd4 44 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
mbed_official 0:51ac1d130fd4 45 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
mbed_official 0:51ac1d130fd4 46 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
mbed_official 0:51ac1d130fd4 47 * OF SUCH DAMAGE.
mbed_official 0:51ac1d130fd4 48 *
mbed_official 0:51ac1d130fd4 49 * This file is part of the lwIP TCP/IP stack.
mbed_official 0:51ac1d130fd4 50 *
mbed_official 0:51ac1d130fd4 51 * Author: Adam Dunkels <adam@sics.se>
mbed_official 0:51ac1d130fd4 52 * Simon Goldschmidt
mbed_official 0:51ac1d130fd4 53 *
mbed_official 0:51ac1d130fd4 54 */
mbed_official 0:51ac1d130fd4 55
mbed_official 0:51ac1d130fd4 56 #include "lwip/opt.h"
mbed_official 0:51ac1d130fd4 57
mbed_official 0:51ac1d130fd4 58 #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */
mbed_official 0:51ac1d130fd4 59
mbed_official 0:51ac1d130fd4 60 #include "lwip/def.h"
mbed_official 0:51ac1d130fd4 61 #include "lwip/mem.h"
mbed_official 0:51ac1d130fd4 62 #include "lwip/sys.h"
mbed_official 0:51ac1d130fd4 63 #include "lwip/stats.h"
mbed_official 0:51ac1d130fd4 64 #include "lwip/err.h"
mbed_official 0:51ac1d130fd4 65
mbed_official 0:51ac1d130fd4 66 #include <string.h>
mbed_official 0:51ac1d130fd4 67
mbed_official 0:51ac1d130fd4 68 #if MEM_USE_POOLS
mbed_official 0:51ac1d130fd4 69 /* lwIP head implemented with different sized pools */
mbed_official 0:51ac1d130fd4 70
mbed_official 0:51ac1d130fd4 71 /**
mbed_official 0:51ac1d130fd4 72 * Allocate memory: determine the smallest pool that is big enough
mbed_official 0:51ac1d130fd4 73 * to contain an element of 'size' and get an element from that pool.
mbed_official 0:51ac1d130fd4 74 *
mbed_official 0:51ac1d130fd4 75 * @param size the size in bytes of the memory needed
mbed_official 0:51ac1d130fd4 76 * @return a pointer to the allocated memory or NULL if the pool is empty
mbed_official 0:51ac1d130fd4 77 */
mbed_official 0:51ac1d130fd4 78 void *
mbed_official 0:51ac1d130fd4 79 mem_malloc(mem_size_t size)
mbed_official 0:51ac1d130fd4 80 {
mbed_official 0:51ac1d130fd4 81 struct memp_malloc_helper *element;
mbed_official 0:51ac1d130fd4 82 memp_t poolnr;
mbed_official 0:51ac1d130fd4 83 mem_size_t required_size = size + sizeof(struct memp_malloc_helper);
mbed_official 0:51ac1d130fd4 84
mbed_official 0:51ac1d130fd4 85 for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
mbed_official 0:51ac1d130fd4 86 #if MEM_USE_POOLS_TRY_BIGGER_POOL
mbed_official 0:51ac1d130fd4 87 again:
mbed_official 0:51ac1d130fd4 88 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
mbed_official 0:51ac1d130fd4 89 /* is this pool big enough to hold an element of the required size
mbed_official 0:51ac1d130fd4 90 plus a struct memp_malloc_helper that saves the pool this element came from? */
mbed_official 0:51ac1d130fd4 91 if (required_size <= memp_sizes[poolnr]) {
mbed_official 0:51ac1d130fd4 92 break;
mbed_official 0:51ac1d130fd4 93 }
mbed_official 0:51ac1d130fd4 94 }
mbed_official 0:51ac1d130fd4 95 if (poolnr > MEMP_POOL_LAST) {
mbed_official 0:51ac1d130fd4 96 LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
mbed_official 0:51ac1d130fd4 97 return NULL;
mbed_official 0:51ac1d130fd4 98 }
mbed_official 0:51ac1d130fd4 99 element = (struct memp_malloc_helper*)memp_malloc(poolnr);
mbed_official 0:51ac1d130fd4 100 if (element == NULL) {
mbed_official 0:51ac1d130fd4 101 /* No need to DEBUGF or ASSERT: This error is already
mbed_official 0:51ac1d130fd4 102 taken care of in memp.c */
mbed_official 0:51ac1d130fd4 103 #if MEM_USE_POOLS_TRY_BIGGER_POOL
mbed_official 0:51ac1d130fd4 104 /** Try a bigger pool if this one is empty! */
mbed_official 0:51ac1d130fd4 105 if (poolnr < MEMP_POOL_LAST) {
mbed_official 0:51ac1d130fd4 106 poolnr++;
mbed_official 0:51ac1d130fd4 107 goto again;
mbed_official 0:51ac1d130fd4 108 }
mbed_official 0:51ac1d130fd4 109 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
mbed_official 0:51ac1d130fd4 110 return NULL;
mbed_official 0:51ac1d130fd4 111 }
mbed_official 0:51ac1d130fd4 112
mbed_official 0:51ac1d130fd4 113 /* save the pool number this element came from */
mbed_official 0:51ac1d130fd4 114 element->poolnr = poolnr;
mbed_official 0:51ac1d130fd4 115 /* and return a pointer to the memory directly after the struct memp_malloc_helper */
mbed_official 0:51ac1d130fd4 116 element++;
mbed_official 0:51ac1d130fd4 117
mbed_official 0:51ac1d130fd4 118 return element;
mbed_official 0:51ac1d130fd4 119 }
mbed_official 0:51ac1d130fd4 120
mbed_official 0:51ac1d130fd4 121 /**
mbed_official 0:51ac1d130fd4 122 * Free memory previously allocated by mem_malloc. Loads the pool number
mbed_official 0:51ac1d130fd4 123 * and calls memp_free with that pool number to put the element back into
mbed_official 0:51ac1d130fd4 124 * its pool
mbed_official 0:51ac1d130fd4 125 *
mbed_official 0:51ac1d130fd4 126 * @param rmem the memory element to free
mbed_official 0:51ac1d130fd4 127 */
mbed_official 0:51ac1d130fd4 128 void
mbed_official 0:51ac1d130fd4 129 mem_free(void *rmem)
mbed_official 0:51ac1d130fd4 130 {
mbed_official 0:51ac1d130fd4 131 struct memp_malloc_helper *hmem = (struct memp_malloc_helper*)rmem;
mbed_official 0:51ac1d130fd4 132
mbed_official 0:51ac1d130fd4 133 LWIP_ASSERT("rmem != NULL", (rmem != NULL));
mbed_official 0:51ac1d130fd4 134 LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
mbed_official 0:51ac1d130fd4 135
mbed_official 0:51ac1d130fd4 136 /* get the original struct memp_malloc_helper */
mbed_official 0:51ac1d130fd4 137 hmem--;
mbed_official 0:51ac1d130fd4 138
mbed_official 0:51ac1d130fd4 139 LWIP_ASSERT("hmem != NULL", (hmem != NULL));
mbed_official 0:51ac1d130fd4 140 LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
mbed_official 0:51ac1d130fd4 141 LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
mbed_official 0:51ac1d130fd4 142
mbed_official 0:51ac1d130fd4 143 /* and put it in the pool we saved earlier */
mbed_official 0:51ac1d130fd4 144 memp_free(hmem->poolnr, hmem);
mbed_official 0:51ac1d130fd4 145 }
mbed_official 0:51ac1d130fd4 146
mbed_official 0:51ac1d130fd4 147 #else /* MEM_USE_POOLS */
mbed_official 0:51ac1d130fd4 148 /* lwIP replacement for your libc malloc() */
mbed_official 0:51ac1d130fd4 149
/**
 * The heap is made up as a list of structs of this type.
 * 'next' and 'prev' are byte offsets into the heap array 'ram', not
 * pointers, forming an offset-based doubly-linked list of regions.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};
mbed_official 0:51ac1d130fd4 163
mbed_official 0:51ac1d130fd4 164 /** All allocated blocks will be MIN_SIZE bytes big, at least!
mbed_official 0:51ac1d130fd4 165 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
mbed_official 0:51ac1d130fd4 166 * larger values could prevent too small blocks to fragment the RAM too much. */
mbed_official 0:51ac1d130fd4 167 #ifndef MIN_SIZE
mbed_official 0:51ac1d130fd4 168 #define MIN_SIZE 12
mbed_official 0:51ac1d130fd4 169 #endif /* MIN_SIZE */
mbed_official 0:51ac1d130fd4 170 /* some alignment macros: we define them here for better source code layout */
mbed_official 0:51ac1d130fd4 171 #define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
mbed_official 0:51ac1d130fd4 172 #define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
mbed_official 0:51ac1d130fd4 173 #define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
mbed_official 0:51ac1d130fd4 174
mbed_official 0:51ac1d130fd4 175 /** If you want to relocate the heap to external memory, simply define
mbed_official 0:51ac1d130fd4 176 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
mbed_official 0:51ac1d130fd4 177 * If so, make sure the memory at that location is big enough (see below on
mbed_official 0:51ac1d130fd4 178 * how that space is calculated). */
mbed_official 0:51ac1d130fd4 179 #ifndef LWIP_RAM_HEAP_POINTER
emilmont 10:42a34d63b218 180
emilmont 10:42a34d63b218 181 #if defined(TARGET_LPC4088)
emilmont 10:42a34d63b218 182 # if defined (__ICCARM__)
emilmont 10:42a34d63b218 183 # define ETHMEM_SECTION
emilmont 10:42a34d63b218 184 # elif defined(TOOLCHAIN_GCC_CR)
emilmont 10:42a34d63b218 185 # define ETHMEM_SECTION __attribute__((section(".data.$RamPeriph32")))
emilmont 10:42a34d63b218 186 # else
emilmont 10:42a34d63b218 187 # define ETHMEM_SECTION __attribute__((section("AHBSRAM1"),aligned))
emilmont 10:42a34d63b218 188 # endif
emilmont 10:42a34d63b218 189 #else
emilmont 10:42a34d63b218 190 # define ETHMEM_SECTION __attribute((section("AHBSRAM0")))
emilmont 10:42a34d63b218 191 #endif
emilmont 10:42a34d63b218 192
mbed_official 0:51ac1d130fd4 193 /** the heap. we need one struct mem at the end and some room for alignment */
emilmont 10:42a34d63b218 194 u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT] ETHMEM_SECTION;
mbed_official 0:51ac1d130fd4 195 #define LWIP_RAM_HEAP_POINTER ram_heap
mbed_official 0:51ac1d130fd4 196 #endif /* LWIP_RAM_HEAP_POINTER */
mbed_official 0:51ac1d130fd4 197
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
static sys_mutex_t mem_mutex;

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

/* set to 1 by mem_free()/mem_trim() after they modify the heap;
   presumably polled by mem_malloc() (not visible in this chunk) to detect
   concurrent frees — confirm against the allocator implementation */
static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a semaphore */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 233
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * Note: 'next'/'prev' fields are byte offsets into 'ram', so list
 * surgery is done by rewriting offsets, not pointers.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      /* the absorbed block was the lowest free one; its merged
         replacement 'mem' (which sits lower) takes over that role */
      lfree = mem;
    }
    /* absorb nmem: skip over it ... */
    mem->next = nmem->next;
    /* ... and point the block after it back at 'mem' */
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    /* absorb 'mem' into the preceding free block */
    pmem->next = mem->next;
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}
mbed_official 0:51ac1d130fd4 279
mbed_official 0:51ac1d130fd4 280 /**
mbed_official 0:51ac1d130fd4 281 * Zero the heap and initialize start, end and lowest-free
mbed_official 0:51ac1d130fd4 282 */
mbed_official 0:51ac1d130fd4 283 void
mbed_official 0:51ac1d130fd4 284 mem_init(void)
mbed_official 0:51ac1d130fd4 285 {
mbed_official 0:51ac1d130fd4 286 struct mem *mem;
mbed_official 0:51ac1d130fd4 287
mbed_official 0:51ac1d130fd4 288 LWIP_ASSERT("Sanity check alignment",
mbed_official 0:51ac1d130fd4 289 (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
mbed_official 0:51ac1d130fd4 290
mbed_official 0:51ac1d130fd4 291 /* align the heap */
mbed_official 0:51ac1d130fd4 292 ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
mbed_official 0:51ac1d130fd4 293 /* initialize the start of the heap */
mbed_official 0:51ac1d130fd4 294 mem = (struct mem *)(void *)ram;
mbed_official 0:51ac1d130fd4 295 mem->next = MEM_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 296 mem->prev = 0;
mbed_official 0:51ac1d130fd4 297 mem->used = 0;
mbed_official 0:51ac1d130fd4 298 /* initialize the end of the heap */
mbed_official 0:51ac1d130fd4 299 ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
mbed_official 0:51ac1d130fd4 300 ram_end->used = 1;
mbed_official 0:51ac1d130fd4 301 ram_end->next = MEM_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 302 ram_end->prev = MEM_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 303
mbed_official 0:51ac1d130fd4 304 /* initialize the lowest-free pointer to the start of the heap */
mbed_official 0:51ac1d130fd4 305 lfree = (struct mem *)(void *)ram;
mbed_official 0:51ac1d130fd4 306
mbed_official 0:51ac1d130fd4 307 MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
mbed_official 0:51ac1d130fd4 308
mbed_official 0:51ac1d130fd4 309 if(sys_mutex_new(&mem_mutex) != ERR_OK) {
mbed_official 0:51ac1d130fd4 310 LWIP_ASSERT("failed to create mem_mutex", 0);
mbed_official 0:51ac1d130fd4 311 }
mbed_official 0:51ac1d130fd4 312 }
mbed_official 0:51ac1d130fd4 313
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 * call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    /* freeing NULL is tolerated but logged */
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  /* the assert catches out-of-heap pointers in debug builds ... */
  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  /* ... while this check keeps release builds from corrupting the heap */
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem (header sits directly before the data) ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  /* mem->next is the offset of the following block, so the difference
     is this block's total footprint (header + data) */
  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* signal that the heap changed under any in-progress allocation */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
mbed_official 0:51ac1d130fd4 367
mbed_official 0:51ac1d130fd4 368 /**
mbed_official 0:51ac1d130fd4 369 * Shrink memory returned by mem_malloc().
mbed_official 0:51ac1d130fd4 370 *
mbed_official 0:51ac1d130fd4 371 * @param rmem pointer to memory allocated by mem_malloc the is to be shrinked
mbed_official 0:51ac1d130fd4 372 * @param newsize required size after shrinking (needs to be smaller than or
mbed_official 0:51ac1d130fd4 373 * equal to the previous size)
mbed_official 0:51ac1d130fd4 374 * @return for compatibility reasons: is always == rmem, at the moment
mbed_official 0:51ac1d130fd4 375 * or NULL if newsize is > old size, in which case rmem is NOT touched
mbed_official 0:51ac1d130fd4 376 * or freed!
mbed_official 0:51ac1d130fd4 377 */
mbed_official 0:51ac1d130fd4 378 void *
mbed_official 0:51ac1d130fd4 379 mem_trim(void *rmem, mem_size_t newsize)
mbed_official 0:51ac1d130fd4 380 {
mbed_official 0:51ac1d130fd4 381 mem_size_t size;
mbed_official 0:51ac1d130fd4 382 mem_size_t ptr, ptr2;
mbed_official 0:51ac1d130fd4 383 struct mem *mem, *mem2;
mbed_official 0:51ac1d130fd4 384 /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
mbed_official 0:51ac1d130fd4 385 LWIP_MEM_FREE_DECL_PROTECT();
mbed_official 0:51ac1d130fd4 386
mbed_official 0:51ac1d130fd4 387 /* Expand the size of the allocated memory region so that we can
mbed_official 0:51ac1d130fd4 388 adjust for alignment. */
mbed_official 0:51ac1d130fd4 389 newsize = LWIP_MEM_ALIGN_SIZE(newsize);
mbed_official 0:51ac1d130fd4 390
mbed_official 0:51ac1d130fd4 391 if(newsize < MIN_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 392 /* every data block must be at least MIN_SIZE_ALIGNED long */
mbed_official 0:51ac1d130fd4 393 newsize = MIN_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 394 }
mbed_official 0:51ac1d130fd4 395
mbed_official 0:51ac1d130fd4 396 if (newsize > MEM_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 397 return NULL;
mbed_official 0:51ac1d130fd4 398 }
mbed_official 0:51ac1d130fd4 399
mbed_official 0:51ac1d130fd4 400 LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
mbed_official 0:51ac1d130fd4 401 (u8_t *)rmem < (u8_t *)ram_end);
mbed_official 0:51ac1d130fd4 402
mbed_official 0:51ac1d130fd4 403 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
mbed_official 0:51ac1d130fd4 404 SYS_ARCH_DECL_PROTECT(lev);
mbed_official 0:51ac1d130fd4 405 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
mbed_official 0:51ac1d130fd4 406 /* protect mem stats from concurrent access */
mbed_official 0:51ac1d130fd4 407 SYS_ARCH_PROTECT(lev);
mbed_official 0:51ac1d130fd4 408 MEM_STATS_INC(illegal);
mbed_official 0:51ac1d130fd4 409 SYS_ARCH_UNPROTECT(lev);
mbed_official 0:51ac1d130fd4 410 return rmem;
mbed_official 0:51ac1d130fd4 411 }
mbed_official 0:51ac1d130fd4 412 /* Get the corresponding struct mem ... */
mbed_official 0:51ac1d130fd4 413 mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
mbed_official 0:51ac1d130fd4 414 /* ... and its offset pointer */
mbed_official 0:51ac1d130fd4 415 ptr = (mem_size_t)((u8_t *)mem - ram);
mbed_official 0:51ac1d130fd4 416
mbed_official 0:51ac1d130fd4 417 size = mem->next - ptr - SIZEOF_STRUCT_MEM;
mbed_official 0:51ac1d130fd4 418 LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
mbed_official 0:51ac1d130fd4 419 if (newsize > size) {
mbed_official 0:51ac1d130fd4 420 /* not supported */
mbed_official 0:51ac1d130fd4 421 return NULL;
mbed_official 0:51ac1d130fd4 422 }
mbed_official 0:51ac1d130fd4 423 if (newsize == size) {
mbed_official 0:51ac1d130fd4 424 /* No change in size, simply return */
mbed_official 0:51ac1d130fd4 425 return rmem;
mbed_official 0:51ac1d130fd4 426 }
mbed_official 0:51ac1d130fd4 427
mbed_official 0:51ac1d130fd4 428 /* protect the heap from concurrent access */
mbed_official 0:51ac1d130fd4 429 LWIP_MEM_FREE_PROTECT();
mbed_official 0:51ac1d130fd4 430
mbed_official 0:51ac1d130fd4 431 mem2 = (struct mem *)(void *)&ram[mem->next];
mbed_official 0:51ac1d130fd4 432 if(mem2->used == 0) {
mbed_official 0:51ac1d130fd4 433 /* The next struct is unused, we can simply move it at little */
mbed_official 0:51ac1d130fd4 434 mem_size_t next;
mbed_official 0:51ac1d130fd4 435 /* remember the old next pointer */
mbed_official 0:51ac1d130fd4 436 next = mem2->next;
mbed_official 0:51ac1d130fd4 437 /* create new struct mem which is moved directly after the shrinked mem */
mbed_official 0:51ac1d130fd4 438 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
mbed_official 0:51ac1d130fd4 439 if (lfree == mem2) {
mbed_official 0:51ac1d130fd4 440 lfree = (struct mem *)(void *)&ram[ptr2];
mbed_official 0:51ac1d130fd4 441 }
mbed_official 0:51ac1d130fd4 442 mem2 = (struct mem *)(void *)&ram[ptr2];
mbed_official 0:51ac1d130fd4 443 mem2->used = 0;
mbed_official 0:51ac1d130fd4 444 /* restore the next pointer */
mbed_official 0:51ac1d130fd4 445 mem2->next = next;
mbed_official 0:51ac1d130fd4 446 /* link it back to mem */
mbed_official 0:51ac1d130fd4 447 mem2->prev = ptr;
mbed_official 0:51ac1d130fd4 448 /* link mem to it */
mbed_official 0:51ac1d130fd4 449 mem->next = ptr2;
mbed_official 0:51ac1d130fd4 450 /* last thing to restore linked list: as we have moved mem2,
mbed_official 0:51ac1d130fd4 451 * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
mbed_official 0:51ac1d130fd4 452 * the end of the heap */
mbed_official 0:51ac1d130fd4 453 if (mem2->next != MEM_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 454 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
mbed_official 0:51ac1d130fd4 455 }
mbed_official 0:51ac1d130fd4 456 MEM_STATS_DEC_USED(used, (size - newsize));
mbed_official 0:51ac1d130fd4 457 /* no need to plug holes, we've already done that */
mbed_official 0:51ac1d130fd4 458 } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
mbed_official 0:51ac1d130fd4 459 /* Next struct is used but there's room for another struct mem with
mbed_official 0:51ac1d130fd4 460 * at least MIN_SIZE_ALIGNED of data.
mbed_official 0:51ac1d130fd4 461 * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
mbed_official 0:51ac1d130fd4 462 * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
mbed_official 0:51ac1d130fd4 463 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
mbed_official 0:51ac1d130fd4 464 * region that couldn't hold data, but when mem->next gets freed,
mbed_official 0:51ac1d130fd4 465 * the 2 regions would be combined, resulting in more free memory */
mbed_official 0:51ac1d130fd4 466 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
mbed_official 0:51ac1d130fd4 467 mem2 = (struct mem *)(void *)&ram[ptr2];
mbed_official 0:51ac1d130fd4 468 if (mem2 < lfree) {
mbed_official 0:51ac1d130fd4 469 lfree = mem2;
mbed_official 0:51ac1d130fd4 470 }
mbed_official 0:51ac1d130fd4 471 mem2->used = 0;
mbed_official 0:51ac1d130fd4 472 mem2->next = mem->next;
mbed_official 0:51ac1d130fd4 473 mem2->prev = ptr;
mbed_official 0:51ac1d130fd4 474 mem->next = ptr2;
mbed_official 0:51ac1d130fd4 475 if (mem2->next != MEM_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 476 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
mbed_official 0:51ac1d130fd4 477 }
mbed_official 0:51ac1d130fd4 478 MEM_STATS_DEC_USED(used, (size - newsize));
mbed_official 0:51ac1d130fd4 479 /* the original mem->next is used, so no need to plug holes! */
mbed_official 0:51ac1d130fd4 480 }
mbed_official 0:51ac1d130fd4 481 /* else {
mbed_official 0:51ac1d130fd4 482 next struct mem is used but size between mem and mem2 is not big enough
mbed_official 0:51ac1d130fd4 483 to create another struct mem
mbed_official 0:51ac1d130fd4 484 -> don't do anything.
mbed_official 0:51ac1d130fd4 485 -> the remaining space stays unused since it is too small
mbed_official 0:51ac1d130fd4 486 } */
mbed_official 0:51ac1d130fd4 487 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mbed_official 0:51ac1d130fd4 488 mem_free_count = 1;
mbed_official 0:51ac1d130fd4 489 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 490 LWIP_MEM_FREE_UNPROTECT();
mbed_official 0:51ac1d130fd4 491 return rmem;
mbed_official 0:51ac1d130fd4 492 }
mbed_official 0:51ac1d130fd4 493
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * First-fit scan over the single static heap 'ram', starting at 'lfree'
 * (the lowest known free block). Free and used regions are chained via
 * struct mem headers embedded in the heap itself ('next'/'prev' are byte
 * offsets into 'ram', not pointers). The whole operation runs under
 * 'mem_mutex'; when LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT is set, the
 * protection is briefly dropped inside the scan so an interrupt-context
 * mem_free() can run, and the scan restarts if one did.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;          /* byte offsets into 'ram' of the current block and the split remainder */
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* set when a concurrent mem_free interrupted the scan; forces a retry */
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if(size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    /* request can never fit in the heap */
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
      mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      /* open a window for an interrupt-context mem_free(): drop protection,
       * then re-acquire and check whether a free actually ran meanwhile */
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* remember it so the outer do/while retries the whole scan */
        local_mem_free_count = mem_free_count;
      }
      mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           * region that couldn't hold data, but when mem->next gets freed,
           * the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
          /* create mem2 struct (the free remainder) */
          mem2 = (struct mem *)(void *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            /* fix back-link of the following block (unless mem2 is the last block) */
            ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
           * be used at this point: if not, we would have 2 unused structs in a row and plug_holes
           * should have taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
        }

        if (mem == lfree) {
          /* Find next free block after mem and update lowest free pointer */
          while (lfree->used && lfree != ram_end) {
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            lfree = (struct mem *)(void *)&ram[lfree->next];
          }
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
         (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
         ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
          (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

        /* return the payload, which starts just after the block header */
        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while(local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  return NULL;
}
mbed_official 0:51ac1d130fd4 630
mbed_official 0:51ac1d130fd4 631 #endif /* MEM_USE_POOLS */
mbed_official 0:51ac1d130fd4 632 /**
mbed_official 0:51ac1d130fd4 633 * Contiguously allocates enough space for count objects that are size bytes
mbed_official 0:51ac1d130fd4 634 * of memory each and returns a pointer to the allocated memory.
mbed_official 0:51ac1d130fd4 635 *
mbed_official 0:51ac1d130fd4 636 * The allocated memory is filled with bytes of value zero.
mbed_official 0:51ac1d130fd4 637 *
mbed_official 0:51ac1d130fd4 638 * @param count number of objects to allocate
mbed_official 0:51ac1d130fd4 639 * @param size size of the objects to allocate
mbed_official 0:51ac1d130fd4 640 * @return pointer to allocated memory / NULL pointer if there is an error
mbed_official 0:51ac1d130fd4 641 */
mbed_official 0:51ac1d130fd4 642 void *mem_calloc(mem_size_t count, mem_size_t size)
mbed_official 0:51ac1d130fd4 643 {
mbed_official 0:51ac1d130fd4 644 void *p;
mbed_official 0:51ac1d130fd4 645
mbed_official 0:51ac1d130fd4 646 /* allocate 'count' objects of size 'size' */
mbed_official 0:51ac1d130fd4 647 p = mem_malloc(count * size);
mbed_official 0:51ac1d130fd4 648 if (p) {
mbed_official 0:51ac1d130fd4 649 /* zero the memory */
mbed_official 0:51ac1d130fd4 650 memset(p, 0, count * size);
mbed_official 0:51ac1d130fd4 651 }
mbed_official 0:51ac1d130fd4 652 return p;
mbed_official 0:51ac1d130fd4 653 }
mbed_official 0:51ac1d130fd4 654
mbed_official 0:51ac1d130fd4 655 #endif /* !MEM_LIBC_MALLOC */