Nordic stack and drivers for the mbed BLE API Modified for HRM 1017 and correct DISCONNECT event processing

Fork of nRF51822 by Nordic Semiconductor

Files at this revision

API Documentation at this revision

Committer:
Rohit Grover
Date:
Wed Jul 16 10:54:22 2014 +0100
Parent:
45:3c4df37ed83e
Child:
47:db25ca6ed092
Commit message:
get some more app_common ancillary sources from the Nordic SDK

Changed in this revision

nordic/app_common/app_gpiote.c Show annotated file Show diff for this revision Revisions of this file
nordic/app_common/app_scheduler.c Show annotated file Show diff for this revision Revisions of this file
nordic/app_common/hci_mem_pool.c Show annotated file Show diff for this revision Revisions of this file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/nordic/app_common/app_gpiote.c	Wed Jul 16 10:54:22 2014 +0100
@@ -0,0 +1,360 @@
+/* Copyright (c) 2012 Nordic Semiconductor. All Rights Reserved.
+ *
+ * The information contained herein is property of Nordic Semiconductor ASA.
+ * Terms and conditions of usage are described in detail in NORDIC
+ * SEMICONDUCTOR STANDARD SOFTWARE LICENSE AGREEMENT.
+ *
+ * Licensees are granted free, non-transferable use of the information. NO
+ * WARRANTY of ANY KIND is provided. This heading must NOT be removed from
+ * the file.
+ *
+ */
+
+#include "app_gpiote.h"
+#include <stdlib.h>
+#include <string.h>
+#include "app_util.h"
+#include "app_util_platform.h"
+#include "nrf_error.h"
+#include "nrf_gpio.h"
+
+
+/**@brief GPIOTE user type. One record per registered user of the module. */
+typedef struct
+{
+    uint32_t                   pins_mask;             /**< Mask defining which pins user wants to monitor (union of the two masks below). */
+    uint32_t                   pins_low_to_high_mask; /**< Mask defining which pins will generate events to this user when toggling low->high. */
+    uint32_t                   pins_high_to_low_mask; /**< Mask defining which pins will generate events to this user when toggling high->low. */
+    uint32_t                   sense_high_pins;       /**< Mask defining which pins are configured to generate GPIOTE interrupt on transition to high level. */
+    app_gpiote_event_handler_t event_handler;         /**< Pointer to function to be executed when an event occurs. */
+} gpiote_user_t;
+
+// Each record must fit one GPIOTE_USER_NODE_SIZE slot of the caller-supplied
+// buffer and keep that buffer word-aligned throughout.
+STATIC_ASSERT(sizeof(gpiote_user_t) <= GPIOTE_USER_NODE_SIZE);
+STATIC_ASSERT(sizeof(gpiote_user_t) % 4 == 0);
+
+static uint32_t        m_enabled_users_mask;          /**< Mask for tracking which users are enabled (bit i corresponds to user id i). */
+static uint8_t         m_user_array_size;             /**< Size of user array (set by app_gpiote_init()). */
+static uint8_t         m_user_count;                  /**< Number of registered users. */
+static gpiote_user_t * mp_users = NULL;               /**< Array of GPIOTE users; points into the caller-supplied buffer. NULL until init. */
+
+
+/**@brief Function for toggling sense level for specified pins.
+ *
+ * For every pin selected in @p pins the SENSE field of its PIN_CNF register is
+ * inverted (High <-> Low), and the user's sense_high_pins bookkeeping mask is
+ * updated to match the new hardware configuration.
+ *
+ * @param[in]   p_user   Pointer to user structure.
+ * @param[in]   pins     Bitmask specifying for which pins the sense level is to be toggled.
+ */
+static void sense_level_toggle(gpiote_user_t * p_user, uint32_t pins)
+{
+    uint32_t pin_no;
+
+    for (pin_no = 0; pin_no < NO_OF_PINS; pin_no++)
+    {
+        // Shift an unsigned value: (1 << 31) on a signed int is undefined behavior.
+        uint32_t pin_mask = (1UL << pin_no);
+
+        if ((pins & pin_mask) != 0)
+        {
+            uint32_t sense;
+
+            // Invert sensing.
+            if ((p_user->sense_high_pins & pin_mask) == 0)
+            {
+                sense                    = GPIO_PIN_CNF_SENSE_High << GPIO_PIN_CNF_SENSE_Pos;
+                p_user->sense_high_pins |= pin_mask;
+            }
+            else
+            {
+                sense                    = GPIO_PIN_CNF_SENSE_Low << GPIO_PIN_CNF_SENSE_Pos;
+                p_user->sense_high_pins &= ~pin_mask;
+            }
+
+            NRF_GPIO->PIN_CNF[pin_no] &= ~GPIO_PIN_CNF_SENSE_Msk;
+            NRF_GPIO->PIN_CNF[pin_no] |= sense;
+        }
+    }
+}
+
+
+/**@brief Function for handling the GPIOTE interrupt (PORT event).
+ *
+ * Snapshots the pin state, then for each enabled user: computes which of its
+ * monitored pins transitioned, re-arms SENSE in the opposite direction, and
+ * finally calls the user's event handler with separate low->high and
+ * high->low masks.
+ */
+void GPIOTE_IRQHandler(void)
+{
+    uint8_t  i;
+    uint32_t pins_changed;
+    uint32_t pins_state = NRF_GPIO->IN;
+
+    // Clear event.
+    NRF_GPIOTE->EVENTS_PORT = 0;
+
+    // Check all users.
+    for (i = 0; i < m_user_count; i++)
+    {
+        gpiote_user_t * p_user = &mp_users[i];
+
+        // Check if user is enabled.
+        if (((1 << i) & m_enabled_users_mask) != 0)
+        {
+            uint32_t transition_pins;
+            uint32_t event_low_to_high;
+            uint32_t event_high_to_low;
+
+            // Find set of pins on which there has been a transition: a pin is
+            // armed to sense the level it does NOT currently have, so a pin
+            // whose level equals its "sense high" arm state has toggled.
+            transition_pins = (pins_state ^ ~p_user->sense_high_pins) & p_user->pins_mask;
+
+            // Toggle SENSE level for all pins that have changed state.
+            sense_level_toggle(p_user, transition_pins);
+
+            // Second read after setting sense.
+            // Check if any pins have changed while serving this interrupt.
+            pins_changed = NRF_GPIO->IN ^ pins_state;
+            if (pins_changed)
+            {
+                // Transition pins detected in late stage.
+                uint32_t late_transition_pins;
+
+                pins_state          |= pins_changed;
+
+                // Find set of pins on which there has been a transition.
+                late_transition_pins = (pins_state ^ ~p_user->sense_high_pins) & p_user->pins_mask;
+
+                // Toggle SENSE level for all pins that have changed state in last phase.
+                sense_level_toggle(p_user, late_transition_pins);
+
+                // Update pins that has changed state since the interrupt occurred.
+                transition_pins |= late_transition_pins;
+            }
+
+            // Call user event handler if an event has occurred.
+            event_high_to_low = (~pins_state & p_user->pins_high_to_low_mask) & transition_pins;
+            event_low_to_high = (pins_state & p_user->pins_low_to_high_mask) & transition_pins;
+
+            if ((event_low_to_high | event_high_to_low) != 0)
+            {
+                p_user->event_handler(event_low_to_high, event_high_to_low);
+            }
+        }
+    }
+}
+
+
+/**@brief Function for sense disabling for all pins for specified user.
+ *
+ * Clears the SENSE field in PIN_CNF for every pin in the user's pins_mask,
+ * so none of the user's pins can trigger the PORT event.
+ *
+ * @param[in]  user_id   User id.
+ */
+static void pins_sense_disable(app_gpiote_user_id_t user_id)
+{
+    uint32_t pin_no;
+
+    // Iterate over NO_OF_PINS (consistent with sense_level_toggle) instead of
+    // a hard-coded 32, and shift an unsigned value: (1 << 31) on a signed int
+    // is undefined behavior.
+    for (pin_no = 0; pin_no < NO_OF_PINS; pin_no++)
+    {
+        if ((mp_users[user_id].pins_mask & (1UL << pin_no)) != 0)
+        {
+            NRF_GPIO->PIN_CNF[pin_no] &= ~GPIO_PIN_CNF_SENSE_Msk;
+            NRF_GPIO->PIN_CNF[pin_no] |= GPIO_PIN_CNF_SENSE_Disabled << GPIO_PIN_CNF_SENSE_Pos;
+        }
+    }
+}
+
+
+/**@brief Initialize the GPIOTE module with a caller-supplied user buffer.
+ *
+ * @param[in]  max_users  Maximum number of users that may register.
+ * @param[in]  p_buffer   Word-aligned buffer for user records.
+ *
+ * @retval NRF_SUCCESS             Module initialized.
+ * @retval NRF_ERROR_INVALID_PARAM Buffer is NULL or not word-aligned.
+ */
+uint32_t app_gpiote_init(uint8_t max_users, void * p_buffer)
+{
+    // Reject a missing or misaligned user buffer up front.
+    if ((p_buffer == NULL) || !is_word_aligned(p_buffer))
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    // Adopt the caller's buffer and reset all module bookkeeping.
+    mp_users             = (gpiote_user_t *)p_buffer;
+    m_user_array_size    = max_users;
+    m_user_count         = 0;
+    m_enabled_users_mask = 0;
+    memset(mp_users, 0, max_users * sizeof(gpiote_user_t));
+
+    // Keep every GPIOTE interrupt source masked for now; the PORT interrupt
+    // is switched on by app_gpiote_user_enable() when the first user enables.
+    NRF_GPIOTE->INTENCLR = 0xFFFFFFFF;
+
+    NVIC_ClearPendingIRQ(GPIOTE_IRQn);
+    NVIC_SetPriority(GPIOTE_IRQn, APP_IRQ_PRIORITY_HIGH);
+    NVIC_EnableIRQ(GPIOTE_IRQn);
+
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Register a GPIOTE user.
+ *
+ * @param[out] p_user_id              Assigned user id.
+ * @param[in]  pins_low_to_high_mask  Pins reported to this user on low->high.
+ * @param[in]  pins_high_to_low_mask  Pins reported to this user on high->low.
+ * @param[in]  event_handler          Handler called from the GPIOTE ISR.
+ *
+ * @retval NRF_SUCCESS               User registered.
+ * @retval NRF_ERROR_INVALID_STATE   Module not initialized.
+ * @retval NRF_ERROR_INVALID_PARAM   p_user_id or event_handler is NULL.
+ * @retval NRF_ERROR_NO_MEM          User array is full.
+ */
+uint32_t app_gpiote_user_register(app_gpiote_user_id_t     * p_user_id,
+                                  uint32_t                   pins_low_to_high_mask,
+                                  uint32_t                   pins_high_to_low_mask,
+                                  app_gpiote_event_handler_t event_handler)
+{
+    // Check state and parameters.
+    if (mp_users == NULL)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+    // BUG FIX: p_user_id was previously dereferenced without a NULL check.
+    if ((p_user_id == NULL) || (event_handler == NULL))
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+    if (m_user_count >= m_user_array_size)
+    {
+        return NRF_ERROR_NO_MEM;
+    }
+
+    // Allocate new user.
+    mp_users[m_user_count].pins_mask             = pins_low_to_high_mask | pins_high_to_low_mask;
+    mp_users[m_user_count].pins_low_to_high_mask = pins_low_to_high_mask;
+    mp_users[m_user_count].pins_high_to_low_mask = pins_high_to_low_mask;
+    mp_users[m_user_count].event_handler         = event_handler;
+
+    *p_user_id = m_user_count++;
+
+    // Make sure SENSE is disabled for all pins.
+    pins_sense_disable(*p_user_id);
+
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Enable a registered GPIOTE user.
+ *
+ * Arms SENSE on every pin in the user's mask so that the next transition on
+ * that pin fires the PORT event. Enables the PORT interrupt when the first
+ * user is enabled.
+ *
+ * @param[in]  user_id  User id returned by app_gpiote_user_register().
+ *
+ * @retval NRF_SUCCESS               User enabled.
+ * @retval NRF_ERROR_INVALID_STATE   Module not initialized.
+ * @retval NRF_ERROR_INVALID_PARAM   Unknown user id.
+ */
+uint32_t app_gpiote_user_enable(app_gpiote_user_id_t user_id)
+{
+    uint32_t pin_no;
+    uint32_t pins_state;
+
+    // Check state and parameters.
+    if (mp_users == NULL)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+    if (user_id >= m_user_count)
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    // Clear any pending event.
+    NRF_GPIOTE->EVENTS_PORT = 0;
+    pins_state              = NRF_GPIO->IN;
+
+    // Enable user.
+    if (m_enabled_users_mask == 0)
+    {
+        NRF_GPIOTE->INTENSET = GPIOTE_INTENSET_PORT_Msk;
+    }
+    // BUG FIX: use an unsigned shift (matches app_gpiote_user_disable);
+    // (1 << 31) on a signed int is undefined behavior.
+    m_enabled_users_mask |= (1UL << user_id);
+
+    // Enable sensing for all pins for specified user: arm each pin to sense
+    // the opposite of its current level so any change triggers the PORT event.
+    mp_users[user_id].sense_high_pins = 0;
+    for (pin_no = 0; pin_no < 32; pin_no++)
+    {
+        uint32_t pin_mask = (1UL << pin_no);
+
+        if ((mp_users[user_id].pins_mask & pin_mask) != 0)
+        {
+            uint32_t sense;
+
+            if ((pins_state & pin_mask) != 0)
+            {
+                sense = GPIO_PIN_CNF_SENSE_Low << GPIO_PIN_CNF_SENSE_Pos;
+            }
+            else
+            {
+                sense = GPIO_PIN_CNF_SENSE_High << GPIO_PIN_CNF_SENSE_Pos;
+                mp_users[user_id].sense_high_pins |= pin_mask;
+            }
+
+            NRF_GPIO->PIN_CNF[pin_no] &= ~GPIO_PIN_CNF_SENSE_Msk;
+            NRF_GPIO->PIN_CNF[pin_no] |= sense;
+        }
+    }
+
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Disable a registered GPIOTE user.
+ *
+ * Stops SENSE-based detection on all of the user's pins and masks the PORT
+ * interrupt once no users remain enabled.
+ *
+ * @param[in]  user_id  User id returned by app_gpiote_user_register().
+ *
+ * @retval NRF_SUCCESS               User disabled.
+ * @retval NRF_ERROR_INVALID_STATE   Module not initialized.
+ * @retval NRF_ERROR_INVALID_PARAM   Unknown user id.
+ */
+uint32_t app_gpiote_user_disable(app_gpiote_user_id_t user_id)
+{
+    // Validate module state first, then the supplied id.
+    if (mp_users == NULL)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+
+    if (user_id >= m_user_count)
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    // Stop sensing on every pin owned by this user.
+    pins_sense_disable(user_id);
+
+    // Clear the user's enable bit; mask the PORT interrupt when the last
+    // enabled user goes away.
+    m_enabled_users_mask &= ~(1UL << user_id);
+
+    if (m_enabled_users_mask == 0)
+    {
+        NRF_GPIOTE->INTENCLR = GPIOTE_INTENSET_PORT_Msk;
+    }
+
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Read the current state of the pins monitored by a user.
+ *
+ * @param[in]  user_id  User id returned by app_gpiote_user_register().
+ * @param[out] p_pins   Current GPIO input state, masked to the user's pins.
+ *
+ * @retval NRF_SUCCESS               State read.
+ * @retval NRF_ERROR_INVALID_STATE   Module not initialized.
+ * @retval NRF_ERROR_INVALID_PARAM   Unknown user id or p_pins is NULL.
+ */
+uint32_t app_gpiote_pins_state_get(app_gpiote_user_id_t user_id, uint32_t * p_pins)
+{
+    gpiote_user_t * p_user;
+
+    // Check state and parameters.
+    if (mp_users == NULL)
+    {
+        return NRF_ERROR_INVALID_STATE;
+    }
+    if (user_id >= m_user_count)
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+    // BUG FIX: p_pins was previously dereferenced without a NULL check.
+    if (p_pins == NULL)
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    // Get pins.
+    p_user  = &mp_users[user_id];
+    *p_pins = NRF_GPIO->IN & p_user->pins_mask;
+
+    return NRF_SUCCESS;
+}
+
+#if defined(SVCALL_AS_NORMAL_FUNCTION) || defined(SER_CONNECTIVITY)
+/* Serialization builds only: channel-based GPIOTE handlers are not supported
+ * by this implementation, so every entry point below reports that fact. */
+uint32_t app_gpiote_input_event_handler_register(const uint8_t                    channel,
+                                                 const uint32_t                   pin,
+                                                 const uint32_t                   polarity,
+                                                 app_gpiote_input_event_handler_t event_handler)
+{
+    // BUG FIX: this stub used to call sense_level_toggle(NULL, pin), which
+    // dereferences a NULL user pointer (undefined behavior). The call served
+    // no purpose and has been removed; the parameters are explicitly voided.
+    (void)channel;
+    (void)pin;
+    (void)polarity;
+    (void)event_handler;
+    return NRF_ERROR_NOT_SUPPORTED;
+}
+
+uint32_t app_gpiote_input_event_handler_unregister(const uint8_t channel)
+{
+    (void)channel;
+    return NRF_ERROR_NOT_SUPPORTED;
+}
+
+uint32_t app_gpiote_end_irq_event_handler_register(app_gpiote_input_event_handler_t event_handler)
+{
+    (void)event_handler;
+    return NRF_ERROR_NOT_SUPPORTED;
+}
+
+uint32_t app_gpiote_end_irq_event_handler_unregister(void)
+{
+    return NRF_ERROR_NOT_SUPPORTED;
+}
+
+uint32_t app_gpiote_enable_interrupts(void)
+{
+    return NRF_ERROR_NOT_SUPPORTED;
+}
+
+uint32_t app_gpiote_disable_interrupts(void)
+{
+    return NRF_ERROR_NOT_SUPPORTED;
+}
+#endif // SVCALL_AS_NORMAL_FUNCTION || SER_CONNECTIVITY
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/nordic/app_common/app_scheduler.c	Wed Jul 16 10:54:22 2014 +0100
@@ -0,0 +1,179 @@
+/* Copyright (c) 2012 Nordic Semiconductor. All Rights Reserved.
+ *
+ * The information contained herein is property of Nordic Semiconductor ASA.
+ * Terms and conditions of usage are described in detail in NORDIC
+ * SEMICONDUCTOR STANDARD SOFTWARE LICENSE AGREEMENT.
+ *
+ * Licensees are granted free, non-transferable use of the information. NO
+ * WARRANTY of ANY KIND is provided. This heading must NOT be removed from
+ * the file.
+ *
+ */
+
+#include "app_scheduler.h"
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include "nrf_soc.h"
+#include "nrf_assert.h"
+#include "app_util.h"
+#include "app_util_platform.h"
+
+/**@brief Structure for holding a scheduled event header. One header per queue slot;
+ *        the corresponding event data lives in m_queue_event_data at the same index. */
+typedef struct
+{
+    app_sched_event_handler_t handler;          /**< Pointer to event handler to receive the event. */
+    uint16_t                  event_data_size;  /**< Size of event data. */
+} event_header_t;
+
+// The buffer layout computed by APP_SCHED_BUF_SIZE/app_sched_init reserves
+// APP_SCHED_EVENT_HEADER_SIZE bytes per header slot.
+STATIC_ASSERT(sizeof(event_header_t) <= APP_SCHED_EVENT_HEADER_SIZE);
+
+static event_header_t * m_queue_event_headers;  /**< Array for holding the queue event headers. */
+static uint8_t        * m_queue_event_data;     /**< Array for holding the queue event data. */
+static volatile uint8_t m_queue_start_index;    /**< Index of queue entry at the start of the queue (read side; main loop). */
+static volatile uint8_t m_queue_end_index;      /**< Index of queue entry at the end of the queue (write side; may be an ISR). */
+static uint16_t         m_queue_event_size;     /**< Maximum event size in queue. */
+static uint16_t         m_queue_size;           /**< Number of queue entries. */
+
+/**@brief Macro for checking if a queue is full. One slot is kept unused so that
+ *        "full" (next(end) == start) is distinguishable from "empty" (end == start). */
+#define APP_SCHED_QUEUE_FULL() (next_index(m_queue_end_index) == m_queue_start_index)
+
+/**@brief Macro for checking if a queue is empty. */
+#define APP_SCHED_QUEUE_EMPTY() (m_queue_end_index == m_queue_start_index)
+
+
+/**@brief Advance a queue index by one, wrapping to 0 past the last entry.
+ *
+ * @param[in]   index   Current index.
+ *
+ * @return      Index of the following queue entry.
+ */
+static __INLINE uint8_t next_index(uint8_t index)
+{
+    if (index < m_queue_size)
+    {
+        return index + 1;
+    }
+    return 0;
+}
+
+
+/**@brief Initialize the scheduler with a caller-supplied queue buffer.
+ *
+ * The buffer holds (queue_size + 1) event headers followed by the event data
+ * area (one slot is sacrificed to tell "full" apart from "empty").
+ *
+ * @param[in]  event_size      Maximum size of a single event's data.
+ * @param[in]  queue_size      Number of usable queue entries.
+ * @param[in]  p_event_buffer  Word-aligned buffer (see APP_SCHED_BUF_SIZE).
+ *
+ * @retval NRF_SUCCESS             Scheduler initialized.
+ * @retval NRF_ERROR_INVALID_PARAM Buffer is NULL or not word-aligned.
+ */
+uint32_t app_sched_init(uint16_t event_size, uint16_t queue_size, void * p_event_buffer)
+{
+    uint16_t data_start_index = (queue_size + 1) * sizeof(event_header_t);
+
+    // BUG FIX: NULL is word-aligned, so the original accepted a NULL buffer
+    // and crashed later; reject it here (consistent with app_gpiote_init).
+    if ((p_event_buffer == NULL) || !is_word_aligned(p_event_buffer))
+    {
+        return NRF_ERROR_INVALID_PARAM;
+    }
+
+    // Initialize event scheduler
+    m_queue_event_headers = p_event_buffer;
+    m_queue_event_data    = &((uint8_t *)p_event_buffer)[data_start_index];
+    m_queue_end_index     = 0;
+    m_queue_start_index   = 0;
+    m_queue_event_size    = event_size;
+    m_queue_size          = queue_size;
+
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Queue an event for later execution by app_sched_execute().
+ *
+ * Safe to call from interrupt context: only the slot reservation runs inside
+ * a critical region; the slot is then filled outside it.
+ *
+ * @param[in]  p_event_data     Event data to copy into the queue (may be NULL).
+ * @param[in]  event_data_size  Size of the event data.
+ * @param[in]  handler          Handler to invoke when the event is executed.
+ *
+ * @retval NRF_SUCCESS              Event queued.
+ * @retval NRF_ERROR_NO_MEM         Queue is full.
+ * @retval NRF_ERROR_INVALID_LENGTH event_data_size exceeds the configured maximum.
+ */
+uint32_t app_sched_event_put(void                    * p_event_data,
+                             uint16_t                  event_data_size,
+                             app_sched_event_handler_t handler)
+{
+    uint32_t err_code;
+
+    if (event_data_size <= m_queue_event_size)
+    {
+        // 0xFFFF is a sentinel meaning "no slot could be reserved".
+        uint16_t event_index = 0xFFFF;
+
+        CRITICAL_REGION_ENTER();
+
+        // Reserve a slot by advancing the end index; this is the only part
+        // that races with other producers and so needs the critical region.
+        if (!APP_SCHED_QUEUE_FULL())
+        {
+            event_index       = m_queue_end_index;
+            m_queue_end_index = next_index(m_queue_end_index);
+        }
+
+        CRITICAL_REGION_EXIT();
+
+        if (event_index != 0xFFFF)
+        {
+            // NOTE: This can be done outside the critical region since the event consumer will
+            //       always be called from the main loop, and will thus never interrupt this code.
+            m_queue_event_headers[event_index].handler = handler;
+            if ((p_event_data != NULL) && (event_data_size > 0))
+            {
+                memcpy(&m_queue_event_data[event_index * m_queue_event_size],
+                       p_event_data,
+                       event_data_size);
+                m_queue_event_headers[event_index].event_data_size = event_data_size;
+            }
+            else
+            {
+                // No data supplied: the handler will be called with size 0.
+                m_queue_event_headers[event_index].event_data_size = 0;
+            }
+
+            err_code = NRF_SUCCESS;
+        }
+        else
+        {
+            err_code = NRF_ERROR_NO_MEM;
+        }
+    }
+    else
+    {
+        err_code = NRF_ERROR_INVALID_LENGTH;
+    }
+
+    return err_code;
+}
+
+
+/**@brief Function for reading the next event from specified event queue.
+ *
+ * Must only be called from app_sched_execute() in the main loop; see the NOTE
+ * below for why no critical region is needed here.
+ *
+ * @param[out]  pp_event_data       Pointer to pointer to event data.
+ * @param[out]  p_event_data_size   Pointer to size of event data.
+ * @param[out]  p_event_handler     Pointer to event handler function pointer.
+ *
+ * @return      NRF_SUCCESS if new event, NRF_ERROR_NOT_FOUND if event queue is empty.
+ */
+static uint32_t app_sched_event_get(void                     ** pp_event_data,
+                                    uint16_t *                  p_event_data_size,
+                                    app_sched_event_handler_t * p_event_handler)
+{
+    uint32_t err_code = NRF_ERROR_NOT_FOUND;
+
+    if (!APP_SCHED_QUEUE_EMPTY())
+    {
+        uint16_t event_index;
+
+        // NOTE: There is no need for a critical region here, as this function will only be called
+        //       from app_sched_execute() from inside the main loop, so it will never interrupt
+        //       app_sched_event_put(). Also, updating of (i.e. writing to) the start index will be
+        //       an atomic operation.
+        event_index         = m_queue_start_index;
+        m_queue_start_index = next_index(m_queue_start_index);
+
+        // The returned pointers reference the queue slot itself; the slot is
+        // not reused until the producer wraps all the way around.
+        *pp_event_data     = &m_queue_event_data[event_index * m_queue_event_size];
+        *p_event_data_size = m_queue_event_headers[event_index].event_data_size;
+        *p_event_handler   = m_queue_event_headers[event_index].handler;
+
+        err_code = NRF_SUCCESS;
+    }
+
+    return err_code;
+}
+
+
+/**@brief Drain the scheduler queue, dispatching each pending event to its handler.
+ *
+ * Intended to be called repeatedly from the application's main loop.
+ */
+void app_sched_execute(void)
+{
+    void                    * p_data;
+    uint16_t                  data_size;
+    app_sched_event_handler_t handler;
+
+    // Keep pulling events until the queue reports empty.
+    while (app_sched_event_get(&p_data, &data_size, &handler) == NRF_SUCCESS)
+    {
+        handler(p_data, data_size);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/nordic/app_common/hci_mem_pool.c	Wed Jul 16 10:54:22 2014 +0100
@@ -0,0 +1,235 @@
+/* Copyright (c) 2013 Nordic Semiconductor. All Rights Reserved.
+ *
+ * The information contained herein is property of Nordic Semiconductor ASA.
+ * Terms and conditions of usage are described in detail in NORDIC
+ * SEMICONDUCTOR STANDARD SOFTWARE LICENSE AGREEMENT.
+ *
+ * Licensees are granted free, non-transferable use of the information. NO
+ * WARRANTY of ANY KIND is provided. This heading must NOT be removed from
+ * the file.
+ *
+ */
+ 
+#include "hci_mem_pool.h"
+#include "hci_mem_pool_internal.h"
+#include <stdbool.h>
+#include <stdio.h>
+
+/**@brief RX buffer element instance structure.
+ */
+typedef struct
+{
+    uint8_t  rx_buffer[RX_BUF_SIZE];                                /**< RX buffer memory array. */
+    uint32_t length;                                                /**< Length of the data currently stored in rx_buffer (set via hci_mem_pool_rx_data_size_set()). */
+} rx_buffer_elem_t;
+
+/**@brief RX buffer queue element instance structure.
+ */
+typedef struct
+{
+    rx_buffer_elem_t * p_buffer;                                    /**< Pointer to RX buffer element. */
+    uint32_t           free_window_count;                           /**< Free space element count. */
+    uint32_t           free_available_count;                        /**< Free area element count. */
+    uint32_t           read_available_count;                        /**< Read area element count. */
+    uint32_t           write_index;                                 /**< Write position index. */
+    uint32_t           read_index;                                  /**< Read position index. */
+    uint32_t           free_index;                                  /**< Bitmask of produced-but-not-yet-consumed buffers (bit i == element i); despite the name it is NOT an index. */
+} rx_buffer_queue_t;
+
+static bool              m_is_tx_allocated;                         /**< Boolean value to determine if the TX buffer is allocated. */
+static rx_buffer_elem_t  m_rx_buffer_elem_queue[RX_BUF_QUEUE_SIZE]; /**< RX buffer element instances. */
+static rx_buffer_queue_t m_rx_buffer_queue;                         /**< RX buffer queue element instance. */
+
+
+/**@brief Open the memory pool: reset the TX flag and put the RX queue in its
+ *        initial, empty state.
+ *
+ * @retval NRF_SUCCESS Always.
+ */
+uint32_t hci_mem_pool_open(void)
+{
+    m_is_tx_allocated = false;
+
+    m_rx_buffer_queue.p_buffer             = m_rx_buffer_elem_queue;
+    m_rx_buffer_queue.free_window_count    = RX_BUF_QUEUE_SIZE;
+    m_rx_buffer_queue.free_available_count = 0;
+    m_rx_buffer_queue.read_available_count = 0;
+    m_rx_buffer_queue.write_index          = 0;
+    m_rx_buffer_queue.read_index           = 0;
+    m_rx_buffer_queue.free_index           = 0;
+
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Close the memory pool. Nothing to release; provided for API symmetry.
+ *
+ * @retval NRF_SUCCESS Always.
+ */
+uint32_t hci_mem_pool_close(void)
+{
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Allocate the single TX buffer.
+ *
+ * The pool owns one static TX buffer and hands it out to at most one owner
+ * at a time; release it with hci_mem_pool_tx_free().
+ *
+ * @param[out] pp_buffer  Receives a pointer to the TX buffer.
+ *
+ * @retval NRF_SUCCESS       Buffer allocated.
+ * @retval NRF_ERROR_NULL    pp_buffer is NULL.
+ * @retval NRF_ERROR_NO_MEM  Buffer is already allocated.
+ */
+uint32_t hci_mem_pool_tx_alloc(void ** pp_buffer)
+{
+    static uint8_t tx_buffer[TX_BUF_SIZE];
+
+    if (pp_buffer == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    if (m_is_tx_allocated)
+    {
+        return NRF_ERROR_NO_MEM;
+    }
+
+    m_is_tx_allocated = true;
+    *pp_buffer        = tx_buffer;
+
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Release the TX buffer so it can be allocated again.
+ *
+ * @retval NRF_SUCCESS Always.
+ */
+uint32_t hci_mem_pool_tx_free(void)
+{
+    m_is_tx_allocated = false;
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Reserve the next RX buffer element for writing.
+ *
+ * @param[in]  length     Number of bytes the caller intends to write.
+ * @param[out] pp_buffer  Receives a pointer to the reserved buffer (NULL on failure).
+ *
+ * @retval NRF_SUCCESS          Buffer reserved.
+ * @retval NRF_ERROR_NULL       pp_buffer is NULL.
+ * @retval NRF_ERROR_DATA_SIZE  length exceeds RX_BUF_SIZE.
+ * @retval NRF_ERROR_NO_MEM     No free buffer elements available.
+ */
+uint32_t hci_mem_pool_rx_produce(uint32_t length, void ** pp_buffer)
+{
+    uint32_t err_code;
+
+    if (pp_buffer == NULL)
+    {
+        return NRF_ERROR_NULL;
+    }
+    *pp_buffer = NULL;
+
+    if (m_rx_buffer_queue.free_window_count != 0)
+    {
+        if (length <= RX_BUF_SIZE)
+        {
+            --(m_rx_buffer_queue.free_window_count);
+            ++(m_rx_buffer_queue.read_available_count);
+
+            *pp_buffer                    =
+                    m_rx_buffer_queue.p_buffer[m_rx_buffer_queue.write_index].rx_buffer;
+
+            // Mark this element as in use; hci_mem_pool_rx_consume() clears
+            // the bit again when the element is returned.
+            m_rx_buffer_queue.free_index |= (1u << m_rx_buffer_queue.write_index);
+
+            // @note: Adjust the write_index making use of the fact that the buffer size is of
+            // power of two and two's complement arithmetic. For details refer example to book
+            // "Making embedded systems: Elicia White".
+            m_rx_buffer_queue.write_index =
+                    (m_rx_buffer_queue.write_index + 1u) & (RX_BUF_QUEUE_SIZE - 1u);
+
+            err_code                      = NRF_SUCCESS;
+        }
+        else
+        {
+            err_code = NRF_ERROR_DATA_SIZE;
+        }
+    }
+    else
+    {
+        err_code = NRF_ERROR_NO_MEM;
+    }
+
+    return err_code;
+}
+
+
+/**@brief Return an extracted RX buffer to the pool.
+ *
+ * Locates @p p_buffer among the outstanding (extracted) elements, clears its
+ * in-use bit, and then releases every leading element whose bit is clear,
+ * preserving FIFO order of the queue.
+ *
+ * @param[in]  p_buffer  Buffer previously obtained via hci_mem_pool_rx_extract().
+ *
+ * @retval NRF_SUCCESS             Buffer returned to the pool.
+ * @retval NRF_ERROR_INVALID_ADDR  p_buffer does not match any outstanding element.
+ * @retval NRF_ERROR_NO_MEM        No outstanding elements to consume.
+ */
+uint32_t hci_mem_pool_rx_consume(uint8_t * p_buffer)
+{
+    uint32_t err_code;
+    uint32_t consume_index;
+    uint32_t start_index;
+
+    if (m_rx_buffer_queue.free_available_count != 0)
+    {
+        // Find the buffer that has been freed -
+        // Start at read_index minus free_available_count and then increment until read index.
+        err_code      = NRF_ERROR_INVALID_ADDR;
+        consume_index = (m_rx_buffer_queue.read_index - m_rx_buffer_queue.free_available_count) &
+                        (RX_BUF_QUEUE_SIZE - 1u);
+        start_index   = consume_index;
+
+        do
+        {
+            if (m_rx_buffer_queue.p_buffer[consume_index].rx_buffer == p_buffer)
+            {
+                // Clear the in-use bit for the matched element.
+                m_rx_buffer_queue.free_index ^= (1u << consume_index);
+                err_code = NRF_SUCCESS;
+                break;
+            }
+            else
+            {
+                consume_index = (consume_index + 1u) & (RX_BUF_QUEUE_SIZE - 1u);
+            }
+        }
+        while (consume_index != m_rx_buffer_queue.read_index);
+
+        // Release all leading elements whose in-use bit is clear.
+        // BUG FIX: the original advanced start_index from consume_index (a
+        // fixed value after the search), so the loop kept re-testing the same
+        // bit and could release the wrong number of elements; the scan must
+        // advance start_index itself.
+        while (!(m_rx_buffer_queue.free_index & (1u << start_index)) &&
+                (m_rx_buffer_queue.free_available_count != 0))
+        {
+            --(m_rx_buffer_queue.free_available_count);
+            ++(m_rx_buffer_queue.free_window_count);
+            start_index = (start_index + 1u) & (RX_BUF_QUEUE_SIZE - 1u);
+        }
+    }
+    else
+    {
+        err_code = NRF_ERROR_NO_MEM;
+    }
+
+    return err_code;
+}
+
+
+/**@brief Record the data length of the most recently produced RX buffer.
+ *
+ * @param[in]  length  Number of bytes actually written into the buffer.
+ *
+ * @retval NRF_SUCCESS Always.
+ */
+uint32_t hci_mem_pool_rx_data_size_set(uint32_t length)
+{
+    // write_index already points one past the most recently produced element;
+    // step back one slot, wrapping via the power-of-two queue size mask.
+    const uint32_t last_written = (m_rx_buffer_queue.write_index - 1u) & (RX_BUF_QUEUE_SIZE - 1u);
+
+    m_rx_buffer_queue.p_buffer[last_written].length = length;
+
+    return NRF_SUCCESS;
+}
+
+
+/**@brief Extract the oldest produced RX buffer for reading.
+ *
+ * @param[out] pp_buffer  Receives a pointer to the buffer data.
+ * @param[out] p_length   Receives the data length recorded for the buffer.
+ *
+ * @retval NRF_SUCCESS       Buffer extracted.
+ * @retval NRF_ERROR_NULL    pp_buffer or p_length is NULL.
+ * @retval NRF_ERROR_NO_MEM  No produced buffers are available.
+ */
+uint32_t hci_mem_pool_rx_extract(uint8_t ** pp_buffer, uint32_t * p_length)
+{
+    rx_buffer_elem_t * p_elem;
+
+    if ((pp_buffer == NULL) || (p_length == NULL))
+    {
+        return NRF_ERROR_NULL;
+    }
+
+    if (m_rx_buffer_queue.read_available_count == 0)
+    {
+        return NRF_ERROR_NO_MEM;
+    }
+
+    // Move the element from the "readable" to the "awaiting consume" set.
+    --(m_rx_buffer_queue.read_available_count);
+    ++(m_rx_buffer_queue.free_available_count);
+
+    p_elem     = &m_rx_buffer_queue.p_buffer[m_rx_buffer_queue.read_index];
+    *pp_buffer = p_elem->rx_buffer;
+    *p_length  = p_elem->length;
+
+    // Advance the read position, wrapping via the power-of-two queue size mask.
+    m_rx_buffer_queue.read_index =
+        (m_rx_buffer_queue.read_index + 1u) & (RX_BUF_QUEUE_SIZE - 1u);
+
+    return NRF_SUCCESS;
+}