ser_phy_hci.c

  1. /**
  2. * Copyright (c) 2014 - 2020, Nordic Semiconductor ASA
  3. *
  4. * All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without modification,
  7. * are permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright notice, this
  10. * list of conditions and the following disclaimer.
  11. *
  12. * 2. Redistributions in binary form, except as embedded into a Nordic
  13. * Semiconductor ASA integrated circuit in a product or a software update for
  14. * such product, must reproduce the above copyright notice, this list of
  15. * conditions and the following disclaimer in the documentation and/or other
  16. * materials provided with the distribution.
  17. *
  18. * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
  19. * contributors may be used to endorse or promote products derived from this
  20. * software without specific prior written permission.
  21. *
  22. * 4. This software, with or without modification, must only be used with a
  23. * Nordic Semiconductor ASA integrated circuit.
  24. *
  25. * 5. Any software provided in binary form under this license must not be reverse
  26. * engineered, decompiled, modified and/or disassembled.
  27. *
  28. * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
  29. * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  30. * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
  31. * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
  32. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  33. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
  34. * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  35. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  36. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  37. * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  38. *
  39. */
  40. #include <stddef.h>
  41. #include <string.h>
  42. #include "app_error.h"
  43. #include "app_util.h"
  44. #include "app_util_platform.h"
  45. #include "app_timer.h"
  46. #include "nrf_queue.h"
  47. #include "ser_phy.h"
  48. #include "ser_phy_hci.h"
  49. #include "crc16.h"
  50. #include "nrf_soc.h"
  51. #include "ser_config.h"
  52. #include "ser_phy_debug_comm.h"
  53. #include "nrf_sdh.h"
  54. #include "ser_hal_transport.h"
  55. #include "app_scheduler.h"
  56. #ifdef SER_CONNECTIVITY
  57. #include "ser_conn_handlers.h"
  58. #include "ser_conn_reset_cmd_decoder.h"
  59. #endif /* SER_CONNECTIVITY */
  60. #ifdef BLE_STACK_SUPPORT_REQD
  61. #include "nrf_sdm.h"
  62. #endif /* BLE_STACK_SUPPORT_REQD */
  63. #define NRF_LOG_MODULE_NAME sphy_hci
  64. #include "nrf_log.h"
  65. NRF_LOG_MODULE_REGISTER();
  66. // hide globals for release version, expose for debug version
  67. #if defined(SER_PHY_HCI_DEBUG_ENABLE)
  68. #define _static
  69. #else
  70. #define _static static
  71. #endif
  72. #define PKT_HDR_SIZE 4 /**< Packet header size in number of bytes. */
  73. #define PKT_CRC_SIZE 2 /**< Packet CRC size in number of bytes. */
  74. #define MAX_PACKET_SIZE_IN_BITS (11uL * \
  75. (SER_HAL_TRANSPORT_MAX_PKT_SIZE + PKT_HDR_SIZE + PKT_CRC_SIZE))
  76. #define BAUD_TIME_us (1000000uL / SER_PHY_UART_BAUDRATE_VAL)
  77. #define TX_EVT_QUEUE_SIZE 16
  78. #define RX_EVT_QUEUE_SIZE 16
  79. #define PKT_TYPE_VENDOR_SPECIFIC 14 /**< Packet type vendor specific. */
  80. #define PKT_TYPE_ACK 0 /**< Packet type acknowledgement. */
  81. #define PKT_TYPE_LINK_CONTROL 15 /**< Packet type link control. */
  82. #define PKT_TYPE_RESET 5 /**< Packet type reset. */
  83. #define DATA_INTEGRITY_MASK (1 << 6) /**< Mask for data integrity bit in the packet header. */
  84. #define RELIABLE_PKT_MASK (1 << 7) /**< Mask for reliable packet bit in the packet header. */
  85. #define INITIAL_ACK_NUMBER_EXPECTED 0 /**< Initial acknowledge number expected. */
  86. #define INITIAL_SEQ_NUMBER INITIAL_ACK_NUMBER_EXPECTED /**< Initial sequence number of the transmitted packet. */
  87. #define INVALID_PKT_TYPE 0xFFFFFFFFu /**< Internal invalid packet type value. */
  88. #define MAX_TRANSMISSION_TIME_ms (MAX_PACKET_SIZE_IN_BITS * BAUD_TIME_us / 1000uL) /**< Max transmission time of a single application packet over UART in units of milliseconds. */
  89. #define RETRANSMISSION_TIMEOUT_IN_ms (50uL * MAX_TRANSMISSION_TIME_ms) /**< Retransmission timeout for application packet in units of milliseconds. */
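// Worked example (a sketch; the actual values come from ser_config.h — here assuming
// SER_PHY_UART_BAUDRATE_VAL = 1000000 and SER_HAL_TRANSPORT_MAX_PKT_SIZE = 512):
//   MAX_PACKET_SIZE_IN_BITS      = 11 * (512 + 4 + 2) = 5698   (11 bits/byte allows for start/stop/parity overhead)
//   BAUD_TIME_us                 = 1000000 / 1000000  = 1 us per bit
//   MAX_TRANSMISSION_TIME_ms     = 5698 * 1 / 1000    = 5 ms   (integer division)
//   RETRANSMISSION_TIMEOUT_IN_ms = 50 * 5             = 250 ms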
  90. #ifdef HCI_LINK_CONTROL
  91. #define HCI_PKT_SYNC 0x7E01u /**< Link Control Packet: type SYNC */
  92. #define HCI_PKT_SYNC_RSP 0x7D02u /**< Link Control Packet: type SYNC RESPONSE */
  93. #define HCI_PKT_CONFIG 0xFC03u /**< Link Control Packet: type CONFIG */
  94. #define HCI_PKT_CONFIG_RSP 0x7B04u /**< Link Control Packet: type CONFIG RESPONSE */
  95. #define HCI_CONFIG_FIELD 0x11u /**< Configuration field of CONFIG and CONFIG_RSP packet */
  96. #define HCI_PKT_SYNC_SIZE 6u /**< Size of SYNC and SYNC_RSP packet */
  97. #define HCI_PKT_CONFIG_SIZE 7u /**< Size of CONFIG and CONFIG_RSP packet */
  98. #define HCI_LINK_CONTROL_PKT_INVALID 0xFFFFu /**< Invalid or unrecognized Link Control packet type. */
  99. #define HCI_LINK_CONTROL_TIMEOUT 1u /**< Default link control timeout. */
  100. #endif /* HCI_LINK_CONTROL */
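// The defines above describe a SYNC/CONFIG handshake modelled on the Bluetooth three-wire
// UART (H5) link establishment: the peers first exchange SYNC/SYNC_RSP, then CONFIG/CONFIG_RSP
// carrying HCI_CONFIG_FIELD, and only afterwards does hci_link_control_event_handler() enable
// the reliable TX/RX state machines (see HCI_MODE_ACTIVE).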
  101. #define PACKET_TYPE_STR(type)\
  102. ((type == PKT_TYPE_ACK) ? "ACK" :\
  103. ((type == PKT_TYPE_LINK_CONTROL) ? "Link Control" : \
  104. ((type == PKT_TYPE_VENDOR_SPECIFIC) ? "Vendor Specific" : "Reset")))
  105. #define RETRANSMISSION_TIMEOUT_IN_TICKS (APP_TIMER_TICKS(RETRANSMISSION_TIMEOUT_IN_ms)) /**< Retransmission timeout for application packet in units of timer ticks. */
  106. #define MAX_RETRY_COUNT 5 /**< Max retransmission retry count for application packets. */
  107. #if (defined(HCI_TIMER0))
  108. #define HCI_TIMER NRF_TIMER0
  109. #define HCI_TIMER_IRQn TIMER0_IRQn
  110. #define HCI_TIMER_IRQHandler TIMER0_IRQHandler
  111. #elif (defined(HCI_TIMER1))
  112. #define HCI_TIMER NRF_TIMER1
  113. #define HCI_TIMER_IRQn TIMER1_IRQn
  114. #define HCI_TIMER_IRQHandler TIMER1_IRQHandler
  115. #elif (defined(HCI_TIMER2))
  116. #define HCI_TIMER NRF_TIMER2
  117. #define HCI_TIMER_IRQn TIMER2_IRQn
  118. #define HCI_TIMER_IRQHandler TIMER2_IRQHandler
  119. #else
  120. #define HCI_APP_TIMER
  121. #endif
  122. /**@brief States of the hci event driven state machine. */
  123. typedef enum
  124. {
  125. HCI_TX_STATE_DISABLE,
  126. HCI_TX_STATE_SEND,
  127. HCI_TX_STATE_WAIT_FOR_FIRST_TX_END,
  128. HCI_TX_STATE_WAIT_FOR_ACK_OR_TX_END,
  129. HCI_TX_STATE_WAIT_FOR_ACK,
  130. HCI_TX_STATE_WAIT_FOR_TX_END
  131. } hci_tx_fsm_state_t;
  132. typedef enum
  133. {
  134. HCI_RX_STATE_DISABLE,
  135. HCI_RX_STATE_RECEIVE,
  136. HCI_RX_STATE_WAIT_FOR_MEM,
  137. HCI_RX_STATE_WAIT_FOR_SLIP_ACK_END,
  138. HCI_RX_STATE_WAIT_FOR_SLIP_NACK_END,
  139. } hci_rx_fsm_state_t;
  140. typedef enum
  141. {
  142. HCI_EVT_TIMEOUT,
  143. } hci_timer_evt_type_t;
  144. typedef enum
  145. {
  146. HCI_SER_PHY_TX_REQUEST,
  147. HCI_SER_PHY_RX_BUF_GRANTED,
  148. HCI_SER_PHY_EVT_GEN_ENABLE,
  149. HCI_SER_PHY_EVT_GEN_DISABLE
  150. } ser_phy_int_evt_type_t;
  151. typedef enum
  152. {
  153. HCI_SER_PHY_EVT,
  154. HCI_SLIP_EVT,
  155. HCI_TIMER_EVT,
  156. } hci_evt_source_t;
  157. #ifdef HCI_LINK_CONTROL
  158. typedef enum
  159. {
  160. HCI_MODE_DISABLE,
  161. HCI_MODE_UNINITIALIZED,
  162. HCI_MODE_INITIALIZED,
  163. HCI_MODE_ACTIVE,
  164. } hci_mode_t;
  165. #endif /*HCI_LINK_CONTROL */
  166. typedef struct
  167. {
  168. hci_timer_evt_type_t evt_type; /**< Type of an event. */
  169. } hci_timer_evt_t;
  170. typedef struct
  171. {
  172. ser_phy_int_evt_type_t evt_type; /**< Type of an event. */
  173. } ser_phy_int_evt_t;
  174. typedef struct
  175. {
  176. hci_evt_source_t evt_source; /**< source of an event. */
  177. union
  178. {
  179. ser_phy_int_evt_t ser_phy_evt; /**< ser_phy event. */
  180. ser_phy_hci_slip_evt_t ser_phy_slip_evt; /**< ser_phy_hci event. */
  181. hci_timer_evt_t timer_evt; /**< timer event. */
  182. } evt;
  183. } hci_evt_t;
  184. _static uint8_t m_tx_packet_header[PKT_HDR_SIZE];
  185. _static uint8_t m_tx_packet_crc[PKT_CRC_SIZE];
  186. _static uint8_t m_tx_ack_packet[PKT_HDR_SIZE];
  187. #ifdef HCI_LINK_CONTROL
  188. _static uint8_t m_tx_link_control_header[PKT_HDR_SIZE];
  189. _static uint8_t m_tx_link_control_payload[HCI_PKT_CONFIG_SIZE - PKT_HDR_SIZE];
  190. static bool m_cfg_sent;
  191. #endif /* HCI_LINK_CONTROL */
  192. _static uint32_t m_packet_ack_number; // Sequence number expected in the next received reliable packet
  193. _static uint32_t m_packet_seq_number; // Sequence number of the transmitted packet for which an acknowledgement is awaited
  194. _static uint32_t m_tx_retry_count;
  195. // _static uint32_t m_tx_retx_counter = 0;
  196. // _static uint32_t m_rx_drop_counter = 0;
  197. NRF_QUEUE_DEF(hci_evt_t,
  198. m_tx_evt_queue,
  199. TX_EVT_QUEUE_SIZE,
  200. NRF_QUEUE_MODE_NO_OVERFLOW);
  201. NRF_QUEUE_DEF(hci_evt_t,
  202. m_rx_evt_queue,
  203. RX_EVT_QUEUE_SIZE,
  204. NRF_QUEUE_MODE_NO_OVERFLOW);
  205. _static hci_tx_fsm_state_t m_hci_tx_fsm_state = HCI_TX_STATE_DISABLE;
  206. _static hci_rx_fsm_state_t m_hci_rx_fsm_state = HCI_RX_STATE_DISABLE;
  207. #ifdef HCI_LINK_CONTROL
  208. _static hci_mode_t m_hci_mode = HCI_MODE_DISABLE;
  209. _static uint16_t m_hci_link_control_next_pkt = HCI_PKT_SYNC;
  210. _static bool m_hci_other_side_active = false;
  211. #endif /* HCI_LINK_CONTROL */
  212. #ifdef HCI_APP_TIMER
  213. APP_TIMER_DEF(m_app_timer_id);
  214. #endif
  215. _static bool m_tx_fsm_idle_flag = true;
  216. _static bool m_rx_fsm_idle_flag = true;
  217. _static bool m_buffer_reqested_flag = false;
  218. _static uint8_t * m_p_rx_buffer = NULL;
  219. _static uint16_t m_rx_packet_length;
  220. _static uint8_t * m_p_rx_packet;
  221. _static uint8_t * m_p_tx_payload = NULL;
  222. _static uint16_t m_tx_payload_length;
  223. _static ser_phy_events_handler_t m_ser_phy_callback = NULL;
  224. static void hci_tx_event_handler(hci_evt_t * p_event);
  225. static void hci_rx_event_handler(hci_evt_t * p_event);
  226. #ifdef HCI_LINK_CONTROL
  227. static void hci_link_control_event_handler(hci_evt_t * p_event);
  228. #endif /* HCI_LINK_CONTROL */
  229. _static bool m_hci_timer_enabled_flag = true;
  230. _static bool m_hci_timout_pending_flag = false;
  231. _static bool m_hci_global_enable_flag = true;
  232. #define ser_phy_hci_assert(cond) APP_ERROR_CHECK_BOOL(cond)
  233. static void hci_signal_timeout_event(void)
  234. {
  235. hci_evt_t event;
  236. event.evt_source = HCI_TIMER_EVT;
  237. event.evt.timer_evt.evt_type = HCI_EVT_TIMEOUT;
  238. DEBUG_EVT_TIMEOUT(0);
  239. #ifndef HCI_LINK_CONTROL
  240. hci_tx_event_handler(&event);
  241. #else
  242. hci_link_control_event_handler(&event);
  243. if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
  244. {
  245. hci_tx_event_handler(&event);
  246. }
  247. #endif /* HCI_LINK_CONTROL */
  248. }
  249. #ifndef HCI_APP_TIMER
  250. void HCI_TIMER_IRQHandler(void)
  251. {
  252. if ((HCI_TIMER->EVENTS_COMPARE[1] == 1) && (HCI_TIMER->INTENSET & TIMER_INTENSET_COMPARE1_Msk))
  253. {
  254. HCI_TIMER->EVENTS_COMPARE[1] = 0;
  255. HCI_TIMER->TASKS_CLEAR = 1;
  256. if (m_hci_timer_enabled_flag)
  257. {
  258. hci_signal_timeout_event();
  259. }
  260. else
  261. {
  262. m_hci_timout_pending_flag = true;
  263. }
  264. }
  265. }
  266. static void hci_timeout_setup(uint32_t count)
  267. {
  268. uint32_t time_msec;
  269. if (count)
  270. {
  271. HCI_TIMER->INTENCLR = TIMER_INTENCLR_COMPARE1_Msk;
  272. time_msec = count * RETRANSMISSION_TIMEOUT_IN_ms;
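// With PRESCALER = 9 (set in hci_timer_reset()) the 16 MHz timer ticks at 16 MHz / 2^9 = 31250 Hz,
// i.e. 31.25 ticks per millisecond; the two lines below compute time_msec * 31.25 without floats.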
  273. HCI_TIMER->CC[1] = time_msec * 31;
  274. HCI_TIMER->CC[1] += time_msec / 4;
  275. HCI_TIMER->TASKS_CLEAR = 1; // < Clear TIMER
  276. HCI_TIMER->EVENTS_COMPARE[1] = 0;
  277. HCI_TIMER->TASKS_START = 1; // < Start TIMER
  278. HCI_TIMER->INTENSET = TIMER_INTENSET_COMPARE1_Msk;
  279. }
  280. else
  281. {
  282. HCI_TIMER->INTENCLR = TIMER_INTENCLR_COMPARE1_Msk;
  283. HCI_TIMER->TASKS_STOP = 1; // < Stop TIMER
  284. }
  285. }
  286. #else
  287. _static bool m_hci_timer_setup_flag = false;
  288. _static uint32_t m_hci_timer_counter = 0;
  289. _static uint32_t m_hci_timer_setup;
  290. static void hci_timeout_setup(uint32_t count)
  291. {
  292. m_hci_timer_setup = count;
  293. m_hci_timer_setup_flag = true;
  294. }
  295. static void hci_timeout_handler(void * p_context)
  296. {
  297. if (m_hci_timer_setup_flag)
  298. {
  299. m_hci_timer_setup_flag = false;
  300. m_hci_timer_counter = m_hci_timer_setup; /* for 1 it will be always more than 1 tick - jitter is up to 1 tick */
  301. }
  302. else if ( m_hci_timer_counter )
  303. {
  304. m_hci_timer_counter--;
  305. if (m_hci_timer_counter == 0)
  306. {
  307. if (m_hci_timer_enabled_flag)
  308. {
  309. hci_signal_timeout_event();
  310. }
  311. else
  312. {
  313. m_hci_timout_pending_flag = true;
  314. }
  315. }
  316. }
  317. return;
  318. }
  319. #endif
  320. /**@brief Function for validating a received packet.
  321. *
  322. * @param[in] p_buffer Pointer to the packet data.
  323. * @param[in] length Length of packet data in bytes.
  324. *
  325. * @return true if received packet is valid, false otherwise.
  326. */
  327. static bool is_rx_pkt_valid(const uint8_t * p_buffer, uint32_t length)
  328. {
  329. // Executed packet filtering algorithm order:
  330. // - verify packet overall length
  331. // - verify data integrity bit set
  332. // - verify reliable packet bit set
  333. // - verify supported packet type
  334. // - verify header checksum
  335. // - verify payload length field
  336. // - verify CRC
  337. if (length <= PKT_HDR_SIZE)
  338. {
  339. return false;
  340. }
  341. if (!(p_buffer[0] & DATA_INTEGRITY_MASK))
  342. {
  343. return false;
  344. }
  345. if (!(p_buffer[0] & RELIABLE_PKT_MASK))
  346. {
  347. return false;
  348. }
  349. if ((p_buffer[1] & 0x0Fu) != PKT_TYPE_VENDOR_SPECIFIC)
  350. {
  351. return false;
  352. }
  353. const uint32_t expected_checksum =
  354. ((p_buffer[0] + p_buffer[1] + p_buffer[2] + p_buffer[3])) & 0xFFu;
  355. if (expected_checksum != 0)
  356. {
  357. return false;
  358. }
  359. const uint16_t crc_calculated = crc16_compute(p_buffer, (length - PKT_CRC_SIZE), NULL);
  360. const uint16_t crc_received = uint16_decode(&p_buffer[length - PKT_CRC_SIZE]);
  361. if (crc_calculated != crc_received)
  362. {
  363. return false;
  364. }
  365. return true;
  366. }
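// Layout of a reliable (vendor-specific) packet, as checked above and built in hci_pkt_send():
//   byte 0      : bits 0-2 SEQ, bits 3-5 ACK, bit 6 data integrity flag, bit 7 reliable packet flag
//   byte 1      : bits 0-3 packet type, bits 4-7 low nibble of the 12-bit payload length
//   byte 2      : remaining 8 bits of the payload length
//   byte 3      : header checksum (two's complement of the sum of bytes 0-2, so bytes 0-3 sum to 0 mod 256)
//   bytes 4..N-3: payload
//   last 2 bytes: CRC-16 over header and payload, little endian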
  367. /**@brief Function for getting the sequence number of the next reliable packet expected.
  368. *
  369. * @return sequence number of the next reliable packet expected.
  370. */
  371. static __INLINE uint8_t packet_ack_get(void)
  372. {
  373. return (uint8_t) m_packet_ack_number;
  374. }
  375. /**@brief Function for getting the sequence number of a reliable TX packet for which peer protocol
  376. * entity acknowledgment is pending.
  377. *
  378. * @return sequence number of a reliable TX packet for which peer protocol entity acknowledgement
  379. * is pending.
  380. */
  381. static __INLINE uint8_t packet_seq_get(void)
  382. {
  383. return (uint8_t) m_packet_seq_number;
  384. }
  385. static __INLINE uint8_t packet_seq_nmbr_extract(const uint8_t * p_buffer)
  386. {
  387. return (p_buffer[0] & 0x07u);
  388. }
  389. /**@brief Function for constructing 1st byte of the packet header of the packet to be transmitted.
  390. *
  391. * @return 1st byte of the packet header of the packet to be transmitted
  392. */
  393. static __INLINE uint8_t tx_packet_byte_zero_construct(void)
  394. {
  395. const uint32_t value = DATA_INTEGRITY_MASK | RELIABLE_PKT_MASK |
  396. (packet_ack_get() << 3u) | packet_seq_get();
  397. return (uint8_t) value;
  398. }
  399. /**@brief Function for calculating a packet header checksum.
  400. *
  401. * @param[in] p_hdr Pointer to the packet header.
  402. *
  403. * @return Calculated checksum.
  404. */
  405. static __INLINE uint8_t header_checksum_calculate(const uint8_t * p_hdr)
  406. {
  407. // @note: no pointer validation check needed as already checked by calling function.
  408. uint32_t checksum;
  409. checksum = p_hdr[0];
  410. checksum += p_hdr[1];
  411. checksum += p_hdr[2];
  412. checksum &= 0xFFu;
  413. checksum = (~checksum + 1u);
  414. return (uint8_t)checksum;
  415. }
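// Worked example: for a header starting with {0xC0, 0xEE, 0x01} (SEQ = ACK = 0, vendor-specific
// packet with a 30-byte payload) the masked sum is 0xAF, so the checksum byte is (~0xAF + 1) & 0xFF = 0x51;
// the receiver then verifies 0xC0 + 0xEE + 0x01 + 0x51 == 0x200, i.e. 0 modulo 256
// (see is_rx_pkt_valid() and rx_ack_pkt_valid()).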
  416. /**@brief Function for getting the expected ACK number.
  417. *
  418. * @return expected ACK number.
  419. */
  420. static __INLINE uint8_t expected_ack_number_get(void)
  421. {
  422. uint8_t seq_nmbr = packet_seq_get();
  423. ++seq_nmbr;
  424. seq_nmbr &= 0x07u;
  425. return seq_nmbr;
  426. }
  427. /**@brief Function for getting the next expected ACK number.
  428. *
  429. * @return next expected ACK number.
  430. */
  431. static __INLINE uint8_t next_expected_ack_number_get(void)
  432. {
  433. uint8_t seq_nmbr = expected_ack_number_get();
  434. ++seq_nmbr;
  435. seq_nmbr &= 0x07u;
  436. return seq_nmbr;
  437. }
  438. /**@brief Function for processing a received acknowledgement packet.
  439. *
  440. * Verifies that the received acknowledgement packet has the expected acknowledgement number and
  441. * that the header checksum is correct.
  442. *
  443. * @param[in] p_buffer Pointer to the packet data.
  444. *
  445. * @return true if valid acknowledgement packet received.
  446. */
  447. static bool rx_ack_pkt_valid(const uint8_t * p_buffer)
  448. {
  449. // @note: no pointer validation check needed as already checked by calling function.
  450. // Verify header checksum.
  451. const uint32_t expected_checksum =
  452. ((p_buffer[0] + p_buffer[1] + p_buffer[2] + p_buffer[3])) & 0xFFu;
  453. if (expected_checksum != 0)
  454. {
  455. return false;
  456. }
  457. const uint8_t ack_number = (p_buffer[0] >> 3u) & 0x07u;
  458. // Verify expected acknowledgment number.
  459. return ( (ack_number == expected_ack_number_get()) ||
  460. (ack_number == next_expected_ack_number_get()) );
  461. }
  462. /**@brief Function for decoding a packet type field.
  463. *
  464. * @param[in] p_buffer Pointer to the packet data.
  465. * @param[in] length Length of packet data in bytes.
  466. *
  467. * @return Packet type field or INVALID_PKT_TYPE in case of decode error.
  468. */
  469. static uint32_t packet_type_decode(const uint8_t * p_buffer, uint32_t length)
  470. {
  471. // @note: no pointer validation check needed as already checked by calling function.
  472. uint32_t return_value;
  473. if (length >= PKT_HDR_SIZE)
  474. {
  475. return_value = (p_buffer[1] & 0x0Fu);
  476. }
  477. else
  478. {
  479. return_value = INVALID_PKT_TYPE;
  480. }
  481. return return_value;
  482. }
  483. #ifdef HCI_LINK_CONTROL
  484. /**@brief Function for decoding a link control packet.
  485. *
  486. * @param[in] p_buffer Pointer to the packet data.
  487. * @param[in] length Length of packet data in bytes.
  488. *
  489. * @return Link Control Packet Type if decoding successful, HCI_LINK_CONTROL_PKT_INVALID otherwise.
  490. */
  491. static uint16_t link_control_packet_decode(const uint8_t * p_buffer, uint32_t length)
  492. {
  493. // @note: no pointer validation check needed as already checked by calling function.
  494. uint16_t packet_type = HCI_LINK_CONTROL_PKT_INVALID;
  495. // Executed link control packet filtering algorithm order:
  496. // - verify packet overall length
  497. // - verify data integrity bit cleared
  498. // - verify reliable packet bit cleared
  499. // - verify header checksum
  500. // - verify payload: length and value
  501. if (length < HCI_PKT_SYNC_SIZE)
  502. {
  503. return HCI_LINK_CONTROL_PKT_INVALID;
  504. }
  505. packet_type = p_buffer[PKT_HDR_SIZE] | (p_buffer[PKT_HDR_SIZE + 1] << 8u);
  506. if ((p_buffer[0] & DATA_INTEGRITY_MASK) || (p_buffer[0] & RELIABLE_PKT_MASK))
  507. {
  508. packet_type = HCI_LINK_CONTROL_PKT_INVALID;
  509. }
  510. const uint32_t expected_checksum =
  511. ((p_buffer[0] + p_buffer[1] + p_buffer[2] + p_buffer[3])) & 0xFFu;
  512. if (expected_checksum != 0)
  513. {
  514. packet_type = HCI_LINK_CONTROL_PKT_INVALID;
  515. }
  516. // This is a CONFIG or CONFIG_RSP packet
  517. if ((packet_type == HCI_PKT_CONFIG) || (packet_type == HCI_PKT_CONFIG_RSP))
  518. {
  519. if (length != HCI_PKT_CONFIG_SIZE)
  520. {
  521. packet_type = HCI_LINK_CONTROL_PKT_INVALID;
  522. }
  523. // Verify configuration field (0x11):
  524. // - Sliding Window Size == 1,
  525. // - OOF Flow Control == 0,
  526. // - Data Integrity Check Type == 1,
  527. // - Version Number == 0
  528. if (p_buffer[HCI_PKT_CONFIG_SIZE - 1] != HCI_CONFIG_FIELD)
  529. {
  530. packet_type = HCI_LINK_CONTROL_PKT_INVALID;
  531. }
  532. }
  533. // This is a SYNC or SYNC_RSP packet
  534. else if ((packet_type == HCI_PKT_SYNC) || (packet_type == HCI_PKT_SYNC_RSP))
  535. {
  536. if (length != HCI_PKT_SYNC_SIZE)
  537. {
  538. packet_type = HCI_LINK_CONTROL_PKT_INVALID;
  539. }
  540. }
  541. else
  542. {
  543. packet_type = HCI_LINK_CONTROL_PKT_INVALID;
  544. }
  545. return packet_type;
  546. }
  547. #endif /* HCI_LINK_CONTROL */
  548. /**@brief Function for writing an acknowledgment packet for transmission.
  549. */
  550. static void ack_transmit(void)
  551. {
  552. uint32_t err_code;
  553. // TX ACK packet format:
  554. // - Unreliable Packet type
  555. // - Payload Length set to 0
  556. // - Sequence Number set to 0
  557. // - Header checksum calculated
  558. // - Acknowledge Number set correctly
  559. m_tx_ack_packet[0] = (packet_ack_get() << 3u);
  560. m_tx_ack_packet[1] = 0;
  561. m_tx_ack_packet[2] = 0;
  562. m_tx_ack_packet[3] = header_checksum_calculate(m_tx_ack_packet);
  563. ser_phy_hci_pkt_params_t pkt_header;
  564. pkt_header.p_buffer = m_tx_ack_packet;
  565. pkt_header.num_of_bytes = PKT_HDR_SIZE;
  566. DEBUG_EVT_SLIP_ACK_TX(0);
  567. err_code = ser_phy_hci_slip_tx_pkt_send(&pkt_header, NULL, NULL);
  568. NRF_LOG_DEBUG("Start sending ACK.");
  569. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  570. return;
  571. }
  572. static void ser_phy_event_callback(ser_phy_evt_t event)
  573. {
  574. if (m_ser_phy_callback)
  575. {
  576. m_ser_phy_callback(event);
  577. }
  578. return;
  579. }
  580. static void memory_request_callback(uint16_t size)
  581. {
  582. ser_phy_evt_t event;
  583. DEBUG_EVT_HCI_PHY_EVT_BUF_REQUEST(0);
  584. event.evt_type = SER_PHY_EVT_RX_BUF_REQUEST;
  585. event.evt_params.rx_buf_request.num_of_bytes = size;
  586. ser_phy_event_callback(event);
  587. }
  588. static void packet_received_callback(uint8_t * pBuffer, uint16_t size)
  589. {
  590. ser_phy_evt_t event;
  591. DEBUG_EVT_HCI_PHY_EVT_RX_PKT_RECEIVED(0);
  592. event.evt_type = SER_PHY_EVT_RX_PKT_RECEIVED;
  593. event.evt_params.rx_pkt_received.num_of_bytes = size;
  594. event.evt_params.rx_pkt_received.p_buffer = pBuffer;
  595. ser_phy_event_callback(event);
  596. }
  597. static void packet_dropped_callback(void)
  598. {
  599. ser_phy_evt_t event;
  600. DEBUG_EVT_HCI_PHY_EVT_RX_PKT_DROPPED(0);
  601. event.evt_type = SER_PHY_EVT_RX_PKT_DROPPED;
  602. ser_phy_event_callback(event);
  603. }
  604. static void packet_transmitted_callback(void)
  605. {
  606. ser_phy_evt_t event;
  607. DEBUG_EVT_HCI_PHY_EVT_TX_PKT_SENT(0);
  608. event.evt_type = SER_PHY_EVT_TX_PKT_SENT;
  609. ser_phy_event_callback(event);
  610. }
  611. static void error_callback(void)
  612. {
  613. ser_phy_evt_t event;
  614. DEBUG_EVT_HCI_PHY_EVT_TX_ERROR(0);
  615. NRF_LOG_DEBUG("no ack");
  616. event.evt_type = SER_PHY_EVT_HW_ERROR;
  617. event.evt_params.hw_error.p_buffer = m_p_tx_payload;
  618. ser_phy_event_callback(event);
  619. }
  620. static void hci_slip_event_handler(ser_phy_hci_slip_evt_t * p_event)
  621. {
  622. hci_evt_t event;
  623. uint32_t packet_type;
  624. uint32_t err_code;
  625. if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT )
  626. {
  627. NRF_LOG_DEBUG("EVT:Tx packet sent.");
  628. DEBUG_EVT_SLIP_PACKET_TXED(0);
  629. event.evt_source = HCI_SLIP_EVT;
  630. event.evt.ser_phy_slip_evt.evt_type = p_event->evt_type;
  631. #ifndef HCI_LINK_CONTROL
  632. hci_tx_event_handler(&event);
  633. #else
  634. if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
  635. {
  636. hci_tx_event_handler(&event);
  637. }
  638. #endif /*HCI_LINK_CONTROL*/
  639. }
  640. else if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_ACK_SENT )
  641. {
  642. NRF_LOG_DEBUG("EVT:ACK sent.");
  643. DEBUG_EVT_SLIP_ACK_TXED(0);
  644. event.evt_source = HCI_SLIP_EVT;
  645. event.evt.ser_phy_slip_evt.evt_type = p_event->evt_type;
  646. #ifndef HCI_LINK_CONTROL
  647. hci_rx_event_handler(&event);
  648. #else
  649. if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
  650. {
  651. hci_rx_event_handler(&event);
  652. }
  653. #endif /* HCI_LINK_CONTROL */
  654. }
  655. else if ( p_event->evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED )
  656. {
  657. event.evt_source = HCI_SLIP_EVT;
  658. event.evt.ser_phy_slip_evt.evt_type = p_event->evt_type;
  659. event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer =
  660. p_event->evt_params.received_pkt.p_buffer;
  661. event.evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes =
  662. p_event->evt_params.received_pkt.num_of_bytes;
  663. ser_phy_hci_assert(event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer != NULL);
  664. ser_phy_hci_assert(event.evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes != 0);
  665. packet_type = packet_type_decode(
  666. event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer,
  667. event.evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes);
  668. NRF_LOG_DEBUG("EVT:RX %s packet (length:%u)", PACKET_TYPE_STR(packet_type),
  669. p_event->evt_params.received_pkt.num_of_bytes);
  670. if (packet_type == PKT_TYPE_RESET)
  671. {
  672. #if defined(SER_CONNECTIVITY) && defined(SER_PHY_HCI_USB_CDC)
  673. (void)soft_reset_trigger();
  674. #else
  675. NVIC_SystemReset();
  676. #endif
  677. }
  678. else if (packet_type == PKT_TYPE_ACK )
  679. {
  680. DEBUG_EVT_SLIP_ACK_RXED(0);
  681. #ifndef HCI_LINK_CONTROL
  682. hci_tx_event_handler(&event);
  683. #else
  684. if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
  685. {
  686. hci_tx_event_handler(&event);
  687. }
  688. else
  689. {
  690. err_code = ser_phy_hci_slip_rx_buf_free(
  691. event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  692. }
  693. #endif /* HCI_LINK_CONTROL */
  694. }
  695. else if ( packet_type == PKT_TYPE_VENDOR_SPECIFIC )
  696. {
  697. if (is_rx_pkt_valid(event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer,
  698. event.evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes))
  699. {
  700. DEBUG_EVT_SLIP_PACKET_RXED(0);
  701. #ifndef HCI_LINK_CONTROL
  702. hci_rx_event_handler(&event);
  703. #else
  704. if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
  705. {
  706. hci_rx_event_handler(&event);
  707. }
  708. else
  709. {
  710. err_code = ser_phy_hci_slip_rx_buf_free(
  711. event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  712. }
  713. #endif /* HCI_LINK_CONTROL */
  714. }
  715. else
  716. {
  717. err_code = ser_phy_hci_slip_rx_buf_free(
  718. event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  719. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  720. /* throw assert when in debug mode*/
  721. DEBUG_EVT_SLIP_ERR_RXED(0);
  722. }
  723. }
  724. #ifdef HCI_LINK_CONTROL
  725. else if (packet_type == PKT_TYPE_LINK_CONTROL)
  726. {
  727. hci_link_control_event_handler(&event);
  728. }
  729. #endif /* HCI_LINK_CONTROL */
  730. else
  731. {
  732. err_code = ser_phy_hci_slip_rx_buf_free(
  733. event.evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  734. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  735. /* throw assert when in debug mode*/
  736. DEBUG_EVT_SLIP_ERR_RXED(0);
  737. }
  738. }
  739. else
  740. {
  741. NRF_LOG_DEBUG("EVT_HW_ERROR");
  742. }
  743. }
  744. static void hci_pkt_send(void)
  745. {
  746. uint32_t err_code;
  747. m_tx_packet_header[0] = tx_packet_byte_zero_construct();
  748. uint16_t type_and_length_fields = ((m_tx_payload_length << 4u) | PKT_TYPE_VENDOR_SPECIFIC);
  749. (void)uint16_encode(type_and_length_fields, &(m_tx_packet_header[1]));
  750. m_tx_packet_header[3] = header_checksum_calculate(m_tx_packet_header);
  751. uint16_t crc = crc16_compute(m_tx_packet_header, PKT_HDR_SIZE, NULL);
  752. crc = crc16_compute(m_p_tx_payload, m_tx_payload_length, &crc);
  753. (void)uint16_encode(crc, m_tx_packet_crc);
  754. ser_phy_hci_pkt_params_t pkt_header;
  755. ser_phy_hci_pkt_params_t pkt_payload;
  756. ser_phy_hci_pkt_params_t pkt_crc;
  757. pkt_header.p_buffer = m_tx_packet_header;
  758. pkt_header.num_of_bytes = PKT_HDR_SIZE;
  759. pkt_payload.p_buffer = m_p_tx_payload;
  760. pkt_payload.num_of_bytes = m_tx_payload_length;
  761. pkt_crc.p_buffer = m_tx_packet_crc;
  762. pkt_crc.num_of_bytes = PKT_CRC_SIZE;
  763. DEBUG_EVT_SLIP_PACKET_TX(0);
  764. err_code = ser_phy_hci_slip_tx_pkt_send(&pkt_header, &pkt_payload, &pkt_crc);
  765. NRF_LOG_DEBUG("Started TX packet (payload %d).", m_tx_payload_length);
  766. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  767. return;
  768. }
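// The three fragments are handed to the SLIP layer in order, so the unescaped frame is:
// 4-byte header, payload, 2-byte CRC-16 (little endian); ser_phy_hci_slip_tx_pkt_send()
// takes care of SLIP framing and escaping on the wire.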
  769. #ifdef HCI_LINK_CONTROL
  770. static void hci_link_control_pkt_send(void)
  771. {
  772. uint32_t err_code;
  773. uint16_t link_control_payload_len = 0;
  774. m_tx_link_control_header[0] = 0x00u; // SEQ, ACK, DI and RP are set to 0 for link control
  775. if (m_hci_link_control_next_pkt == HCI_PKT_SYNC)
  776. {
  777. link_control_payload_len = HCI_PKT_SYNC_SIZE - PKT_HDR_SIZE;
  778. (void)uint16_encode(HCI_PKT_SYNC, m_tx_link_control_payload);
  779. }
  780. else if (m_hci_link_control_next_pkt == HCI_PKT_SYNC_RSP)
  781. {
  782. link_control_payload_len = HCI_PKT_SYNC_SIZE - PKT_HDR_SIZE;
  783. (void)uint16_encode(HCI_PKT_SYNC_RSP, m_tx_link_control_payload);
  784. }
  785. else if (m_hci_link_control_next_pkt == HCI_PKT_CONFIG)
  786. {
  787. link_control_payload_len = HCI_PKT_CONFIG_SIZE - PKT_HDR_SIZE;
  788. (void)uint16_encode(HCI_PKT_CONFIG, m_tx_link_control_payload);
  789. m_tx_link_control_payload[2] = HCI_CONFIG_FIELD;
  790. }
  791. else if (m_hci_link_control_next_pkt == HCI_PKT_CONFIG_RSP)
  792. {
  793. link_control_payload_len = HCI_PKT_CONFIG_SIZE - PKT_HDR_SIZE;
  794. (void)uint16_encode(HCI_PKT_CONFIG_RSP, m_tx_link_control_payload);
  795. m_tx_link_control_payload[2] = HCI_CONFIG_FIELD;
  796. }
  797. uint16_t type_and_length_fields = ((link_control_payload_len << 4u) | PKT_TYPE_LINK_CONTROL);
  798. (void)uint16_encode(type_and_length_fields, &(m_tx_link_control_header[1]));
  799. m_tx_link_control_header[3] = header_checksum_calculate(m_tx_link_control_header);
  800. ser_phy_hci_pkt_params_t pkt_header;
  801. ser_phy_hci_pkt_params_t pkt_payload;
  802. ser_phy_hci_pkt_params_t pkt_crc;
  803. pkt_header.p_buffer = m_tx_link_control_header;
  804. pkt_header.num_of_bytes = PKT_HDR_SIZE;
  805. pkt_payload.p_buffer = m_tx_link_control_payload;
  806. pkt_payload.num_of_bytes = link_control_payload_len;
  807. pkt_crc.p_buffer = NULL;
  808. pkt_crc.num_of_bytes = 0;
  809. DEBUG_EVT_SLIP_PACKET_TX(0);
  810. err_code = ser_phy_hci_slip_tx_pkt_send(&pkt_header, &pkt_payload, &pkt_crc);
  811. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  812. return;
  813. }
  814. #endif /* HCI_LINK_CONTROL */
  815. static void hci_pkt_sent_upcall(void)
  816. {
  817. m_packet_seq_number++; // incoming ACK is valid, increment SEQ
  818. m_packet_seq_number &= 0x07u;
  819. m_p_tx_payload = NULL;
  820. packet_transmitted_callback();
  821. return;
  822. }
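// The CONFIG handshake advertises a sliding window size of 1 (see HCI_CONFIG_FIELD), so at most
// one reliable packet is in flight and SEQ/ACK simply advance modulo 8 for each delivered packet.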
  823. static void hci_release_ack_buffer(hci_evt_t * p_event)
  824. {
  825. uint32_t err_code;
  826. err_code = ser_phy_hci_slip_rx_buf_free(
  827. p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  828. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  829. return;
  830. }
  831. static void hci_process_orphaned_ack(hci_evt_t * p_event)
  832. {
  833. hci_release_ack_buffer(p_event);
  834. return;
  835. }
  836. /* main tx fsm */
  837. static void hci_tx_fsm_event_process(hci_evt_t * p_event)
  838. {
  839. switch (m_hci_tx_fsm_state)
  840. {
  841. case HCI_TX_STATE_SEND:
  842. if ((p_event->evt_source == HCI_SER_PHY_EVT) &&
  843. (p_event->evt.ser_phy_evt.evt_type == HCI_SER_PHY_TX_REQUEST))
  844. {
  845. hci_pkt_send();
  846. hci_timeout_setup(0);
  847. m_tx_retry_count = MAX_RETRY_COUNT;
  848. m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_FIRST_TX_END;
  849. }
  850. else if ((p_event->evt_source == HCI_SLIP_EVT) &&
  851. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
  852. {
  853. hci_process_orphaned_ack(p_event);
  854. }
  855. break;
  856. case HCI_TX_STATE_WAIT_FOR_FIRST_TX_END:
  857. if ((p_event->evt_source == HCI_SLIP_EVT) &&
  858. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT))
  859. {
  860. hci_timeout_setup(1);
  861. m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_ACK;
  862. }
  863. else if ((p_event->evt_source == HCI_SLIP_EVT) &&
  864. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
  865. {
  866. hci_process_orphaned_ack(p_event);
  867. }
  868. break;
  869. case HCI_TX_STATE_WAIT_FOR_ACK_OR_TX_END:
  870. if ((p_event->evt_source == HCI_SLIP_EVT) &&
  871. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT))
  872. {
  873. hci_timeout_setup(1);
  874. m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_ACK;
  875. }
  876. else if ((p_event->evt_source == HCI_SLIP_EVT) &&
  877. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
  878. {
  879. if (rx_ack_pkt_valid(p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer))
  880. {
  881. hci_timeout_setup(0);
  882. m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_TX_END;
  883. }
  884. hci_release_ack_buffer(p_event);
  885. }
  886. break;
  887. case HCI_TX_STATE_WAIT_FOR_ACK:
  888. if ((p_event->evt_source == HCI_SLIP_EVT) &&
  889. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
  890. {
  891. if (rx_ack_pkt_valid(p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer))
  892. {
  893. hci_timeout_setup(0);
  894. hci_pkt_sent_upcall();
  895. m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
  896. }
  897. hci_release_ack_buffer(p_event);
  898. }
  899. else if (p_event->evt_source == HCI_TIMER_EVT)
  900. {
  901. m_tx_retry_count--;
  902. // m_tx_retx_counter++; // global retransmissions counter
  903. if (m_tx_retry_count)
  904. {
  905. NRF_LOG_DEBUG("Timeout, no ACK. Retrying tx packet.");
  906. hci_pkt_send();
  907. DEBUG_HCI_RETX(0);
  908. m_hci_tx_fsm_state = HCI_TX_STATE_WAIT_FOR_ACK_OR_TX_END;
  909. }
  910. else
  911. {
  912. error_callback();
  913. m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
  914. NRF_LOG_WARNING("Timeout, no ACK. Dropping.");
  915. }
  916. }
  917. break;
  918. case HCI_TX_STATE_WAIT_FOR_TX_END:
  919. if ((p_event->evt_source == HCI_SLIP_EVT) &&
  920. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_SENT))
  921. {
  922. hci_pkt_sent_upcall();
  923. m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
  924. }
  925. else if ((p_event->evt_source == HCI_SLIP_EVT) &&
  926. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
  927. {
  928. hci_process_orphaned_ack(p_event);
  929. }
  930. break;
  931. #ifdef HCI_LINK_CONTROL
  932. case HCI_TX_STATE_DISABLE:
  933. /* This case should not happen if HCI is in ACTIVE mode */
  934. if (m_hci_mode == HCI_MODE_ACTIVE)
  935. {
  936. ser_phy_hci_assert(false);
  937. }
  938. break;
  939. #endif /* HCI_LINK_CONTROL */
  940. default:
  941. ser_phy_hci_assert(false);
  942. break;
  943. }
  944. }
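// TX FSM summary (derived from the cases above):
//   SEND --TX request--> WAIT_FOR_FIRST_TX_END --packet sent--> WAIT_FOR_ACK
//   WAIT_FOR_ACK --valid ACK--> SEND (delivery reported via packet_transmitted_callback())
//   WAIT_FOR_ACK --timeout, retries left--> retransmit --> WAIT_FOR_ACK_OR_TX_END
//   WAIT_FOR_ACK --timeout, retries exhausted--> error_callback() --> SEND
//   WAIT_FOR_ACK_OR_TX_END --packet sent--> WAIT_FOR_ACK; --valid ACK--> WAIT_FOR_TX_END --packet sent--> SEND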
  945. static void hci_mem_request(hci_evt_t * p_event)
  946. {
  947. m_buffer_reqested_flag = true;
  948. m_p_rx_packet = p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer;
  949. m_rx_packet_length = p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes;
  950. ser_phy_hci_assert(m_rx_packet_length > PKT_HDR_SIZE + PKT_CRC_SIZE);
  951. memory_request_callback(m_rx_packet_length - PKT_HDR_SIZE - PKT_CRC_SIZE);
  952. return;
  953. }
  954. static void hci_inc_ack()
  955. {
  956. m_packet_ack_number++;
  957. m_packet_ack_number &= 0x07u;
  958. }
  959. static void hci_rx_fsm_event_process(hci_evt_t * p_event)
  960. {
  961. switch (m_hci_rx_fsm_state)
  962. {
  963. case HCI_RX_STATE_RECEIVE:
  964. if ((p_event->evt_source == HCI_SLIP_EVT) &&
  965. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
  966. {
  967. /* Packet type, CRC, and header checksum have already been validated in hci_slip_event_handler() */
  968. uint8_t rx_seq_number = packet_seq_nmbr_extract(
  969. p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  970. if (packet_ack_get() == rx_seq_number)
  971. {
  972. hci_mem_request(p_event);
  973. m_hci_rx_fsm_state = HCI_RX_STATE_WAIT_FOR_MEM;
  974. }
  975. else
  976. {
  977. // m_rx_drop_counter++;
  978. m_hci_rx_fsm_state = HCI_RX_STATE_WAIT_FOR_SLIP_NACK_END;
  979. (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_packet); // and drop a packet
  980. ack_transmit(); // send NACK with valid ACK
  981. }
  982. }
  983. break;
  984. case HCI_RX_STATE_WAIT_FOR_MEM:
  985. if ((p_event->evt_source == HCI_SER_PHY_EVT) &&
  986. (p_event->evt.ser_phy_evt.evt_type == HCI_SER_PHY_RX_BUF_GRANTED))
  987. {
  988. if (m_p_rx_buffer)
  989. {
  990. memcpy(m_p_rx_buffer,
  991. m_p_rx_packet + PKT_HDR_SIZE,
  992. m_rx_packet_length - PKT_HDR_SIZE - PKT_CRC_SIZE);
  993. (void) ser_phy_hci_slip_rx_buf_free(m_p_rx_packet);
  994. }
  995. m_hci_rx_fsm_state = HCI_RX_STATE_WAIT_FOR_SLIP_ACK_END;
  996. hci_inc_ack(); // SEQ was valid for good packet, we will send incremented SEQ as ACK
  997. ack_transmit();
  998. }
  999. break;
  1000. case HCI_RX_STATE_WAIT_FOR_SLIP_ACK_END:
  1001. if ((p_event->evt_source == HCI_SLIP_EVT) &&
  1002. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_ACK_SENT))
  1003. {
  1004. if (m_p_rx_buffer)
  1005. {
  1006. packet_received_callback(m_p_rx_buffer,
  1007. m_rx_packet_length - PKT_HDR_SIZE - PKT_CRC_SIZE);
  1008. }
  1009. else
  1010. {
  1011. packet_dropped_callback();
  1012. }
  1013. m_hci_rx_fsm_state = HCI_RX_STATE_RECEIVE;
  1014. }
  1015. else if ((p_event->evt_source == HCI_SLIP_EVT) &&
  1016. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED))
  1017. {
  1018. (void) ser_phy_hci_slip_rx_buf_free(p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  1019. }
  1020. break;
  1021. case HCI_RX_STATE_WAIT_FOR_SLIP_NACK_END:
  1022. if ((p_event->evt_source == HCI_SLIP_EVT) &&
  1023. (p_event->evt.ser_phy_slip_evt.evt_type == SER_PHY_HCI_SLIP_EVT_ACK_SENT))
  1024. {
  1025. m_hci_rx_fsm_state = HCI_RX_STATE_RECEIVE;
  1026. }
  1027. else
  1028. {
  1029. (void) ser_phy_hci_slip_rx_buf_free(p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  1030. }
  1031. break;
  1032. #ifdef HCI_LINK_CONTROL
  1033. case HCI_RX_STATE_DISABLE:
  1034. if (m_hci_mode == HCI_MODE_ACTIVE)
  1035. {
  1036. ser_phy_hci_assert(false);
  1037. }
  1038. break;
  1039. #endif /* HCI_LINK_CONTROL */
  1040. default:
  1041. ser_phy_hci_assert(false);
  1042. break;
  1043. }
  1044. }
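// RX FSM summary (derived from the cases above):
//   RECEIVE --packet with expected SEQ--> request app buffer --> WAIT_FOR_MEM
//   RECEIVE --unexpected SEQ--> drop packet, re-send current ACK --> WAIT_FOR_SLIP_NACK_END
//   WAIT_FOR_MEM --buffer granted--> copy payload (or mark dropped if NULL), ACK+1 --> WAIT_FOR_SLIP_ACK_END
//   WAIT_FOR_SLIP_ACK_END --ACK sent--> packet_received/packet_dropped callback --> RECEIVE
//   WAIT_FOR_SLIP_NACK_END --ACK sent--> RECEIVE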
  1045. /* this function might be entered only via hci_tx_event_handler */
  1046. static void hci_tx_fsm(void)
  1047. {
  1048. hci_evt_t event;
  1049. uint32_t err_code = NRF_SUCCESS;
  1050. while (err_code == NRF_SUCCESS)
  1051. {
  1052. CRITICAL_REGION_ENTER();
  1053. err_code = nrf_queue_pop(&m_tx_evt_queue, &event);
  1054. if (err_code != NRF_SUCCESS)
  1055. {
  1056. m_tx_fsm_idle_flag = true;
  1057. }
  1058. CRITICAL_REGION_EXIT();
  1059. if (err_code == NRF_SUCCESS)
  1060. {
  1061. hci_tx_fsm_event_process(&event); /* this is the only entry to the TX_FSM */
  1062. }
  1063. }
  1064. return;
  1065. }
  1066. /* this function might be entered only via hci_rx_event_handler */
  1067. static void hci_rx_fsm(void)
  1068. {
  1069. hci_evt_t event;
  1070. uint32_t err_code = NRF_SUCCESS;
  1071. while (err_code == NRF_SUCCESS)
  1072. {
  1073. CRITICAL_REGION_ENTER();
  1074. err_code = nrf_queue_pop(&m_rx_evt_queue, &event);
  1075. if (err_code != NRF_SUCCESS)
  1076. {
  1077. m_rx_fsm_idle_flag = true;
  1078. }
  1079. CRITICAL_REGION_EXIT();
  1080. if (err_code == NRF_SUCCESS)
  1081. {
  1082. hci_rx_fsm_event_process(&event); /* this is the only entry to the RX_FSM */
  1083. }
  1084. }
  1085. return;
  1086. }
  1087. /* Something might have been queued by the API while 'PHY interrupts' were disabled */
  1088. static void hci_tx_reschedule()
  1089. {
  1090. bool tx_exec_flag = false;
  1091. uint32_t tx_queue_length;
  1092. CRITICAL_REGION_ENTER();
  1093. tx_queue_length = nrf_queue_utilization_get(&m_tx_evt_queue);
  1094. #ifndef HCI_LINK_CONTROL
  1095. if (m_tx_fsm_idle_flag && m_hci_global_enable_flag && tx_queue_length)
  1096. #else
  1097. if (m_tx_fsm_idle_flag && m_hci_global_enable_flag && tx_queue_length && (m_hci_mode == HCI_MODE_ACTIVE))
  1098. #endif /* HCI_LINK_CONTROL */
  1099. {
  1100. tx_exec_flag = true; // FSM should be activated
  1101. m_tx_fsm_idle_flag = false; // FSM will be busy from now on till the queue is exhausted
  1102. }
  1103. CRITICAL_REGION_EXIT();
  1104. if (tx_exec_flag)
  1105. {
  1106. hci_tx_fsm();
  1107. }
  1108. return;
  1109. }
  1110. /* entry to TX state machine, might be called asynchronously from different contexts */
  1111. /* Puts event into the TX event queue and execute if FSM was idle */
  1112. static void hci_tx_event_handler(hci_evt_t * p_event)
  1113. {
  1114. bool tx_exec_flag = false;
  1115. uint32_t err_code;
  1116. CRITICAL_REGION_ENTER();
  1117. err_code = nrf_queue_push(&m_tx_evt_queue, p_event);
  1118. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  1119. // CRITICAL_REGION_ENTER();
  1120. /* only one process can acquire tx_exec_flag */
  1121. if (m_tx_fsm_idle_flag && m_hci_global_enable_flag)
  1122. {
  1123. tx_exec_flag = true; // FSM should be activated
  1124. m_tx_fsm_idle_flag = false; // FSM will be busy from now on till the queue is exhausted
  1125. }
  1126. CRITICAL_REGION_EXIT();
  1127. if (tx_exec_flag)
  1128. {
  1129. hci_tx_fsm();
  1130. }
  1131. return;
  1132. }
  1133. /* Something might have been queued by the API while 'PHY interrupts' were disabled */
  1134. static void hci_rx_reschedule()
  1135. {
  1136. bool rx_exec_flag = false;
  1137. uint32_t rx_queue_length;
  1138. CRITICAL_REGION_ENTER();
  1139. rx_queue_length = nrf_queue_utilization_get(&m_rx_evt_queue);
  1140. #ifndef HCI_LINK_CONTROL
  1141. if (m_rx_fsm_idle_flag && m_hci_global_enable_flag && rx_queue_length)
  1142. #else
  1143. if (m_rx_fsm_idle_flag && m_hci_global_enable_flag && rx_queue_length && (m_hci_mode == HCI_MODE_ACTIVE))
  1144. #endif /* HCI_LINK_CONTROL */
  1145. {
  1146. rx_exec_flag = true; // FSM should be activated
  1147. m_rx_fsm_idle_flag = false; // FSM will be busy from now on till the queue is exhausted
  1148. }
  1149. CRITICAL_REGION_EXIT();
  1150. if (rx_exec_flag)
  1151. {
  1152. hci_rx_fsm();
  1153. }
  1154. }
  1155. /* Entry to RX state machine, might be called asynchronously from different contexts */
  1156. /* Puts event into the RX event queue and execute if FSM was idle */
  1157. static void hci_rx_event_handler(hci_evt_t * p_event)
  1158. {
  1159. bool rx_exec_flag = false;
  1160. uint32_t err_code;
  1161. CRITICAL_REGION_ENTER();
  1162. err_code = nrf_queue_push(&m_rx_evt_queue, p_event);
  1163. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  1164. /* only one process can acquire rx_exec_flag */
  1165. // CRITICAL_REGION_ENTER();
  1166. if (m_rx_fsm_idle_flag && m_hci_global_enable_flag)
  1167. {
  1168. rx_exec_flag = true; // FSM should be activated
  1169. m_rx_fsm_idle_flag = false; // FSM will be busy from now on till the queue is exhausted
  1170. }
  1171. CRITICAL_REGION_EXIT();
  1172. if (rx_exec_flag)
  1173. {
  1174. hci_rx_fsm();
  1175. }
  1176. return;
  1177. }
  1178. #ifdef HCI_LINK_CONTROL
  1179. /* Link control event handler - used only for Link Control packets */
  1180. /* This handler will be called only in 2 cases:
  1181. - when SER_PHY_HCI_SLIP_EVT_PKT_RECEIVED event is received
  1182. - when HCI_TIMER_EVT event is received */
  1183. static void hci_link_control_event_handler(hci_evt_t * p_event)
  1184. {
  1185. uint16_t pkt_type = HCI_LINK_CONTROL_PKT_INVALID;
  1186. switch (p_event->evt_source)
  1187. {
  1188. case HCI_SLIP_EVT:
  1189. pkt_type = link_control_packet_decode(
  1190. p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer,
  1191. p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.num_of_bytes);
  1192. /* Perform HCI mode transition if needed */
  1193. CRITICAL_REGION_ENTER();
  1194. switch (pkt_type)
  1195. {
  1196. case HCI_PKT_SYNC:
  1197. m_hci_link_control_next_pkt = HCI_PKT_SYNC_RSP;
  1198. /* Restart HCI communication if it was in ACTIVE mode */
  1199. if (m_hci_mode == HCI_MODE_ACTIVE)
  1200. {
  1201. m_hci_mode = HCI_MODE_UNINITIALIZED;
  1202. m_packet_ack_number = INITIAL_ACK_NUMBER_EXPECTED;
  1203. m_packet_seq_number = INITIAL_SEQ_NUMBER;
  1204. m_hci_tx_fsm_state = HCI_TX_STATE_DISABLE;
  1205. m_hci_rx_fsm_state = HCI_RX_STATE_DISABLE;
  1206. m_hci_other_side_active = false;
  1207. }
  1208. NRF_LOG_DEBUG("Link control. Sync received, sending Sync Response.");
  1209. hci_link_control_pkt_send();
  1210. hci_timeout_setup(HCI_LINK_CONTROL_TIMEOUT); // Need to trigger transmitting SYNC messages
  1211. break;
  1212. case HCI_PKT_SYNC_RSP:
  1213. if (m_hci_mode == HCI_MODE_UNINITIALIZED)
  1214. {
  1215. m_hci_mode = HCI_MODE_INITIALIZED;
  1216. m_hci_link_control_next_pkt = HCI_PKT_CONFIG;
  1217. m_cfg_sent = false;
  1218. }
  1219. NRF_LOG_DEBUG("Link control. Sync Resposnse recieved.");
  1220. break;
  1221. case HCI_PKT_CONFIG:
  1222. if (m_hci_mode != HCI_MODE_UNINITIALIZED)
  1223. {
  1224. if (m_cfg_sent)
  1225. {
  1226. m_hci_link_control_next_pkt = HCI_PKT_CONFIG_RSP;
  1227. m_hci_other_side_active = true;
  1228. }
  1229. else
  1230. {
  1231. m_hci_link_control_next_pkt = HCI_PKT_CONFIG;
  1232. }
  1233. hci_link_control_pkt_send();
  1234. m_cfg_sent = true;
  1235. }
  1236. break;
  1237. case HCI_PKT_CONFIG_RSP:
  1238. if (m_hci_mode == HCI_MODE_INITIALIZED)
  1239. {
  1240. m_hci_mode = HCI_MODE_ACTIVE;
  1241. m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
  1242. m_hci_rx_fsm_state = HCI_RX_STATE_RECEIVE;
  1243. }
  1244. break;
  1245. }
  1246. CRITICAL_REGION_EXIT();
  1247. (void) ser_phy_hci_slip_rx_buf_free(
  1248. p_event->evt.ser_phy_slip_evt.evt_params.received_pkt.p_buffer);
  1249. /* Kick the state machine so it can start processing BLE packets */
  1250. if ((m_hci_mode == HCI_MODE_ACTIVE) && m_hci_other_side_active)
  1251. {
  1252. hci_tx_reschedule();
  1253. hci_rx_reschedule();
  1254. }
  1255. break;
  1256. case HCI_TIMER_EVT:
  1257. /* Send one of the Link Control packets if in Uninitialized or Initialized state */
  1258. CRITICAL_REGION_ENTER();
  1259. switch (m_hci_mode)
  1260. {
  1261. case HCI_MODE_UNINITIALIZED:
  1262. //send packet
  1263. m_hci_link_control_next_pkt = HCI_PKT_SYNC;
  1264. hci_link_control_pkt_send();
  1265. hci_timeout_setup(HCI_LINK_CONTROL_TIMEOUT);
  1266. break;
  1267. case HCI_MODE_INITIALIZED:
  1268. m_hci_link_control_next_pkt = HCI_PKT_CONFIG;
  1269. hci_link_control_pkt_send();
  1270. m_cfg_sent = true;
  1271. hci_timeout_setup(HCI_LINK_CONTROL_TIMEOUT);
  1272. break;
  1273. case HCI_MODE_ACTIVE:
  1274. case HCI_MODE_DISABLE:
  1275. default:
  1276. // No implementation needed
  1277. break;
  1278. }
  1279. CRITICAL_REGION_EXIT();
  1280. break;
  1281. case HCI_SER_PHY_EVT:
  1282. default:
  1283. // No implementation needed
  1284. break;
  1285. }
  1286. }
  1287. #endif /* HCI_LINK_CONTROL */
  1288. /* ser_phy API function */
  1289. void ser_phy_interrupts_enable(void)
  1290. {
  1291. bool pending_timer_callback_flag = false;
  1292. CRITICAL_REGION_ENTER();
  1293. m_hci_timer_enabled_flag = true;
  1294. if (m_hci_timout_pending_flag)
  1295. {
  1296. m_hci_timout_pending_flag = false;
  1297. pending_timer_callback_flag = true;
  1298. }
  1299. CRITICAL_REGION_EXIT();
  1300. // Workaround: process SER_PHY events that were queued while interrupts were disabled
  1301. m_hci_global_enable_flag = true;
  1302. hci_tx_reschedule();
  1303. hci_rx_reschedule();
  1304. if (pending_timer_callback_flag)
  1305. {
  1306. hci_signal_timeout_event();
  1307. }
  1308. return;
  1309. }
  1310. /* ser_phy API function */
  1311. void ser_phy_interrupts_disable(void)
  1312. {
  1313. CRITICAL_REGION_ENTER();
  1314. m_hci_timer_enabled_flag = false;
  1315. // transport calls PHY API with ser_phy_interrupts_disabled
  1316. m_hci_global_enable_flag = false;
  1317. CRITICAL_REGION_EXIT();
  1318. }
  1319. /* ser_phy API function */
  1320. uint32_t ser_phy_rx_buf_set(uint8_t * p_buffer)
  1321. {
  1322. uint32_t status = NRF_SUCCESS;
  1323. hci_evt_t event;
  1324. if (m_buffer_reqested_flag)
  1325. {
  1326. m_buffer_reqested_flag = false;
  1327. m_p_rx_buffer = p_buffer;
  1328. event.evt_source = HCI_SER_PHY_EVT;
  1329. event.evt.ser_phy_evt.evt_type = HCI_SER_PHY_RX_BUF_GRANTED;
  1330. hci_rx_event_handler(&event);
  1331. }
  1332. else
  1333. {
  1334. status = NRF_ERROR_BUSY;
  1335. }
  1336. return status;
  1337. }
  1338. /* ser_phy API function */
  1339. uint32_t ser_phy_tx_pkt_send(const uint8_t * p_buffer, uint16_t num_of_bytes)
  1340. {
  1341. NRF_LOG_DEBUG("TX request (%d bytes)", num_of_bytes);
  1342. uint32_t status = NRF_SUCCESS;
  1343. hci_evt_t event;
  1344. if ( p_buffer == NULL || num_of_bytes == 0)
  1345. {
  1346. return NRF_ERROR_NULL;
  1347. }
  1348. if ( m_p_tx_payload == NULL)
  1349. {
  1350. m_tx_payload_length = num_of_bytes;
  1351. m_p_tx_payload = (uint8_t *)p_buffer;
  1352. DEBUG_EVT_TX_REQ(0);
  1353. event.evt_source = HCI_SER_PHY_EVT;
  1354. event.evt.ser_phy_evt.evt_type = HCI_SER_PHY_TX_REQUEST;
  1355. hci_tx_event_handler(&event);
  1356. }
  1357. else
  1358. {
  1359. status = NRF_ERROR_BUSY;
  1360. }
  1361. return status;
  1362. }
  1363. static uint32_t hci_timer_reset(void)
  1364. {
  1365. #ifdef HCI_APP_TIMER
  1366. ret_code_t err_code = app_timer_stop(m_app_timer_id);
  1367. if (err_code != NRF_SUCCESS)
  1368. {
  1369. return NRF_ERROR_INTERNAL;
  1370. }
  1371. err_code = app_timer_start(m_app_timer_id, RETRANSMISSION_TIMEOUT_IN_TICKS, NULL);
  1372. if (err_code != NRF_SUCCESS)
  1373. {
  1374. return NRF_ERROR_INTERNAL;
  1375. }
  1376. #else
  1377. // Configure TIMER for compare[1] event
  1378. HCI_TIMER->PRESCALER = 9;
  1379. HCI_TIMER->MODE = TIMER_MODE_MODE_Timer;
  1380. HCI_TIMER->BITMODE = TIMER_BITMODE_BITMODE_16Bit;
  1381. // Clear TIMER
  1382. HCI_TIMER->TASKS_CLEAR = 1;
  1383. HCI_TIMER->TASKS_STOP = 1;
  1384. // Enable interrupt
  1385. HCI_TIMER->INTENCLR = 0xFFFFFFFF;
  1386. HCI_TIMER->INTENSET = TIMER_INTENSET_COMPARE1_Enabled << TIMER_INTENSET_COMPARE1_Pos;
  1387. NVIC_ClearPendingIRQ(HCI_TIMER_IRQn);
  1388. NVIC_SetPriority(HCI_TIMER_IRQn, APP_IRQ_PRIORITY_HIGH);
  1389. NVIC_EnableIRQ(HCI_TIMER_IRQn);
  1390. #endif
  1391. return NRF_SUCCESS;
  1392. }
  1393. static uint32_t hci_timer_init(void)
  1394. {
  1395. #ifdef HCI_APP_TIMER
  1396. uint32_t err_code = NRF_SUCCESS;
  1397. err_code = app_timer_create(&m_app_timer_id, APP_TIMER_MODE_REPEATED, hci_timeout_handler);
  1398. if (err_code != NRF_SUCCESS)
  1399. {
  1400. return NRF_ERROR_INTERNAL;
  1401. }
  1402. #endif
  1403. return hci_timer_reset();
  1404. }
  1405. void ser_phy_hci_reset(void)
  1406. {
  1407. m_p_tx_payload = NULL;
  1408. m_p_rx_buffer = NULL;
  1409. nrf_queue_reset(&m_tx_evt_queue);
  1410. nrf_queue_reset(&m_rx_evt_queue);
  1411. (void)hci_timer_reset();
  1412. m_packet_ack_number = INITIAL_ACK_NUMBER_EXPECTED;
  1413. m_packet_seq_number = INITIAL_SEQ_NUMBER;
  1414. #ifndef HCI_LINK_CONTROL
  1415. m_hci_tx_fsm_state = HCI_TX_STATE_SEND;
  1416. m_hci_rx_fsm_state = HCI_RX_STATE_RECEIVE;
  1417. #else
  1418. m_hci_tx_fsm_state = HCI_TX_STATE_DISABLE;
  1419. m_hci_rx_fsm_state = HCI_RX_STATE_DISABLE;
  1420. hci_timeout_setup(HCI_LINK_CONTROL_TIMEOUT);// Trigger sending SYNC messages
  1421. m_hci_link_control_next_pkt = HCI_PKT_SYNC;
  1422. m_hci_mode = HCI_MODE_UNINITIALIZED;
  1423. m_hci_other_side_active = false;
  1424. m_rx_fsm_idle_flag = true;
  1425. m_hci_global_enable_flag = true;
  1426. #endif /*HCI_LINK_CONTROL*/
  1427. }
  1428. /* ser_phy API function */
  1429. uint32_t ser_phy_open(ser_phy_events_handler_t events_handler)
  1430. {
  1431. uint32_t err_code;
  1432. if ((m_hci_tx_fsm_state != HCI_TX_STATE_DISABLE) || (m_hci_rx_fsm_state != HCI_RX_STATE_DISABLE))
  1433. {
  1434. return NRF_ERROR_INVALID_STATE;
  1435. }
  1436. if (events_handler == NULL)
  1437. {
  1438. return NRF_ERROR_NULL;
  1439. }
  1440. m_ser_phy_callback = events_handler;
  1441. err_code = hci_timer_init();
  1442. if (err_code != NRF_SUCCESS)
  1443. {
  1444. return NRF_ERROR_INTERNAL;
  1445. }
  1446. err_code = ser_phy_hci_slip_open(hci_slip_event_handler);
  1447. if (err_code != NRF_SUCCESS)
  1448. {
  1449. return err_code;
  1450. }
  1451. if (err_code == NRF_SUCCESS)
  1452. {
  1453. ser_phy_hci_reset();
  1454. }
  1455. return err_code;
  1456. }
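/* Typical usage from a transport layer, as a sketch only — my_phy_handler and m_rx_buf are
 * illustrative names, not part of this module:
 *
 *   static uint8_t m_rx_buf[SER_HAL_TRANSPORT_MAX_PKT_SIZE];
 *
 *   static void my_phy_handler(ser_phy_evt_t evt)
 *   {
 *       switch (evt.evt_type)
 *       {
 *           case SER_PHY_EVT_RX_BUF_REQUEST:
 *               // Grant a buffer; passing NULL makes the module drop the packet.
 *               (void) ser_phy_rx_buf_set(m_rx_buf);
 *               break;
 *           case SER_PHY_EVT_RX_PKT_RECEIVED:
 *           case SER_PHY_EVT_TX_PKT_SENT:
 *           default:
 *               break;
 *       }
 *   }
 *
 *   uint32_t err_code = ser_phy_open(my_phy_handler);
 *   APP_ERROR_CHECK(err_code);
 *   err_code = ser_phy_tx_pkt_send(p_data, data_length); // p_data must stay valid until TX_PKT_SENT
 */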
  1457. static uint32_t hci_timer_close(void)
  1458. {
  1459. uint32_t err_code = NRF_SUCCESS;
  1460. #ifdef HCI_APP_TIMER
  1461. err_code = app_timer_stop(m_app_timer_id);
  1462. if (err_code != NRF_SUCCESS)
  1463. {
  1464. return NRF_ERROR_INTERNAL;
  1465. }
  1466. #endif
  1467. return err_code;
  1468. }
  1469. /* ser_phy API function */
  1470. void ser_phy_close(void)
  1471. {
  1472. m_ser_phy_callback = NULL;
  1473. ser_phy_hci_slip_close();
  1474. m_hci_tx_fsm_state = HCI_TX_STATE_DISABLE;
  1475. m_hci_rx_fsm_state = HCI_RX_STATE_DISABLE;
  1476. #ifdef HCI_LINK_CONTROL
  1477. m_hci_mode = HCI_MODE_DISABLE;
  1478. #endif /* HCI_LINK_CONTROL */
  1479. uint32_t err_code = hci_timer_close();
  1480. ser_phy_hci_assert(err_code == NRF_SUCCESS);
  1481. }