nrf_block_dev_qspi.c
/**
 * Copyright (c) 2016 - 2020, Nordic Semiconductor ASA
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form, except as embedded into a Nordic
 *    Semiconductor ASA integrated circuit in a product or a software update for
 *    such product, must reproduce the above copyright notice, this list of
 *    conditions and the following disclaimer in the documentation and/or other
 *    materials provided with the distribution.
 *
 * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * 4. This software, with or without modification, must only be used with a
 *    Nordic Semiconductor ASA integrated circuit.
 *
 * 5. Any software provided in binary form under this license must not be reverse
 *    engineered, decompiled, modified and/or disassembled.
 *
 * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "sdk_common.h"
#if NRF_MODULE_ENABLED(NRF_BLOCK_DEV_QSPI)
#include "nrf_serial_flash_params.h"
#include "nrf_block_dev_qspi.h"
#include <inttypes.h>
/**@file
 *
 * @ingroup nrf_block_dev_qspi
 * @{
 *
 * @brief This module implements the block device API on top of a QSPI serial flash.
 *        It can be used as a reference block device implementation.
 */
#if NRF_BLOCK_DEV_QSPI_CONFIG_LOG_ENABLED
#define NRF_LOG_LEVEL       NRF_BLOCK_DEV_QSPI_CONFIG_LOG_LEVEL
#define NRF_LOG_INFO_COLOR  NRF_BLOCK_DEV_QSPI_CONFIG_INFO_COLOR
#define NRF_LOG_DEBUG_COLOR NRF_BLOCK_DEV_QSPI_CONFIG_DEBUG_COLOR
#else
#define NRF_LOG_LEVEL       0
#endif
#include "nrf_log.h"
#define QSPI_STD_CMD_WRSR        0x01       /**< Write status register command. */
#define QSPI_STD_CMD_RSTEN       0x66       /**< Reset enable command. */
#define QSPI_STD_CMD_RST         0x99       /**< Reset command. */
#define QSPI_STD_CMD_READ_ID     0x9F       /**< Read ID (JEDEC) command. */

#define BD_PAGE_PROGRAM_SIZE     256        /**< Page program size (minimum block size). */

#define BD_ERASE_UNIT_INVALID_ID 0xFFFFFFFF /**< Invalid erase unit number. */
#define BD_ERASE_UNIT_ERASE_VAL  0xFFFFFFFF /**< Erased memory value. */
/**
 * @brief Block to erase unit translation
 *
 * @param blk_id   Block index
 * @param blk_size Block size
 * */
#define BD_BLOCK_TO_ERASEUNIT(blk_id, blk_size) \
    (((blk_id) * (blk_size)) / (NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE))

/**
 * @brief Blocks per erase unit
 *
 * @param blk_size Block size
 * */
#define BD_BLOCKS_PER_ERASEUNIT(blk_size) \
    (NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE / (blk_size))
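
/*
 * Worked example (assuming the default 4096-byte erase unit and an
 * illustrative 512-byte block size): block 9 starts at byte offset
 * 9 * 512 = 4608, so BD_BLOCK_TO_ERASEUNIT(9, 512) = 4608 / 4096 = 1,
 * and BD_BLOCKS_PER_ERASEUNIT(512) = 4096 / 512 = 8 blocks per erase unit.
 */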
static ret_code_t block_dev_qspi_eunit_write(nrf_block_dev_qspi_t const * p_qspi_dev,
                                             nrf_block_req_t * p_blk_left);
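
/**
 * @brief Patches a completed read with data from the cached erase unit.
 *
 * In write-back mode the RAM buffer holding the cached erase unit may contain
 * newer data than the flash itself. After a flash read completes, the part of
 * the request that overlaps the cached erase unit is re-copied from that
 * buffer. Three overlaps are possible: the request starts inside the cached
 * unit, ends inside it, or spans completely across it.
 */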
static void block_dev_qspi_read_from_eunit(nrf_block_dev_qspi_t const * p_qspi_dev)
{
    nrf_block_dev_qspi_work_t const * p_work = p_qspi_dev->p_work;

    /* In write-back mode the data just read from flash may be stale compared
     * to the cached erase unit buffer. */
    uint32_t eunit_start = BD_BLOCK_TO_ERASEUNIT(p_work->req.blk_id,
                                                 p_work->geometry.blk_size);
    uint32_t eunit_end = BD_BLOCK_TO_ERASEUNIT(p_work->req.blk_id + p_work->req.blk_count,
                                               p_work->geometry.blk_size);

    if ((eunit_start > p_work->erase_unit_idx) || (eunit_end < p_work->erase_unit_idx))
    {
        /* Do nothing. The read request does not hit the currently cached erase unit. */
        return;
    }

    /* Case 1: Copy data from the start erase unit. */
    if (eunit_start == p_work->erase_unit_idx)
    {
        size_t blk = p_work->req.blk_id %
                     BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size);
        size_t cnt = BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size) - blk;
        size_t off = p_work->geometry.blk_size * blk;

        if (cnt > p_work->req.blk_count)
        {
            cnt = p_work->req.blk_count;
        }

        memcpy(p_work->req.p_buff,
               p_work->p_erase_unit_buff + off,
               cnt * p_work->geometry.blk_size);
        return;
    }

    /* Case 2: Copy data from the end erase unit. */
    if (eunit_end == p_work->erase_unit_idx)
    {
        size_t cnt = (p_work->req.blk_id + p_work->req.blk_count) %
                     BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size);
        size_t off = (p_work->erase_unit_idx * BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size) -
                      p_work->req.blk_id) * p_work->geometry.blk_size;

        if (cnt > p_work->req.blk_count)
        {
            cnt = p_work->req.blk_count;
        }

        memcpy((uint8_t *)p_work->req.p_buff + off,
               p_work->p_erase_unit_buff,
               cnt * p_work->geometry.blk_size);
        return;
    }

    /* Case 3: The cached erase unit lies strictly between eunit_start and eunit_end. */
    size_t off = (p_work->erase_unit_idx * BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size) -
                  p_work->req.blk_id) * p_work->geometry.blk_size;

    memcpy((uint8_t *)p_work->req.p_buff + off,
           p_work->p_erase_unit_buff,
           NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE);
}
/**
 * @brief Active QSPI block device handle. Only one instance may be active at a time.
 * */
static nrf_block_dev_qspi_t const * m_active_qspi_dev;

static void qspi_handler(nrf_drv_qspi_evt_t event, void * p_context)
{
    if (m_active_qspi_dev != p_context)
    {
        return;
    }

    nrf_block_dev_qspi_t const * p_qspi_dev = p_context;
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    nrf_block_req_t * p_blk_left = &p_work->left_req;

    switch (p_work->state)
    {
        case NRF_BLOCK_DEV_QSPI_STATE_READ_EXEC:
        {
            if (p_work->writeback_mode)
            {
                block_dev_qspi_read_from_eunit(p_qspi_dev);
            }

            p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
            if (p_work->ev_handler)
            {
                const nrf_block_dev_event_t ev = {
                        NRF_BLOCK_DEV_EVT_BLK_READ_DONE,
                        NRF_BLOCK_DEV_RESULT_SUCCESS,
                        &p_work->req,
                        p_work->p_context
                };

                p_work->ev_handler(&p_qspi_dev->block_dev, &ev);
            }
            break;
        }
        case NRF_BLOCK_DEV_QSPI_STATE_EUNIT_LOAD:
        {
            ret_code_t ret;
            uint32_t erase_unit = BD_BLOCK_TO_ERASEUNIT(p_blk_left->blk_id,
                                                        p_work->geometry.blk_size);
            UNUSED_VARIABLE(erase_unit);
            ASSERT(erase_unit == p_work->erase_unit_idx);

            /* The erase unit buffer is now loaded; merge the pending blocks into it. */
            ret = block_dev_qspi_eunit_write(p_qspi_dev, p_blk_left);
            ASSERT(ret == NRF_SUCCESS);
            UNUSED_VARIABLE(ret);
            break;
        }
        case NRF_BLOCK_DEV_QSPI_STATE_WRITE_ERASE:
        case NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC:
        {
            /* Clear the dirty bit of the block that has just been programmed. */
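            /* __CLZ(__RBIT(x)) is the CMSIS find-first-set idiom: __RBIT
             * reverses the bit order of the word, so counting leading zeros
             * yields the index of the least significant set bit in the mask. */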
            uint32_t block_to_program = __CLZ(__RBIT(p_work->erase_unit_dirty_blocks));

            if (p_work->state == NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC)
            {
                p_work->erase_unit_dirty_blocks ^= 1u << block_to_program;
            }

            if (p_work->erase_unit_dirty_blocks == 0)
            {
                if (p_work->left_req.blk_count)
                {
                    /* Load next erase unit. */
                    ret_code_t ret;
                    uint32_t eunit = BD_BLOCK_TO_ERASEUNIT(p_blk_left->blk_id,
                                                           p_work->geometry.blk_size);
                    p_work->erase_unit_idx = eunit;
                    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_EUNIT_LOAD;

                    ret = nrf_drv_qspi_read(p_work->p_erase_unit_buff,
                                            NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE,
                                            p_work->erase_unit_idx *
                                            NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE);
                    UNUSED_VARIABLE(ret);
                    break;
                }

                /* All blocks are programmed. Call the event handler if required. */
                p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
                if (p_work->ev_handler && !p_work->cache_flushing)
                {
                    const nrf_block_dev_event_t ev = {
                            NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE,
                            NRF_BLOCK_DEV_RESULT_SUCCESS,
                            &p_work->req,
                            p_work->p_context
                    };

                    p_work->ev_handler(&p_qspi_dev->block_dev, &ev);
                }

                p_work->cache_flushing = false;
                break;
            }

            /* Get the next block to program from the program mask. */
            block_to_program = __CLZ(__RBIT(p_work->erase_unit_dirty_blocks));
            uint32_t dst_address = (p_work->erase_unit_idx * NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE) +
                                   (block_to_program * p_work->geometry.blk_size);

            const void * p_src_address = p_work->p_erase_unit_buff +
                                         block_to_program * p_work->geometry.blk_size;

            p_work->state = NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC;
            ret_code_t ret = nrf_drv_qspi_write(p_src_address,
                                                p_work->geometry.blk_size,
                                                dst_address);
            UNUSED_VARIABLE(ret);
            break;
        }
        default:
            ASSERT(0);
            break;
    }
}
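
/**
 * @brief Blocks until the internal state machine returns to IDLE.
 *
 * Used for synchronous operation (no event handler registered): the CPU
 * sleeps on WFI and is woken by the QSPI interrupt, whose handler advances
 * the state machine until the request completes.
 */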
static void wait_for_idle(nrf_block_dev_qspi_t const * p_qspi_dev)
{
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    while (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
    {
        __WFI();
    }
}
static ret_code_t block_dev_qspi_init(nrf_block_dev_t const * p_blk_dev,
                                      nrf_block_dev_ev_handler ev_handler,
                                      void const * p_context)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_qspi_t const * p_qspi_dev =
                                 CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    nrf_drv_qspi_config_t const * p_qspi_cfg = &p_qspi_dev->qspi_bdev_config.qspi_config;
    ret_code_t ret = NRF_SUCCESS;

    NRF_LOG_INST_DEBUG(p_qspi_dev->p_log, "Init");

    if (p_qspi_dev->qspi_bdev_config.block_size % BD_PAGE_PROGRAM_SIZE)
    {
        /* Unsupported block size: it must be a multiple of the program page size. */
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "Unsupported block size because of program page size");
        return NRF_ERROR_NOT_SUPPORTED;
    }

    if (NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE % p_qspi_dev->qspi_bdev_config.block_size)
    {
        /* Unsupported block size: it must divide the erase unit size evenly. */
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "Unsupported block size because of erase unit size");
        return NRF_ERROR_NOT_SUPPORTED;
    }

    if (m_active_qspi_dev)
    {
        /* QSPI instance is BUSY. */
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "Cannot init because QSPI is busy");
        return NRF_ERROR_BUSY;
    }

    ret = nrf_drv_qspi_init(p_qspi_cfg, qspi_handler, (void *)p_blk_dev);
    if (ret != NRF_SUCCESS)
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI init error: %"PRIu32"", ret);
        return ret;
    }

    nrf_qspi_cinstr_conf_t cinstr_cfg = {
        .opcode    = QSPI_STD_CMD_RSTEN,
        .length    = NRF_QSPI_CINSTR_LEN_1B,
        .io2_level = true,
        .io3_level = true,
        .wipwait   = true,
        .wren      = true
    };

    /* Send reset enable */
    ret = nrf_drv_qspi_cinstr_xfer(&cinstr_cfg, NULL, NULL);
    if (ret != NRF_SUCCESS)
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI reset enable command error: %"PRIu32"", ret);
        return ret;
    }

    /* Send reset command */
    cinstr_cfg.opcode = QSPI_STD_CMD_RST;
    ret = nrf_drv_qspi_cinstr_xfer(&cinstr_cfg, NULL, NULL);
    if (ret != NRF_SUCCESS)
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI reset command error: %"PRIu32"", ret);
        return ret;
    }

    /* Get the 3-byte JEDEC identification value. */
    uint8_t rdid_buf[3] = {0, 0, 0};
    cinstr_cfg.opcode = QSPI_STD_CMD_READ_ID;
    cinstr_cfg.length = NRF_QSPI_CINSTR_LEN_4B;
    ret = nrf_drv_qspi_cinstr_xfer(&cinstr_cfg, NULL, rdid_buf);
    if (ret != NRF_SUCCESS)
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI get 3 byte id error: %"PRIu32"", ret);
        return ret;
    }

    nrf_serial_flash_params_t const * serial_flash_id = nrf_serial_flash_params_get(rdid_buf);
    if (!serial_flash_id)
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI FLASH not supported");
        return NRF_ERROR_NOT_SUPPORTED;
    }

    if (serial_flash_id->erase_size != NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE)
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI FLASH erase unit size not supported");
        return NRF_ERROR_NOT_SUPPORTED;
    }

    /* Calculate the block device geometry. */
    uint32_t blk_size = p_qspi_dev->qspi_bdev_config.block_size;
    uint32_t blk_count = serial_flash_id->size / p_qspi_dev->qspi_bdev_config.block_size;

    if (!blk_count || (blk_count % BD_BLOCKS_PER_ERASEUNIT(blk_size)))
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI FLASH block size not supported");
        return NRF_ERROR_NOT_SUPPORTED;
    }

    p_work->geometry.blk_size = blk_size;
    p_work->geometry.blk_count = blk_count;

    p_work->p_context = p_context;
    p_work->ev_handler = ev_handler;
    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
    p_work->erase_unit_idx = BD_ERASE_UNIT_INVALID_ID;
    p_work->writeback_mode = (p_qspi_dev->qspi_bdev_config.flags &
                              NRF_BLOCK_DEV_QSPI_FLAG_CACHE_WRITEBACK) != 0;
    m_active_qspi_dev = p_qspi_dev;

    if (p_work->ev_handler)
    {
        /* Simulate an asynchronous operation: report the init event immediately. */
        const nrf_block_dev_event_t ev = {
                NRF_BLOCK_DEV_EVT_INIT,
                NRF_BLOCK_DEV_RESULT_SUCCESS,
                NULL,
                p_work->p_context
        };

        p_work->ev_handler(p_blk_dev, &ev);
    }

    return NRF_SUCCESS;
}
static ret_code_t block_dev_qspi_uninit(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_qspi_t const * p_qspi_dev =
                                 CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;

    NRF_LOG_INST_DEBUG(p_qspi_dev->p_log, "Uninit");

    if (m_active_qspi_dev != p_qspi_dev)
    {
        /* QSPI instance is BUSY. */
        return NRF_ERROR_BUSY;
    }

    if (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
    {
        /* Previous asynchronous operation in progress. */
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "Cannot uninit because QSPI is busy");
        return NRF_ERROR_BUSY;
    }

    if (p_work->ev_handler)
    {
        /* Asynchronous operation. */
        const nrf_block_dev_event_t ev = {
                NRF_BLOCK_DEV_EVT_UNINIT,
                NRF_BLOCK_DEV_RESULT_SUCCESS,
                NULL,
                p_work->p_context
        };

        p_work->ev_handler(p_blk_dev, &ev);
    }

    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_DISABLED;
    nrf_drv_qspi_uninit();
    memset(p_work, 0, sizeof(nrf_block_dev_qspi_work_t));
    m_active_qspi_dev = NULL;
    return NRF_SUCCESS;
}
static ret_code_t block_dev_qspi_read_req(nrf_block_dev_t const * p_blk_dev,
                                          nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev);
    ASSERT(p_blk);
    nrf_block_dev_qspi_t const * p_qspi_dev =
                                 CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    ret_code_t ret = NRF_SUCCESS;

    NRF_LOG_INST_DEBUG(
        p_qspi_dev->p_log,
        "Read req from block %"PRIu32" size %"PRIu32"(x%"PRIu32") to %"PRIXPTR,
        p_blk->blk_id,
        p_blk->blk_count,
        p_blk_dev->p_ops->geometry(p_blk_dev)->blk_size,
        p_blk->p_buff);

    if ((p_blk->blk_id + p_blk->blk_count) > p_work->geometry.blk_count)
    {
        NRF_LOG_INST_ERROR(
            p_qspi_dev->p_log,
            "Out of range read req block %"PRIu32" count %"PRIu32" while max is %"PRIu32,
            p_blk->blk_id,
            p_blk->blk_count,
            p_blk_dev->p_ops->geometry(p_blk_dev)->blk_count);
        return NRF_ERROR_INVALID_ADDR;
    }

    if (m_active_qspi_dev != p_qspi_dev)
    {
        /* QSPI instance is BUSY. */
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "Cannot read because QSPI is busy");
        return NRF_ERROR_BUSY;
    }

    if (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
    {
        /* Previous asynchronous operation in progress. */
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "Cannot read because of ongoing previous operation");
        return NRF_ERROR_BUSY;
    }

    p_work->left_req = *p_blk;
    p_work->req = *p_blk;
    nrf_block_req_t * p_blk_left = &p_work->left_req;

    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_READ_EXEC;
    ret = nrf_drv_qspi_read(p_blk_left->p_buff,
                            p_blk_left->blk_count * p_work->geometry.blk_size,
                            p_blk_left->blk_id * p_work->geometry.blk_size);
    if (ret != NRF_SUCCESS)
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI read error: %"PRIu32"", ret);
        p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
        return ret;
    }

    p_blk_left->p_buff = NULL;
    p_blk_left->blk_count = 0;

    if (!p_work->ev_handler && (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE))
    {
        /* Synchronous operation. */
        wait_for_idle(p_qspi_dev);
    }

    return ret;
}
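
/**
 * @brief Merges new data into the cached erase unit buffer, word by word.
 *
 * NOR flash programming can only change bits from 1 to 0. If any destination
 * word differs from the source and is not in the erased state
 * (BD_ERASE_UNIT_ERASE_VAL), the whole erase unit must be erased before
 * programming. Every changed block is marked in the dirty-block mask.
 *
 * @return true if an erase is required before programming.
 */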
static bool block_dev_qspi_update_eunit(nrf_block_dev_qspi_t const * p_qspi_dev,
                                        size_t off,
                                        const void * p_src,
                                        size_t len)
{
    ASSERT((len % sizeof(uint32_t)) == 0);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;

    uint32_t * p_dst32 = (uint32_t *)(p_work->p_erase_unit_buff + off);
    const uint32_t * p_src32 = p_src;
    bool erase_required = false;
    len /= sizeof(uint32_t);

    /* Copy word by word, checking whether an erase will be required. */
    do
    {
        if (*p_dst32 != *p_src32)
        {
            if (*p_dst32 != BD_ERASE_UNIT_ERASE_VAL)
            {
                erase_required = true;
            }
            /* Mark the block as dirty. */
            p_work->erase_unit_dirty_blocks |= 1u << (off / p_work->geometry.blk_size);
        }

        *p_dst32++ = *p_src32++;
        off += sizeof(uint32_t);
    } while (--len);

    return erase_required;
}
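
/**
 * @brief Starts flushing the cached erase unit to flash.
 *
 * If no erase is required, programming of the first dirty block starts
 * immediately; subsequent dirty blocks are programmed from qspi_handler().
 * Otherwise the erase unit is erased first and all of its blocks are
 * reprogrammed from the WRITE_ERASE state. Note the NRF_QSPI_ERASE_LEN_4KB
 * argument below assumes a 4 KB NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE.
 */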
static ret_code_t block_dev_qspi_write_start(nrf_block_dev_qspi_t const * p_qspi_dev)
{
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;

    if (!p_work->erase_required)
    {
        /* Get the first block to program from the program mask. */
        uint32_t block_to_program = __CLZ(__RBIT(p_work->erase_unit_dirty_blocks));
        uint32_t dst_address = (p_work->erase_unit_idx * NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE) +
                               (block_to_program * p_work->geometry.blk_size);

        const void * p_src_address = p_work->p_erase_unit_buff +
                                     block_to_program * p_work->geometry.blk_size;

        p_work->state = NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC;
        return nrf_drv_qspi_write(p_src_address,
                                  p_work->geometry.blk_size,
                                  dst_address);
    }

    /* Erase is required. */
    uint32_t address = (p_work->erase_unit_idx * NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE);
    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_WRITE_ERASE;
    p_work->erase_required = false;
    return nrf_drv_qspi_erase(NRF_QSPI_ERASE_LEN_4KB, address);
}
static ret_code_t block_dev_qspi_eunit_write(nrf_block_dev_qspi_t const * p_qspi_dev,
                                             nrf_block_req_t * p_blk_left)
{
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;

    size_t blk = p_blk_left->blk_id %
                 BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size);
    size_t cnt = BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size) - blk;
    size_t off = p_work->geometry.blk_size * blk;

    if (cnt > p_blk_left->blk_count)
    {
        cnt = p_blk_left->blk_count;
    }

    bool erase_required = block_dev_qspi_update_eunit(p_qspi_dev,
                                                      off,
                                                      p_blk_left->p_buff,
                                                      cnt * p_work->geometry.blk_size);
    if (erase_required)
    {
        p_work->erase_required = true;
    }

    p_blk_left->blk_count -= cnt;
    p_blk_left->blk_id += cnt;
    p_blk_left->p_buff = (uint8_t *)p_blk_left->p_buff + cnt * p_work->geometry.blk_size;

    if (p_work->erase_required)
    {
        /* An erase wipes the whole unit, so every block in it must be reprogrammed. */
        uint32_t blk_size = p_work->geometry.blk_size;
        p_work->erase_unit_dirty_blocks |= (1u << BD_BLOCKS_PER_ERASEUNIT(blk_size)) - 1;
    }

    if (p_work->erase_unit_dirty_blocks == 0 || p_work->writeback_mode)
    {
        /* Nothing to flush now: either no blocks are dirty, or write-back mode
         * defers the flush. Report write completion if this was the last chunk. */
        if (p_work->ev_handler && p_blk_left->blk_count == 0)
        {
            const nrf_block_dev_event_t ev = {
                    NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE,
                    NRF_BLOCK_DEV_RESULT_SUCCESS,
                    &p_work->req,
                    p_work->p_context
            };

            p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
            p_work->ev_handler(&p_qspi_dev->block_dev, &ev);
            return NRF_SUCCESS;
        }
    }

    return block_dev_qspi_write_start(p_qspi_dev);
}
static ret_code_t block_dev_qspi_write_req(nrf_block_dev_t const * p_blk_dev,
                                           nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev);
    ASSERT(p_blk);
    nrf_block_dev_qspi_t const * p_qspi_dev =
                                 CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    ret_code_t ret = NRF_SUCCESS;

    NRF_LOG_INST_DEBUG(
        p_qspi_dev->p_log,
        "Write req to block %"PRIu32" size %"PRIu32"(x%"PRIu32") from %"PRIXPTR,
        p_blk->blk_id,
        p_blk->blk_count,
        p_blk_dev->p_ops->geometry(p_blk_dev)->blk_size,
        p_blk->p_buff);

    if ((p_blk->blk_id + p_blk->blk_count) > p_work->geometry.blk_count)
    {
        NRF_LOG_INST_ERROR(
            p_qspi_dev->p_log,
            "Out of range write req block %"PRIu32" count %"PRIu32" while max is %"PRIu32,
            p_blk->blk_id,
            p_blk->blk_count,
            p_blk_dev->p_ops->geometry(p_blk_dev)->blk_count);
        return NRF_ERROR_INVALID_ADDR;
    }

    if (m_active_qspi_dev != p_qspi_dev)
    {
        /* QSPI instance is BUSY. */
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "Cannot write because QSPI is busy");
        return NRF_ERROR_BUSY;
    }

    if (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
    {
        /* Previous asynchronous operation in progress. */
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "Cannot write because of ongoing previous operation");
        return NRF_ERROR_BUSY;
    }

    p_work->left_req = *p_blk;
    p_work->req = *p_blk;
    nrf_block_req_t * p_blk_left = &p_work->left_req;

    uint32_t erase_unit = BD_BLOCK_TO_ERASEUNIT(p_blk_left->blk_id,
                                                p_work->geometry.blk_size);

    /* Check if the first block is already in the cached erase unit buffer. */
    if (erase_unit == p_work->erase_unit_idx)
    {
        ret = block_dev_qspi_eunit_write(p_qspi_dev, p_blk_left);
    }
    else
    {
        if (p_work->writeback_mode)
        {
            /* Flush the currently cached erase unit before loading a new one. */
            ret = block_dev_qspi_write_start(p_qspi_dev);
        }
        else
        {
            p_work->erase_unit_idx = erase_unit;
            p_work->state = NRF_BLOCK_DEV_QSPI_STATE_EUNIT_LOAD;
            ret = nrf_drv_qspi_read(p_work->p_erase_unit_buff,
                                    NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE,
                                    erase_unit * NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE);
        }
    }

    if (ret != NRF_SUCCESS)
    {
        NRF_LOG_INST_ERROR(p_qspi_dev->p_log, "QSPI write error: %"PRIu32"", ret);
        p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
        return ret;
    }

    if (!p_work->ev_handler && (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE))
    {
        /* Synchronous operation. */
        wait_for_idle(p_qspi_dev);
    }

    return ret;
}
static ret_code_t block_dev_qspi_ioctl(nrf_block_dev_t const * p_blk_dev,
                                       nrf_block_dev_ioctl_req_t req,
                                       void * p_data)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_qspi_t const * p_qspi_dev =
                                 CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;

    switch (req)
    {
        case NRF_BLOCK_DEV_IOCTL_REQ_CACHE_FLUSH:
        {
            bool * p_flushing = p_data;
            NRF_LOG_INST_DEBUG(p_qspi_dev->p_log, "IOCtl: Cache flush");
            if (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
            {
                return NRF_ERROR_BUSY;
            }

            if (!p_work->writeback_mode || p_work->erase_unit_dirty_blocks == 0)
            {
                if (p_flushing)
                {
                    *p_flushing = false;
                }
                return NRF_SUCCESS;
            }

            ret_code_t ret = block_dev_qspi_write_start(p_qspi_dev);
            if (ret == NRF_SUCCESS)
            {
                if (p_flushing)
                {
                    *p_flushing = true;
                }
                p_work->cache_flushing = true;
            }

            return ret;
        }
        case NRF_BLOCK_DEV_IOCTL_REQ_INFO_STRINGS:
        {
            if (p_data == NULL)
            {
                return NRF_ERROR_INVALID_PARAM;
            }

            nrf_block_dev_info_strings_t const * * pp_strings = p_data;
            *pp_strings = &p_qspi_dev->info_strings;
            return NRF_SUCCESS;
        }
        default:
            break;
    }
    return NRF_ERROR_NOT_SUPPORTED;
}
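
/*
 * A minimal cache-flush sketch for write-back mode (assuming a device instance
 * named m_block_dev_qspi and the nrf_blk_dev_ioctl() inline wrapper from
 * nrf_block_dev.h):
 *
 *     nrf_block_dev_t const * p_bd = &m_block_dev_qspi.block_dev;
 *     bool flush_started = false;
 *     ret_code_t ret = nrf_blk_dev_ioctl(p_bd,
 *                                        NRF_BLOCK_DEV_IOCTL_REQ_CACHE_FLUSH,
 *                                        &flush_started);
 *
 * While the flush is still in progress, a repeated ioctl call returns
 * NRF_ERROR_BUSY (the state machine is not IDLE); note that a completed flush
 * does not generate a write-done event, because cache_flushing suppresses it.
 */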
static nrf_block_dev_geometry_t const * block_dev_qspi_geometry(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_qspi_t const * p_qspi_dev =
                                 CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t const * p_work = p_qspi_dev->p_work;
    return &p_work->geometry;
}
const nrf_block_dev_ops_t nrf_block_device_qspi_ops = {
        .init      = block_dev_qspi_init,
        .uninit    = block_dev_qspi_uninit,
        .read_req  = block_dev_qspi_read_req,
        .write_req = block_dev_qspi_write_req,
        .ioctl     = block_dev_qspi_ioctl,
        .geometry  = block_dev_qspi_geometry,
};
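
/*
 * A minimal usage sketch (assuming the NRF_BLOCK_DEV_QSPI_DEFINE() instance
 * macro from nrf_block_dev_qspi.h and the nrf_blk_dev_* inline wrappers from
 * nrf_block_dev.h; the instance name and 512-byte block size are illustrative):
 *
 *     NRF_BLOCK_DEV_QSPI_DEFINE(
 *         m_block_dev_qspi,
 *         NRF_BLOCK_DEV_QSPI_CONFIG(
 *             512,
 *             NRF_BLOCK_DEV_QSPI_FLAG_CACHE_WRITEBACK,
 *             NRF_DRV_QSPI_DEFAULT_CONFIG),
 *         NFR_BLOCK_DEV_INFO_CONFIG("Nordic", "QSPI", "1.00"));  // "NFR" is the SDK's spelling
 *
 *     nrf_block_dev_t const * p_bd = &m_block_dev_qspi.block_dev;
 *     // A NULL event handler selects synchronous operation (see wait_for_idle()).
 *     ret_code_t ret = nrf_blk_dev_init(p_bd, NULL, NULL);
 */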
/** @} */
#endif // NRF_MODULE_ENABLED(NRF_BLOCK_DEV_QSPI)