Lines Matching refs:ctrlr

41 nvme_ctrlr_get_ready_to_in_ms(struct nvme_ctrlr *ctrlr)  in nvme_ctrlr_get_ready_to_in_ms()  argument
48 cap.raw = nvme_reg_mmio_read_8(ctrlr, cap.raw); in nvme_ctrlr_get_ready_to_in_ms()
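
The helper above derives the controller's worst-case ready timeout from the CAP register. Per the NVMe spec, CAP.TO sits in bits 31:24 and counts 500 ms units: the time to wait for CSTS.RDY after toggling CC.EN. A minimal arithmetic sketch, assuming a raw 64-bit CAP value already fetched via nvme_reg_mmio_read_8():

    #include <stdint.h>

    /* CAP.TO (bits 31:24) is the worst-case ready timeout in 500 ms units. */
    static unsigned int ready_timeout_ms(uint64_t cap_raw)
    {
        return (unsigned int)((cap_raw >> 24) & 0xff) * 500;
    }
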
56 static int nvme_ctrlr_create_qpair(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_create_qpair() argument
62 ret = nvme_admin_create_ioq(ctrlr, qpair, NVME_IO_COMPLETION_QUEUE); in nvme_ctrlr_create_qpair()
70 ret = nvme_admin_create_ioq(ctrlr, qpair, NVME_IO_SUBMISSION_QUEUE); in nvme_ctrlr_create_qpair()
75 nvme_admin_delete_ioq(ctrlr, qpair, NVME_IO_COMPLETION_QUEUE); in nvme_ctrlr_create_qpair()
87 static int nvme_ctrlr_delete_qpair(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_delete_qpair() argument
93 ret = nvme_admin_delete_ioq(ctrlr, qpair, NVME_IO_SUBMISSION_QUEUE); in nvme_ctrlr_delete_qpair()
101 ret = nvme_admin_delete_ioq(ctrlr, qpair, NVME_IO_COMPLETION_QUEUE); in nvme_ctrlr_delete_qpair()
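
These two functions are mirror images: an I/O completion queue must exist before the submission queue that posts to it, so creation orders CQ then SQ (rolling the CQ back if the SQ fails), and deletion orders SQ then CQ. A consolidated sketch of the create side, assuming the driver's internal headers supply the types and the NVME_IO_* constants seen above:

    /* Create CQ -> SQ; on SQ failure, tear the CQ back down so the
     * controller is not left with an orphaned completion queue. */
    static int ioq_pair_create(struct nvme_ctrlr *ctrlr,
                               struct nvme_qpair *qpair)
    {
        int ret = nvme_admin_create_ioq(ctrlr, qpair, NVME_IO_COMPLETION_QUEUE);
        if (ret == 0) {
            ret = nvme_admin_create_ioq(ctrlr, qpair, NVME_IO_SUBMISSION_QUEUE);
            if (ret != 0)
                nvme_admin_delete_ioq(ctrlr, qpair, NVME_IO_COMPLETION_QUEUE);
        }
        return ret;
    }
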
115 nvme_ctrlr_construct_intel_support_log_page_list(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_construct_intel_support_log_page_list() argument
119 if (ctrlr->cdata.vid != NVME_PCI_VID_INTEL || in nvme_ctrlr_construct_intel_support_log_page_list()
123 ctrlr->log_page_supported[NVME_INTEL_LOG_PAGE_DIR] = true; in nvme_ctrlr_construct_intel_support_log_page_list()
126 (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) in nvme_ctrlr_construct_intel_support_log_page_list()
127 ctrlr->log_page_supported[NVME_INTEL_LOG_READ_CMD_LATENCY] = true; in nvme_ctrlr_construct_intel_support_log_page_list()
130 (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) in nvme_ctrlr_construct_intel_support_log_page_list()
131 ctrlr->log_page_supported[NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true; in nvme_ctrlr_construct_intel_support_log_page_list()
134 ctrlr->log_page_supported[NVME_INTEL_LOG_TEMPERATURE] = true; in nvme_ctrlr_construct_intel_support_log_page_list()
137 ctrlr->log_page_supported[NVME_INTEL_LOG_SMART] = true; in nvme_ctrlr_construct_intel_support_log_page_list()
140 ctrlr->log_page_supported[NVME_INTEL_MARKETING_DESCRIPTION] = true; in nvme_ctrlr_construct_intel_support_log_page_list()
146 static int nvme_ctrlr_set_intel_support_log_pages(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_set_intel_support_log_pages() argument
157 ret = nvme_admin_get_log_page(ctrlr, NVME_INTEL_LOG_PAGE_DIR, in nvme_ctrlr_set_intel_support_log_pages()
164 nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, in nvme_ctrlr_set_intel_support_log_pages()
175 static void nvme_ctrlr_set_supported_log_pages(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_set_supported_log_pages() argument
178 memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported)); in nvme_ctrlr_set_supported_log_pages()
181 ctrlr->log_page_supported[NVME_LOG_ERROR] = true; in nvme_ctrlr_set_supported_log_pages()
182 ctrlr->log_page_supported[NVME_LOG_HEALTH_INFORMATION] = true; in nvme_ctrlr_set_supported_log_pages()
183 ctrlr->log_page_supported[NVME_LOG_FIRMWARE_SLOT] = true; in nvme_ctrlr_set_supported_log_pages()
185 if (ctrlr->cdata.lpa.celp) in nvme_ctrlr_set_supported_log_pages()
186 ctrlr->log_page_supported[NVME_LOG_COMMAND_EFFECTS_LOG] = true; in nvme_ctrlr_set_supported_log_pages()
188 if (ctrlr->cdata.vid == NVME_PCI_VID_INTEL) in nvme_ctrlr_set_supported_log_pages()
189 nvme_ctrlr_set_intel_support_log_pages(ctrlr); in nvme_ctrlr_set_supported_log_pages()
195 static void nvme_ctrlr_set_intel_supported_features(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_set_intel_supported_features() argument
197 bool *supported_feature = ctrlr->feature_supported; in nvme_ctrlr_set_intel_supported_features()
211 static void nvme_ctrlr_set_supported_features(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_set_supported_features() argument
213 bool *supported_feature = ctrlr->feature_supported; in nvme_ctrlr_set_supported_features()
215 memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported)); in nvme_ctrlr_set_supported_features()
229 if (ctrlr->cdata.vwc.present) in nvme_ctrlr_set_supported_features()
231 if (ctrlr->cdata.apsta.supported) in nvme_ctrlr_set_supported_features()
234 if (ctrlr->cdata.hmpre) in nvme_ctrlr_set_supported_features()
236 if (ctrlr->cdata.vid == NVME_PCI_VID_INTEL) in nvme_ctrlr_set_supported_features()
237 nvme_ctrlr_set_intel_supported_features(ctrlr); in nvme_ctrlr_set_supported_features()
243 static int nvme_ctrlr_init_io_qpairs(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_init_io_qpairs() argument
249 if (ctrlr->ioq != NULL) in nvme_ctrlr_init_io_qpairs()
263 cap.raw = nvme_reg_mmio_read_8(ctrlr, cap.raw); in nvme_ctrlr_init_io_qpairs()
264 ctrlr->io_qpairs_max_entries = in nvme_ctrlr_init_io_qpairs()
267 ctrlr->ioq = calloc(ctrlr->io_queues, sizeof(struct nvme_qpair)); in nvme_ctrlr_init_io_qpairs()
268 if (!ctrlr->ioq) in nvme_ctrlr_init_io_qpairs()
272 for (i = 0; i < ctrlr->io_queues; i++) { in nvme_ctrlr_init_io_qpairs()
273 qpair = &ctrlr->ioq[i]; in nvme_ctrlr_init_io_qpairs()
275 TAILQ_INSERT_TAIL(&ctrlr->free_io_qpairs, qpair, tailq); in nvme_ctrlr_init_io_qpairs()
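
Before the qpair array is allocated, queue depth is sized from CAP.MQES (bits 15:0), which is a 0's-based maximum entry count. Sketch, assuming only the raw CAP value:

    #include <stdint.h>

    /* CAP.MQES is 0's-based: a raw value of 1023 means 1024-entry queues. */
    static uint32_t io_qpair_max_entries(uint64_t cap_raw)
    {
        return (uint32_t)(cap_raw & 0xffff) + 1;
    }
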
284 static void nvme_ctrlr_shutdown(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_shutdown() argument
290 cc.raw = nvme_reg_mmio_read_4(ctrlr, cc.raw); in nvme_ctrlr_shutdown()
292 nvme_reg_mmio_write_4(ctrlr, cc.raw, cc.raw); in nvme_ctrlr_shutdown()
294 csts.raw = nvme_reg_mmio_read_4(ctrlr, csts.raw); in nvme_ctrlr_shutdown()
303 csts.raw = nvme_reg_mmio_read_4(ctrlr, csts.raw); in nvme_ctrlr_shutdown()
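
nvme_ctrlr_shutdown() performs the spec's shutdown handshake: write CC.SHN = 01b to request a normal shutdown, then poll CSTS.SHST until it reads 10b (shutdown processing complete). The bit manipulation, as a standalone sketch:

    #include <stdint.h>

    #define CC_SHN_SHIFT        14    /* CC.SHN: bits 15:14 */
    #define CC_SHN_NORMAL       1u    /* 01b = normal shutdown */
    #define CSTS_SHST_SHIFT     2     /* CSTS.SHST: bits 3:2 */
    #define CSTS_SHST_COMPLETE  2u    /* 10b = shutdown complete */

    static uint32_t cc_request_shutdown(uint32_t cc_raw)
    {
        cc_raw &= ~(3u << CC_SHN_SHIFT);
        return cc_raw | (CC_SHN_NORMAL << CC_SHN_SHIFT);
    }

    static int csts_shutdown_done(uint32_t csts_raw)
    {
        return ((csts_raw >> CSTS_SHST_SHIFT) & 3u) == CSTS_SHST_COMPLETE;
    }
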
316 static int nvme_ctrlr_enable(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_enable() argument
322 cc.raw = nvme_reg_mmio_read_4(ctrlr, cc.raw); in nvme_ctrlr_enable()
329 nvme_reg_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr); in nvme_ctrlr_enable()
330 nvme_reg_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr); in nvme_ctrlr_enable()
334 aqa.bits.acqs = ctrlr->adminq.entries - 1; in nvme_ctrlr_enable()
335 aqa.bits.asqs = ctrlr->adminq.entries - 1; in nvme_ctrlr_enable()
336 nvme_reg_mmio_write_4(ctrlr, aqa.raw, aqa.raw); in nvme_ctrlr_enable()
347 cap.raw = nvme_reg_mmio_read_8(ctrlr, cap.raw); in nvme_ctrlr_enable()
349 switch (ctrlr->opts.arb_mechanism) { in nvme_ctrlr_enable()
364 cc.bits.ams = ctrlr->opts.arb_mechanism; in nvme_ctrlr_enable()
366 nvme_reg_mmio_write_4(ctrlr, cc.raw, cc.raw); in nvme_ctrlr_enable()
374 static inline void nvme_ctrlr_disable(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_disable() argument
378 cc.raw = nvme_reg_mmio_read_4(ctrlr, cc.raw); in nvme_ctrlr_disable()
381 nvme_reg_mmio_write_4(ctrlr, cc.raw, cc.raw); in nvme_ctrlr_disable()
387 static inline int nvme_ctrlr_enabled(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_enabled() argument
391 cc.raw = nvme_reg_mmio_read_4(ctrlr, cc.raw); in nvme_ctrlr_enabled()
399 static inline int nvme_ctrlr_ready(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_ready() argument
403 csts.raw = nvme_reg_mmio_read_4(ctrlr, csts.raw); in nvme_ctrlr_ready()
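
Enable follows the spec's bring-up order: program ASQ/ACQ with the admin queue bus addresses, pack the 0's-based admin queue sizes into AQA (ASQS in bits 11:0, ACQS in bits 27:16), then write CC with the chosen arbitration mechanism and EN = 1; nvme_ctrlr_ready() then polls CSTS.RDY (bit 0). The register values, computed as a standalone sketch:

    #include <stdint.h>

    /* AQA packs 0's-based admin SQ/CQ sizes: ACQS | ASQS. */
    static uint32_t aqa_value(uint16_t admin_entries)
    {
        uint32_t n = (uint32_t)admin_entries - 1;
        return (n << 16) | n;
    }

    /* CC for a typical NVM controller: 64-byte SQ entries (IOSQES = 6,
     * bits 19:16), 16-byte CQ entries (IOCQES = 4, bits 23:20),
     * caller-chosen AMS (bits 13:11), EN set (bit 0). */
    static uint32_t cc_enable_value(uint32_t ams)
    {
        return (6u << 16) | (4u << 20) | ((ams & 7u) << 11) | 1u;
    }

    static int csts_ready(uint32_t csts_raw)
    {
        return csts_raw & 1u;    /* CSTS.RDY */
    }
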
411 static void nvme_ctrlr_set_state(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_set_state() argument
415 ctrlr->state = state; in nvme_ctrlr_set_state()
417 ctrlr->state_timeout_ms = NVME_TIMEOUT_INFINITE; in nvme_ctrlr_set_state()
419 ctrlr->state_timeout_ms = nvme_time_msec() + timeout_in_ms; in nvme_ctrlr_set_state()
425 static int nvme_ctrlr_identify(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_identify() argument
429 ret = nvme_admin_identify_ctrlr(ctrlr, &ctrlr->cdata); in nvme_ctrlr_identify()
439 if (ctrlr->cdata.mdts > 0) in nvme_ctrlr_identify()
440 ctrlr->max_xfer_size = nvme_min(ctrlr->max_xfer_size, in nvme_ctrlr_identify()
441 ctrlr->min_page_size in nvme_ctrlr_identify()
442 * (1 << (ctrlr->cdata.mdts))); in nvme_ctrlr_identify()
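
Identify caps the transfer size from MDTS, which is reported as a power of two in units of the minimum memory page size (derived from CAP.MPSMIN); a value of 0 means no limit is reported, which is why the clamp above only runs when mdts > 0. Sketch:

    #include <stdint.h>

    /* min_page_size = 1 << (12 + CAP.MPSMIN); device limit = page << MDTS. */
    static uint64_t max_xfer_bytes(uint32_t min_page_size, uint8_t mdts,
                                   uint64_t driver_limit)
    {
        if (mdts == 0)
            return driver_limit;
        uint64_t dev = (uint64_t)min_page_size << mdts;
        return dev < driver_limit ? dev : driver_limit;
    }
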
449 static int nvme_ctrlr_get_max_io_qpairs(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_get_max_io_qpairs() argument
454 ret = nvme_admin_get_feature(ctrlr, NVME_FEAT_CURRENT, in nvme_ctrlr_get_max_io_qpairs()
470 ctrlr->max_io_queues = nvme_min(sq_allocated, cq_allocated); in nvme_ctrlr_get_max_io_qpairs()
478 static int nvme_ctrlr_set_num_qpairs(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_set_num_qpairs() argument
484 ret = nvme_ctrlr_get_max_io_qpairs(ctrlr); in nvme_ctrlr_set_num_qpairs()
496 num_queues = ((ctrlr->opts.io_queues - 1) << 16) | in nvme_ctrlr_set_num_qpairs()
497 (ctrlr->opts.io_queues - 1); in nvme_ctrlr_set_num_qpairs()
504 ret = nvme_admin_set_feature(ctrlr, false, NVME_FEAT_NUMBER_OF_QUEUES, in nvme_ctrlr_set_num_qpairs()
518 ctrlr->io_queues = nvme_min(sq_allocated, cq_allocated); in nvme_ctrlr_set_num_qpairs()
524 ctrlr->io_queues = nvme_min(ctrlr->io_queues, ctrlr->opts.io_queues); in nvme_ctrlr_set_num_qpairs()
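
The Number of Queues feature uses 0's-based counts: NSQR in cdw11 bits 15:0 and NCQR in bits 31:16, which is why io_queues - 1 is packed into both halves above. The completion's dword 0 returns the allocated counts (also 0's-based), and the driver keeps the minimum of the two, further clamped to the requested count. Encode/decode as a sketch:

    #include <stdint.h>

    static uint32_t numq_cdw11(uint16_t io_queues)
    {
        uint32_t n = (uint32_t)io_queues - 1;    /* 0's-based */
        return (n << 16) | n;                    /* NCQR | NSQR */
    }

    static uint16_t numq_granted(uint32_t cpl_cdw0)
    {
        uint16_t sq = (uint16_t)(cpl_cdw0 & 0xffff) + 1;  /* NSQA */
        uint16_t cq = (uint16_t)(cpl_cdw0 >> 16) + 1;     /* NCQA */
        return sq < cq ? sq : cq;
    }
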
529 static void nvme_ctrlr_destruct_namespaces(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_destruct_namespaces() argument
532 if (ctrlr->ns) { in nvme_ctrlr_destruct_namespaces()
533 free(ctrlr->ns); in nvme_ctrlr_destruct_namespaces()
534 ctrlr->ns = NULL; in nvme_ctrlr_destruct_namespaces()
535 ctrlr->nr_ns = 0; in nvme_ctrlr_destruct_namespaces()
538 if (ctrlr->nsdata) { in nvme_ctrlr_destruct_namespaces()
539 nvme_free(ctrlr->nsdata); in nvme_ctrlr_destruct_namespaces()
540 ctrlr->nsdata = NULL; in nvme_ctrlr_destruct_namespaces()
544 static int nvme_ctrlr_construct_namespaces(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_construct_namespaces() argument
546 unsigned int i, nr_ns = ctrlr->cdata.nn; in nvme_ctrlr_construct_namespaces()
553 if (nr_ns != ctrlr->nr_ns) { in nvme_ctrlr_construct_namespaces()
555 nvme_ctrlr_destruct_namespaces(ctrlr); in nvme_ctrlr_construct_namespaces()
557 ctrlr->ns = calloc(nr_ns, sizeof(struct nvme_ns)); in nvme_ctrlr_construct_namespaces()
558 if (!ctrlr->ns) in nvme_ctrlr_construct_namespaces()
562 ctrlr->nsdata = nvme_calloc(nr_ns, sizeof(struct nvme_ns_data), in nvme_ctrlr_construct_namespaces()
564 if (!ctrlr->nsdata) in nvme_ctrlr_construct_namespaces()
567 ctrlr->nr_ns = nr_ns; in nvme_ctrlr_construct_namespaces()
572 ns = &ctrlr->ns[i]; in nvme_ctrlr_construct_namespaces()
573 if (nvme_ns_construct(ctrlr, ns, i + 1) != 0) in nvme_ctrlr_construct_namespaces()
580 nvme_ctrlr_destruct_namespaces(ctrlr); in nvme_ctrlr_construct_namespaces()
588 static int nvme_ctrlr_construct_and_submit_aer(struct nvme_ctrlr *ctrlr,
597 struct nvme_ctrlr *ctrlr = aer->ctrlr; in nvme_ctrlr_async_event_cb() local
608 if (ctrlr->aer_cb_fn != NULL) in nvme_ctrlr_async_event_cb()
609 ctrlr->aer_cb_fn(ctrlr->aer_cb_arg, cpl); in nvme_ctrlr_async_event_cb()
615 if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) in nvme_ctrlr_async_event_cb()
627 static int nvme_ctrlr_construct_and_submit_aer(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_construct_and_submit_aer() argument
632 req = nvme_request_allocate_null(&ctrlr->adminq, in nvme_ctrlr_construct_and_submit_aer()
637 aer->ctrlr = ctrlr; in nvme_ctrlr_construct_and_submit_aer()
641 return nvme_qpair_submit_request(&ctrlr->adminq, req); in nvme_ctrlr_construct_and_submit_aer()
647 static int nvme_ctrlr_configure_aer(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_configure_aer() argument
657 ret = nvme_admin_set_feature(ctrlr, false, in nvme_ctrlr_configure_aer()
666 ctrlr->num_aers = nvme_min(NVME_MAX_ASYNC_EVENTS, in nvme_ctrlr_configure_aer()
667 (ctrlr->cdata.aerl + 1)); in nvme_ctrlr_configure_aer()
669 for (i = 0; i < ctrlr->num_aers; i++) { in nvme_ctrlr_configure_aer()
670 aer = &ctrlr->aer[i]; in nvme_ctrlr_configure_aer()
671 if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) { in nvme_ctrlr_configure_aer()
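
AER slots are sized from Identify's AERL field, which is 0's-based, so the controller accepts AERL + 1 outstanding requests; the driver additionally caps this at its own NVME_MAX_ASYNC_EVENTS. Each completion re-arms itself: the callback forwards the event to aer_cb_fn and immediately resubmits through nvme_ctrlr_construct_and_submit_aer(). Sizing sketch:

    #include <stdint.h>

    static unsigned int aer_slots(uint8_t aerl, unsigned int driver_max)
    {
        unsigned int n = (unsigned int)aerl + 1;    /* AERL is 0's-based */
        return n < driver_max ? n : driver_max;
    }
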
683 static int nvme_ctrlr_start(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_start() argument
686 nvme_qpair_reset(&ctrlr->adminq); in nvme_ctrlr_start()
687 nvme_qpair_enable(&ctrlr->adminq); in nvme_ctrlr_start()
689 if (nvme_ctrlr_identify(ctrlr) != 0) in nvme_ctrlr_start()
692 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) in nvme_ctrlr_start()
695 if (nvme_ctrlr_init_io_qpairs(ctrlr)) in nvme_ctrlr_start()
698 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) in nvme_ctrlr_start()
701 if (nvme_ctrlr_configure_aer(ctrlr) != 0) in nvme_ctrlr_start()
704 nvme_ctrlr_set_supported_log_pages(ctrlr); in nvme_ctrlr_start()
705 nvme_ctrlr_set_supported_features(ctrlr); in nvme_ctrlr_start()
707 if (ctrlr->cdata.sgls.supported) in nvme_ctrlr_start()
708 ctrlr->flags |= NVME_CTRLR_SGL_SUPPORTED; in nvme_ctrlr_start()
716 static void nvme_ctrlr_map_cmb(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_map_cmb() argument
725 cmbsz.raw = nvme_reg_mmio_read_4(ctrlr, cmbsz.raw); in nvme_ctrlr_map_cmb()
726 cmbloc.raw = nvme_reg_mmio_read_4(ctrlr, cmbloc.raw); in nvme_ctrlr_map_cmb()
744 nvme_pcicfg_get_bar_addr_len(ctrlr->pci_dev, bir, &bar_phys_addr, in nvme_ctrlr_map_cmb()
753 ret = nvme_pcicfg_map_bar_write_combine(ctrlr->pci_dev, bir, &addr); in nvme_ctrlr_map_cmb()
757 ctrlr->cmb_bar_virt_addr = addr; in nvme_ctrlr_map_cmb()
758 ctrlr->cmb_bar_phys_addr = bar_phys_addr; in nvme_ctrlr_map_cmb()
759 ctrlr->cmb_size = size; in nvme_ctrlr_map_cmb()
760 ctrlr->cmb_current_offset = offset; in nvme_ctrlr_map_cmb()
763 ctrlr->opts.use_cmb_sqs = false; in nvme_ctrlr_map_cmb()
768 ctrlr->cmb_bar_virt_addr = NULL; in nvme_ctrlr_map_cmb()
769 ctrlr->opts.use_cmb_sqs = false; in nvme_ctrlr_map_cmb()
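
CMB geometry is decoded from two registers: CMBSZ.SZU (bits 11:8) selects a granularity of 4 KiB x 16^SZU, CMBSZ.SZ (bits 31:12) counts units of that granularity, and CMBLOC supplies the BAR index (BIR, bits 2:0) plus the offset into that BAR (OFST, bits 31:12, also in SZU units). A decoding sketch over the raw register values:

    #include <stdint.h>

    static uint64_t cmb_unit(uint32_t cmbsz_raw)
    {
        return 1ull << (12 + 4 * ((cmbsz_raw >> 8) & 0xf));    /* SZU */
    }

    static uint64_t cmb_size_bytes(uint32_t cmbsz_raw)
    {
        return (uint64_t)(cmbsz_raw >> 12) * cmb_unit(cmbsz_raw);   /* SZ */
    }

    static uint64_t cmb_offset_bytes(uint32_t cmbloc_raw, uint32_t cmbsz_raw)
    {
        return (uint64_t)(cmbloc_raw >> 12) * cmb_unit(cmbsz_raw);  /* OFST */
    }
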
777 static int nvme_ctrlr_unmap_cmb(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_unmap_cmb() argument
780 void *addr = ctrlr->cmb_bar_virt_addr; in nvme_ctrlr_unmap_cmb()
784 cmbloc.raw = nvme_reg_mmio_read_4(ctrlr, cmbloc.raw); in nvme_ctrlr_unmap_cmb()
785 ret = nvme_pcicfg_unmap_bar(ctrlr->pci_dev, cmbloc.bits.bir, in nvme_ctrlr_unmap_cmb()
794 static int nvme_ctrlr_map_bars(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_map_bars() argument
799 ret = nvme_pcicfg_map_bar(ctrlr->pci_dev, 0, 0, &addr); in nvme_ctrlr_map_bars()
808 ctrlr->regs = (volatile struct nvme_registers *)addr; in nvme_ctrlr_map_bars()
809 nvme_ctrlr_map_cmb(ctrlr); in nvme_ctrlr_map_bars()
817 static int nvme_ctrlr_unmap_bars(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_unmap_bars() argument
819 void *addr = (void *)ctrlr->regs; in nvme_ctrlr_unmap_bars()
822 ret = nvme_ctrlr_unmap_cmb(ctrlr); in nvme_ctrlr_unmap_bars()
829 ret = nvme_pcicfg_unmap_bar(ctrlr->pci_dev, 0, addr); in nvme_ctrlr_unmap_bars()
842 static void nvme_ctrlr_fail(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_fail() argument
846 ctrlr->failed = true; in nvme_ctrlr_fail()
848 nvme_qpair_fail(&ctrlr->adminq); in nvme_ctrlr_fail()
849 if (ctrlr->ioq) in nvme_ctrlr_fail()
850 for (i = 0; i < ctrlr->io_queues; i++) in nvme_ctrlr_fail()
851 nvme_qpair_fail(&ctrlr->ioq[i]); in nvme_ctrlr_fail()
858 static int nvme_ctrlr_init(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_init() argument
860 unsigned int ready_timeout_in_ms = nvme_ctrlr_get_ready_to_in_ms(ctrlr); in nvme_ctrlr_init()
866 switch (ctrlr->state) { in nvme_ctrlr_init()
872 if (nvme_ctrlr_enabled(ctrlr)) { in nvme_ctrlr_init()
876 if (!nvme_ctrlr_ready(ctrlr)) { in nvme_ctrlr_init()
878 nvme_ctrlr_set_state(ctrlr, in nvme_ctrlr_init()
888 nvme_ctrlr_disable(ctrlr); in nvme_ctrlr_init()
889 nvme_ctrlr_set_state(ctrlr, in nvme_ctrlr_init()
893 if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) in nvme_ctrlr_init()
899 if (nvme_ctrlr_ready(ctrlr)) { in nvme_ctrlr_init()
904 nvme_ctrlr_set_state(ctrlr, in nvme_ctrlr_init()
914 ret = nvme_ctrlr_enable(ctrlr); in nvme_ctrlr_init()
918 nvme_ctrlr_set_state(ctrlr, in nvme_ctrlr_init()
925 if (nvme_ctrlr_ready(ctrlr)) { in nvme_ctrlr_init()
928 nvme_ctrlr_disable(ctrlr); in nvme_ctrlr_init()
929 nvme_ctrlr_set_state(ctrlr, in nvme_ctrlr_init()
939 if (!nvme_ctrlr_ready(ctrlr)) { in nvme_ctrlr_init()
942 ret = nvme_ctrlr_enable(ctrlr); in nvme_ctrlr_init()
946 nvme_ctrlr_set_state(ctrlr, in nvme_ctrlr_init()
955 if (nvme_ctrlr_ready(ctrlr)) { in nvme_ctrlr_init()
956 if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_RDY) in nvme_ctrlr_init()
959 ret = nvme_ctrlr_start(ctrlr); in nvme_ctrlr_init()
963 nvme_ctrlr_set_state(ctrlr, in nvme_ctrlr_init()
971 nvme_panic("Unhandled ctrlr state %d\n", ctrlr->state); in nvme_ctrlr_init()
972 nvme_ctrlr_fail(ctrlr); in nvme_ctrlr_init()
976 if ((ctrlr->state_timeout_ms != NVME_TIMEOUT_INFINITE) && in nvme_ctrlr_init()
977 (nvme_time_msec() > ctrlr->state_timeout_ms)) { in nvme_ctrlr_init()
979 ctrlr->state); in nvme_ctrlr_init()
980 nvme_ctrlr_fail(ctrlr); in nvme_ctrlr_init()
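
nvme_ctrlr_init() is a polled state machine driven by CC.EN and CSTS.RDY; since the spec forbids re-enabling a controller that is already enabled, any running controller is first disabled and waited down to RDY = 0 before the fresh enable. In outline (state names other than NVME_CTRLR_STATE_INIT/READY are paraphrased, not the driver's exact identifiers):

    /*
     * INIT:
     *   EN=1, RDY=0  -> wait until RDY=1, then disable
     *   EN=1, RDY=1  -> disable, wait until RDY=0
     *   EN=0, RDY=1  -> disable, wait until RDY=0
     *   EN=0, RDY=0  -> enable, wait until RDY=1
     * RDY=1 reached after enable -> nvme_ctrlr_start() -> READY
     *
     * Every waiting state arms state_timeout_ms (bounded by CAP.TO);
     * if nvme_time_msec() passes it, nvme_ctrlr_fail() is called.
     */
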
990 static int nvme_ctrlr_reset(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_reset() argument
995 if (ctrlr->resetting || ctrlr->failed) in nvme_ctrlr_reset()
1003 ctrlr->resetting = true; in nvme_ctrlr_reset()
1006 nvme_qpair_disable(&ctrlr->adminq); in nvme_ctrlr_reset()
1007 for (i = 0; i < ctrlr->io_queues; i++) in nvme_ctrlr_reset()
1008 nvme_qpair_disable(&ctrlr->ioq[i]); in nvme_ctrlr_reset()
1011 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, in nvme_ctrlr_reset()
1014 while (ctrlr->state != NVME_CTRLR_STATE_READY) { in nvme_ctrlr_reset()
1015 if (nvme_ctrlr_init(ctrlr) != 0) { in nvme_ctrlr_reset()
1017 nvme_ctrlr_fail(ctrlr); in nvme_ctrlr_reset()
1023 TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) { in nvme_ctrlr_reset()
1024 if (nvme_ctrlr_create_qpair(ctrlr, qpair) != 0) in nvme_ctrlr_reset()
1025 nvme_ctrlr_fail(ctrlr); in nvme_ctrlr_reset()
1029 ctrlr->resetting = false; in nvme_ctrlr_reset()
1031 return ctrlr->failed ? -1 : 0; in nvme_ctrlr_reset()
1037 static void nvme_ctrlr_set_opts(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_set_opts() argument
1041 memcpy(&ctrlr->opts, opts, sizeof(struct nvme_ctrlr_opts)); in nvme_ctrlr_set_opts()
1043 memset(&ctrlr->opts, 0, sizeof(struct nvme_ctrlr_opts)); in nvme_ctrlr_set_opts()
1045 if (ctrlr->opts.io_queues == 0) in nvme_ctrlr_set_opts()
1046 ctrlr->opts.io_queues = DEFAULT_MAX_IO_QUEUES; in nvme_ctrlr_set_opts()
1048 if (ctrlr->opts.io_queues > NVME_MAX_IO_QUEUES) { in nvme_ctrlr_set_opts()
1050 ctrlr->opts.io_queues, NVME_MAX_IO_QUEUES); in nvme_ctrlr_set_opts()
1051 ctrlr->opts.io_queues = NVME_MAX_IO_QUEUES; in nvme_ctrlr_set_opts()
1062 struct nvme_ctrlr *ctrlr; in nvme_ctrlr_attach() local
1068 ctrlr = malloc(sizeof(struct nvme_ctrlr)); in nvme_ctrlr_attach()
1069 if (!ctrlr) { in nvme_ctrlr_attach()
1074 nvme_debug("New controller handle %p\n", ctrlr); in nvme_ctrlr_attach()
1077 memset(ctrlr, 0, sizeof(struct nvme_ctrlr)); in nvme_ctrlr_attach()
1078 ctrlr->pci_dev = pci_dev; in nvme_ctrlr_attach()
1079 ctrlr->resetting = false; in nvme_ctrlr_attach()
1080 ctrlr->failed = false; in nvme_ctrlr_attach()
1081 TAILQ_INIT(&ctrlr->free_io_qpairs); in nvme_ctrlr_attach()
1082 TAILQ_INIT(&ctrlr->active_io_qpairs); in nvme_ctrlr_attach()
1083 pthread_mutex_init(&ctrlr->lock, NULL); in nvme_ctrlr_attach()
1084 ctrlr->quirks = nvme_ctrlr_get_quirks(pci_dev); in nvme_ctrlr_attach()
1086 nvme_ctrlr_set_state(ctrlr, in nvme_ctrlr_attach()
1090 ret = nvme_ctrlr_map_bars(ctrlr); in nvme_ctrlr_attach()
1093 pthread_mutex_destroy(&ctrlr->lock); in nvme_ctrlr_attach()
1094 free(ctrlr); in nvme_ctrlr_attach()
1107 cap.raw = nvme_reg_mmio_read_8(ctrlr, cap.raw); in nvme_ctrlr_attach()
1108 ctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd; in nvme_ctrlr_attach()
1109 ctrlr->min_page_size = 1 << (12 + cap.bits.mpsmin); in nvme_ctrlr_attach()
1112 ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE; in nvme_ctrlr_attach()
1115 ret = nvme_qpair_construct(ctrlr, &ctrlr->adminq, 0, in nvme_ctrlr_attach()
1123 nvme_ctrlr_set_opts(ctrlr, opts); in nvme_ctrlr_attach()
1125 ret = nvme_ctrlr_init(ctrlr); in nvme_ctrlr_attach()
1128 } while (ctrlr->state != NVME_CTRLR_STATE_READY); in nvme_ctrlr_attach()
1130 return ctrlr; in nvme_ctrlr_attach()
1133 nvme_ctrlr_detach(ctrlr); in nvme_ctrlr_attach()
1141 void nvme_ctrlr_detach(struct nvme_ctrlr *ctrlr) in nvme_ctrlr_detach() argument
1146 while (!TAILQ_EMPTY(&ctrlr->active_io_qpairs)) { in nvme_ctrlr_detach()
1147 qpair = TAILQ_FIRST(&ctrlr->active_io_qpairs); in nvme_ctrlr_detach()
1151 nvme_ctrlr_shutdown(ctrlr); in nvme_ctrlr_detach()
1153 nvme_ctrlr_destruct_namespaces(ctrlr); in nvme_ctrlr_detach()
1154 if (ctrlr->ioq) { in nvme_ctrlr_detach()
1155 for (i = 0; i < ctrlr->io_queues; i++) in nvme_ctrlr_detach()
1156 nvme_qpair_destroy(&ctrlr->ioq[i]); in nvme_ctrlr_detach()
1157 free(ctrlr->ioq); in nvme_ctrlr_detach()
1160 nvme_qpair_destroy(&ctrlr->adminq); in nvme_ctrlr_detach()
1162 nvme_ctrlr_unmap_bars(ctrlr); in nvme_ctrlr_detach()
1164 pthread_mutex_destroy(&ctrlr->lock); in nvme_ctrlr_detach()
1165 free(ctrlr); in nvme_ctrlr_detach()
1171 int nvme_ctrlr_get_feature(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_get_feature() argument
1178 pthread_mutex_lock(&ctrlr->lock); in nvme_ctrlr_get_feature()
1180 ret = nvme_admin_get_feature(ctrlr, sel, feature, cdw11, attributes); in nvme_ctrlr_get_feature()
1185 pthread_mutex_unlock(&ctrlr->lock); in nvme_ctrlr_get_feature()
1193 int nvme_ctrlr_set_feature(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_set_feature() argument
1200 pthread_mutex_lock(&ctrlr->lock); in nvme_ctrlr_set_feature()
1202 ret = nvme_admin_set_feature(ctrlr, save, feature, in nvme_ctrlr_set_feature()
1208 pthread_mutex_unlock(&ctrlr->lock); in nvme_ctrlr_set_feature()
1216 int nvme_ctrlr_attach_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid, in nvme_ctrlr_attach_ns() argument
1221 pthread_mutex_lock(&ctrlr->lock); in nvme_ctrlr_attach_ns()
1223 ret = nvme_admin_attach_ns(ctrlr, nsid, clist); in nvme_ctrlr_attach_ns()
1229 ret = nvme_ctrlr_reset(ctrlr); in nvme_ctrlr_attach_ns()
1234 pthread_mutex_unlock(&ctrlr->lock); in nvme_ctrlr_attach_ns()
1242 int nvme_ctrlr_detach_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid, in nvme_ctrlr_detach_ns() argument
1247 pthread_mutex_lock(&ctrlr->lock); in nvme_ctrlr_detach_ns()
1249 ret = nvme_admin_detach_ns(ctrlr, nsid, clist); in nvme_ctrlr_detach_ns()
1255 ret = nvme_ctrlr_reset(ctrlr); in nvme_ctrlr_detach_ns()
1260 pthread_mutex_unlock(&ctrlr->lock); in nvme_ctrlr_detach_ns()
1268 unsigned int nvme_ctrlr_create_ns(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_create_ns() argument
1274 pthread_mutex_lock(&ctrlr->lock); in nvme_ctrlr_create_ns()
1276 ret = nvme_admin_create_ns(ctrlr, nsdata, &nsid); in nvme_ctrlr_create_ns()
1282 pthread_mutex_unlock(&ctrlr->lock); in nvme_ctrlr_create_ns()
1290 int nvme_ctrlr_delete_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid) in nvme_ctrlr_delete_ns() argument
1294 pthread_mutex_lock(&ctrlr->lock); in nvme_ctrlr_delete_ns()
1296 ret = nvme_admin_delete_ns(ctrlr, nsid); in nvme_ctrlr_delete_ns()
1302 ret = nvme_ctrlr_reset(ctrlr); in nvme_ctrlr_delete_ns()
1307 pthread_mutex_unlock(&ctrlr->lock); in nvme_ctrlr_delete_ns()
1315 int nvme_ctrlr_format_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid, in nvme_ctrlr_format_ns() argument
1320 pthread_mutex_lock(&ctrlr->lock); in nvme_ctrlr_format_ns()
1322 ret = nvme_admin_format_nvm(ctrlr, nsid, format); in nvme_ctrlr_format_ns()
1331 ret = nvme_ctrlr_reset(ctrlr); in nvme_ctrlr_format_ns()
1336 pthread_mutex_unlock(&ctrlr->lock); in nvme_ctrlr_format_ns()
1344 int nvme_ctrlr_update_firmware(struct nvme_ctrlr *ctrlr, in nvme_ctrlr_update_firmware() argument
1357 pthread_mutex_lock(&ctrlr->lock); in nvme_ctrlr_update_firmware()
1362 transfer = nvme_min(size_remaining, ctrlr->min_page_size); in nvme_ctrlr_update_firmware()
1364 ret = nvme_admin_fw_image_dl(ctrlr, f, transfer, offset); in nvme_ctrlr_update_firmware()
1382 ret = nvme_admin_fw_commit(ctrlr, &fw_commit); in nvme_ctrlr_update_firmware()
1389 ret = nvme_ctrlr_reset(ctrlr); in nvme_ctrlr_update_firmware()
1394 pthread_mutex_unlock(&ctrlr->lock); in nvme_ctrlr_update_firmware()
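
The update path downloads the image in chunks of ctrlr->min_page_size, each chunk tagged with its byte offset, then issues a Firmware Commit and resets the controller to activate the new slot. A download-loop sketch, assuming nvme_admin_fw_image_dl() takes (ctrlr, buffer, length, offset) as the references suggest, and that the image length is a multiple of four bytes as the spec requires:

    #include <stddef.h>
    #include <stdint.h>

    struct nvme_ctrlr;    /* opaque driver handle */
    /* Assumed prototype, mirroring the calls above; not a public contract. */
    extern int nvme_admin_fw_image_dl(struct nvme_ctrlr *ctrlr, void *buf,
                                      uint32_t len, uint32_t offset);

    static int fw_download(struct nvme_ctrlr *ctrlr, uint8_t *img,
                           size_t size, size_t chunk)
    {
        uint32_t offset = 0;

        while (size) {
            uint32_t xfer = size < chunk ? (uint32_t)size : (uint32_t)chunk;
            int ret = nvme_admin_fw_image_dl(ctrlr, img + offset,
                                             xfer, offset);
            if (ret)
                return ret;        /* abort on first failed chunk */
            offset += xfer;
            size -= xfer;
        }
        return 0;                  /* next: FW commit, then ctrlr reset */
    }
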
1402 struct nvme_qpair *nvme_ioqp_get(struct nvme_ctrlr *ctrlr, in nvme_ioqp_get() argument
1410 cc.raw = nvme_reg_mmio_read_4(ctrlr, cc.raw); in nvme_ioqp_get()
1432 if (qd == 0 || qd > ctrlr->io_qpairs_max_entries) in nvme_ioqp_get()
1433 qd = ctrlr->io_qpairs_max_entries; in nvme_ioqp_get()
1442 pthread_mutex_lock(&ctrlr->lock); in nvme_ioqp_get()
1445 qpair = TAILQ_FIRST(&ctrlr->free_io_qpairs); in nvme_ioqp_get()
1453 ret = nvme_qpair_construct(ctrlr, qpair, qprio, qd, trackers); in nvme_ioqp_get()
1467 if (nvme_ctrlr_create_qpair(ctrlr, qpair) != 0) { in nvme_ioqp_get()
1474 TAILQ_REMOVE(&ctrlr->free_io_qpairs, qpair, tailq); in nvme_ioqp_get()
1475 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); in nvme_ioqp_get()
1478 pthread_mutex_unlock(&ctrlr->lock); in nvme_ioqp_get()
1488 struct nvme_ctrlr *ctrlr; in nvme_ioqp_release() local
1494 ctrlr = qpair->ctrlr; in nvme_ioqp_release()
1496 pthread_mutex_lock(&ctrlr->lock); in nvme_ioqp_release()
1499 ret = nvme_ctrlr_delete_qpair(ctrlr, qpair); in nvme_ioqp_release()
1503 TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq); in nvme_ioqp_release()
1504 TAILQ_INSERT_HEAD(&ctrlr->free_io_qpairs, qpair, tailq); in nvme_ioqp_release()
1507 pthread_mutex_unlock(&ctrlr->lock); in nvme_ioqp_release()
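
Putting the last two functions together, a caller checks a qpair out of the free list (constructed and registered with the controller on first use) and returns it when done. A hypothetical usage sketch; the nvme_ioqp_get() parameters beyond the controller (priority qprio and depth qd, with qd = 0 meaning "controller maximum" per the clamp above) are assumed from the references, not a documented contract:

    /* Assumes the driver's public header for the prototypes. */
    static void run_io(struct nvme_ctrlr *ctrlr)
    {
        struct nvme_qpair *qp = nvme_ioqp_get(ctrlr, 0 /* qprio */,
                                              0 /* qd: ctrlr max */);
        if (!qp)
            return;
        /* ... submit I/O requests on qp and poll for completions ... */
        nvme_ioqp_release(qp);
    }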