/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2017, Western Digital Corporation or its affiliates.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * NVMe driver public API
 *
 * @mainpage
 *
 * libnvme is a user space library providing control of NVMe devices.
 * NVMe (NVM Express) is the host controller interface for solid state
 * drives attached through PCI Express.
 *
 * \addtogroup libnvme
 *  @{
 */

#ifndef __LIBNVME_H__
#define __LIBNVME_H__

#ifdef __cplusplus
extern "C" {
#endif

#include <libnvme/nvme_spec.h>

#ifndef __HAIKU__
#include <pciaccess.h>
#endif
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/**
 * Log levels.
 */
enum nvme_log_level {

	/**
	 * Disable all log messages.
	 */
	NVME_LOG_NONE = 0,

	/**
	 * System is unusable.
	 */
	NVME_LOG_EMERG,

	/**
	 * Action must be taken immediately.
	 */
	NVME_LOG_ALERT,

	/**
	 * Critical conditions.
	 */
	NVME_LOG_CRIT,

	/**
	 * Error conditions.
	 */
	NVME_LOG_ERR,

	/**
	 * Warning conditions.
	 */
	NVME_LOG_WARNING,

	/**
	 * Normal but significant condition.
	 */
	NVME_LOG_NOTICE,

	/**
	 * Informational messages.
	 */
	NVME_LOG_INFO,

	/**
	 * Debug-level messages.
	 */
	NVME_LOG_DEBUG,

};

/**
 * Log facilities.
 */
enum nvme_log_facility {

	/**
	 * Standard output log facility.
	 */
	NVME_LOG_STDOUT = 0x00000001,

	/**
	 * Regular file output log facility.
	 */
	NVME_LOG_FILE = 0x00000002,

	/**
	 * syslog service output log facility.
	 */
	NVME_LOG_SYSLOG = 0x00000004,

};

/**
 * @brief Initialize libnvme
 *
 * @param level	Library log level
 * @param facility	Facility code
 * @param path		File name for the NVME_LOG_FILE facility
 *
 * This function must always be called first, before any other
 * function provided by libnvme. The arguments allow setting the
 * initial log level and log facility so that any problem during
 * initialization can be caught.
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_lib_init(enum nvme_log_level level,
			 enum nvme_log_facility facility, const char *path);
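
/*
 * Example (a minimal sketch, not part of the API): initializing the
 * library with informational logging to standard output. The NULL path
 * argument assumes the NVME_LOG_FILE facility is not used, and the
 * header is assumed to be installed as <libnvme/nvme.h>.
 *
 *	#include <libnvme/nvme.h>
 *
 *	int init_example(void)
 *	{
 *		int ret;
 *
 *		ret = nvme_lib_init(NVME_LOG_INFO, NVME_LOG_STDOUT, NULL);
 *		if (ret != 0)
 *			return ret;
 *
 *		return 0;
 *	}
 */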

/**
 * @brief Set the library log level
 *
 * @param level	Library log level
 */
extern void nvme_set_log_level(enum nvme_log_level level);

/**
 * @brief Get the current log level
 *
 * @return The current library log level.
 */
extern enum nvme_log_level nvme_get_log_level(void);

/**
 * @brief Change the library log facility
 *
 * @param facility	Facility code
 * @param path		File name for the NVME_LOG_FILE facility
 *
 * Set the library log facility. On failure, the facility is
 * always automatically set to stdout.
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_set_log_facility(enum nvme_log_facility facility,
				 const char *path);

/**
 * @brief Get the current library log facility.
 *
 * @return The current library log facility.
 */
extern enum nvme_log_facility nvme_get_log_facility(void);

/**
 * @brief Opaque handle to a controller returned by nvme_ctrlr_open().
 */
struct nvme_ctrlr;

/**
 * @brief Opaque handle to a namespace
 */
struct nvme_ns;

/**
 * @brief Opaque handle to an I/O queue pair
 */
struct nvme_qpair;

/**
 * @brief Capabilities register of a controller
 */
struct nvme_register_data {

	/**
	 * Maximum Queue Entries Supported indicates the maximum individual
	 * queue size that the controller supports. This is a 0’s based value,
	 * so 1 has to be added.
	 */
	unsigned int		mqes;

};

/**
 * Length of the string for the serial number.
 */
#define NVME_SERIAL_NUMBER_LENGTH	(NVME_SERIAL_NUMBER_CHARACTERS + 1)

/**
 * Length of the string for the model number.
 */
#define NVME_MODEL_NUMBER_LENGTH	(NVME_MODEL_NUMBER_CHARACTERS + 1)

/**
 * @brief Controller information
 */
struct nvme_ctrlr_stat {

	/**
	 * PCI device vendor ID.
	 */
	unsigned short		vendor_id;

	/**
	 * PCI device ID.
	 */
	unsigned short		device_id;

	/**
	 * PCI device sub-vendor ID.
	 */
	unsigned short		subvendor_id;

	/**
	 * PCI sub-device ID.
	 */
	unsigned short		subdevice_id;

	/**
	 * PCI device class.
	 */
	unsigned int		device_class;

	/**
	 * PCI device revision.
	 */
	unsigned char		revision;

	/**
	 * PCI slot domain.
	 */
	unsigned int		domain;

	/**
	 * PCI slot bus.
	 */
	unsigned int		bus;

	/**
	 * PCI slot bus device number.
	 */
	unsigned int		dev;

	/**
	 * PCI slot device function.
	 */
	unsigned int		func;

	/**
	 * Serial number.
	 */
	char			sn[NVME_SERIAL_NUMBER_LENGTH];

	/**
	 * Model number.
	 */
	char			mn[NVME_MODEL_NUMBER_LENGTH];

	/**
	 * Maximum transfer size.
	 */
	size_t			max_xfer_size;

	/**
	 * All the log pages supported.
	 */
	bool			log_pages[256];

	/**
	 * All the features supported.
	 */
	bool			features[256];

	/**
	 * Number of valid namespaces in the array of namespace IDs.
	 */
	unsigned int		nr_ns;

	/**
	 * Array of valid namespace IDs of the controller.
	 * Namespace IDs are integers between 1 and NVME_MAX_NS.
	 */
	unsigned int		ns_ids[NVME_MAX_NS];

	/**
	 * Maximum number of I/O queue pairs.
	 */
	unsigned int		max_io_qpairs;

	/**
	 * Number of I/O queue pairs allocated.
	 */
	unsigned int		io_qpairs;

	/**
	 * Number of I/O queue pairs enabled.
	 */
	unsigned int		enabled_io_qpairs;

	/**
	 * Maximum number of entries of the I/O queue pairs.
	 */
	unsigned int		max_qd;
};

/**
 * @brief NVMe controller options
 *
 * Allow the user to request non-default options.
 */
struct nvme_ctrlr_opts {

	/**
	 * Number of I/O queues to initialize.
	 * (default: all possible I/O queues)
	 */
	unsigned int		io_queues;

	/**
	 * Enable the submission queue in the controller memory buffer.
	 * (default: false)
	 */
	bool			use_cmb_sqs;

	/**
	 * Type of arbitration mechanism.
	 * (default: round-robin == NVME_CC_AMS_RR)
	 */
	enum nvme_cc_ams	arb_mechanism;

};

/**
 * @brief Namespace command support flags
 */
enum nvme_ns_flags {

	/**
	 * The deallocate command is supported.
	 */
	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,

	/**
	 * The flush command is supported.
	 */
	NVME_NS_FLUSH_SUPPORTED		= 0x2,

	/**
	 * The reservation command is supported.
	 */
	NVME_NS_RESERVATION_SUPPORTED	= 0x4,

	/**
	 * The write zeroes command is supported.
	 */
	NVME_NS_WRITE_ZEROES_SUPPORTED	= 0x8,

	/**
	 * End-to-end data protection is supported.
	 */
	NVME_NS_DPS_PI_SUPPORTED	= 0x10,

	/**
	 * The extended LBA format is supported: metadata is transferred as
	 * a contiguous part of the logical block that it is associated with.
	 */
	NVME_NS_EXTENDED_LBA_SUPPORTED	= 0x20,

};

/**
 * @brief Namespace information
 */
struct nvme_ns_stat {

	/**
	 * Namespace ID.
	 */
	unsigned int			id;

	/**
	 * Namespace command support flags.
	 */
	enum nvme_ns_flags		flags;

	/**
	 * Namespace sector size in bytes.
	 */
	size_t				sector_size;

	/**
	 * Namespace number of sectors.
	 */
	uint64_t			sectors;

	/**
	 * Namespace metadata size in bytes.
	 */
	size_t				md_size;

	/**
	 * Namespace end-to-end protection information type.
	 */
	enum nvme_pi_type		pi_type;

};

/**
 * @brief Queue pair information
 */
struct nvme_qpair_stat {

	/**
	 * Queue pair ID.
	 */
	unsigned int		id;

	/**
	 * Queue pair number of entries.
	 */
	unsigned int		qd;

	/**
	 * Queue pair is enabled.
	 */
	bool			enabled;

	/**
	 * Queue pair priority.
	 */
	unsigned int		qprio;
};

/**
 * @brief Command completion callback function signature
 *
 * @param cmd_cb_arg	Callback function input argument.
 * @param cpl_status	Contains the completion status.
 */
typedef void (*nvme_cmd_cb)(void *cmd_cb_arg,
			    const struct nvme_cpl *cpl_status);

/**
 * @brief Asynchronous error request completion callback
 *
 * @param aer_cb_arg	AER context set by nvme_register_aer_callback()
 * @param cpl_status	Completion status of the asynchronous event request
 */
typedef void (*nvme_aer_cb)(void *aer_cb_arg,
			    const struct nvme_cpl *cpl_status);

/**
 * @brief Restart SGL walk to the specified offset callback
 *
 * @param cb_arg	Value passed to nvme_ns_readv()/nvme_ns_writev()
 * @param offset	Offset in the SGL
 */
typedef void (*nvme_req_reset_sgl_cb)(void *cb_arg, uint32_t offset);

/**
 * @brief Get an SGL entry address and length and advance to the next entry
 *
 * @param cb_arg	Value passed to nvme_ns_readv()/nvme_ns_writev()
 * @param address	Physical address of this segment
 * @param length	Length of this physical segment
 *
 * Fill out address and length with the current SGL entry and advance
 * to the next entry for the next time the callback is invoked.
 */
typedef int (*nvme_req_next_sge_cb)(void *cb_arg,
				    uint64_t *address, uint32_t *length);
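
/*
 * Example (a minimal sketch, not part of the API): an SGL description
 * that the scattered I/O calls nvme_ns_readv() and nvme_ns_writev()
 * can walk through the two callbacks above. The sgl_desc structure,
 * its field names, the return convention of the next-SGE callback and
 * the way physical segment addresses are obtained are assumptions made
 * for illustration only; the reset callback below also only restarts
 * at segment boundaries, which is a simplification.
 *
 *	struct sgl_desc {
 *		// Physical address and length of each segment.
 *		uint64_t	addr[16];
 *		uint32_t	len[16];
 *		unsigned int	nr_segs;
 *		// Walk state.
 *		unsigned int	cur_seg;
 *	};
 *
 *	static void example_reset_sgl(void *cb_arg, uint32_t offset)
 *	{
 *		struct sgl_desc *sgl = cb_arg;
 *		uint32_t skip = offset;
 *
 *		// Restart the walk at the segment containing "offset".
 *		sgl->cur_seg = 0;
 *		while (sgl->cur_seg < sgl->nr_segs &&
 *		       skip >= sgl->len[sgl->cur_seg]) {
 *			skip -= sgl->len[sgl->cur_seg];
 *			sgl->cur_seg++;
 *		}
 *	}
 *
 *	static int example_next_sge(void *cb_arg,
 *				    uint64_t *address, uint32_t *length)
 *	{
 *		struct sgl_desc *sgl = cb_arg;
 *
 *		if (sgl->cur_seg >= sgl->nr_segs)
 *			return -1;
 *
 *		// Report the current segment and advance for the next call.
 *		*address = sgl->addr[sgl->cur_seg];
 *		*length = sgl->len[sgl->cur_seg];
 *		sgl->cur_seg++;
 *		return 0;
 *	}
 */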

/**
 * @brief PCI device description
 *
 * Identifies the PCI device (slot) of an NVMe controller.
 */
struct pci_device {
	uint16_t  vendor_id;
	uint16_t  device_id;
	uint16_t  subvendor_id;
	uint16_t  subdevice_id;

	uint16_t domain;
	uint16_t bus;
	uint16_t dev;
	uint16_t func;

	void* pci_info;
};

/**
 * @brief Open an NVMe controller
 *
 * @param pdev	PCI device of the controller
 * @param opts	Controller options
 *
 * Obtain a handle for the NVMe controller identified by the given PCI
 * device. If called more than once for the same controller, NULL is
 * returned.
 * To stop using the controller and release its associated resources,
 * call nvme_ctrlr_close() with the handle returned by this function.
 *
 * @return A handle to the controller on success and NULL on failure.
 */
extern struct nvme_ctrlr * nvme_ctrlr_open(struct pci_device *pdev,
					   struct nvme_ctrlr_opts *opts);
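
/*
 * Example (a minimal sketch, not part of the API): opening a controller
 * from an already discovered PCI device and closing it again. How the
 * pci_device structure is filled in is platform specific and is only
 * assumed here; requesting a single I/O queue is likewise just an
 * illustrative choice.
 *
 *	struct nvme_ctrlr_opts opts;
 *	struct nvme_ctrlr *ctrlr;
 *
 *	// One I/O queue, no CMB submission queue, round-robin arbitration.
 *	opts.io_queues = 1;
 *	opts.use_cmb_sqs = false;
 *	opts.arb_mechanism = NVME_CC_AMS_RR;
 *
 *	ctrlr = nvme_ctrlr_open(pdev, &opts);
 *	if (!ctrlr)
 *		return -1;
 *
 *	// ... submit admin commands and I/O ...
 *
 *	nvme_ctrlr_close(ctrlr);
 */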

/**
 * @brief Close an open NVMe controller
 *
 * @param ctrlr	Controller handle
 *
 * This function should be called while no other threads
 * are actively using the controller.
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ctrlr_close(struct nvme_ctrlr *ctrlr);

/**
 * @brief Get controller capabilities and features
 *
 * @param ctrlr	Controller handle
 * @param cstat	Controller information
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ctrlr_stat(struct nvme_ctrlr *ctrlr,
			   struct nvme_ctrlr_stat *cstat);
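
/*
 * Example (a minimal sketch, not part of the API): retrieving and
 * printing basic controller information. <stdio.h> is assumed to be
 * included by the caller.
 *
 *	struct nvme_ctrlr_stat cstat;
 *	unsigned int i;
 *
 *	if (nvme_ctrlr_stat(ctrlr, &cstat) != 0)
 *		return -1;
 *
 *	printf("Model: %s, Serial: %s\n", cstat.mn, cstat.sn);
 *	printf("Maximum I/O queue pairs: %u\n", cstat.max_io_qpairs);
 *
 *	// List the valid namespace IDs reported by the controller.
 *	for (i = 0; i < cstat.nr_ns; i++)
 *		printf("Namespace %u\n", cstat.ns_ids[i]);
 */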

/**
 * @brief Get controller data and some data from the capabilities register
 *
 * @param ctrlr	Controller handle
 * @param cdata	Controller data to fill
 * @param rdata	Capabilities register data to fill
 *
 * cdata and rdata are optional (NULL can be specified).
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ctrlr_data(struct nvme_ctrlr *ctrlr,
			   struct nvme_ctrlr_data *cdata,
			   struct nvme_register_data *rdata);

/**
 * @brief Get a specific feature of a controller
 *
 * @param ctrlr		Controller handle
 * @param sel		Feature selector
 * @param feature	Feature identifier
 * @param cdw11		Command word 11 (command dependent)
 * @param attributes	Feature attributes
 *
 * This function is thread safe and can be called at any point while
 * the controller is attached.
 *
 * @return 0 on success and a negative error code on failure.
 *
 * See nvme_ctrlr_set_feature()
 */
extern int nvme_ctrlr_get_feature(struct nvme_ctrlr *ctrlr,
				  enum nvme_feat_sel sel,
				  enum nvme_feat feature,
				  uint32_t cdw11, uint32_t *attributes);

/**
 * @brief Set a specific feature of a controller
 *
 * @param ctrlr		Controller handle
 * @param save		Save feature across power cycles
 * @param feature	Feature identifier
 * @param cdw11		Command word 11 (feature dependent)
 * @param cdw12		Command word 12 (feature dependent)
 * @param attributes	Feature attributes
 *
 * This function is thread safe and can be called at any point while
 * the controller is attached to the NVMe driver.
 *
 * @return 0 on success and a negative error code on failure.
 *
 * See nvme_ctrlr_get_feature()
 */
extern int nvme_ctrlr_set_feature(struct nvme_ctrlr *ctrlr,
				  bool save, enum nvme_feat feature,
				  uint32_t cdw11, uint32_t cdw12,
				  uint32_t *attributes);
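
/*
 * Example (a minimal sketch, not part of the API): a small wrapper that
 * reads the current value of a feature and prints its attributes. The
 * concrete feature identifiers and selector values are defined in
 * <libnvme/nvme_spec.h> and are deliberately left as parameters here;
 * <stdio.h> is assumed to be included by the caller.
 *
 *	static int show_feature(struct nvme_ctrlr *ctrlr,
 *				enum nvme_feat_sel sel,
 *				enum nvme_feat feature, uint32_t cdw11)
 *	{
 *		uint32_t attributes = 0;
 *		int ret;
 *
 *		ret = nvme_ctrlr_get_feature(ctrlr, sel, feature,
 *					     cdw11, &attributes);
 *		if (ret != 0)
 *			return ret;
 *
 *		printf("Feature attributes: 0x%08x\n", attributes);
 *		return 0;
 *	}
 */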

/**
 * @brief Attach the specified namespace to controllers
 *
 * @param ctrlr	Controller handle to use for command submission
 * @param nsid	Namespace ID of the namespace to attach
 * @param clist	List of controllers as defined in the NVMe specification
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ctrlr_attach_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid,
				struct nvme_ctrlr_list *clist);

/**
 * @brief Detach the specified namespace from controllers
 *
 * @param ctrlr	Controller handle to use for command submission
 * @param nsid	Namespace ID of the namespace to detach
 * @param clist	List of controllers as defined in the NVMe specification
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ctrlr_detach_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid,
				struct nvme_ctrlr_list *clist);

/**
 * @brief Create a namespace
 *
 * @param ctrlr		Controller handle
 * @param nsdata	Namespace data
 *
 * @return Namespace ID (>= 1) on success and 0 on failure.
 */
extern unsigned int nvme_ctrlr_create_ns(struct nvme_ctrlr *ctrlr,
					 struct nvme_ns_data *nsdata);

/**
 * @brief Delete a namespace
 *
 * @param ctrlr	Controller handle
 * @param nsid	ID of the namespace to delete
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ctrlr_delete_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid);

/**
 * @brief Format media
 *
 * @param ctrlr		Controller handle
 * @param nsid		ID of the namespace to format
 * @param format	Format information
 *
 * This function requests a low-level format of the media.
 * If nsid is NVME_GLOBAL_NS_TAG, all namespaces attached to the controller
 * are formatted.
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ctrlr_format_ns(struct nvme_ctrlr *ctrlr,
				unsigned int nsid, struct nvme_format *format);

/**
 * @brief Download a new firmware image
 *
 * @param ctrlr	Controller handle
 * @param fw	Firmware data buffer
 * @param size	Firmware buffer size
 * @param slot	Firmware image slot to use
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ctrlr_update_firmware(struct nvme_ctrlr *ctrlr,
				      void *fw, size_t size, int slot);

/**
 * @brief Get an I/O queue pair
 *
 * @param ctrlr	Controller handle
 * @param qprio	I/O queue pair priority for weighted round robin arbitration
 * @param qd	I/O queue pair maximum submission queue depth
 *
 * A queue depth of 0 will result in the maximum hardware defined queue
 * depth being used. The use of a queue pair is not thread safe. Applications
 * must ensure mutually exclusive access to a queue pair during I/O processing.
 *
 * @return An I/O queue pair handle on success and NULL in case of failure.
 */
extern struct nvme_qpair * nvme_ioqp_get(struct nvme_ctrlr *ctrlr,
					 enum nvme_qprio qprio,
					 unsigned int qd);
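
/*
 * Example (a minimal sketch, not part of the API): allocating an I/O
 * queue pair and checking its characteristics. The priority value is
 * cast from 0 purely for illustration; the valid enum nvme_qprio values
 * are defined in <libnvme/nvme_spec.h>. <stdio.h> is assumed to be
 * included by the caller.
 *
 *	struct nvme_qpair *qpair;
 *	struct nvme_qpair_stat qpstat;
 *
 *	// A queue depth of 0 requests the maximum supported depth.
 *	qpair = nvme_ioqp_get(ctrlr, (enum nvme_qprio)0, 0);
 *	if (!qpair)
 *		return -1;
 *
 *	if (nvme_qpair_stat(qpair, &qpstat) == 0)
 *		printf("Queue pair %u: %u entries\n", qpstat.id, qpstat.qd);
 *
 *	// ... submit I/O and poll for completions ...
 *
 *	nvme_ioqp_release(qpair);
 */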

/**
 * @brief Release an I/O queue pair
 *
 * @param qpair	I/O queue pair handle
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ioqp_release(struct nvme_qpair *qpair);

/**
 * @brief Get information on an I/O queue pair
 *
 * @param qpair		I/O queue pair handle
 * @param qpstat	I/O queue pair information to fill
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_qpair_stat(struct nvme_qpair *qpair,
			   struct nvme_qpair_stat *qpstat);

/**
 * @brief Submit an NVMe command
 *
 * @param qpair		I/O queue pair handle
 * @param cmd		Command to submit
 * @param buf		Payload buffer
 * @param len		Payload buffer length
 * @param cb_fn		Callback function
 * @param cb_arg	Argument for the callback function
 *
 * This is a low level interface for submitting I/O commands directly.
 * The validity of the command will not be checked.
 *
 * When constructing the nvme_cmd, it is not necessary to fill out the PRP
 * list/SGL or the CID. The driver will handle both of those for you.
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ioqp_submit_cmd(struct nvme_qpair *qpair,
				struct nvme_cmd *cmd,
				void *buf, size_t len,
				nvme_cmd_cb cb_fn, void *cb_arg);
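
/*
 * Example (a minimal sketch, not part of the API): submitting a raw
 * command. Only the submission flow is shown; the opcode, namespace ID
 * and command dwords must be filled in according to the struct nvme_cmd
 * layout in <libnvme/nvme_spec.h> and to the NVMe specification. The
 * buf and len variables are assumed to describe the payload buffer.
 *
 *	static void raw_cmd_cb(void *cmd_cb_arg,
 *			       const struct nvme_cpl *cpl_status)
 *	{
 *		// Record the completion; the status layout is defined
 *		// by struct nvme_cpl in <libnvme/nvme_spec.h>.
 *		*(bool *)cmd_cb_arg = true;
 *	}
 *
 *	struct nvme_cmd cmd;
 *	bool done = false;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	// ... set the opcode, namespace ID and command dwords here ...
 *
 *	if (nvme_ioqp_submit_cmd(qpair, &cmd, buf, len,
 *				 raw_cmd_cb, &done) != 0)
 *		return -1;
 *
 *	while (!done)
 *		nvme_qpair_poll(qpair, 1);
 */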

/**
 * @brief Process I/O command completions
 *
 * @param qpair			I/O queue pair handle
 * @param max_completions	Maximum number of completions to check
 *
 * This call is non-blocking, i.e. it only processes completions that are
 * ready at the time of this function call. It does not wait for
 * outstanding commands to complete.
 * For each completed command, the request callback function will
 * be called if specified as non-NULL when the request was submitted.
 * This function may be called at any point after the command submission
 * while the controller is open.
 *
 * @return The number of completions processed (may be 0).
 *
 * @sa nvme_cmd_cb
 */
extern unsigned int nvme_qpair_poll(struct nvme_qpair *qpair,
				   unsigned int max_completions);
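
/*
 * Example (a minimal sketch, not part of the API): draining all
 * completions that are currently ready on a queue pair, processing at
 * most 32 of them per call.
 *
 *	unsigned int completed;
 *
 *	do {
 *		completed = nvme_qpair_poll(qpair, 32);
 *	} while (completed != 0);
 */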

/**
 * @brief Open a namespace
 *
 * @param ctrlr	Controller handle
 * @param ns_id	ID of the namespace to open
 *
 * @return A namespace handle on success or NULL in case of failure.
 */
extern struct nvme_ns *nvme_ns_open(struct nvme_ctrlr *ctrlr,
				    unsigned int ns_id);

/**
 * @brief Close an open namespace
 *
 * @param ns	Namespace handle
 *
 * See nvme_ns_open()
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_ns_close(struct nvme_ns *ns);

/**
 * @brief Get information on a namespace
 *
 * @param ns		Namespace handle
 * @param ns_stat	Namespace information
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_stat(struct nvme_ns *ns,
			struct nvme_ns_stat *ns_stat);
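
/*
 * Example (a minimal sketch, not part of the API): opening namespace 1
 * (namespace IDs start at 1; the valid IDs can be obtained from
 * nvme_ctrlr_stat()) and querying its geometry. <stdio.h> is assumed
 * to be included by the caller.
 *
 *	struct nvme_ns *ns;
 *	struct nvme_ns_stat ns_stat;
 *
 *	ns = nvme_ns_open(ctrlr, 1);
 *	if (!ns)
 *		return -1;
 *
 *	if (nvme_ns_stat(ns, &ns_stat) == 0)
 *		printf("Namespace %u: %llu sectors of %zu bytes\n",
 *		       ns_stat.id,
 *		       (unsigned long long)ns_stat.sectors,
 *		       ns_stat.sector_size);
 *
 *	nvme_ns_close(ns);
 */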

/**
 * @brief Get namespace data
 *
 * @param ns		Namespace handle
 * @param nsdata	Namespace data
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_data(struct nvme_ns *ns,
			struct nvme_ns_data *nsdata);

/**
 * @brief Submit a write I/O
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param buffer	Data buffer
 * @param lba		Starting LBA to write to
 * @param lba_count	Number of LBAs to write
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_write(struct nvme_ns *ns, struct nvme_qpair *qpair,
			 void *buffer,
			 uint64_t lba, uint32_t lba_count,
			 nvme_cmd_cb cb_fn, void *cb_arg,
			 unsigned int io_flags);
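
/*
 * Example (a minimal sketch, not part of the API): writing one sector
 * asynchronously and waiting for its completion. The buffer is taken
 * from the library allocator so that it is physically contiguous, and
 * its size and alignment are assumed here to come from the sector size
 * reported by nvme_ns_stat() in the ns_stat variable.
 *
 *	static void write_cb(void *cmd_cb_arg,
 *			     const struct nvme_cpl *cpl_status)
 *	{
 *		*(bool *)cmd_cb_arg = true;
 *	}
 *
 *	void *buffer;
 *	bool done = false;
 *
 *	buffer = nvme_zmalloc(ns_stat.sector_size, ns_stat.sector_size);
 *	if (!buffer)
 *		return -1;
 *
 *	// ... fill the buffer with the data to write ...
 *
 *	// Write one sector at LBA 0, no special I/O flags.
 *	if (nvme_ns_write(ns, qpair, buffer, 0, 1,
 *			  write_cb, &done, 0) != 0) {
 *		nvme_free(buffer);
 *		return -1;
 *	}
 *
 *	while (!done)
 *		nvme_qpair_poll(qpair, 1);
 *
 *	nvme_free(buffer);
 */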

/**
 * @brief Submit a scattered write I/O
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param lba		Starting LBA to write to
 * @param lba_count	Number of LBAs to write
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
 * @param reset_sgl_fn	Reset scattered payload callback
 * @param next_sge_fn	Scattered payload iteration callback
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_writev(struct nvme_ns *ns, struct nvme_qpair *qpair,
			  uint64_t lba, uint32_t lba_count,
			  nvme_cmd_cb cb_fn, void *cb_arg,
			  unsigned int io_flags,
			  nvme_req_reset_sgl_cb reset_sgl_fn,
			  nvme_req_next_sge_cb next_sge_fn);

/**
 * @brief Submit a write I/O with metadata
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param payload	Data buffer
 * @param metadata	Metadata payload
 * @param lba		Starting LBA to write to
 * @param lba_count	Number of LBAs to write
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
 * @param apptag_mask	Application tag mask
 * @param apptag	Application tag to use for end-to-end protection information
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_write_with_md(struct nvme_ns *ns, struct nvme_qpair *qpair,
				 void *payload, void *metadata,
				 uint64_t lba, uint32_t lba_count,
				 nvme_cmd_cb cb_fn, void *cb_arg,
				 unsigned int io_flags,
				 uint16_t apptag_mask, uint16_t apptag);

/**
 * @brief Submit a write zeroes I/O
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param lba		Starting LBA to write to
 * @param lba_count	Number of LBAs to write
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_write_zeroes(struct nvme_ns *ns, struct nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				nvme_cmd_cb cb_fn, void *cb_arg,
				unsigned int io_flags);

/**
 * @brief Submit a read I/O
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param buffer	Data buffer
 * @param lba		Starting LBA to read from
 * @param lba_count	Number of LBAs to read
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_read(struct nvme_ns *ns, struct nvme_qpair *qpair,
			void *buffer,
			uint64_t lba, uint32_t lba_count,
			nvme_cmd_cb cb_fn, void *cb_arg,
			unsigned int io_flags);

/**
 * @brief Submit a scattered read I/O
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param lba		Starting LBA to read from
 * @param lba_count	Number of LBAs to read
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
 * @param reset_sgl_fn	Reset scattered payload callback
 * @param next_sge_fn	Scattered payload iteration callback
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_readv(struct nvme_ns *ns, struct nvme_qpair *qpair,
			 uint64_t lba, uint32_t lba_count,
			 nvme_cmd_cb cb_fn, void *cb_arg,
			 unsigned int io_flags,
			 nvme_req_reset_sgl_cb reset_sgl_fn,
			 nvme_req_next_sge_cb next_sge_fn);

/**
 * @brief Submit a read I/O with metadata
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param buffer	Data buffer
 * @param metadata	Metadata payload
 * @param lba		Starting LBA to read from
 * @param lba_count	Number of LBAs to read
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
 * @param apptag_mask	Application tag mask
 * @param apptag	Application tag to use for end-to-end protection information
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_read_with_md(struct nvme_ns *ns, struct nvme_qpair *qpair,
				void *buffer, void *metadata,
				uint64_t lba, uint32_t lba_count,
				nvme_cmd_cb cb_fn, void *cb_arg,
				unsigned int io_flags,
				uint16_t apptag_mask, uint16_t apptag);

/**
 * @brief Submit a deallocate command
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param payload	List of LBA ranges to deallocate
 * @param num_ranges	Number of ranges in the list
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 *
 * The number of LBA ranges must be at least 1 and at most
 * NVME_DATASET_MANAGEMENT_MAX_RANGES.
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_deallocate(struct nvme_ns *ns, struct nvme_qpair *qpair,
			      void *payload, uint16_t num_ranges,
			      nvme_cmd_cb cb_fn, void *cb_arg);

/**
 * @brief Submit a flush command
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_flush(struct nvme_ns *ns, struct nvme_qpair *qpair,
			 nvme_cmd_cb cb_fn, void *cb_arg);

/**
 * @brief Submit a reservation register command
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param payload	Reservation register data buffer
 * @param ignore_key	Enable or disable the current reservation key check
 * @param action	Registration action
 * @param cptpl		Persist Through Power Loss state
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_reservation_register(struct nvme_ns *ns,
				struct nvme_qpair *qpair,
				struct nvme_reservation_register_data *payload,
				bool ignore_key,
				enum nvme_reservation_register_action action,
				enum nvme_reservation_register_cptpl cptpl,
				nvme_cmd_cb cb_fn, void *cb_arg);

/**
 * @brief Submit a reservation release command
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param payload	Current reservation key buffer
 * @param ignore_key	Enable or disable the current reservation key check
 * @param action	Reservation release action
 * @param type		Reservation type
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_reservation_release(struct nvme_ns *ns,
			       struct nvme_qpair *qpair,
			       struct nvme_reservation_key_data *payload,
			       bool ignore_key,
			       enum nvme_reservation_release_action action,
			       enum nvme_reservation_type type,
			       nvme_cmd_cb cb_fn, void *cb_arg);

/**
 * @brief Submit a reservation acquire command
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param payload	Reservation acquire data buffer
 * @param ignore_key	Enable or disable the current reservation key check
 * @param action	Reservation acquire action
 * @param type		Reservation type
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_reservation_acquire(struct nvme_ns *ns,
				struct nvme_qpair *qpair,
				struct nvme_reservation_acquire_data *payload,
				bool ignore_key,
				enum nvme_reservation_acquire_action action,
				enum nvme_reservation_type type,
				nvme_cmd_cb cb_fn, void *cb_arg);

/**
 * @brief Submit a reservation report command
 *
 * @param ns		Namespace handle
 * @param qpair		I/O queue pair handle
 * @param payload	Reservation status data buffer
 * @param len		Length in bytes of the reservation status data
 * @param cb_fn		Completion callback
 * @param cb_arg	Argument to pass to the completion callback
 *
 * The command is submitted to a queue pair obtained with nvme_ioqp_get().
 * The user must ensure that only one thread submits I/O on
 * a given qpair at any given time.
 *
 * @return 0 on success and a negative error code in case of failure.
 */
extern int nvme_ns_reservation_report(struct nvme_ns *ns,
				      struct nvme_qpair *qpair,
				      void *payload, size_t len,
				      nvme_cmd_cb cb_fn, void *cb_arg);

/**
 * Any NUMA node.
 */
#define NVME_NODE_ID_ANY	(~0U)

/**
 * @brief Allocate physically contiguous memory
 *
 * @param size		Size (in bytes) to be allocated
 * @param align		Memory alignment constraint
 * @param node_id	The NUMA node to get memory from or NVME_NODE_ID_ANY
 *
 * This function allocates memory from the hugepage area of memory. The
 * memory is not cleared. On NUMA systems, the memory allocated resides
 * on the requested NUMA node if node_id is not NVME_NODE_ID_ANY.
 * Otherwise, the allocation is preferably made on the node of the
 * calling context, or on any other node if that fails.
 *
 * @return The address of the allocated memory on success and NULL on failure.
 */
extern void *nvme_malloc_node(size_t size, size_t align,
			      unsigned int node_id);

/**
 * @brief Allocate zeroed memory
 *
 * @param size		Size (in bytes) to be allocated
 * @param align		Memory alignment constraint
 * @param node_id	The NUMA node to get memory from or NVME_NODE_ID_ANY
 *
 * See nvme_malloc_node().
 */
static inline void *nvme_zmalloc_node(size_t size, size_t align,
				      unsigned int node_id)
{
	void *buf;

	buf = nvme_malloc_node(size, align, node_id);
	if (buf)
		memset(buf, 0, size);

	return buf;
}

/**
 * @brief Allocate zeroed array memory
 *
 * @param num		Number of elements in the array
 * @param size		Size (in bytes) of the array elements
 * @param align		Memory alignment constraint
 * @param node_id	The NUMA node to get memory from or NVME_NODE_ID_ANY
 *
 * See nvme_malloc_node().
 */
static inline void *nvme_calloc_node(size_t num, size_t size,
				     size_t align, unsigned int node_id)
{
	return nvme_zmalloc_node(size * num, align, node_id);
}

/**
 * @brief Allocate physically contiguous memory
 *
 * @param size		Size (in bytes) to be allocated
 * @param align		Memory alignment constraint
 *
 * @return The address of the allocated memory on success and NULL on error
 *
 * See nvme_malloc_node().
 */
static inline void *nvme_malloc(size_t size, size_t align)
{
	return nvme_malloc_node(size, align, NVME_NODE_ID_ANY);
}

/**
 * @brief Allocate zeroed memory
 *
 * @param size		Size (in bytes) to be allocated
 * @param align		Memory alignment constraint
 *
 * @return The address of the allocated memory on success and NULL on error
 *
 * See nvme_zmalloc_node().
 */
static inline void *nvme_zmalloc(size_t size, size_t align)
{
	return nvme_zmalloc_node(size, align, NVME_NODE_ID_ANY);
}

/**
 * @brief Allocate zeroed array memory
 *
 * @param num		Number of elements in the array
 * @param size		Size (in bytes) of the array elements
 * @param align		Memory alignment constraint
 *
 * See nvme_calloc_node().
 */
static inline void *nvme_calloc(size_t num, size_t size, size_t align)
{
	return nvme_calloc_node(num, size, align, NVME_NODE_ID_ANY);
}

/**
 * @brief Free allocated memory
 *
 * @param addr	Address of the memory to free
 *
 * Free the memory at the specified address.
 * The address must be one that was returned by one of the
 * allocation functions nvme_malloc_node(), nvme_zmalloc_node()
 * or nvme_calloc_node().
 *
 * If the pointer is NULL, the function does nothing.
 */
extern void nvme_free(void *addr);

/**
 * Structure to hold memory statistics.
 */
struct nvme_mem_stats {

	/**
	 * Number of huge pages allocated.
	 */
	size_t		nr_hugepages;

	/**
	 * Total bytes in memory pools.
	 */
	size_t		total_bytes;

	/**
	 * Total free bytes in memory pools.
	 */
	size_t		free_bytes;

};

/**
 * @brief Get memory usage information
 *
 * @param stats		Memory usage information structure to fill
 * @param node_id	NUMA node ID or NVME_NODE_ID_ANY
 *
 * Return memory usage statistics for the specified
 * NUMA node (CPU socket) or global memory usage if node_id
 * is NVME_NODE_ID_ANY.
 *
 * @return 0 on success and a negative error code on failure.
 */
extern int nvme_memstat(struct nvme_mem_stats *stats,
			unsigned int node_id);
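
/*
 * Example (a minimal sketch, not part of the API): printing global
 * memory usage statistics. <stdio.h> is assumed to be included by the
 * caller.
 *
 *	struct nvme_mem_stats stats;
 *
 *	if (nvme_memstat(&stats, NVME_NODE_ID_ANY) == 0)
 *		printf("%zu hugepages, %zu bytes total, %zu bytes free\n",
 *		       stats.nr_hugepages, stats.total_bytes,
 *		       stats.free_bytes);
 */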

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#endif /* __LIBNVME_H__ */