xref: /haiku/src/add-ons/kernel/drivers/disk/nvme/libnvme/nvme.h (revision 5889cb5e7e8e7bfea6072ddfe881f55d364a0cf0)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2017, Western Digital Corporation or its affiliates.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /**
35  * @file
36  * NVMe driver public API
37  *
38  * @mainpage
39  *
40  * libnvme is a user space utility to provide control over NVMe,
41  * the host controller interface for drives based on PCI Express.
42  *
43  * \addtogroup libnvme
44  *  @{
45  */
46 
47 #ifndef __LIBNVME_H__
48 #define __LIBNVME_H__
49 
50 #ifdef __cplusplus
51 extern "C" {
52 #endif
53 
54 #include <libnvme/nvme_spec.h>
55 
56 #ifndef __HAIKU__
57 #include <pciaccess.h>
58 #endif
59 #include <stdbool.h>
60 #include <stddef.h>
61 #include <stdint.h>
62 #include <string.h>
63 
64 /**
65  * Log levels.
66  */
67 enum nvme_log_level {
68 
69 	/**
70 	 * Disable all log messages.
71 	 */
72 	NVME_LOG_NONE = 0,
73 
74 	/**
75 	 * System is unusable.
76 	 */
77 	NVME_LOG_EMERG,
78 
79 	/**
80 	 * Action must be taken immediately.
81 	 */
82 	NVME_LOG_ALERT,
83 
84 	/**
85 	 * Critical conditions.
86 	 */
87 	NVME_LOG_CRIT,
88 
89 	/**
90 	 * Error conditions.
91 	 */
92 	NVME_LOG_ERR,
93 
94 	/**
95 	 * Warning conditions.
96 	 */
97 	NVME_LOG_WARNING,
98 
99 	/**
100 	 * Normal but significant condition.
101 	 */
102 	NVME_LOG_NOTICE,
103 
104 	/**
105 	 * Informational messages.
106 	 */
107 	NVME_LOG_INFO,
108 
109 	/**
110 	 * Debug-level messages.
111 	 */
112 	NVME_LOG_DEBUG,
113 
114 };
115 
116 /**
117  * Log facilities.
118  */
119 enum nvme_log_facility {
120 
121 	/**
122 	 * Standard output log facility
123 	 */
124 	NVME_LOG_STDOUT = 0x00000001,
125 
126 	/**
127 	 * Regular file output log facility
128 	 */
129 	NVME_LOG_FILE = 0x00000002,
130 
131 	/**
132 	 * syslog service output log facility
133 	 */
134 	NVME_LOG_SYSLOG = 0x00000004,
135 
136 };
137 
138 /**
139  * @brief Initialize libnvme
140  *
141  * @param level	Library log level
142  * @param facility	Facility code
143  * @param path		File name for the NVME_LOG_FILE facility
144  *
145  * This function must always be called first before any other
146  * function provided by libnvme. The arguments allow setting the
147  * initial log level and log facility so that any problem during
148  * initialization can be caught.
149  *
150  * @return 0 on success and a negative error code on failure.
151  */
152 extern int nvme_lib_init(enum nvme_log_level level,
153 			 enum nvme_log_facility facility, const char *path);
154 
155 /**
156  * @brief Set the library log level
157  *
158  * @param level	Library log level
159  */
160 extern void nvme_set_log_level(enum nvme_log_level level);
161 
162 /**
163  * @brief Get the current log level
164  *
165  * @return The current library log level.
166  */
167 extern enum nvme_log_level nvme_get_log_level(void);
168 
169 /**
170  * @brief Change the library log facility
171  *
172  * @param facility	Facility code
173  * @param path		File name for the NVME_LOG_FILE facility
174  *
175  * Set th library log facility. On failure, the facility is
176  * always automatically set to stdout.
177  *
178  * @return 0 on success and a negative error code on failure.
179  */
180 extern int nvme_set_log_facility(enum nvme_log_facility facility,
181 				 const char *path);
182 
183 /**
184  * @brief Get the current library log facility.
185  *
186  * @return The current library log facility.
187  */
188 extern enum nvme_log_facility nvme_get_log_facility(void);
189 
190 /**
191  * @brief Opaque handle to a controller returned by nvme_ctrlr_open().
192  */
193 struct nvme_ctrlr;
194 
195 /**
196  * @brief Opaque handle to a namespace
197  */
198 struct nvme_ns;
199 
200 /**
201  * @brief Opaque handle to an I/O queue pair
202  */
203 struct nvme_qpair;
204 
205 /**
206  * @brief Capabilities register of a controller
207  */
208 struct nvme_register_data {
209 
210 	/**
211 	 * Maximum Queue Entries Supported indicates the maximum individual
212 	 * queue size that the controller supports. This is a 0’s based value,
213 	 * so 1 has to be added.
214 	 */
215 	unsigned int		mqes;
216 
217 };
218 
219 /**
220  * Length of the string for the serial number
221  */
222 #define NVME_SERIAL_NUMBER_LENGTH	NVME_SERIAL_NUMBER_CHARACTERS + 1
223 
224 /**
225  * Length of the string for the model number
226  */
227 #define NVME_MODEL_NUMBER_LENGTH	NVME_MODEL_NUMBER_CHARACTERS + 1
228 
229 /**
230  * @brief Controller information
231  */
232 struct nvme_ctrlr_stat {
233 
234 	/**
235 	 * PCI device vendor ID.
236 	 */
237 	unsigned short		vendor_id;
238 
239 	/**
240 	 * PCI device ID.
241 	 */
242 	unsigned short		device_id;
243 
244 	/**
245 	 * PCI device sub-vendor ID.
246 	 */
247 	unsigned short		subvendor_id;
248 
249 	/**
250 	 * PCI sub-device ID.
251 	 */
252 	unsigned short		subdevice_id;
253 
254 	/**
255 	 * PCI device class.
256 	 */
257 	unsigned int		device_class;
258 
259 	/**
260 	 * PCI device revision.
261 	 */
262 	unsigned char		revision;
263 
264 	/**
265 	 * PCI slot domain.
266 	 */
267 	unsigned int		domain;
268 
269 	/**
270 	 * PCI slot bus.
271 	 */
272 	unsigned int		bus;
273 
274 	/**
275 	 * PCI slot bus device number.
276 	 */
277 	unsigned int		dev;
278 
279 	/**
280 	 * PCI slot device function.
281 	 */
282 	unsigned int		func;
283 
284 	/**
285 	 * Serial number
286 	 */
287 	char			sn[NVME_SERIAL_NUMBER_LENGTH];
288 
289 	/**
290 	 * Model number
291 	 */
292 	char			mn[NVME_MODEL_NUMBER_LENGTH];
293 
294 	/**
295 	 * Maximum transfer size.
296 	 */
297 	size_t			max_xfer_size;
298 
299 	/**
300 	 * All the log pages supported.
301 	 */
302 	bool			log_pages[256];
303 
304 	/**
305 	 * Whether SGL is supported by the controller.
306 	 *
307 	 * Note that this does not mean all SGL requests will fail;
308 	 * many are convertible into standard (PRP) requests by libnvme.
309 	 */
310 	bool			sgl_supported;
311 
312 	/**
313 	 * All the features supported.
314 	 */
315 	bool			features[256];
316 
317 	/**
318 	 * Number of valid namespaces in the array of namespace IDs.
319 	 */
320 	unsigned int		nr_ns;
321 
322 	/**
323 	 * Array of valid namespace IDs of the controller.
324 	 * Namspeace IDs are integers between 1 and NVME_MAX_NS
325 	 */
326 	unsigned int		ns_ids[NVME_MAX_NS];
327 
328 	/**
329 	 * Maximum number of I/O queue pairs
330 	 */
331 	unsigned int		max_io_qpairs;
332 
333 	/**
334 	 * Number of I/O queue pairs allocated
335 	 */
336 	unsigned int		io_qpairs;
337 
338 	/**
339 	 * Number of I/O queue pairs enabled
340 	 */
341 	unsigned int		enabled_io_qpairs;
342 
343 	/**
344 	 * IO qpairs maximum entries
345 	 */
346 	unsigned int		max_qd;
347 };
348 
349 /**
350  * @brief NVMe controller options
351  *
352  * Allow the user to request non-default options.
353  */
354 struct nvme_ctrlr_opts {
355 
356 	/**
357 	 * Number of I/O queues to initialize.
358 	 * (default: all possible I/O queues)
359 	 */
360 	unsigned int		io_queues;
361 
362 	/**
363 	 * Enable submission queue in controller memory buffer
364 	 * (default: false)
365 	 */
366 	bool 			use_cmb_sqs;
367 
368 	/**
369 	 * Type of arbitration mechanism.
370 	 * (default: round-robin == NVME_CC_AMS_RR)
371 	 */
372 	enum nvme_cc_ams	arb_mechanism;
373 
374 };
375 
376 /**
377  * @brief Namespace command support flags
378  */
379 enum nvme_ns_flags {
380 
381 	/**
382 	 * The deallocate command is supported.
383 	 */
384 	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
385 
386 	/**
387 	 * The flush command is supported.
388 	 */
389 	NVME_NS_FLUSH_SUPPORTED		= 0x2,
390 
391 	/**
392 	 * The reservation command is supported.
393 	 */
394 	NVME_NS_RESERVATION_SUPPORTED	= 0x4,
395 
396 	/**
397 	 * The write zeroes command is supported.
398 	 */
399 	NVME_NS_WRITE_ZEROES_SUPPORTED	= 0x8,
400 
401 	/**
402 	 * The end-to-end data protection is supported.
403 	 */
404 	NVME_NS_DPS_PI_SUPPORTED	= 0x10,
405 
406 	/**
407 	 * The extended lba format is supported, metadata is transferred as
408 	 * a contiguous part of the logical block that it is associated with.
409 	 */
410 	NVME_NS_EXTENDED_LBA_SUPPORTED	= 0x20,
411 
412 };
413 
414 /**
415  * @brief Namespace information
416  */
417 struct nvme_ns_stat {
418 
419 	/**
420 	 * Namespace ID.
421 	 */
422 	unsigned int			id;
423 
424 	/**
425 	 * Namespace command support flags.
426 	 */
427 	enum nvme_ns_flags		flags;
428 
429 	/**
430 	 * Namespace sector size in bytes.
431 	 */
432 	size_t				sector_size;
433 
434 	/**
435 	 * Namespace number of sectors.
436 	 */
437 	uint64_t			sectors;
438 
439 	/**
440 	 * Namespace metadata size in bytes.
441 	 */
442 	size_t				md_size;
443 
444 	/**
445 	 * Namespace priority information type.
446 	 */
447 	enum nvme_pi_type		pi_type;
448 
449 };
450 
451 /**
452  * @brief Queue pair information
453  */
454 struct nvme_qpair_stat {
455 
456 	/**
457 	 * Qpair ID
458 	 */
459 	unsigned int		id;
460 
461 	/**
462 	 * Qpair number of entries
463 	 */
464 	unsigned int		qd;
465 
466 	/**
467 	 * Qpair is enabled
468 	 */
469 	bool			enabled;
470 
471 	/**
472 	 * Qpair priority
473 	 */
474 	unsigned int		qprio;
475 };
476 
477 /**
478  * @brief Command completion callback function signature
479  *
480  * @param cmd_cb_arg	Callback function input argument.
481  * @param cpl_status	Contains the completion status.
482  */
483 typedef void (*nvme_cmd_cb)(void *cmd_cb_arg,
484 			    const struct nvme_cpl *cpl_status);
485 
486 /**
487  * @brief Asynchronous error request completion callback
488  *
489  * @param aer_cb_arg	AER context set by nvme_register_aer_callback()
490  * @param cpl_status	Completion status of the asynchronous event request
491  */
492 typedef void (*nvme_aer_cb)(void *aer_cb_arg,
493 			    const struct nvme_cpl *cpl_status);
494 
495 /**
496  * @brief Restart SGL walk to the specified offset callback
497  *
498  * @param cb_arg	Value passed to nvme_readv/nvme_writev
499  * @param offset	Offset in the SGL
500  */
501 typedef void (*nvme_req_reset_sgl_cb)(void *cb_arg, uint32_t offset);
502 
503 /**
504  * @brief Get an SGL entry address and length and advance to the next entry
505  *
506  * @param cb_arg	Value passed to readv/writev
507  * @param address	Physical address of this segment
508  * @param length	Length of this physical segment
509  *
510  * Fill out address and length with the current SGL entry and advance
511  * to the next entry for the next time the callback is invoked
512  */
513 typedef int (*nvme_req_next_sge_cb)(void *cb_arg,
514 				    uint64_t *address, uint32_t *length);
515 
516 /**
517  * @brief Open an NVMe controller
518  *
519  * @param url	PCI device URL
520  * @param opts	controller options
521  *
522  * Obtain a handle for an NVMe controller specified as a PCI device URL,
523  * e.g. pci://[DDDD:]BB:DD.F. If called more than once for the same
524  * controller, NULL is returned.
525  * To stop using the the controller and release its associated resources,
526  * call nvme_ctrlr_close() with the handle returned by this function.
527  *
528  * @return A handle to the controller on success and NULL on failure.
529  */
530 struct pci_device {
531 	uint16_t  vendor_id;
532 	uint16_t  device_id;
533 	uint16_t  subvendor_id;
534 	uint16_t  subdevice_id;
535 
536 	uint16_t domain;
537 	uint16_t bus;
538 	uint16_t dev;
539 	uint16_t func;
540 
541 	void* pci_info;
542 };
543 
544 extern struct nvme_ctrlr * nvme_ctrlr_open(struct pci_device *pdev,
545 					   struct nvme_ctrlr_opts *opts);
546 
547 /**
548  * @brief Close an open NVMe controller
549  *
550  * @param ctrlr	Controller handle
551  *
552  * This function should be called while no other threads
553  * are actively using the controller.
554  *
555  * @return 0 on success and a negative error code on failure.
556  */
557 extern int nvme_ctrlr_close(struct nvme_ctrlr *ctrlr);
558 
559 /**
560  * @brief Get controller capabilities and features
561  *
562  * @param ctrlr	Controller handle
563  * @param cstat	Controller information
564  *
565  * @return 0 on success and a negative error code on failure.
566  */
567 extern int nvme_ctrlr_stat(struct nvme_ctrlr *ctrlr,
568 			   struct nvme_ctrlr_stat *cstat);
569 
570 /**
571  * @brief Get controller data and some data from the capabilities register
572  *
573  * @param ctrlr	Controller handle
574  * @param cdata	Controller data to fill
575  * @param rdata	Capabilities register data to fill
576  *
577  * cdata and rdata are optional (NULL can be specified).
578  *
579  * @return 0 on success and a negative error code on failure.
580  */
581 extern int nvme_ctrlr_data(struct nvme_ctrlr *ctrlr,
582 			   struct nvme_ctrlr_data *cdata,
583 			   struct nvme_register_data *rdata);
584 
585 /**
586  * @brief Get a specific feature of a controller
587  *
588  * @param ctrlr		Controller handle
589  * @param sel		Feature selector
590  * @param feature 	Feature identifier
591  * @param cdw11 	Command word 11 (command dependent)
592  * @param attributes	Features attributes
593  *
594  * This function is thread safe and can be called at any point while
595  * the controller is attached.
596  *
597  * @return 0 on success and a negative error code on failure.
598  *
599  * See nvme_ctrlr_set_feature()
600  */
601 extern int nvme_ctrlr_get_feature(struct nvme_ctrlr *ctrlr,
602 				  enum nvme_feat_sel sel,
603 				  enum nvme_feat feature,
604 				  uint32_t cdw11, uint32_t *attributes);
605 
606 /**
607  * @brief Set a specific feature of a controller
608  *
609  * @param ctrlr		Controller handle
610  * @param save		Save feature across power cycles
611  * @param feature 	Feature identifier
612  * @param cdw11 	Command word 11 (feature dependent)
613  * @param cdw12 	Command word 12 (feature dependent)
614  * @param attributes	Features attributes
615  *
616  * This function is thread safe and can be called at any point while
617  * the controller is attached to the NVMe driver.
618  *
619  * @return 0 on success and a negative error code on failure.
620  *
621  * See nvme_ctrlr_get_feature()
622  */
623 extern int nvme_ctrlr_set_feature(struct nvme_ctrlr *ctrlr,
624 				  bool save, enum nvme_feat feature,
625 				  uint32_t cdw11, uint32_t cdw12,
626 				  uint32_t *attributes);
627 
628 /**
629  * @brief Attach the specified namespace to controllers
630  *
631  * @param ctrlr Controller handle to use for command submission
632  * @param nsid	Namespace ID of the namespaces to attach
633  * @param clist List of controllers as defined in the NVMe specification
634  *
635  * @return 0 on success and a negative error code on failure.
636  */
637 extern int nvme_ctrlr_attach_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid,
638 				struct nvme_ctrlr_list *clist);
639 
640 /**
641  * @brief Detach the specified namespace from controllers
642  *
643  * @param ctrlr Controller handle to use for command submission
644  * @param nsid	Namespace ID of the namespaces to detach
645  * @param clist List of controllers as defined in the NVMe specification
646  *
647  * @return 0 on success and a negative error code on failure.
648  */
649 extern int nvme_ctrlr_detach_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid,
650 				struct nvme_ctrlr_list *clist);
651 
652 /**
653  * @brief Create a namespace
654  *
655  * @param ctrlr 	Controller handle
656  * @param nsdata	namespace data
657  *
658  * @return Namespace ID (>= 1) on success and 0 on failure.
659  */
660 extern unsigned int nvme_ctrlr_create_ns(struct nvme_ctrlr *ctrlr,
661 					 struct nvme_ns_data *nsdata);
662 
663 /**
664  * @brief Delete a namespace
665  *
666  * @param ctrlr	Controller handle
667  * @param nsid	ID of the namespace to delete
668  *
669  * @return 0 on success and a negative error code on failure.
670  */
671 extern int nvme_ctrlr_delete_ns(struct nvme_ctrlr *ctrlr, unsigned int nsid);
672 
673 /**
674  * @brief Format media
675  *
676  * @param ctrlr		Controller handle
677  * @param nsid		ID of the namespace to format
678  * @param format	Format information
679  *
680  * This function requests a low-level format of the media.
681  * If nsid is NVME_GLOBAL_NS_TAG, all namspaces attached to the contoller
682  * are formatted.
683  *
684  * @return 0 on success and a negative error code on failure.
685  */
686 extern int nvme_ctrlr_format_ns(struct nvme_ctrlr *ctrlr,
687 				unsigned int nsid, struct nvme_format *format);
688 
689 /**
690  * @brief Download a new firmware image
691  *
692  * @param ctrlr	Controller handle
693  * @param fw	Firmware data buffer
694  * @param size	Firmware buffer size
695  * @param slot 	Firmware image slot to use
696  *
697  * @return 0 on success and a negative error code on failure.
698  */
699 extern int nvme_ctrlr_update_firmware(struct nvme_ctrlr *ctrlr,
700 				      void *fw, size_t size, int slot);
701 
702 /**
703  * @brief Get an I/O queue pair
704  *
705  * @param ctrlr	Controller handle
706  * @param qprio I/O queue pair priority for weighted round robin arbitration
707  * @param qd 	I/O queue pair maximum submission queue depth
708  *
709  * A queue depth of 0 will result in the maximum hardware defined queue
710  * depth being used. The use of a queue pair is not thread safe. Applications
711  * must ensure mutual exclusion access to the queue pair during I/O processing.
712  *
713  * @return An I/O queue pair handle on success and NULL in case of failure.
714  */
715 extern struct nvme_qpair * nvme_ioqp_get(struct nvme_ctrlr *ctrlr,
716 					 enum nvme_qprio qprio,
717 					 unsigned int qd);
718 
719 /**
720  * @brief Release an I/O queue pair
721  *
722  * @param qpair	I/O queue pair handle
723  *
724  * @return 0 on success and a negative error code on failure.
725  */
726 extern int nvme_ioqp_release(struct nvme_qpair *qpair);
727 
728 /**
729  * @brief Get information on an I/O queue pair
730  *
731  * @param qpair		I/O queue pair handle
732  * @param qpstat	I/O queue pair information to fill
733  *
734  * @return 0 on success and a negative error code on failure.
735  */
736 extern int nvme_qpair_stat(struct nvme_qpair *qpair,
737 			   struct nvme_qpair_stat *qpstat);
738 
739 /**
740  * @brief Submit an NVMe command
741  *
742  * @param qpair		I/O qpair handle
743  * @param cmd		Command to submit
744  * @param buf		Payload buffer
745  * @param len		Payload buffer length
746  * @param cb_fn		Callback function
747  * @param cb_arg	Argument for the call back function
748  *
749  * This is a low level interface for submitting I/O commands directly.
750  * The validity of the command will not be checked.
751  *
752  * When constructing the nvme_command it is not necessary to fill out the PRP
753  * list/SGL or the CID. The driver will handle both of those for you.
754  *
755  * @return 0 on success and a negative error code on failure.
756  */
757 extern int nvme_ioqp_submit_cmd(struct nvme_qpair *qpair,
758 				struct nvme_cmd *cmd,
759 				void *buf, size_t len,
760 				nvme_cmd_cb cb_fn, void *cb_arg);
761 
762 /**
763  * @brief Process I/O command completions
764  *
765  * @param qpair			I/O queue pair handle
766  * @param max_completions	Maximum number of completions to check
767  *
768  * This call is non-blocking, i.e. it only processes completions that are
769  * ready at the time of this function call. It does not wait for
770  * outstanding commands to complete.
771  * For each completed command, the request callback function will
772  * be called if specified as non-NULL when the request was submitted.
773  * This function may be called at any point after the command submission
774  * while the controller is open
775  *
776  * @return The number of completions processed (may be 0).
777  *
778  * @sa nvme_cmd_cb
779  */
780 extern unsigned int nvme_qpair_poll(struct nvme_qpair *qpair,
781 				   unsigned int max_completions);
782 
783 /**
784  * @brief Open a name space
785  *
786  * @param ctrlr	Controller handle
787  * @param ns_id	ID of the name space to open
788  *
789  * @return A namspace handle on success or NULL in case of failure.
790  */
791 extern struct nvme_ns *nvme_ns_open(struct nvme_ctrlr *ctrlr,
792 				    unsigned int ns_id);
793 
794 /**
795  * @brief Close an open name space
796  *
797  * @param ns	Namspace handle
798  *
799  * See nvme_ns_open()
800  */
801 extern int nvme_ns_close(struct nvme_ns *ns);
802 
803 /**
804  * @brief Get information on a namespace
805  *
806  * @param ns		Namespace handle
807  * @param ns_stat	Namespace information
808  *
809  * @return 0 on success and a negative error code in case of failure.
810  */
811 extern int nvme_ns_stat(struct nvme_ns *ns,
812 			struct nvme_ns_stat *ns_stat);
813 
814 /**
815  * @brief Get namespace data
816  *
817  * @param ns		Namespace handle
818  * @param nsdata	Namespace data
819  *
820  * @return 0 on success and a negative error code in case of failure.
821  */
822 extern int nvme_ns_data(struct nvme_ns *ns,
823 			struct nvme_ns_data *nsdata);
824 
825 /**
826  * @brief Submit a write I/O
827  *
828  * @param ns		Namespace handle
829  * @param qpair		I/O queue pair handle
830  * @param buffer	Physically contiguous data buffer
831  * @param lba		Starting LBA to read from
832  * @param lba_count	Number of LBAs to read
833  * @param cb_fn		Completion callback
834  * @param cb_arg	Argument to pass to the completion callback
835  * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
836  *
837  * @return 0 on success and a negative error code in case of failure.
838  */
839 extern int nvme_ns_write(struct nvme_ns *ns, struct nvme_qpair *qpair,
840 			 void *buffer,
841 			 uint64_t lba, uint32_t lba_count,
842 			 nvme_cmd_cb cb_fn, void *cb_arg,
843 			 unsigned int io_flags);
844 
845 /**
846  * @brief Submit a scattered write I/O
847  *
848  * @param ns		Namespace handle
849  * @param qpair		I/O queue pair handle
850  * @param lba		Starting LBA to write to
851  * @param lba_count	Number of LBAs to write
852  * @param cb_fn		Completion callback
853  * @param cb_arg	Argument to pass to the completion callback
854  * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
855  * @param reset_sgl_fn	Reset scattered payload callback
856  * @param next_sge_fn	Scattered payload iteration callback
857  *
858  * @return 0 on success and a negative error code in case of failure.
859  */
860 extern int nvme_ns_writev(struct nvme_ns *ns, struct nvme_qpair *qpair,
861 			  uint64_t lba, uint32_t lba_count,
862 			  nvme_cmd_cb cb_fn, void *cb_arg,
863 			  unsigned int io_flags,
864 			  nvme_req_reset_sgl_cb reset_sgl_fn,
865 			  nvme_req_next_sge_cb next_sge_fn);
866 
867 /**
868  * @brief Submits a write I/O with metadata
869  *
870  * @param ns		Namespace handle
871  * @param qpair		I/O queue pair handle
872  * @param payload	Data buffer
873  * @param metadata	Metadata payload
874  * @param lba		Starting LBA to write to
875  * @param lba_count	Number of LBAs to write
876  * @param cb_fn		Completion callback
877  * @param cb_arg	Argument to pass to the completion callback
878  * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
879  * @param apptag_mask	Application tag mask
880  * @param apptag	Application tag to use end-to-end protection information
881  *
882  * @return 0 on success and a negative error code in case of failure.
883  */
884 extern int nvme_ns_write_with_md(struct nvme_ns *ns, struct nvme_qpair *qpair,
885 				 void *payload, void *metadata,
886 				 uint64_t lba, uint32_t lba_count,
887 				 nvme_cmd_cb cb_fn, void *cb_arg,
888 				 unsigned int io_flags,
889 				 uint16_t apptag_mask, uint16_t apptag);
890 
891 /**
892  * @brief Submit a write zeroes I/O
893  *
894  * @param ns		Namespace handle
895  * @param qpair		I/O queue pair handle
896  * @param lba		Starting LBA to write to
897  * @param lba_count	Number of LBAs to write
898  * @param cb_fn		Completion callback
899  * @param cb_arg	Argument to pass to the completion callback
900  * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
901  *
902  * @return 0 on success and a negative error code in case of failure.
903  */
904 extern int nvme_ns_write_zeroes(struct nvme_ns *ns, struct nvme_qpair *qpair,
905 				uint64_t lba, uint32_t lba_count,
906 				nvme_cmd_cb cb_fn, void *cb_arg,
907 				unsigned int io_flags);
908 
909 /**
910  * @brief Submit a read I/O
911  *
912  * @param ns		Namespace handle
913  * @param qpair		I/O queue pair handle
914  * @param buffer	Physically contiguous data buffer
915  * @param lba		Starting LBA to read from
916  * @param lba_count	Number of LBAs to read
917  * @param cb_fn		Completion callback
918  * @param cb_arg	Argument to pass to the completion callback
919  * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
920  *
921  * @return 0 on success and a negative error code in case of failure.
922  */
923 extern int nvme_ns_read(struct nvme_ns *ns, struct nvme_qpair *qpair,
924 			void *buffer,
925 			uint64_t lba, uint32_t lba_count,
926 			nvme_cmd_cb cb_fn, void *cb_arg,
927 			unsigned int io_flags);
928 
929 /**
930  * @brief Submit a scattered read I/O
931  *
932  * @param ns		Namespace handle
933  * @param qpair		I/O queue pair handle
934  * @param lba		Starting LBA to read from
935  * @param lba_count	Number of LBAs to read
936  * @param cb_fn		Completion callback
937  * @param cb_arg	Argument to pass to the completion callback
938  * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
939  * @param reset_sgl_fn	Reset scattered payload callback
940  * @param next_sge_fn	Scattered payload iteration callback
941  *
942  * @return 0 on success and a negative error code in case of failure.
943  */
944 extern int nvme_ns_readv(struct nvme_ns *ns, struct nvme_qpair *qpair,
945 			 uint64_t lba, uint32_t lba_count,
946 			 nvme_cmd_cb cb_fn, void *cb_arg,
947 			 unsigned int io_flags,
948 			 nvme_req_reset_sgl_cb reset_sgl_fn,
949 			 nvme_req_next_sge_cb next_sge_fn);
950 
951 /**
952  * @brief Submit a read I/O with metadata
953  *
954  * @param ns		Namespace handle
955  * @param qpair		I/O queue pair handle
956  * @param buffer	Data buffer
957  * @param metadata	Metadata payload
958  * @param lba		Starting LBA to read from
959  * @param lba_count	Number of LBAs to read
960  * @param cb_fn		Completion callback
961  * @param cb_arg	Argument to pass to the completion callback
962  * @param io_flags	I/O flags (NVME_IO_FLAGS_*)
963  * @param apptag_mask	Application tag mask
964  * @param apptag	Application tag to use end-to-end protection information
965  *
966  * @return 0 on success and a negative error code in case of failure.
967  */
968 extern int nvme_ns_read_with_md(struct nvme_ns *ns, struct nvme_qpair *qpair,
969 				void *buffer, void *metadata,
970 				uint64_t lba, uint32_t lba_count,
971 				nvme_cmd_cb cb_fn, void *cb_arg,
972 				unsigned int io_flags,
973 				uint16_t apptag_mask, uint16_t apptag);
974 
975 /**
976  * @brief Submit a deallocate command
977  *
978  * @param ns		Namespace handle
979  * @param qpair		I/O queue pair handle
980  * @param payload	List of LBA ranges to deallocate
981  * @param num_ranges	Number of ranges in the list
982  * @param cb_fn		Completion callback
983  * @param cb_arg	Argument to pass to the completion callback
984  *
985  * The number of LBA ranges must be at least 1 and at most
986  * NVME_DATASET_MANAGEMENT_MAX_RANGES.
987  *
988  * @return 0 on success and a negative error code in case of failure.
989  */
990 extern int nvme_ns_deallocate(struct nvme_ns *ns, struct nvme_qpair *qpair,
991 			      void *payload, uint16_t num_ranges,
992 			      nvme_cmd_cb cb_fn, void *cb_arg);
993 
994 /**
995  * @brief Submit a flush command
996  *
997  * @param ns		Namespace handle
998  * @param qpair		I/O queue pair handle
999  * @param cb_fn		Completion callback
1000  * @param cb_arg	Argument to pass to the completion callback
1001  *
1002  * @return 0 on success and a negative error code in case of failure.
1003  */
1004 extern int nvme_ns_flush(struct nvme_ns *ns, struct nvme_qpair *qpair,
1005 			 nvme_cmd_cb cb_fn, void *cb_arg);
1006 
1007 /**
1008  * @brief Submit a reservation register command
1009  *
1010  * @param ns		Namespace handle
1011  * @param qpair		I/O queue pair handle
1012  * @param payload	Reservation register data buffer
1013  * @param ignore_key	Enable or not the current reservation key check
1014  * @param action	Registration action
1015  * @param cptpl		Persist Through Power Loss state
1016  * @param cb_fn		Completion callback
1017  * @param cb_arg	Argument to pass to the completion callback
1018  *
1019  * @return 0 on success and a negative error code in case of failure.
1020  */
1021 extern int nvme_ns_reservation_register(struct nvme_ns *ns,
1022 				struct nvme_qpair *qpair,
1023 				struct nvme_reservation_register_data *payload,
1024 				bool ignore_key,
1025 				enum nvme_reservation_register_action action,
1026 				enum nvme_reservation_register_cptpl cptpl,
1027 				nvme_cmd_cb cb_fn, void *cb_arg);
1028 
1029 /**
1030  * @brief Submit a reservation release command
1031  *
1032  * @param ns		Namespace handle
1033  * @param qpair		I/O queue pair handle
1034  * @param payload	Current reservation key buffer
1035  * @param ignore_key	Enable or not the current reservation key check
1036  * @param action	Reservation release action
1037  * @param type		Reservation type
1038  * @param cb_fn		Completion callback
1039  * @param cb_arg	Argument to pass to the completion callback
1040  *
1041  * @return 0 on success and a negative error code in case of failure.
1042  */
1043 extern int nvme_ns_reservation_release(struct nvme_ns *ns,
1044 			       struct nvme_qpair *qpair,
1045 			       struct nvme_reservation_key_data *payload,
1046 			       bool ignore_key,
1047 			       enum nvme_reservation_release_action action,
1048 			       enum nvme_reservation_type type,
1049 			       nvme_cmd_cb cb_fn, void *cb_arg);
1050 
1051 /**
1052  * @brief Submit a reservation acquire command
1053  *
1054  * @param ns		Namespace handle
1055  * @param qpair		I/O queue pair handle
1056  * @param payload	Reservation acquire data buffer
1057  * @param ignore_key	Enable or not the current reservation key check
1058  * @param action	Reservation acquire action
1059  * @param type		Reservation type
1060  * @param cb_fn		Completion callback
1061  * @param cb_arg	Argument to pass to the completion callback
1062  *
1063  * @return 0 on success and a negative error code in case of failure.
1064  */
1065 extern int nvme_ns_reservation_acquire(struct nvme_ns *ns,
1066 				struct nvme_qpair *qpair,
1067 				struct nvme_reservation_acquire_data *payload,
1068 				bool ignore_key,
1069 				enum nvme_reservation_acquire_action action,
1070 				enum nvme_reservation_type type,
1071 				nvme_cmd_cb cb_fn, void *cb_arg);
1072 
1073 /**
1074  * @brief Submits a reservation report to a namespace
1075  *
1076  * @param ns		Namespace handle
1077  * @param qpair		I/O queue pair handle
1078  * @param payload	Reservation status data buffer
1079  * @param len		Length in bytes of the reservation status data
1080  * @param cb_fn		Completion callback
1081  * @param cb_arg	Argument to pass to the completion callback
1082  *
1083  * The command is submitted to a qpair allocated by nvme_ctrlr_alloc_io_qpair().
1084  * The user must ensure that only one thread submits I/O on
1085  * a given qpair at any given time.
1086  *
1087  * @return 0 on success and a negative error code in case of failure.
1088  */
1089 extern int nvme_ns_reservation_report(struct nvme_ns *ns,
1090 				      struct nvme_qpair *qpair,
1091 				      void *payload, size_t len,
1092 				      nvme_cmd_cb cb_fn, void *cb_arg);
1093 
1094 /**
1095  * Any NUMA node.
1096  */
1097 #define NVME_NODE_ID_ANY 	(~0U)
1098 
1099 /**
1100  * @brief Allocate physically contiguous memory
1101  *
1102  * @param size 		Size (in bytes) to be allocated
1103  * @param align 	Memory alignment constraint
1104  * @param node_id	The NUMA node to get memory from or NVME_NODE_ID_ANY
1105  *
1106  * This function allocates memory from the hugepage area of memory. The
1107  * memory is not cleared. In NUMA systems, the memory allocated resides
1108  * on the requested NUMA node if node_id is not NVME_NODE_ID_ANY.
1109  * Otherwise, allocation will take preferrably on the node of the
1110  * function call context, or any other node if that fails.
1111  *
1112  * @return The address of the allocated memory on success and NULL on failure.
1113  */
1114 extern void *nvme_malloc_node(size_t size, size_t align,
1115 			      unsigned int node_id);
1116 
1117 /**
1118  * @brief Allocate zero'ed memory
1119  *
1120  * @param size 		Size (in bytes) to be allocated
1121  * @param align 	Memory alignment constraint
1122  * @param node_id	The NUMA node to get memory from or NVME_NODE_ID_ANY
1123  *
1124  * See @nvme_malloc_node.
1125  */
1126 static inline void *nvme_zmalloc_node(size_t size, size_t align,
1127 				      unsigned int node_id)
1128 {
1129 	void *buf;
1130 
1131 	buf = nvme_malloc_node(size, align, node_id);
1132 	if (buf)
1133 		memset(buf, 0, size);
1134 
1135 	return buf;
1136 }
1137 
1138 /**
1139  * @brief Allocate zero'ed array memory
1140  *
1141  * @param num 		Size of the array
1142  * @param size 		Size (in bytes) of the array elements
1143  * @param align 	Memory alignment constraint
1144  * @param node_id	The NUMA node to get memory from or NVME_NODE_ID_ANY
1145  *
1146  * See @nvme_malloc_node.
1147  */
1148 static inline void *nvme_calloc_node(size_t num, size_t size,
1149 				     size_t align, unsigned int node_id)
1150 {
1151 	return nvme_zmalloc_node(size * num, align, node_id);
1152 }
1153 
1154 /**
1155  * @brief Allocate physically contiguous memory
1156  *
1157  * @param size 		Size (in bytes) to be allocated
1158  * @param align 	Memory alignment constraint
1159  *
1160  * @return The address of the allocated memory on success and NULL on error
1161  *
1162  * See @nvme_malloc_node.
1163  */
1164 static inline void *nvme_malloc(size_t size, size_t align)
1165 {
1166 	return nvme_malloc_node(size, align, NVME_NODE_ID_ANY);
1167 }
1168 
1169 /**
1170  * @brief Allocate zero'ed memory
1171  *
1172  * @param size 		Size (in bytes) to be allocated
1173  * @param align 	Memory alignment constraint
1174  *
1175  * @return The address of the allocated memory on success and NULL on error
1176  *
1177  * See @nvme_zmalloc_node.
1178  */
1179 static inline void *nvme_zmalloc(size_t size, size_t align)
1180 {
1181 	return nvme_zmalloc_node(size, align, NVME_NODE_ID_ANY);
1182 }
1183 
1184 /**
1185  * @brief Allocate zero'ed array memory
1186  *
1187  * @param num 		Size of the array
1188  * @param size 		Size (in bytes) of the array elements
1189  * @param align 	Memory alignment constraint
1190  *
1191  * See @nvme_calloc_node.
1192  */
1193 static inline void *nvme_calloc(size_t num, size_t size, size_t align)
1194 {
1195 	return nvme_calloc_node(num, size, align, NVME_NODE_ID_ANY);
1196 }
1197 
1198 /**
1199  * @brief Free allocated memory
1200  *
1201  * @param addr	Address of the memory to free
1202  *
1203  * Free the memory at the specified address.
1204  * The address must be one that was returned by one of the
1205  * allocation function nvme_malloc_node(), nvme_zmalloc_node()
1206  * or nvme_calloc_node().
1207  *
1208  * If the pointer is NULL, the function does nothing.
1209  */
1210 extern void nvme_free(void *addr);
1211 
1212 /**
1213  * Structure to hold memory statistics.
1214  */
1215 struct nvme_mem_stats {
1216 
1217 	/**
1218 	 * Number of huge pages allocated.
1219 	 */
1220 	size_t		nr_hugepages;
1221 
1222 	/**
1223 	 * Total bytes in memory pools.
1224 	 */
1225 	size_t		total_bytes;
1226 
1227 	/**
1228 	 * Total free bytes in memory pools.
1229 	 */
1230 	size_t		free_bytes;
1231 
1232 };
1233 
1234 /**
1235  * @brief Get memory usage information
1236  *
1237  * @param stats		Memory usage inforamtion structure to fill
1238  * @param node_id	NUMA node ID or NVME_NVME_NODE_ID_ANY
1239  *
1240  * Return memory usage statistics for the specified
1241  * NUMA node (CPU socket) or global memory usage if node_id
1242  * is NVME_NODE_ID_ANY.
1243  *
1244  * @return 0 on success and a negative error code on failure.
1245  */
1246 extern int nvme_memstat(struct nvme_mem_stats *stats,
1247 			unsigned int node_id);
1248 
1249 /**
1250  * @}
1251  */
1252 
1253 #ifdef __cplusplus
1254 }
1255 #endif
1256 
1257 #endif /* __LIBNVME_H__ */
1258