From e9bd17c305510fa7a76e4d34b372b92ef7e57d85 Mon Sep 17 00:00:00 2001 From: Chuck Horkin Date: Mon, 3 Mar 2025 16:11:08 -0800 Subject: [PATCH] mi: Introduce asynchronous event message handling Added new functionality to mi.c and mi-mctp.c to handle AEMs. Included new example mi-mctp-ae.c for usage. Added tests for mi-mctp. Signed-off-by: Chuck Horkin --- examples/meson.build | 7 + examples/mi-mctp-ae.c | 181 ++++++++++ src/libnvme-mi.map | 6 + src/nvme/mi-mctp.c | 251 +++++++++++++- src/nvme/mi.c | 750 ++++++++++++++++++++++++++++++++++++++++-- src/nvme/mi.h | 465 +++++++++++++++++++++++++- src/nvme/private.h | 19 +- test/mi-mctp.c | 720 +++++++++++++++++++++++++++++++++++++++- test/mi.c | 4 + 9 files changed, 2355 insertions(+), 48 deletions(-) create mode 100644 examples/mi-mctp-ae.c diff --git a/examples/meson.build b/examples/meson.build index 26d7b2159..fe7f6349b 100644 --- a/examples/meson.build +++ b/examples/meson.build @@ -47,6 +47,13 @@ executable( include_directories: [incdir, internal_incdir] ) +executable( + 'mi-mctp-ae', + ['mi-mctp-ae.c'], + dependencies: libnvme_mi_dep, + include_directories: [incdir, internal_incdir] +) + if libdbus_dep.found() executable( 'mi-conf', diff --git a/examples/mi-mctp-ae.c b/examples/mi-mctp-ae.c new file mode 100644 index 000000000..d73a35987 --- /dev/null +++ b/examples/mi-mctp-ae.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/** + * This file is part of libnvme. 
+ */ + +/** + * mi-mctp-ae: open a MI connection over MCTP, supporting asynchronous event messages + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include // for usleep + +#include +#include + +#include +#include +#include + +struct app_userdata { + uint32_t count; +}; + +static void print_byte_array(void *data, size_t len) +{ + uint8_t *byte_data = (uint8_t *)data; + + for (size_t i = 0; i < len; ++i) + printf("%02X ", byte_data[i]); + printf("\n"); +} + +static void print_event_info(struct nvme_mi_event *event) +{ + printf("aeoi: %02X\n", event->aeoi); + printf("aeocidi: %04X\n", event->aeocidi); + printf("aessi: %02X\n", event->aessi); + + if (event->spec_info_len && event->spec_info) { + printf("specific_info: "); + print_byte_array(event->spec_info, event->spec_info_len); + } + + if (event->vend_spec_info_len && event->vend_spec_info) { + printf("vendor_specific_info: "); + print_byte_array(event->vend_spec_info, event->vend_spec_info_len); + } +} + +enum nvme_mi_aem_handler_next_action aem_handler(nvme_mi_ep_t ep, size_t num_events, void *userdata) +{ + struct app_userdata *data = (struct app_userdata *) userdata; + + data->count++; + + printf("Received notification #%d with %zu events:\n", data->count, num_events); + for (int i = 0; i < num_events; i++) { + struct nvme_mi_event *event = nvme_mi_aem_get_next_event(ep); + + if (event == NULL) + printf("Unexpected NULL event\n"); + else { + printf("Event:\n"); + print_event_info(event); + printf("\n"); + } + } + + return NVME_MI_AEM_HNA_ACK; +} + +int main(int argc, char **argv) +{ + nvme_root_t root; + nvme_mi_ep_t ep; + uint8_t eid = 0; + int rc = 0, net = 0; + struct nvme_mi_aem_config aem_config = {0}; + struct nvme_mi_aem_enabled_map enabled_map = {0}; + struct app_userdata data = {0}; + + const uint8_t AEM_FD_INDEX = 0; + const uint8_t STD_IN_FD_INDEX = 1; + + if (argc == 4) { + net = atoi(argv[1]); + eid = atoi(argv[2]) & 0xff; + argv += 2; + argc -= 2; + + int 
event_count = argc - 1; + + for (int i = 0; i < event_count; i++) { + int event = atoi(argv[1+i]); + + aem_config.enabled_map.enabled[event] = true; + } + } else { + fprintf(stderr, + "usage: %s [AE #s separated by spaces]\n", + argv[0]); + return EXIT_FAILURE; + } + + root = nvme_mi_create_root(stderr, DEFAULT_LOGLEVEL); + if (!root) + err(EXIT_FAILURE, "can't create NVMe root"); + + ep = nvme_mi_open_mctp(root, net, eid); + if (!ep) + err(EXIT_FAILURE, "can't open MCTP endpoint %d:%d", net, eid); + + aem_config.aem_handler = aem_handler; + aem_config.aemd = 1; + aem_config.aerd = 100; + + rc = nvme_mi_aem_get_enabled(ep, &enabled_map); + if (rc) + err(EXIT_FAILURE, "Can't query enabled aems:%d", rc); + printf("The following events were previously enabled:\n"); + for (int i = 0; i < 256; i++) { + if (enabled_map.enabled[i]) + printf("Event: %d\n", i); + } + + rc = nvme_mi_aem_enable(ep, &aem_config, &data); + if (rc && errno == EOPNOTSUPP) + errx(EXIT_FAILURE, "MCTP Peer-Bind is required for AEM"); + else if (rc) + err(EXIT_FAILURE, "Can't enable aem:%d", rc); + + rc = nvme_mi_aem_get_enabled(ep, &enabled_map); + if (rc) + err(EXIT_FAILURE, "Can't query enabled aems:%d", rc); + + struct pollfd fds[2]; + + fds[AEM_FD_INDEX].fd = nvme_mi_aem_get_fd(ep); + if (fds[AEM_FD_INDEX].fd < 0) + errx(EXIT_FAILURE, "Can't get aem fd"); + + fds[STD_IN_FD_INDEX].fd = STDIN_FILENO; + + fds[AEM_FD_INDEX].events = POLLIN; + fds[STD_IN_FD_INDEX].events = POLLIN; + + printf("Press any key to exit\n"); + while (1) { + rc = poll(fds, 2, -1); + + if (rc == -1) { + warn("poll"); + break; + } + //Time to do the work + if (fds[AEM_FD_INDEX].revents & POLLIN) { + rc = nvme_mi_aem_process(ep, &data); + if (rc) + err(EXIT_FAILURE, + "nvme_mi_aem_process failed with:%d", rc); + } + if (fds[STD_IN_FD_INDEX].revents & POLLIN) + break;//we are done + } + + //Cleanup + nvme_mi_aem_disable(ep); + nvme_mi_close(ep); + nvme_mi_free_root(root); + + return rc ? 
EXIT_FAILURE : EXIT_SUCCESS; +} + + diff --git a/src/libnvme-mi.map b/src/libnvme-mi.map index cbd1285a9..db679413e 100644 --- a/src/libnvme-mi.map +++ b/src/libnvme-mi.map @@ -1,6 +1,12 @@ # SPDX-License-Identifier: LGPL-2.1-or-later LIBNVME_MI_UNRELEASED { global: + nvme_mi_aem_disable; + nvme_mi_aem_enable; + nvme_mi_aem_get_enabled; + nvme_mi_aem_get_fd; + nvme_mi_aem_get_next_event; + nvme_mi_aem_process; nvme_mi_set_csi; nvme_mi_submit_entry; nvme_mi_submit_exit; diff --git a/src/nvme/mi-mctp.c b/src/nvme/mi-mctp.c index a4124f04d..308e061b0 100644 --- a/src/nvme/mi-mctp.c +++ b/src/nvme/mi-mctp.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -84,6 +85,9 @@ struct nvme_mi_transport_mctp { int sd; void *resp_buf; size_t resp_buf_size; + int sd_aem; + void *resp_buf_aem; + size_t resp_buf_aem_size; }; static int ioctl_tag(int sd, unsigned long req, struct mctp_ioc_tag_ctl *ctl) @@ -91,8 +95,48 @@ static int ioctl_tag(int sd, unsigned long req, struct mctp_ioc_tag_ctl *ctl) return ioctl(sd, req, ctl); } +static int nvme_mi_msg_socket(void) +{ + return socket(AF_MCTP, SOCK_DGRAM, 0); +} + +static int nvme_mi_aem_socket(__u8 eid, unsigned int network) +{ + struct sockaddr_mctp local_addr = {0}, remote_addr = {0}; + int sd, rc; + + sd = socket(AF_MCTP, SOCK_DGRAM | SOCK_NONBLOCK, 0); + if (sd < 0) + return sd; + + remote_addr.smctp_family = AF_MCTP; + remote_addr.smctp_network = network; + remote_addr.smctp_addr.s_addr = eid; + remote_addr.smctp_type = MCTP_TYPE_NVME; + /* connect() will specify a remote EID for the upcoming bind() */ + rc = connect(sd, (struct sockaddr *)&remote_addr, sizeof(remote_addr)); + if (rc) + goto err_close; + + local_addr.smctp_family = AF_MCTP; + local_addr.smctp_network = network; + local_addr.smctp_addr.s_addr = MCTP_ADDR_ANY; + local_addr.smctp_type = MCTP_TYPE_NVME; + + rc = bind(sd, (struct sockaddr *)&local_addr, sizeof(local_addr)); + if (rc) + goto err_close; + + return sd; + +err_close: + 
close(sd); + return -1; +} + static struct __mi_mctp_socket_ops ops = { - socket, + nvme_mi_msg_socket, + nvme_mi_aem_socket, sendmsg, recvmsg, poll, @@ -220,6 +264,168 @@ static bool nvme_mi_mctp_resp_is_mpr(void *buf, size_t len, return true; } +static int nvme_mi_mctp_aem_fd(struct nvme_mi_ep *ep) +{ + struct nvme_mi_transport_mctp *mctp; + + if (ep->transport != &nvme_mi_transport_mctp) { + errno = EINVAL; + return -1; + } + + mctp = ep->transport_data; + return mctp->sd_aem; +} + +static int nvme_mi_mctp_aem_purge(struct nvme_mi_ep *ep) +{ + struct nvme_mi_transport_mctp *mctp = ep->transport_data; + struct msghdr msg = {0}; + struct iovec iov; + char buffer; + + iov.iov_base = &buffer; + iov.iov_len = sizeof(buffer); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + // Read until there is no more data + while (ops.recvmsg(mctp->sd_aem, &msg, MSG_TRUNC) > 0) + ; + + return 0; +} + + +static int nvme_mi_mctp_aem_read(struct nvme_mi_ep *ep, + struct nvme_mi_resp *resp) +{ + ssize_t len, resp_len, resp_hdr_len, resp_data_len; + struct sockaddr_mctp src_addr = { 0 }; + struct nvme_mi_transport_mctp *mctp; + struct iovec resp_iov[1]; + struct msghdr resp_msg; + int rc, errno_save; + __le32 mic; + + if (ep->transport != &nvme_mi_transport_mctp) { + errno = EINVAL; + return -1; + } + + /* we need enough space for at least a generic (/error) response */ + if (resp->hdr_len < sizeof(struct nvme_mi_msg_hdr)) { + errno = EINVAL; + return -1; + } + + mctp = ep->transport_data; + + resp_len = resp->hdr_len + resp->data_len + sizeof(mic); + if (resp_len > mctp->resp_buf_aem_size) { + void *tmp = realloc(mctp->resp_buf_aem, resp_len); + + if (!tmp) { + errno_save = errno; + nvme_msg(ep->root, LOG_ERR, + "Failure allocating response buffer: %m\n"); + errno = errno_save; + rc = -1; + goto out; + } + mctp->resp_buf_aem = tmp; + mctp->resp_buf_aem_size = resp_len; + } + + /* offset by one: the MCTP message type is excluded from the buffer */ + resp_iov[0].iov_base = 
mctp->resp_buf_aem + 1; + resp_iov[0].iov_len = resp_len - 1; + + memset(&resp_msg, 0, sizeof(resp_msg)); + resp_msg.msg_iov = resp_iov; + resp_msg.msg_iovlen = 1; + resp_msg.msg_name = &src_addr; + resp_msg.msg_namelen = sizeof(src_addr); + + rc = -1; + len = ops.recvmsg(mctp->sd_aem, &resp_msg, MSG_DONTWAIT); + + if (len < 0) { + if (errno == EAGAIN) + goto out; + + errno_save = errno; + nvme_msg(ep->root, LOG_ERR, + "Failure receiving MCTP message: %m\n"); + errno = errno_save; + goto out; + } + + + if (len == 0) { + nvme_msg(ep->root, LOG_WARNING, "No data from MCTP endpoint\n"); + errno = EIO; + goto out; + } + + if (resp_msg.msg_namelen < sizeof(src_addr)) { + nvme_msg(ep->root, LOG_WARNING, "Unexpected src address length\n"); + errno = EIO; + goto out; + } + + if (mctp->eid != src_addr.smctp_addr.s_addr) { + //This is unexpected if the socket is bound to the endpoint + errno = EPROTO; + goto out; + } + + /* Re-add the type byte, so we can work on aligned lengths from here */ + ((uint8_t *)mctp->resp_buf_aem)[0] = MCTP_TYPE_NVME | MCTP_TYPE_MIC; + len += 1; + + /* The smallest response data is 8 bytes: generic 4-byte message header + * plus four bytes of error data (excluding MIC). Ensure we have enough. + */ + if (len < 8 + sizeof(mic)) { + nvme_msg(ep->root, LOG_ERR, + "Invalid MCTP response: too short (%zd bytes, needed %zd)\n", + len, 8 + sizeof(mic)); + errno = EPROTO; + goto out; + } + + /* Start unpacking the linear resp buffer into the split header + data + * + MIC. 
+ */ + + /* MIC is always at the tail */ + memcpy(&mic, mctp->resp_buf_aem + len - sizeof(mic), sizeof(mic)); + len -= 4; + + /* we expect resp->hdr_len bytes, but we may have less */ + resp_hdr_len = resp->hdr_len; + if (resp_hdr_len > len) + resp_hdr_len = len; + memcpy(resp->hdr, mctp->resp_buf_aem, resp_hdr_len); + resp->hdr_len = resp_hdr_len; + len -= resp_hdr_len; + + /* any remaining bytes are the data payload */ + resp_data_len = resp->data_len; + if (resp_data_len > len) + resp_data_len = len; + memcpy(resp->data, mctp->resp_buf_aem + resp_hdr_len, resp_data_len); + resp->data_len = resp_data_len; + + resp->mic = le32_to_cpu(mic); + + rc = 0; + +out: + return rc; +} + static int nvme_mi_mctp_submit(struct nvme_mi_ep *ep, struct nvme_mi_req *req, struct nvme_mi_resp *resp) @@ -433,7 +639,10 @@ static void nvme_mi_mctp_close(struct nvme_mi_ep *ep) mctp = ep->transport_data; close(mctp->sd); + close(mctp->sd_aem); + free(ep->aem_ctx); free(mctp->resp_buf); + free(mctp->resp_buf_aem); free(ep->transport_data); } @@ -459,8 +668,34 @@ static const struct nvme_mi_transport nvme_mi_transport_mctp = { .submit = nvme_mi_mctp_submit, .close = nvme_mi_mctp_close, .desc_ep = nvme_mi_mctp_desc_ep, + .aem_read = nvme_mi_mctp_aem_read, + .aem_fd = nvme_mi_mctp_aem_fd, + .aem_purge = nvme_mi_mctp_aem_purge, }; +int nvme_mi_aem_open(nvme_mi_ep_t ep) +{ + struct nvme_mi_transport_mctp *mctp; + + if (ep->transport != &nvme_mi_transport_mctp) { + errno = EINVAL; + return -1; + } + + mctp = ep->transport_data; + + //This doesn't have to be done multiple times + if (mctp->sd_aem >= 0) + return 0; + + mctp->sd_aem = ops.aem_socket(mctp->eid, mctp->net); + + if (mctp->sd_aem < 0) + return -1; + + return 0; +} + nvme_mi_ep_t nvme_mi_open_mctp(nvme_root_t root, unsigned int netid, __u8 eid) { struct nvme_mi_transport_mctp *mctp; @@ -479,6 +714,7 @@ nvme_mi_ep_t nvme_mi_open_mctp(nvme_root_t root, unsigned int netid, __u8 eid) memset(mctp, 0, sizeof(*mctp)); mctp->sd = -1; + 
mctp->sd_aem = -1; mctp->resp_buf_size = 4096; mctp->resp_buf = malloc(mctp->resp_buf_size); @@ -487,13 +723,20 @@ nvme_mi_ep_t nvme_mi_open_mctp(nvme_root_t root, unsigned int netid, __u8 eid) goto err_free_mctp; } + mctp->resp_buf_aem_size = 4096; + mctp->resp_buf_aem = malloc(mctp->resp_buf_aem_size); + if (!mctp->resp_buf_aem) { + errno_save = errno; + goto err_free_rspbuf; + } + mctp->net = netid; mctp->eid = eid; - mctp->sd = ops.socket(AF_MCTP, SOCK_DGRAM, 0); + mctp->sd = ops.msg_socket(); if (mctp->sd < 0) { errno_save = errno; - goto err_free_rspbuf; + goto err_free_aem_rspbuf; } ep->transport = &nvme_mi_transport_mctp; @@ -508,6 +751,8 @@ nvme_mi_ep_t nvme_mi_open_mctp(nvme_root_t root, unsigned int netid, __u8 eid) return ep; +err_free_aem_rspbuf: + free(mctp->resp_buf_aem); err_free_rspbuf: free(mctp->resp_buf); err_free_mctp: diff --git a/src/nvme/mi.c b/src/nvme/mi.c index aed0192ef..93c85317c 100644 --- a/src/nvme/mi.c +++ b/src/nvme/mi.c @@ -21,6 +21,24 @@ #include "mi.h" #include "private.h" +#define NUM_ENABLES (256u) + +_Static_assert(sizeof(struct nvme_mi_aem_supported_list_header) == 5, + "size_of_nvme_mi_aem_supported_list_header_is_not_5_bytes"); +_Static_assert(sizeof(struct nvme_mi_aem_supported_item) == 3, + "sizeof_nvme_mi_aem_supported_item_is_not_3_bytes"); +_Static_assert(sizeof(struct nvme_mi_aem_enable_item) == 3, + "size_of_ae_enable_item_t_is_not_3_bytes"); +_Static_assert(sizeof(struct nvme_mi_aem_enable_list_header) == 5, + "size_of_nvme_mi_aem_enable_list_header_is_not_5_bytes"); +_Static_assert(sizeof(struct nvme_mi_aem_occ_data) == 9, + "size_of_nvme_mi_aem_occ_data_is_not_9_bytes"); +_Static_assert(sizeof(struct nvme_mi_aem_occ_list_hdr) == 7, + "size_of_nvme_mi_aem_occ_list_hdr_is_not_7_bytes"); + +static int nvme_mi_get_async_message(nvme_mi_ep_t ep, + struct nvme_mi_aem_msg *aem_msg, size_t *aem_msg_len); + static const int default_timeout = 1000; /* milliseconds; endpoints may override */ @@ -424,6 +442,68 @@ 
__attribute__((weak)) void nvme_mi_submit_exit(__u8 type, const struct nvme_mi_m size_t hdr_len, const void *data, size_t data_len, void *user_data) { } + +int nvme_mi_async_read(nvme_mi_ep_t ep, struct nvme_mi_resp *resp) +{ + if (nvme_mi_ep_has_quirk(ep, NVME_QUIRK_MIN_INTER_COMMAND_TIME)) + nvme_mi_record_resp_time(ep); + + int rc = ep->transport->aem_read(ep, resp); + + if (rc && errno == EWOULDBLOCK) { + //Sometimes we might get owned tag data from the wrong endpoint. + //This isn't an error, but we shouldn't process it here + resp->data_len = 0;//No data to process + return 0; + } else if (rc) { + nvme_msg(ep->root, LOG_INFO, "transport failure\n"); + return rc; + } + + if (ep->transport->mic_enabled) { + rc = nvme_mi_verify_resp_mic(resp); + if (rc) { + nvme_msg(ep->root, LOG_WARNING, "crc mismatch\n"); + errno = EBADMSG; + return -1; + } + } + + //TODO: There's a bunch of overlap with the nvme_mi_submit. Maybe we make common helpers + + /* basic response checks */ + if (resp->hdr_len < sizeof(struct nvme_mi_msg_hdr)) { + nvme_msg(ep->root, LOG_DEBUG, + "Bad response header len: %zd\n", resp->hdr_len); + errno = EPROTO; + return -1; + } + + if (resp->hdr->type != NVME_MI_MSGTYPE_NVME) { + nvme_msg(ep->root, LOG_DEBUG, + "Invalid message type 0x%02x\n", resp->hdr->type); + errno = EPROTO; + return -1; + } + + if (!(resp->hdr->nmp & ~(NVME_MI_ROR_REQ << 7))) { + nvme_msg(ep->root, LOG_DEBUG, + "ROR value in response indicates a response\n"); + errno = EIO; + return -1; + } + + if (!(resp->hdr->nmp & (NVME_MI_MT_AE << 3))) { + nvme_msg(ep->root, LOG_DEBUG, + "NMIMT does not indicate AEM\n"); + resp->data_len = 0;//No data to process + return 0; + } + + return 0; +} + + int nvme_mi_submit(nvme_mi_ep_t ep, struct nvme_mi_req *req, struct nvme_mi_resp *resp) { @@ -443,11 +523,6 @@ int nvme_mi_submit(nvme_mi_ep_t ep, struct nvme_mi_req *req, return -1; } - if (req->data_len & 0x3) { - errno = EINVAL; - return -1; - } - if (resp->hdr_len < sizeof(struct 
nvme_mi_msg_hdr)) { errno = EINVAL; return -1; @@ -659,6 +734,28 @@ static int nvme_mi_control_parse_status(struct nvme_mi_resp *resp, __u16 *cpsr) return control_resp->status; } +static int nvme_mi_get_async_message(nvme_mi_ep_t ep, + struct nvme_mi_aem_msg *aem_msg, + size_t *aem_msg_len) +{ + struct nvme_mi_resp resp; + + memset(&resp, 0, sizeof(resp)); + resp.hdr = &aem_msg->hdr; + resp.hdr_len = sizeof(struct nvme_mi_msg_hdr); + resp.data = &aem_msg->occ_list_hdr; + resp.data_len = *aem_msg_len; + + int rc = nvme_mi_async_read(ep, &resp); + + if (rc) + return rc; + + *aem_msg_len = resp.data_len; + return 0; +} + + int nvme_mi_admin_xfer(nvme_mi_ctrl_t ctrl, struct nvme_mi_admin_req_hdr *admin_req, size_t req_data_size, @@ -1809,8 +1906,9 @@ int nvme_mi_mi_subsystem_health_status_poll(nvme_mi_ep_t ep, bool clear, return 0; } -int nvme_mi_mi_config_get(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1, - __u32 *nmresp) +int nvme_mi_mi_config_set_get_ex(nvme_mi_ep_t ep, __u8 opcode, __u32 dw0, + __u32 dw1, void *data_out, size_t data_out_len, + void *data_in, size_t *data_in_len, __u32 *nmresp) { struct nvme_mi_mi_resp_hdr resp_hdr; struct nvme_mi_mi_req_hdr req_hdr; @@ -1818,13 +1916,16 @@ int nvme_mi_mi_config_get(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1, struct nvme_mi_req req; int rc; - nvme_mi_mi_init_req(ep, &req, &req_hdr, dw0, - nvme_mi_mi_opcode_configuration_get); + nvme_mi_mi_init_req(ep, &req, &req_hdr, dw0, opcode); req_hdr.cdw1 = cpu_to_le32(dw1); + req.data = data_out; + req.data_len = data_out_len; memset(&resp, 0, sizeof(resp)); resp.hdr = &resp_hdr.hdr; resp.hdr_len = sizeof(resp_hdr); + resp.data = data_in; + resp.data_len = *data_in_len; rc = nvme_mi_submit(ep, &req, &resp); if (rc) @@ -1833,39 +1934,122 @@ int nvme_mi_mi_config_get(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1, if (resp_hdr.status) return resp_hdr.status; - *nmresp = resp_hdr.nmresp[0] | - resp_hdr.nmresp[1] << 8 | - resp_hdr.nmresp[2] << 16; + *data_in_len = resp.data_len; + + if (nmresp) { + 
*nmresp = resp_hdr.nmresp[0] | + resp_hdr.nmresp[1] << 8 | + resp_hdr.nmresp[2] << 16; + } return 0; } +int nvme_mi_mi_config_get(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1, + __u32 *nmresp) +{ + size_t data_in_len = 0; + + return nvme_mi_mi_config_set_get_ex(ep, + nvme_mi_mi_opcode_configuration_get, + dw0, + dw1, + NULL, + 0, + NULL, + &data_in_len, + nmresp); +} + int nvme_mi_mi_config_set(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1) { - struct nvme_mi_mi_resp_hdr resp_hdr; - struct nvme_mi_mi_req_hdr req_hdr; - struct nvme_mi_resp resp; - struct nvme_mi_req req; - int rc; + size_t data_in_len = 0; - nvme_mi_mi_init_req(ep, &req, &req_hdr, dw0, - nvme_mi_mi_opcode_configuration_set); - req_hdr.cdw1 = cpu_to_le32(dw1); + return nvme_mi_mi_config_set_get_ex(ep, + nvme_mi_mi_opcode_configuration_set, + dw0, + dw1, + NULL, + 0, + NULL, + &data_in_len, + NULL); +} - memset(&resp, 0, sizeof(resp)); - resp.hdr = &resp_hdr.hdr; - resp.hdr_len = sizeof(resp_hdr); +int nvme_mi_mi_config_get_async_event(nvme_mi_ep_t ep, + __u8 *aeelver, + struct nvme_mi_aem_supported_list *list, + size_t *list_num_bytes) +{ + + __u32 dw0 = NVME_MI_CONFIG_AE; + __u32 aeelvertemp = 0; + + int rc = nvme_mi_mi_config_set_get_ex(ep, + nvme_mi_mi_opcode_configuration_get, + dw0, + 0, + NULL, + 0, + list, + list_num_bytes, + &aeelvertemp); - rc = nvme_mi_submit(ep, &req, &resp); if (rc) return rc; - if (resp_hdr.status) - return resp_hdr.status; + *aeelver = 0x000F & aeelvertemp; return 0; } +int nvme_mi_mi_config_set_async_event(nvme_mi_ep_t ep, + bool envfa, + bool empfa, + bool encfa, + __u8 aemd, + __u8 aerd, + struct nvme_mi_aem_enable_list *enable_list, + size_t enable_list_size, + struct nvme_mi_aem_occ_list_hdr *occ_list, + size_t *occ_list_size) +{ + + __u32 dw0 = ((__u32)envfa << 26) | + ((__u32)empfa << 25) | + ((__u32)encfa << 24) | + ((__u32)aemd << 16) | + ((__u16) aerd << 8) | NVME_MI_CONFIG_AE; + + //Basic checks here on lengths + if (enable_list_size < sizeof(struct nvme_mi_aem_enable_list) 
|| + (sizeof(struct nvme_mi_aem_enable_list) + + enable_list->hdr.numaee * sizeof(struct nvme_mi_aem_enable_item) + > enable_list_size) + ) { + errno = EINVAL; + return -1; + } + + //Some very baseic header checks + if (enable_list->hdr.aeelhl != sizeof(struct nvme_mi_aem_enable_list_header) || + enable_list->hdr.aeelver != 0) { + errno = EINVAL; + return -1; + } + + return nvme_mi_mi_config_set_get_ex(ep, + nvme_mi_mi_opcode_configuration_set, + dw0, + 0, + enable_list, + enable_list_size, + occ_list, + occ_list_size, + NULL); +} + + void nvme_mi_close(nvme_mi_ep_t ep) { struct nvme_mi_ctrl *ctrl, *tmp; @@ -1971,3 +2155,517 @@ const char *nvme_mi_status_to_string(int status) return s; } + +bool nvme_mi_aem_aeei_get_aee(__le16 aeei) +{ + return !!(le16_to_cpu(aeei) & 0x8000); +} + +__u8 nvme_mi_aem_aeei_get_aeeid(__le16 aeei) +{ + return (le16_to_cpu(aeei) & 0xFF); +} + +void nvme_mi_aem_aeei_set_aeeid(struct nvme_mi_aem_enable_item *item, __u8 aeeid) +{ + __u16 temp = le16_to_cpu(item->aeei); + + item->aeei = cpu_to_le16((temp & 0xFF00) | aeeid); +} + +void nvme_mi_aem_aeei_set_aee(struct nvme_mi_aem_enable_item *item, bool enabled) +{ + __u16 temp = le16_to_cpu(item->aeei); + __u8 bit = (enabled) ? 1 : 0; + + item->aeei = cpu_to_le16((temp & 0xFF) | (bit << 15)); +} + +bool nvme_mi_aem_aesi_get_aese(__le16 aesi) +{ + return !!(le16_to_cpu(aesi) & 0x8000); +} + +__u8 nvme_mi_aem_aesi_get_aesid(__le16 aesi) +{ + return le16_to_cpu(aesi) & 0xff; +} + +void nvme_mi_aem_aesi_set_aesid(struct nvme_mi_aem_supported_item *item, __u8 aesid) +{ + __u16 temp = le16_to_cpu(item->aesi); + + item->aesi = cpu_to_le16((temp & 0xFF00) | aesid); +} + +void nvme_mi_aem_aesi_set_aee(struct nvme_mi_aem_supported_item *item, bool enabled) +{ + __u16 temp = le16_to_cpu(item->aesi); + __u8 bit = (enabled) ? 
1 : 0; + + item->aesi = cpu_to_le16((temp & 0xFF) | (bit << 15)); +} + +__u8 nvme_mi_aem_aemti_get_aemgn(__u8 aemti) +{ + return aemti >> 3 & 0x1f; +} + +__u32 nvme_mi_aem_aeolli_get_aeoltl(__u8 *aeolli) +{ + //First 23-bits contain the aeoltl + __u32 aeoltl = aeolli[0] | (aeolli[1] << 8) | (aeolli[2] << 16); + + return aeoltl & 0x7FFFFF; +} + +void nvme_mi_aem_aeolli_set_aeoltl(struct nvme_mi_aem_occ_list_hdr *hdr, __u32 aeoltl) +{ + hdr->aeolli[0] = aeoltl & 0xFF; + hdr->aeolli[1] = (aeoltl >> 8) & 0xFF; + hdr->aeolli[2] = (hdr->aeolli[2] & 0b10000000) | ((aeoltl >> 16) & 0x7F); +} + +static int validate_enabled_list(struct nvme_mi_aem_supported_list *list, size_t len) +{ + if (list->hdr.aeslver != 0) { + errno = EPROTO; + return -1; + } + if (list->hdr.aeslhl != sizeof(struct nvme_mi_aem_supported_list)) { + errno = EPROTO; + return -1; + } + if (list->hdr.aest > len || + list->hdr.aest != + list->hdr.aeslhl + list->hdr.numaes * sizeof(struct nvme_mi_aem_supported_item)) { + errno = EPROTO; + return -1; + } + return 0; +} +static int validate_occ_list_update_ctx( + struct nvme_mi_aem_occ_list_hdr *occ_header, + size_t len, + struct nvme_mi_aem_ctx *ctx, + bool check_generation_num) +{ + //Make sure header fields have valid data + if (len < sizeof(*occ_header)) { + errno = EPROTO; + goto err_cleanup; + } else if (occ_header->aelver != 0 || + occ_header->aeolhl != sizeof(*occ_header)) { + //Make sure header is the right version and length + errno = EPROTO; + goto err_cleanup; + } else if (nvme_mi_aem_aeolli_get_aeoltl(occ_header->aeolli) > len) { + //Full length is bigger than the data that was received + errno = EPROTO; + goto err_cleanup; + } else if (check_generation_num && + ctx->last_generation_num == + (int) nvme_mi_aem_aemti_get_aemgn(occ_header->aemti)) { + //This is a duplicate and shouldn't be parsed. 
+ //Let's just act like there's no updates + occ_header->numaeo = 0; + } else if (check_generation_num) { + ctx->last_generation_num = + nvme_mi_aem_aemti_get_aemgn(occ_header->aemti); + } + + //Header is fine. Let's go through the data + //First, we should update our context appropriately + ctx->occ_header = occ_header; + + //Data starts after header + ctx->list_current = (struct nvme_mi_aem_occ_data *) (occ_header + 1); + ctx->list_current_index = 0; + ctx->list_start = ctx->list_current; + + struct nvme_mi_aem_occ_data *current = ctx->list_current; + size_t bytes_so_far = ctx->occ_header->aeolhl; + + for (int i = 0; i < occ_header->numaeo; i++) { + //Validate this item + if (current->aelhlen != sizeof(*current)) { + errno = EPROTO; + goto err_cleanup; + } else if (!ctx->callbacks.enabled_map.enabled[current->aeoui.aeoi]) { + //This is unexpected as this AE shouldn't be enabled + errno = EPROTO; + goto err_cleanup; + } + + //Okay, check data lengths, including this header and the specific data(s) + uint32_t offset = sizeof(*current) + current->aeosil + current->aeovsil; + + bytes_so_far += offset; + if (bytes_so_far > nvme_mi_aem_aeolli_get_aeoltl(occ_header->aeolli)) { + errno = EPROTO; + goto err_cleanup; + } + + current = (struct nvme_mi_aem_occ_data *)((uint8_t *)current + offset); + } + + return 0; + +err_cleanup: + return -1; +} + +int nvme_mi_aem_get_fd(nvme_mi_ep_t ep) +{ + if (!ep || !ep->aem_ctx || !ep->transport || !ep->transport->aem_fd) + return -1; + + return ep->transport->aem_fd(ep); +} + +static void reset_list_info(struct nvme_mi_aem_ctx *ctx) +{ + //Reset context information + ctx->list_current_index = -1; + ctx->list_start = NULL; + ctx->list_current = NULL; + ctx->occ_header = NULL; +} + +static int aem_sync(nvme_mi_ep_t ep, + bool envfa, + bool empfa, + bool encfa, + __u8 aemd, + __u8 aerd, + struct nvme_mi_aem_enable_item *items, + __u8 num_items, + struct nvme_mi_aem_occ_list_hdr *resp, + size_t *resp_len +) +{ + size_t msg_len = + 
sizeof(struct nvme_mi_aem_enable_list_header) + + num_items * sizeof(struct nvme_mi_aem_enable_item); + + struct nvme_mi_aem_enable_list_header *request = malloc(msg_len); + + if (!request) + return -1; + + request->aeelhl = sizeof(struct nvme_mi_aem_enable_list_header); + request->numaee = num_items; + request->aeelver = 0; + request->aeetl = msg_len; + + //Data follows header + struct nvme_mi_aem_enable_item *msg_items = (struct nvme_mi_aem_enable_item *)(request + 1); + + //Let's be explicit about what's enabled and what's not + for (int i = 0; i < num_items; i++) { + msg_items[i] = items[i]; + msg_items[i].aeel = sizeof(msg_items[i]); + } + + //Send it + int rc = nvme_mi_mi_config_set_async_event(ep, + envfa, + empfa, + encfa, + aemd, + aerd, + (struct nvme_mi_aem_enable_list *)request, + msg_len, + resp, + resp_len); + + free(request); + return rc; +} + +static int aem_disable_enabled(nvme_mi_ep_t ep) +{ + struct nvme_mi_aem_enabled_map already_enabled = {false}; + uint8_t response_buffer[4096] = {0}; + size_t response_len = sizeof(response_buffer); + struct nvme_mi_aem_occ_list_hdr *response = + (struct nvme_mi_aem_occ_list_hdr *)response_buffer; + + // First, let's figure out if anything is already enabled that we need to + // disable + int rc = nvme_mi_aem_get_enabled(ep, &already_enabled); + + if (rc) + return rc; + + int sync_data_count = 0; + + //Add the enabled items to the list of things to disable + struct nvme_mi_aem_enable_item sync_data[NUM_ENABLES] = {0}; + + for (int i = 0; i < NUM_ENABLES; i++) { + if (already_enabled.enabled[i]) { + nvme_mi_aem_aeei_set_aeeid(&sync_data[sync_data_count], i); + nvme_mi_aem_aeei_set_aee(&sync_data[sync_data_count], false); + sync_data_count++; + } + } + + rc = aem_sync(ep, false, false, false, 1, 0, + sync_data, sync_data_count, response, &response_len); + + if (rc) + return rc; + + //Now, allow a purge of the aem fd because we could have + //received some events during this process + rc = 
ep->transport->aem_purge(ep); + + return rc; +} + +int nvme_mi_aem_enable(nvme_mi_ep_t ep, + struct nvme_mi_aem_config *config, + void *userdata) +{ + if (!ep || !config || !config->aem_handler) + return -1; + + int rc = nvme_mi_aem_open(ep); + + if (rc < 0) + return rc; + + int sync_data_count = 0; + struct nvme_mi_aem_enable_item sync_data[NUM_ENABLES] = {0}; + + uint8_t response_buffer[4096] = {0}; + size_t response_len = sizeof(response_buffer); + struct nvme_mi_aem_occ_list_hdr *response = + (struct nvme_mi_aem_occ_list_hdr *)response_buffer; + + //It's possible we're already enabled + if (!ep->aem_ctx) + ep->aem_ctx = malloc(sizeof(*ep->aem_ctx)); + if (!(ep->aem_ctx)) + return -1; + + memset(ep->aem_ctx, 0, sizeof(*ep->aem_ctx)); + ep->aem_ctx->last_generation_num = -1;//Invalid + reset_list_info((ep->aem_ctx)); + ep->aem_ctx->callbacks = *config; + + rc = aem_disable_enabled(ep); + if (rc) + goto cleanup_ctx; + + //Now, let's do a fresh enable of what's asked + for (int i = 0; i < NUM_ENABLES; i++) { + if (config->enabled_map.enabled[i]) { + nvme_mi_aem_aeei_set_aeeid(&sync_data[sync_data_count], i); + nvme_mi_aem_aeei_set_aee(&sync_data[sync_data_count], true); + sync_data_count++; + } + } + + rc = aem_sync(ep, config->envfa, config->empfa, + config->encfa, config->aemd, config->aerd, + sync_data, sync_data_count, response, &response_len); + if (rc) + goto cleanup_ctx; + + //Parse the response and fire events + rc = validate_occ_list_update_ctx(response, + response_len, + ep->aem_ctx, + false /*generation # shouldn't matter*/); + if (rc) + goto cleanup_ctx; + + if (response->numaeo) { + //Return value unused here + config->aem_handler(ep, response->numaeo, userdata); + } + +cleanup_ctx: + // Clear these because they won't point to valid memory anymore + reset_list_info(ep->aem_ctx); + + if (rc) { + free(ep->aem_ctx); + ep->aem_ctx = NULL; + } + return rc; +} + +int nvme_mi_aem_get_enabled(nvme_mi_ep_t ep, + struct nvme_mi_aem_enabled_map *enabled_map) +{ + 
if (!ep || !enabled_map) + return -1; + + int rc = 0; + + unsigned char aeelver; + size_t ae_list_bytes = NUM_ENABLES * sizeof(struct nvme_mi_aem_supported_list); + struct nvme_mi_aem_supported_list *enabled_list = malloc(ae_list_bytes); + + if (!enabled_list) + return -1; + + rc = nvme_mi_mi_config_get_async_event( + ep, &aeelver, enabled_list, &ae_list_bytes); + if (rc) + goto cleanup; + + rc = validate_enabled_list(enabled_list, ae_list_bytes); + if (rc) + goto cleanup; + + memset(enabled_map, 0, sizeof(*enabled_map)); + + struct nvme_mi_aem_enable_item *items = + (struct nvme_mi_aem_enable_item *)(enabled_list + 1); + + for (int i = 0; i < enabled_list->hdr.numaes; i++) { + __u8 aeeid = nvme_mi_aem_aeei_get_aeeid(items[i].aeei); + bool enabled = nvme_mi_aem_aeei_get_aee(items[i].aeei); + + enabled_map->enabled[aeeid] = enabled; + } + +cleanup: + free(enabled_list); + return rc; +} + +int nvme_mi_aem_disable(nvme_mi_ep_t ep) +{ + if (!ep) + return -1; + + int rc = aem_disable_enabled(ep); + + if (ep->aem_ctx) + free(ep->aem_ctx); + ep->aem_ctx = NULL; + + return rc; +} + +/*When inside a aem_handler, call with the aem_ctx and struct will be populated with next + *event information. Will return NULL when end of parsing (or error) is occurred. + *spec_info and vend_spec_info must be copied to persist as they will not be valid after + *the aem_handler has returned. 
+ */ +struct nvme_mi_event *nvme_mi_aem_get_next_event(nvme_mi_ep_t ep) +{ + if (!ep || !ep->aem_ctx || + !ep->aem_ctx->list_current || + ep->aem_ctx->list_current_index == -1 || + !ep->aem_ctx->occ_header) { + return NULL; + } + + if (ep->aem_ctx->occ_header->numaeo <= ep->aem_ctx->list_current_index) + return NULL; + + struct nvme_mi_aem_ctx *aem_ctx = ep->aem_ctx; + struct nvme_mi_aem_occ_data *current = aem_ctx->list_current; + + aem_ctx->event.aeoi = current->aeoui.aeoi; + aem_ctx->event.aessi = current->aeoui.aessi; + aem_ctx->event.aeocidi = current->aeoui.aeocidi; + aem_ctx->event.spec_info_len = current->aeosil; + aem_ctx->event.vend_spec_info_len = current->aeovsil; + //Now the pointers + aem_ctx->event.spec_info = ((uint8_t *)current + current->aelhlen); + aem_ctx->event.vend_spec_info = + ((uint8_t *)aem_ctx->event.spec_info + aem_ctx->event.spec_info_len); + + //Let's grab the next item (if there is any). + aem_ctx->list_current_index++; + aem_ctx->list_current = + (struct nvme_mi_aem_occ_data *) + ((uint8_t *)aem_ctx->event.vend_spec_info + aem_ctx->event.vend_spec_info_len); + + return &aem_ctx->event; +} + +/* POLLIN has indicated events. This function reads and processes them. + * A callback will likely be invoked. + */ +int nvme_mi_aem_process(nvme_mi_ep_t ep, void *userdata) +{ + int rc = 0; + uint8_t response_buffer[4096]; + struct nvme_mi_aem_msg *response = (struct nvme_mi_aem_msg *)response_buffer; + size_t response_len = sizeof(response_buffer) - sizeof(struct nvme_mi_aem_msg); + + if (!ep || !ep->aem_ctx) + return -1; + + memset(response_buffer, 0, sizeof(response_buffer)); + + //Reset context information + reset_list_info(ep->aem_ctx); + + rc = nvme_mi_get_async_message(ep, response, &response_len); + if (rc) + goto cleanup; + + if (!response_len) { + //If no error and response length zero, we've likely received an owned + //tag message from a different endpoint than this path is responsible + //for monitoring. 
+ goto cleanup; + } + + //Parse the response and fire events + rc = validate_occ_list_update_ctx(&response->occ_list_hdr, + response_len, + ep->aem_ctx, + true /*Ensure unique generation number*/); + if (rc) + goto cleanup; + + if (response->occ_list_hdr.numaeo) { + enum nvme_mi_aem_handler_next_action action = + ep->aem_ctx->callbacks.aem_handler(ep, + response->occ_list_hdr.numaeo, + userdata); + + reset_list_info(ep->aem_ctx); + + if (action == NVME_MI_AEM_HNA_ACK) { + response_len = sizeof(response_buffer); + + rc = nvme_mi_aem_ack(ep, &response->occ_list_hdr, &response_len); + if (rc) + goto cleanup; + + //The Ack is not guaranteed to have data + if (response_len && response->occ_list_hdr.numaeo) { + rc = validate_occ_list_update_ctx(&response->occ_list_hdr, + response_len, + ep->aem_ctx, + true); + //Callbacks based on ack + if (rc == 0 && response->occ_list_hdr.numaeo) { + //Return value unused here + ep->aem_ctx->callbacks.aem_handler(ep, + response->occ_list_hdr.numaeo, + userdata); + } + } + } + } else { + //This is unexpected unless we have duplicates. But those shouldn't be acked + } + +cleanup: + reset_list_info(ep->aem_ctx); + return rc; +} + + diff --git a/src/nvme/mi.h b/src/nvme/mi.h index 985332f4a..ac75ec4cc 100644 --- a/src/nvme/mi.h +++ b/src/nvme/mi.h @@ -107,6 +107,7 @@ * @NVME_MI_MT_MI: NVMe-MI command * @NVME_MI_MT_ADMIN: NVMe Admin command * @NVME_MI_MT_PCIE: PCIe command + * @NVME_MI_MT_AE: Asynchronous Event * * Used as byte 1 of both request and response messages (NMIMT bits of NMP * byte). Not to be confused with the MCTP message type in byte 0. @@ -116,6 +117,7 @@ enum nvme_mi_message_type { NVME_MI_MT_MI = 1, NVME_MI_MT_ADMIN = 2, NVME_MI_MT_PCIE = 4, + NVME_MI_MT_AE = 5, }; /** @@ -281,7 +283,7 @@ enum nvme_mi_dtyp { * status poll. Only for Set ops. 
* @NVME_MI_CONFIG_MCTP_MTU: MCTP maximum transmission unit size of port * specified in dw 0 - * + * @NVME_MI_CONFIG_AE: Asynchronous Events configuration * Configuration parameters for the MI Get/Set Configuration commands. * * See &nvme_mi_mi_config_get() and &nvme_mi_config_set(). @@ -290,6 +292,7 @@ enum nvme_mi_config_id { NVME_MI_CONFIG_SMBUS_FREQ = 0x1, NVME_MI_CONFIG_HEALTH_STATUS_CHANGE = 0x2, NVME_MI_CONFIG_MCTP_MTU = 0x3, + NVME_MI_CONFIG_AE = 0x4, }; /** @@ -307,6 +310,229 @@ enum nvme_mi_config_smbus_freq { NVME_MI_CONFIG_SMBUS_FREQ_1MHz = 0x3, }; +/* Asynchronous Event Message definitions*/ + +/** + * struct nvme_mi_aem_supported_list_header - Asynchronous Event Supported List Header. + * @numaes: Number of AE supported data structures that follow the header + * @aeslver: AE Supported List Version + * @aest: AE Supported list length (including this header) + * @aeslhl: AE Supported list header length + * + * This header preceeds a number, (&numaes), of AE supported data structures + */ +struct nvme_mi_aem_supported_list_header { + __u8 numaes; + __u8 aeslver;//Should be zero + __le16 aest; + __u8 aeslhl; //Should be 5 +} __attribute__((packed)); + +/** + * struct nvme_mi_aem_supported_item - AE Supported List Item + * @aesl: AE supported list item length + * @aesi: AE supported info + * + * Following this header should be hdr.numaes entries of + * nvme_mi_aem_supported_item structures + */ +struct nvme_mi_aem_supported_item { + __u8 aesl;//Length of this item. 
Set to 3 + __le16 aesi; +} __attribute__((packed)); + +/** + * nvme_mi_aem_aesi_get_aese() - return aese from aesi field + * @aesi: aesi field from @nvme_mi_aem_supported_item + * + * Returns: A bool representing the aese value + */ +bool nvme_mi_aem_aesi_get_aese(__le16 aesi); + +/** + * nvme_mi_aem_aesi_get_aesid() - return aesid from aesi field + * @aesi: aesi field from @nvme_mi_aem_supported_item + * + * Returns: aesid value + */ +__u8 nvme_mi_aem_aesi_get_aesid(__le16 aesi); + +/** + * nvme_mi_aem_aesi_set_aesid() - set aesid in the aesi field + * @item: Pointer to @nvme_mi_aem_supported_item to update the aesi field + * @aesid: aesid value to use + */ +void nvme_mi_aem_aesi_set_aesid(struct nvme_mi_aem_supported_item *item, __u8 aesid); + +/** + * nvme_mi_aem_aesi_set_aee() - set aee in the aesi field + * @item: Pointer to @nvme_mi_aem_supported_item to update the aesi field + * @enabled: aee value to use + */ +void nvme_mi_aem_aesi_set_aee(struct nvme_mi_aem_supported_item *item, bool enabled); + +/** + * struct nvme_mi_aem_supported_list - AE Supported List received with GET CONFIG Asynchronous Event + * @hdr: AE supported list header + * + * Following this header should be hdr.numaes entries of + * nvme_mi_aem_supported_item structures + */ +struct nvme_mi_aem_supported_list { + struct nvme_mi_aem_supported_list_header hdr; +} __attribute__((packed)); + +/** + * struct nvme_mi_aem_enable_item - AE Enabled item entry + * @aeel: AE Enable Length (length of this structure which is 3) + * @aeei: AE Enable Info + * + */ +struct nvme_mi_aem_enable_item { + __u8 aeel; + __le16 aeei; +} __attribute__((packed)); + +/** + * nvme_mi_aem_aeei_get_aee() - return aee from aeei field + * @aeei: aeei field from @nvme_mi_aem_enable_item + * + * Returns: aee value + */ +bool nvme_mi_aem_aeei_get_aee(__le16 aeei); + +/** + * nvme_mi_aem_aeei_get_aeeid() - return aeeid from aeei field + * @aeei: aeei field from @nvme_mi_aem_enable_item + * + * Returns: aeeid value + */ +__u8 
nvme_mi_aem_aeei_get_aeeid(__le16 aeei); + +/** + * nvme_mi_aem_aeei_set_aeeid() - set aeeid in the aeei field + * @item: Pointer to @nvme_mi_aem_enable_item to update the aeei field + * @aeeid: aeeid value to use + */ +void nvme_mi_aem_aeei_set_aeeid(struct nvme_mi_aem_enable_item *item, __u8 aeeid); + +/** + * nvme_mi_aem_aeei_set_aee() - set aee in the aeei field + * @item: Pointer to @nvme_mi_aem_enable_item to update the aee field + * @enabled: aee value to use + */ +void nvme_mi_aem_aeei_set_aee(struct nvme_mi_aem_enable_item *item, bool enabled); + +/** + * struct nvme_mi_aem_enable_list_header - AE Enable list header + * @numaee: Number of AE enable items following the header + * @aeelver: Version of the AE enable list (zero) + * @aeetl: Total length of the AE enable list including header and items + * @aeelhl: Header length of this header (5) + */ +struct nvme_mi_aem_enable_list_header { + __u8 numaee; + __u8 aeelver; + __le16 aeetl; + __u8 aeelhl; +} __attribute__((packed)); + +/** + * struct nvme_mi_aem_enable_list - AE enable list sent with SET CONFIG Asyncronous Event + * @hdr: AE enable list header + * + * Following this header should be hdr.numaee entries of nvme_mi_aem_enable_item structures + */ +struct nvme_mi_aem_enable_list { + struct nvme_mi_aem_enable_list_header hdr; +} __attribute__((packed)); + +/** + * struct nvme_mi_aem_occ_data - AEM Message definition. + * @aelhlen: AE Occurrence Header Length + * @aeosil: AE Occurrence Specific Info Length + * @aeovsil: AE Occurrence Vendor Specific Info Length + * @aeoui: AE Occurrence Unique ID made up of other subfields + * + * A single entry of ae occurrence data that comes with an nvme_aem_msg. + * Following this structure is variable length AEOSI (occurrence specific + * info) and variable length AEVSI (vendor specific info). The length of + * AEOSI is specified by aeosil and the length of AEVSI is specified by + * AEVSI. 
Neither field is mandatory and shall be omitted if their length + * parameter is set to zero. + */ +struct nvme_mi_aem_occ_data { + __u8 aelhlen; + __u8 aeosil; + __u8 aeovsil; + struct { + __u8 aeoi; + __le32 aeocidi; + __u8 aessi; + } __attribute__((packed)) aeoui; +} __attribute__((packed)); + +/** + * struct nvme_mi_aem_occ_list_hdr - AE occurrence list header + * @numaeo: Number of AE Occurrence Data Structures + * @aelver: AE Occurrence List Version Number + * @aeolli: AE Occurrence List Length Info (AEOLLI) + * @aeolhl: AE Occurrence List Header Length (shall be set to 7) + * @aemti: AEM Transmission Info + * + * The header for the occurrence list. numaeo defines how many + * nvme_mi_aem_occ_data structures (including variable payaloads) are included. + * Following this header is each of the numaeo occurrence data structures. + */ +struct nvme_mi_aem_occ_list_hdr { + __u8 numaeo; + __u8 aelver; + __u8 aeolli[3];//24-bits + __u8 aeolhl; + __u8 aemti; +} __attribute__((packed)); + +/** + * nvme_mi_aem_aemti_get_aemgn() - return aemgn from aemti field + * @aemti: aemti field from @nvme_mi_aem_occ_list_hdr + * + * Returns: aemgn value + */ +__u8 nvme_mi_aem_aemti_get_aemgn(__u8 aemti); + +/** + * nvme_mi_aem_aeolli_get_aeoltl() - return aeoltl from aeolli field + * @aeolli: Pointer to 3 byte aeolli field from @nvme_mi_aem_occ_list_hdr + * + * Returns: aeoltl value + */ +__u32 nvme_mi_aem_aeolli_get_aeoltl(__u8 *aeolli); + +/** + * nvme_mi_aem_aeolli_set_aeoltl() - set aeoltl in the aeolli field + * @hdr:Pointer to @nvme_mi_aem_occ_list_hdr to set the aeolli field + * @aeoltl: aeoltl value to use + */ +void nvme_mi_aem_aeolli_set_aeoltl(struct nvme_mi_aem_occ_list_hdr *hdr, __u32 aeoltl); + +/** + * struct nvme_mi_aem_msg - AEM Message definition. + * @hdr: the general response message header + * @occ_list_hdr: ae occurrence list header. + * + * Every ae message will start with one of these. 
The occ_list_hder wil define + * information about how many ae occ data entries are included. Each entry is + * defined by the nvme_mi_aem_occ_data structure which will follow the + * occ_list_hdr. Each nvme_mi_aem_occ_data structure has a fixed length header + * but a variable length payload ude to occurrence specific and vendor specific + * info. For this reason, do not index the nvme_mi_ae_occ data structures by + * array or fixed offset. + */ +struct nvme_mi_aem_msg { + struct nvme_mi_msg_hdr hdr; + struct nvme_mi_aem_occ_list_hdr occ_list_hdr; +} __attribute__((packed)); + /* Admin command definitions */ /** @@ -646,6 +872,14 @@ nvme_mi_ctrl_t nvme_mi_next_ctrl(nvme_mi_ep_t ep, nvme_mi_ctrl_t c); */ nvme_mi_ep_t nvme_mi_open_mctp(nvme_root_t root, unsigned int netid, uint8_t eid); +/** + * nvme_mi_aem_open() - Prepare an existing endpoint to receive AEMs + * @ep: Endpoint to configure for AEMs + * + * Return: 0 if success, -1 otherwise + */ +int nvme_mi_aem_open(nvme_mi_ep_t ep); + /** * nvme_mi_close() - Close an endpoint connection and release resources, * including controller objects. @@ -1030,6 +1264,80 @@ static inline int nvme_mi_mi_config_set_mctp_mtu(nvme_mi_ep_t ep, __u8 port, return nvme_mi_mi_config_set(ep, dw0, mtu); } + +/** + * nvme_mi_mi_config_get_async_event - get configuration: Asynchronous Event + * @ep: endpoint for MI communication + * @aeelver: Asynchronous Event Enable List Version Number + * @list: AE Supported list header and list contents + * @list_num_bytes: number of bytes in the list header and contents buffer. + * This will be populated with returned size of list and contents if successful. + * + * Performs a MI Configuration Get, to query the current enable Asynchronous + * Events. On success, populates @aeelver and the @list with current info, + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. 
+ */ +int nvme_mi_mi_config_get_async_event(nvme_mi_ep_t ep, + __u8 *aeelver, + struct nvme_mi_aem_supported_list *list, + size_t *list_num_bytes); + +/** + * nvme_mi_mi_config_set_async_event - set configuration: Asynchronous Event + * @ep: endpoint for MI communication + * @envfa: Enable SR-IOV Virtual Functions AE + * @empfa: Enable SR-IOV Physical Functions AE + * @encfa: Enable PCI Functions AE. + * @aemd: AEM Delay Interval (for Sync only) + * @aerd: AEM Retry Delay (for Sync only; time in 100s of ms) + * @enable_list: nvme_mi_aem_enable_listucture containing header and items + * of events to be enabled or disabled. This is taken as a delta change + * from the current configuration. + * @enable_list_size: Size of the enable_list including header and data. + * Meant to catch overrun issues. + * @occ_list: Pointer to populate with the occurrence list (header and data) + * @occ_list_size: Total size of provided occ_list buffer. Will be updated + * with received size if successful + * + * Performs a MI Configuration Set, to ACK (sent after an AEM) or Sync (at anytime to enable + * or disable Asynchronous Events). + * + * On success, populates @occ_list. See TP6035a for details on how occ_list is populated in + * ACK versus Sync conditions + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. 
+ */ +int nvme_mi_mi_config_set_async_event(nvme_mi_ep_t ep, + bool envfa, + bool empfa, + bool encfa, + __u8 aemd, + __u8 aerd, + struct nvme_mi_aem_enable_list *enable_list, + size_t enable_list_size, + struct nvme_mi_aem_occ_list_hdr *occ_list, + size_t *occ_list_size); + +static inline int nvme_mi_aem_ack(nvme_mi_ep_t ep, + struct nvme_mi_aem_occ_list_hdr *occ_list, + size_t *occ_list_size) +{ + //An AEM Ack is defined as a SET CONFIG AE with no AE enable items + struct nvme_mi_aem_enable_list list = {0}; + + list.hdr.aeelhl = sizeof(struct nvme_mi_aem_enable_list_header); + list.hdr.aeelver = 0; + list.hdr.aeetl = sizeof(struct nvme_mi_aem_enable_list_header); + list.hdr.numaee = 0; + + return nvme_mi_mi_config_set_async_event(ep, false, false, false, 0, 0, + &list, sizeof(list), occ_list, + occ_list_size); +} + /* Admin channel functions */ /** @@ -3096,4 +3404,159 @@ int nvme_mi_admin_format_nvm(nvme_mi_ctrl_t ctrl, int nvme_mi_admin_sanitize_nvm(nvme_mi_ctrl_t ctrl, struct nvme_sanitize_nvm_args *args); +/** + * enum nvme_mi_aem_handler_next_action - Next action for the AEM state machine handler + * @NVME_MI_AEM_HNA_ACK: Send an ack for the AEM + * @NVME_MI_AEM_HNA_NONE: No further action + * + * Used as return value for the AE callback generated when calling nvme_mi_aem_process + */ +enum nvme_mi_aem_handler_next_action { + NVME_MI_AEM_HNA_ACK, + NVME_MI_AEM_HNA_NONE, +}; + +/** + * struct nvme_mi_event - AE event information structure + * @aeoi: Event identifier + * @aessi: Event occurrence scope info + * @aeocidi: Event occurrence scope ID info + * @spec_info: Specific info buffer + * @spec_info_len: Length of specific info buffer + * @vend_spec_info: Vendor specific info buffer + * @vend_spec_info_len: Length of vendor specific info buffer + * + * Application callbacks for nvme_mi_aem_process will be able to call + * nvme_mi_aem_get_next_event which will return a pointer to such an identifier + * for the next event the application should parse + */ 
+struct nvme_mi_event { + uint8_t aeoi; + uint8_t aessi; + uint32_t aeocidi; + void *spec_info; + size_t spec_info_len; + void *vend_spec_info; + size_t vend_spec_info_len; +}; + +/** + * nvme_mi_aem_get_next_event() - Get details for the next event to parse + * @ep: The endpoint with the event + * + * When inside a aem_handler, call this and a returned struct pointer + * will provide details of event information. Will return NULL when end of parsing is occurred. + * spec_info and vend_spec_info must be copied to persist as they will not be valid + * after the handler_next_action has returned. + * + * Return: Pointer no next nvme_mi_event or NULL if this is the last one + */ +struct nvme_mi_event *nvme_mi_aem_get_next_event(nvme_mi_ep_t ep); + +struct nvme_mi_aem_enabled_map { + bool enabled[256]; +}; + +/** + * struct nvme_mi_aem_config - Provided for nvme_mi_aem_enable + * @aem_handler: Callback function for application processing of events + * @enabled_map: Map indicating which AE should be enabled on the endpoint + * @envfa: Enable SR-IOV virtual functions AE + * @empfa: Enable SR-IOV physical functions AE + * @encfa: Enable PCIe functions AE + * @aemd: AEM Delay (time in seconds from when event happens to AEM being batched and sent) + * @aerd: AEM Retry Delay (time in 100s of ms between AEM retries from the endpoint) + * + * Application callbacks for nvme_mi_aem_process will be able to call + * nvme_mi_aem_get_next_event which will return a pointer to such an identifier + * for the next event the application should parse + */ +struct nvme_mi_aem_config { + /* + * This is called from inside nvme_mi_process when a payload has been validated and + * can be parsed. The application may call nvme_mi_aem_get_next_event from inside + * the callback to parse event data. 
+ */ + enum nvme_mi_aem_handler_next_action (*aem_handler)( + nvme_mi_ep_t ep, + size_t num_events, + void *userdata); + + struct nvme_mi_aem_enabled_map enabled_map; + + bool envfa; + bool empfa; + bool encfa; + __u8 aemd; + __u8 aerd; +}; + +/** + * nvme_mi_aem_get_fd() - Returns the pollable fd for AEM data available + * @ep: The endpoint being monitored for asynchronous data + * + * This populated structure can be polled from the application to understand if + * a call to nvme_mi_aem_process() is required (when a poll returns > 0). + * + * Return: The fd value or -1 if error + */ +int nvme_mi_aem_get_fd(nvme_mi_ep_t ep); + +/** + * nvme_mi_aem_enable() - Enable AE on the provided endpoint + * @ep: Endpoint to enable AEs + * @config: AE configuraiton including which events are enabled and the callback function + * @userdata: Application provided context pointer for callback function + * + * This function is called to enable AE on the endpoint. Endpoint will provide initial state + * (if any) of enabled AEs and application can parse those via the aem_handler fn pointer in + * callbacks. Thes can be obtained in the callback by calling nvme_mi_aem_get_next_event(). + * + * Application should poll the fd that can be obtained from nvme_mi_aem_get_fd and then call + * nvme_mi_aem_process() when poll() indicates data available. + * + * A call to nvme_mi_aem_process() will grab AEM data and call the aem_handler fn pointer. + * At this point the application can call nvme_mi_aem_get_next_event() to get information for + * each triggered event. 
+ * + * Return: 0 is a success, nonzero is an error and errno may be read for further details + */ +int nvme_mi_aem_enable(nvme_mi_ep_t ep, + struct nvme_mi_aem_config *config, + void *userdata); + + +/** + * nvme_mi_aem_get_enabled() - Return information on which AEs are enabled + * @ep: Endpoint to check enabled status + * @enabled: nvme_mi_aem_enabled_map indexed by AE event ID of enabled state + * + * Return: 0 is a success, nonzero is an error and errno may be read for further details + */ +int nvme_mi_aem_get_enabled(nvme_mi_ep_t ep, + struct nvme_mi_aem_enabled_map *enabled); + +/** + * nvme_mi_aem_disable() - Disable AE on the provided endpoint + * @ep: Endpoint to disable AEs + * + * Return: 0 is a success, nonzero is an error and errno may be read for further details + */ +int nvme_mi_aem_disable(nvme_mi_ep_t ep); + +/** + * nvme_mi_aem_process() - Process AEM on the provided endpoint + * @ep: Endpoint to process + * @userdata: Application provided context pointer for callback function + * + * Call this if poll() indicates data is available on the fd provided by nvme_mi_aem_get_fd() + * + * This will call the fn pointer, aem_handler, provided with nvme_mi_aem_config and the + * application can call nvme_mi_aem_get_next_event() from within this callback to get + * aem event data. The callback function should return NVME_MI_AEM_HNA_ACK for normal operation. 
+ * + * Return: 0 is a success, nonzero is an error and errno may be read for further details + */ +int nvme_mi_aem_process(nvme_mi_ep_t ep, void *userdata); + #endif /* _LIBNVME_MI_MI_H */ diff --git a/src/nvme/private.h b/src/nvme/private.h index 52d4054c2..bfae07ae8 100644 --- a/src/nvme/private.h +++ b/src/nvme/private.h @@ -243,6 +243,20 @@ struct nvme_mi_transport { void (*close)(struct nvme_mi_ep *ep); int (*desc_ep)(struct nvme_mi_ep *ep, char *buf, size_t len); int (*check_timeout)(struct nvme_mi_ep *ep, unsigned int timeout); + int (*aem_fd)(struct nvme_mi_ep *ep); + int (*aem_read)(struct nvme_mi_ep *ep, + struct nvme_mi_resp *resp); + int (*aem_purge)(struct nvme_mi_ep *ep); +}; + +struct nvme_mi_aem_ctx { + struct nvme_mi_aem_occ_list_hdr *occ_header; + struct nvme_mi_aem_occ_data *list_start; + struct nvme_mi_aem_occ_data *list_current; + int list_current_index; + struct nvme_mi_aem_config callbacks; + int last_generation_num; + struct nvme_mi_event event; }; /* quirks */ @@ -276,6 +290,8 @@ struct nvme_mi_ep { unsigned int inter_command_us; struct timespec last_resp_time; bool last_resp_time_valid; + + struct nvme_mi_aem_ctx *aem_ctx; }; struct nvme_mi_ctrl { @@ -295,7 +311,8 @@ __u32 nvme_mi_crc32_update(__u32 crc, void *data, size_t len); * in the shared lib */; struct mctp_ioc_tag_ctl; struct __mi_mctp_socket_ops { - int (*socket)(int, int, int); + int (*msg_socket)(void); + int (*aem_socket)(__u8 eid, unsigned int network); ssize_t (*sendmsg)(int, const struct msghdr *, int); ssize_t (*recvmsg)(int, struct msghdr *, int); int (*poll)(struct pollfd *, nfds_t, int); diff --git a/test/mi-mctp.c b/test/mi-mctp.c index 5711c0305..7d1acd2c6 100644 --- a/test/mi-mctp.c +++ b/test/mi-mctp.c @@ -27,10 +27,13 @@ struct test_peer; -typedef int (*rx_test_fn)(struct test_peer *peer, void *buf, size_t len); +typedef int (*rx_test_fn)(struct test_peer *peer, void *buf, size_t len, int sd); typedef int (*poll_test_fn)(struct test_peer *peer, struct pollfd *fds, 
nfds_t nfds, int timeout); +#define TEST_PEER_SD_COMMANDS_IDX (0) +#define TEST_PEER_SD_AEMS_IDX (1) + /* Our fake MCTP "peer". * * The terms TX (transmit) and RX (receive) are from the perspective of @@ -64,17 +67,19 @@ static struct test_peer { void *poll_data; /* store sd from socket() setup */ - int sd; + int sd[2]; } test_peer; /* ensure tests start from a standard state */ void reset_test_peer(void) { - int tmp = test_peer.sd; + int temp_sd[2] = {test_peer.sd[TEST_PEER_SD_COMMANDS_IDX], + test_peer.sd[TEST_PEER_SD_AEMS_IDX]}; + memset(&test_peer, 0, sizeof(test_peer)); test_peer.tx_buf[0] = NVME_MI_MSGTYPE_NVME; test_peer.rx_buf[0] = NVME_MI_MSGTYPE_NVME; - test_peer.sd = tmp; + memcpy(test_peer.sd, temp_sd, 2*sizeof(*temp_sd)); } /* calculate MIC of peer-to-libnvme data, expand buf by 4 bytes and insert @@ -93,18 +98,25 @@ static void test_set_tx_mic(struct test_peer *peer) peer->tx_buf_len += sizeof(crc_le); } -int __wrap_socket(int family, int type, int protocol) +int __wrap_msg_socket(void) +{ + /* we do an open here to give the mi-mctp code something to close() */ + test_peer.sd[TEST_PEER_SD_COMMANDS_IDX] = open("/dev/null", 0); + return test_peer.sd[TEST_PEER_SD_COMMANDS_IDX]; +} + +int __wrap_aem_socket(__u8 eid, unsigned int network) { /* we do an open here to give the mi-mctp code something to close() */ - test_peer.sd = open("/dev/null", 0); - return test_peer.sd; + test_peer.sd[TEST_PEER_SD_AEMS_IDX] = open("/dev/null", 0); + return test_peer.sd[TEST_PEER_SD_AEMS_IDX]; } ssize_t __wrap_sendmsg(int sd, const struct msghdr *hdr, int flags) { size_t i, pos; - assert(sd == test_peer.sd); + assert(sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX]); test_peer.rx_buf[0] = NVME_MI_MSGTYPE_NVME; @@ -128,13 +140,23 @@ ssize_t __wrap_recvmsg(int sd, struct msghdr *hdr, int flags) { size_t i, pos, len; - assert(sd == test_peer.sd); + assert(sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX] || + sd == test_peer.sd[TEST_PEER_SD_AEMS_IDX]); + + //Check for purge case + 
if (flags & MSG_TRUNC) + return 0; if (test_peer.tx_fn) { test_peer.tx_fn_res = test_peer.tx_fn(&test_peer, test_peer.rx_buf, - test_peer.rx_buf_len); + test_peer.rx_buf_len, + sd); } else { + if (sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX] && test_peer.tx_buf_len == 0) { + errno = EAGAIN; + return -1; + } /* set up a few default response fields; caller may have * initialised the rest of the response */ test_peer.tx_buf[0] = NVME_MI_MSGTYPE_NVME; @@ -157,6 +179,7 @@ ssize_t __wrap_recvmsg(int sd, struct msghdr *hdr, int flags) errno = test_peer.tx_errno; + test_peer.tx_buf_len = 0; //Clear since this is sent return test_peer.tx_rc ?: (pos - 1); } @@ -173,14 +196,14 @@ struct mctp_ioc_tag_ctl; #ifdef SIOCMCTPALLOCTAG int test_ioctl_tag(int sd, unsigned long req, struct mctp_ioc_tag_ctl *ctl) { - assert(sd == test_peer.sd); + assert(sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX]); switch (req) { case SIOCMCTPALLOCTAG: ctl->tag = 1 | MCTP_TAG_PREALLOC | MCTP_TAG_OWNER; break; case SIOCMCTPDROPTAG: - assert(tag == 1 | MCTP_TAG_PREALLOC | MCTP_TAG_OWNER); + assert(ctl->tag == (1 | MCTP_TAG_PREALLOC | MCTP_TAG_OWNER)); break; }; @@ -189,13 +212,14 @@ int test_ioctl_tag(int sd, unsigned long req, struct mctp_ioc_tag_ctl *ctl) #else int test_ioctl_tag(int sd, unsigned long req, struct mctp_ioc_tag_ctl *ctl) { - assert(sd == test_peer.sd); + assert(sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX]); return 0; } #endif static struct __mi_mctp_socket_ops ops = { - __wrap_socket, + __wrap_msg_socket, + __wrap_aem_socket, __wrap_sendmsg, __wrap_recvmsg, __wrap_poll, @@ -214,7 +238,7 @@ static void test_rx_err(nvme_mi_ep_t ep, struct test_peer *peer) assert(rc != 0); } -static int tx_none(struct test_peer *peer, void *buf, size_t len) +static int tx_none(struct test_peer *peer, void *buf, size_t len, int sd) { return 0; } @@ -474,10 +498,12 @@ struct mpr_tx_info { size_t final_len; }; -static int tx_mpr(struct test_peer *peer, void *buf, size_t len) +static int tx_mpr(struct 
test_peer *peer, void *buf, size_t len, int sd) { struct mpr_tx_info *tx_info = peer->tx_data; + assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]); + memset(peer->tx_buf, 0, sizeof(peer->tx_buf)); peer->tx_buf[0] = NVME_MI_MSGTYPE_NVME; peer->tx_buf[1] = test_peer.rx_buf[1] | (NVME_MI_ROR_RSP << 7); @@ -594,12 +620,14 @@ static int poll_fn_mpr_poll(struct test_peer *peer, struct pollfd *fds, return 1; } -static int tx_fn_mpr_poll(struct test_peer *peer, void *buf, size_t len) +static int tx_fn_mpr_poll(struct test_peer *peer, void *buf, size_t len, int sd) { struct mpr_tx_info *tx_info = peer->tx_data; struct mpr_poll_info *poll_info = peer->poll_data; unsigned int mprt; + assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]); + memset(peer->tx_buf, 0, sizeof(peer->tx_buf)); peer->tx_buf[0] = NVME_MI_MSGTYPE_NVME; peer->tx_buf[1] = test_peer.rx_buf[1] | (NVME_MI_ROR_RSP << 7); @@ -713,6 +741,655 @@ static void test_mpr_mprt_zero(nvme_mi_ep_t ep, struct test_peer *peer) assert(rc == 0); } +enum aem_enable_state { + AEM_ES_GET_ENABLED, + AEM_ES_SET_TO_DISABLED, + AEM_ES_ENABLE_SET_ENABLED, + AEM_ES_PROCESS, + AEM_ES_ACK_RESPONSE, + AEM_ES_ACK_RECEIVED +}; + +enum aem_failure_condition { + AEM_FC_NONE, + AEM_FC_BAD_GET_CONFIG_HEADER_LEN, + AEM_FC_BAD_GET_CONFIG_TOTAL_LEN, + AEM_FC_BAD_GET_CONFIG_BUFFER_LEN, + AEM_FC_BAD_OCC_RSP_HDR_LEN_SYNC, + AEM_FC_BAD_OCC_RSP_TOTAL_LEN_SYNC, + AEM_FC_BAD_OCC_RSP_BUFFER_LEN_SYNC, + AEM_FC_BAD_OCC_RSP_HDR_LEN_AEM, + AEM_FC_BAD_OCC_RSP_TOTAL_LEN_AEM, + AEM_FC_BAD_OCC_RSP_BUFFER_LEN_AEM, +}; + +struct aem_rcv_enable_fn_data { + enum aem_enable_state state; + enum aem_failure_condition fc; + struct nvme_mi_aem_enabled_map ep_enabled_map; + struct nvme_mi_aem_enabled_map host_enabled_map; + struct nvme_mi_aem_enabled_map aem_during_process_map; + struct nvme_mi_aem_enabled_map ack_events_map; + struct nvme_mi_event *events[256]; + int callback_count; +}; + +static void populate_tx_occ_list(bool aem_not_ack, + struct aem_rcv_enable_fn_data 
*fn_data, struct nvme_mi_aem_enabled_map *to_send) +{ + struct nvme_mi_mi_resp_hdr *resp_hdr = + (struct nvme_mi_mi_resp_hdr *)test_peer.tx_buf; + + struct nvme_mi_msg_hdr *mi_msg_hdr = + (struct nvme_mi_msg_hdr *)test_peer.tx_buf; + + size_t hdr_len = sizeof(*resp_hdr); + + struct nvme_mi_aem_occ_list_hdr *list_hdr = + (struct nvme_mi_aem_occ_list_hdr *)(resp_hdr+1); + + //For AEM, the data is actually in request format + //since it originates from the endpoint + if (aem_not_ack) { + list_hdr = (struct nvme_mi_aem_occ_list_hdr *)(mi_msg_hdr+1); + hdr_len = sizeof(*mi_msg_hdr); + mi_msg_hdr->nmp = (NVME_MI_MT_AE << 3); + } else { + resp_hdr->status = 0; + } + + list_hdr->aelver = 0; + list_hdr->aeolhl = sizeof(*list_hdr); + list_hdr->numaeo = 0; + __u32 aeoltl = list_hdr->aeolhl; + + struct nvme_mi_aem_occ_data *data = + (struct nvme_mi_aem_occ_data *)(list_hdr+1); + + for (int i = 0; i < 255; i++) { + if (fn_data->events[i] && to_send->enabled[i]) { + struct nvme_mi_event *event = fn_data->events[i]; + + list_hdr->numaeo++; + aeoltl += sizeof(struct nvme_mi_aem_occ_data); + aeoltl += event->spec_info_len + + event->vend_spec_info_len; + + data->aelhlen = sizeof(*data); + + if ((fn_data->fc == AEM_FC_BAD_OCC_RSP_HDR_LEN_SYNC && !aem_not_ack) || + (fn_data->fc == AEM_FC_BAD_OCC_RSP_HDR_LEN_AEM && aem_not_ack)) + data->aelhlen--; + + data->aeoui.aeocidi = event->aeocidi; + data->aeoui.aeoi = event->aeoi; + data->aeoui.aessi = event->aessi; + data->aeosil = event->spec_info_len; + data->aeovsil = event->vend_spec_info_len; + + if ((fn_data->fc == AEM_FC_BAD_OCC_RSP_TOTAL_LEN_SYNC && + !aem_not_ack) || + (fn_data->fc == AEM_FC_BAD_OCC_RSP_TOTAL_LEN_AEM && + aem_not_ack)) + aeoltl -= 1; + + //Now the data + uint8_t *spec = (uint8_t *)(data+1); + + if (data->aeosil) { + memcpy(spec, event->spec_info, event->spec_info_len); + spec += event->spec_info_len; + } + + if (data->aeovsil) { + memcpy(spec, event->vend_spec_info, event->vend_spec_info_len); + spec += 
event->vend_spec_info_len; + } + + data = (struct nvme_mi_aem_occ_data *)(spec); + } + } + + nvme_mi_aem_aeolli_set_aeoltl(list_hdr, aeoltl); + test_peer.tx_buf_len = hdr_len + aeoltl; + + if ((fn_data->fc == AEM_FC_BAD_OCC_RSP_BUFFER_LEN_SYNC && !aem_not_ack) || + (fn_data->fc == AEM_FC_BAD_OCC_RSP_BUFFER_LEN_AEM && aem_not_ack)) + test_peer.tx_buf_len--; + + test_set_tx_mic(&test_peer); +} + +static void check_aem_sync_message(struct nvme_mi_aem_enabled_map *expected_mask, + struct nvme_mi_aem_enabled_map *expected_state, + struct aem_rcv_enable_fn_data *fn_data) +{ + //Check the RX buffer for the endpoint. We should be getting a CONFIG SET AEM + //with all enabled items disabled + struct nvme_mi_mi_req_hdr *req = + (struct nvme_mi_mi_req_hdr *)test_peer.rx_buf; + + struct nvme_mi_aem_supported_list *list = + (struct nvme_mi_aem_supported_list *)(req+1); + + assert(req->opcode == nvme_mi_mi_opcode_configuration_set); + assert((le32_to_cpu(req->cdw0) & 0xFF) == NVME_MI_CONFIG_AE); + assert(list->hdr.aeslver == 0); + + int count = 0; + //Count how many events we want to act are in the expected state + for (int i = 0; i < 256; i++) { + if (expected_mask->enabled[i]) + count++; + } + + assert(list->hdr.numaes == count); + assert(list->hdr.aeslhl == sizeof(struct nvme_mi_aem_supported_list)); + assert(list->hdr.aest == list->hdr.aeslhl + + count * sizeof(struct nvme_mi_aem_supported_item)); + + struct nvme_mi_aem_supported_item *item = + (struct nvme_mi_aem_supported_item *)(list+1); + + //Check the items + for (int i = 0; i < 256; i++) { + if (expected_mask->enabled[i]) { + bool found = false; + + for (int j = 0; j < count; j++) { + if (nvme_mi_aem_aesi_get_aesid(item[j].aesi) == i && + nvme_mi_aem_aesi_get_aese(item[j].aesi) == + expected_state->enabled[i]) { + found = true; + break; + } + } + assert(found); + } + } +} + +static int aem_rcv_enable_fn(struct test_peer *peer, void *buf, size_t len, int sd) +{ + struct aem_rcv_enable_fn_data *fn_data = peer->tx_data; + 
struct nvme_mi_mi_resp_hdr *tx_hdr = (struct nvme_mi_mi_resp_hdr *)peer->tx_buf; + + /* set up a few default response fields; caller may have + * initialised the rest of the response + */ + test_peer.tx_buf[0] = NVME_MI_MSGTYPE_NVME; + test_peer.tx_buf[1] = test_peer.rx_buf[1] | (NVME_MI_ROR_RSP << 7); + tx_hdr->status = 0; + + switch (fn_data->state) { + case AEM_ES_GET_ENABLED: + { + assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]); + + //First, we want to return some data about what is already enabled + struct nvme_mi_aem_supported_list_header *list_hdr = + (struct nvme_mi_aem_supported_list_header *)(tx_hdr+1); + + if (fn_data->fc == AEM_FC_BAD_GET_CONFIG_HEADER_LEN) + list_hdr->aeslhl = + sizeof(struct nvme_mi_aem_supported_list_header) - 1; + else + list_hdr->aeslhl = + sizeof(struct nvme_mi_aem_supported_list_header); + + list_hdr->aeslver = 0; + struct nvme_mi_aem_supported_item *item = + (struct nvme_mi_aem_supported_item *)(list_hdr+1); + int item_count = 0; + + list_hdr->numaes = 0; + //Count how many events we want to act are enabled + for (int i = 0; i < 256; i++) { + if (fn_data->ep_enabled_map.enabled[i]) { + list_hdr->numaes++; + nvme_mi_aem_aesi_set_aesid(&item[item_count], i); + nvme_mi_aem_aesi_set_aee(&item[item_count], 1); + item[item_count].aesl = + sizeof(struct nvme_mi_aem_supported_item); + item_count++; + } + } + + list_hdr->aest = list_hdr->aeslhl + + list_hdr->numaes * sizeof(struct nvme_mi_aem_supported_item); + if (fn_data->fc == AEM_FC_BAD_GET_CONFIG_TOTAL_LEN) + list_hdr->aest--;//Shrink + + test_peer.tx_buf_len = + sizeof(struct nvme_mi_mi_resp_hdr) + list_hdr->aest; + if (fn_data->fc == AEM_FC_BAD_GET_CONFIG_BUFFER_LEN) + test_peer.tx_buf_len--; + + test_set_tx_mic(&test_peer); + + fn_data->state = AEM_ES_SET_TO_DISABLED; + break; + } + case AEM_ES_SET_TO_DISABLED: + { + assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]); + + struct nvme_mi_aem_enabled_map expected = {false}; + //The items in the ep_enabled_map should get disabled 
+ check_aem_sync_message(&fn_data->ep_enabled_map, &expected, fn_data); + + //Need to queue a reasonable response with no OCC + struct nvme_mi_mi_resp_hdr *tx_hdr = + (struct nvme_mi_mi_resp_hdr *)test_peer.tx_buf; + struct nvme_mi_aem_occ_list_hdr *list_hdr = + (struct nvme_mi_aem_occ_list_hdr *)(tx_hdr+1); + + list_hdr->aelver = 0; + list_hdr->aeolhl = sizeof(*list_hdr); + list_hdr->numaeo = 0; + nvme_mi_aem_aeolli_set_aeoltl(list_hdr, list_hdr->aeolhl); + + test_peer.tx_buf_len = sizeof(struct nvme_mi_mi_resp_hdr) + + nvme_mi_aem_aeolli_get_aeoltl(list_hdr->aeolli); + + test_set_tx_mic(&test_peer); + + fn_data->state = AEM_ES_ENABLE_SET_ENABLED; + break; + } + case AEM_ES_ENABLE_SET_ENABLED: + assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]); + + //We should verify the right things are enabled + //The items in the host enable map should get enabled + check_aem_sync_message(&fn_data->host_enabled_map, + &fn_data->host_enabled_map, fn_data); + + //Prepare an OCC list response + populate_tx_occ_list(false, fn_data, &fn_data->host_enabled_map); + + fn_data->state = AEM_ES_PROCESS; + break; + case AEM_ES_PROCESS: + //This case is actually a TX without any request from the host + assert(sd == peer->sd[TEST_PEER_SD_AEMS_IDX]); + + //Prepare an OCC list response + populate_tx_occ_list(true, fn_data, &fn_data->aem_during_process_map); + + fn_data->state = AEM_ES_ACK_RESPONSE; + break; + case AEM_ES_ACK_RESPONSE: + assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]); + + //Prepare an OCC list response + populate_tx_occ_list(false, fn_data, &fn_data->ack_events_map); + + fn_data->state = AEM_ES_ACK_RECEIVED; + break; + default: + assert(false);//Not expected + } + + return 0; +} + +enum nvme_mi_aem_handler_next_action aem_handler(nvme_mi_ep_t ep, size_t num_events, void *userdata) +{ + struct aem_rcv_enable_fn_data *fn_data = userdata; + + fn_data->callback_count++; + + switch (fn_data->state) { + case AEM_ES_PROCESS: + case AEM_ES_ACK_RESPONSE: + case AEM_ES_ACK_RECEIVED: 
+ { + //This means we just sent out first OCC data + int item_count = 0; + struct nvme_mi_aem_enabled_map *map; + + //Count how many events we want to act are enabled + switch (fn_data->state) { + case AEM_ES_PROCESS: + map = &fn_data->host_enabled_map; + break; + case AEM_ES_ACK_RESPONSE: + map = &fn_data->aem_during_process_map; + break; + case AEM_ES_ACK_RECEIVED: + map = &fn_data->ack_events_map; + break; + default: + assert(false); + } + + for (int i = 0; i < 256; i++) + if (map->enabled[i]) + item_count++; + + assert(num_events == item_count); + + for (int i = 0; i < num_events; i++) { + struct nvme_mi_event *e = nvme_mi_aem_get_next_event(ep); + uint8_t idx = e->aeoi; + + assert(fn_data->events[idx]); + assert(fn_data->host_enabled_map.enabled[idx]); + assert(fn_data->events[idx]->aeocidi == e->aeocidi); + assert(fn_data->events[idx]->aessi == e->aessi); + assert(fn_data->events[idx]->spec_info_len == + e->spec_info_len); + assert(memcmp(fn_data->events[idx]->spec_info, + e->spec_info, e->spec_info_len) == 0); + assert(fn_data->events[idx]->vend_spec_info_len == + e->vend_spec_info_len); + assert(memcmp(fn_data->events[idx]->vend_spec_info, + e->vend_spec_info, e->vend_spec_info_len) == 0); + } + + assert(nvme_mi_aem_get_next_event(ep) == NULL); + break; + } + default: + assert(false); + } + + return NVME_MI_AEM_HNA_ACK; +} + +static void aem_test_aem_api_helper(nvme_mi_ep_t ep, + struct nvme_mi_aem_config *config, int expected_event_count) +{ + struct aem_rcv_enable_fn_data *fn_data = + (struct aem_rcv_enable_fn_data *)test_peer.tx_data; + int rc = 0; + + test_peer.tx_fn = aem_rcv_enable_fn; + + //This should not work outside the handler + assert(nvme_mi_aem_get_next_event(ep) == NULL); + + rc = nvme_mi_aem_enable(ep, config, test_peer.tx_data); + assert(rc == 0); + + //This should not work outside the handler + assert(nvme_mi_aem_get_next_event(ep) == NULL); + + rc = nvme_mi_aem_process(ep, test_peer.tx_data); + assert(rc == 0); + + //One for initial 
enable, one for AEM. No ACK events + assert(fn_data->callback_count == expected_event_count); + + //This should not work outside the handler + assert(nvme_mi_aem_get_next_event(ep) == NULL); +} + +static void aem_test_aem_disable_helper(nvme_mi_ep_t ep, + struct aem_rcv_enable_fn_data *fn_data) +{ + memcpy(&fn_data->ep_enabled_map, &fn_data->host_enabled_map, + sizeof(fn_data->host_enabled_map)); + + fn_data->state = AEM_ES_GET_ENABLED;//This is the flow for disabling + assert(nvme_mi_aem_disable(ep) == 0); +} + +static void test_mi_aem_ep_based_failure_helper(nvme_mi_ep_t ep, + enum aem_failure_condition fc, struct test_peer *peer) +{ + struct aem_rcv_enable_fn_data fn_data = {0}; + struct nvme_mi_aem_config config = {0}; + + config.aemd = 1; + config.aerd = 2; + config.enabled_map.enabled[3] = true; + fn_data.aem_during_process_map.enabled[3] = true; + struct nvme_mi_event e = {0}; + + e.aeoi = 3; + e.spec_info_len = 0; + fn_data.events[3] = &e; + + memcpy(&fn_data.host_enabled_map, &config.enabled_map, sizeof(config.enabled_map)); + + config.aem_handler = aem_handler; + peer->tx_data = (void *) &fn_data; + peer->tx_fn = aem_rcv_enable_fn; + + fn_data.fc = fc; + switch (fc) { + case AEM_FC_BAD_GET_CONFIG_HEADER_LEN: + case AEM_FC_BAD_GET_CONFIG_TOTAL_LEN: + case AEM_FC_BAD_GET_CONFIG_BUFFER_LEN: + case AEM_FC_BAD_OCC_RSP_HDR_LEN_SYNC: + case AEM_FC_BAD_OCC_RSP_TOTAL_LEN_SYNC: + case AEM_FC_BAD_OCC_RSP_BUFFER_LEN_SYNC: + //These all should fail before processing + assert(nvme_mi_aem_enable(ep, &config, &fn_data) == -1); + assert(errno == EPROTO); + break; + case AEM_FC_BAD_OCC_RSP_HDR_LEN_AEM: + case AEM_FC_BAD_OCC_RSP_TOTAL_LEN_AEM: + case AEM_FC_BAD_OCC_RSP_BUFFER_LEN_AEM: + //These should fail on the processing + assert(nvme_mi_aem_enable(ep, &config, &fn_data) == 0); + assert(nvme_mi_aem_process(ep, &fn_data) == -1); + assert(errno == EPROTO); + break; + default: + assert(false);//Unexpected + } +} + +/* test: Check validation of endpoint messages in various 
stages of aem handling */ +static void test_mi_aem_ep_based_failure_conditions(nvme_mi_ep_t ep, struct test_peer *peer) +{ + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_GET_CONFIG_HEADER_LEN, peer); + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_GET_CONFIG_TOTAL_LEN, peer); + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_GET_CONFIG_BUFFER_LEN, peer); + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_HDR_LEN_SYNC, peer); + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_HDR_LEN_AEM, peer); + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_TOTAL_LEN_SYNC, peer); + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_TOTAL_LEN_AEM, peer); + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_BUFFER_LEN_SYNC, peer); + test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_BUFFER_LEN_AEM, peer); +} + +/* test: Check aem process logic when API used improperly */ +static void test_mi_aem_enable_invalid_usage(nvme_mi_ep_t ep, struct test_peer *peer) +{ + struct nvme_mi_aem_config config = {0}; + + config.aem_handler = aem_handler; + config.enabled_map.enabled[0] = false; + config.aemd = 1; + config.aerd = 2; + + //Call with invalid config due to nothing enabled + assert(nvme_mi_aem_enable(ep, &config, NULL) == -1); + + config.aem_handler = NULL; + config.enabled_map.enabled[0] = true; + + //Call with invalid config due to no callback + assert(nvme_mi_aem_enable(ep, &config, NULL) == -1); + + //Call with invalid config due to being NULL + assert(nvme_mi_aem_enable(ep, NULL, NULL) == -1); + + config.aem_handler = aem_handler; + config.enabled_map.enabled[0] = true; + + //Call with invalid endpoint + assert(nvme_mi_aem_enable(NULL, &config, NULL) == -1); +} + +/* test: Check aem process logic when API used improperly */ +static void test_mi_aem_process_invalid_usage(nvme_mi_ep_t ep, struct test_peer *peer) +{ + //Without calling enable first + assert(nvme_mi_aem_process(ep, NULL) == -1); + + //Call with 
invalid ep
+	assert(nvme_mi_aem_process(NULL, NULL) == -1);
+}
+
+/* test: Check aem disable logic when API used improperly */
+static void test_mi_aem_disable_invalid_usage(nvme_mi_ep_t ep, struct test_peer *peer)
+{
+	assert(nvme_mi_aem_disable(NULL) == -1);
+}
+
+static void test_mi_aem_get_enabled_invalid_usage(nvme_mi_ep_t ep, struct test_peer *peer)
+{
+	struct nvme_mi_aem_enabled_map map;
+
+	assert(nvme_mi_aem_get_enabled(ep, NULL) == -1);
+	assert(nvme_mi_aem_get_enabled(NULL, &map) == -1);
+}
+
+/* test: Check aem get enabled logic */
+static void test_mi_aem_get_enabled(nvme_mi_ep_t ep, struct test_peer *peer)
+{
+	//Endpoint has several events enabled; verify they are all read back
+	struct aem_rcv_enable_fn_data fn_data = {0};
+	struct nvme_mi_aem_enabled_map map;
+
+	test_peer.tx_fn = aem_rcv_enable_fn;
+	peer->tx_data = (void *) &fn_data;
+	fn_data.ep_enabled_map.enabled[8] = true;
+	fn_data.ep_enabled_map.enabled[20] = true;
+	fn_data.ep_enabled_map.enabled[51] = true;
+	fn_data.ep_enabled_map.enabled[255] = true;
+
+	assert(nvme_mi_aem_get_enabled(ep, &map) == 0);
+	assert(memcmp(&fn_data.ep_enabled_map, &map, sizeof(map)) == 0);
+}
+
+
+/* test: Check aem disable logic when called without an enable */
+static void test_mi_aem_disable_no_enable(nvme_mi_ep_t ep, struct test_peer *peer)
+{
+	//When no events enabled on Endpoint
+	struct aem_rcv_enable_fn_data fn_data = {0};
+
+	test_peer.tx_fn = aem_rcv_enable_fn;
+	peer->tx_data = (void *) &fn_data;
+
+	aem_test_aem_disable_helper(ep, &fn_data);
+
+	//When some events enabled on Endpoint
+	fn_data.ep_enabled_map.enabled[45] = true;
+
+	aem_test_aem_disable_helper(ep, &fn_data);
+}
+
+/* test: Check aem enable logic with ack carrying events */
+static void test_mi_aem_api_w_ack_events(nvme_mi_ep_t ep, struct test_peer *peer)
+{
+	struct aem_rcv_enable_fn_data fn_data = {0};
+	struct nvme_mi_aem_config config = {0};
+
+	config.aemd = 1;
+	config.aerd = 2;
+	peer->tx_data = (void *) &fn_data;
+	config.aem_handler = aem_handler;
+	
config.enabled_map.enabled[5] = true;
+	config.enabled_map.enabled[15] = true;
+
+	fn_data.aem_during_process_map.enabled[5] = true;
+
+	//Event 15 will be carried in the ACK response (ack_events_map)
+	fn_data.ack_events_map.enabled[15] = true;
+
+	//The EP will have nothing enabled at start (ep_enabled_map)
+
+	struct nvme_mi_event ev5 = {0};
+
+	ev5.aeoi = 5;
+	ev5.aeocidi = 2;
+	ev5.aessi = 3;
+
+	struct nvme_mi_event ev15 = {0};
+	uint8_t ev15_spec[] = { 45, 15};
+
+	ev15.aeoi = 15;
+	ev15.aeocidi = 60213;
+	ev15.aessi = 200;
+	ev15.spec_info = ev15_spec;
+	ev15.spec_info_len = sizeof(ev15_spec);
+
+	fn_data.events[5] = &ev5;
+	fn_data.events[15] = &ev15;
+
+	memcpy(&fn_data.host_enabled_map, &config.enabled_map, sizeof(config.enabled_map));
+
+	aem_test_aem_api_helper(ep, &config, 3);
+
+	aem_test_aem_disable_helper(ep, &fn_data);
+}
+
+/* test: Check aem enable logic */
+static void test_mi_aem_api_simple(nvme_mi_ep_t ep, struct test_peer *peer)
+{
+	struct aem_rcv_enable_fn_data fn_data = {0};
+	struct nvme_mi_aem_config config = {0};
+
+	config.aemd = 1;
+	config.aerd = 2;
+	peer->tx_data = (void *) &fn_data;
+	config.aem_handler = aem_handler;
+
+	config.enabled_map.enabled[1] = true;
+	config.enabled_map.enabled[3] = true;
+	config.enabled_map.enabled[16] = true;
+
+	fn_data.aem_during_process_map.enabled[3] = true;
+
+	//No ack_events_map will be enabled in this test
+
+	fn_data.ep_enabled_map.enabled[3] = true;
+	fn_data.ep_enabled_map.enabled[20] = true;
+	fn_data.ep_enabled_map.enabled[200] = true;
+
+	struct nvme_mi_event ev1 = {0};
+	uint8_t ev1_spec[] = { 98, 56, 32, 12};
+
+	ev1.aeoi = 1;
+	ev1.aeocidi = 2;
+	ev1.aessi = 3;
+	ev1.spec_info = ev1_spec;
+	ev1.spec_info_len = sizeof(ev1_spec);
+
+	struct nvme_mi_event ev3 = {0};
+	uint8_t ev3_spec[] = { 45, 15};
+
+	ev3.aeoi = 3;
+	ev3.aeocidi = 4;
+	ev3.aessi = 5;
+	ev3.spec_info = ev3_spec;
+	ev3.spec_info_len = sizeof(ev3_spec);
+
+	struct nvme_mi_event ev16 = {0};
+
+	ev16.aeoi = 16;
+	ev16.aeocidi = 6;
+	
ev16.aessi = 7; + + fn_data.events[1] = &ev1; + fn_data.events[3] = &ev3; + fn_data.events[16] = &ev16; + + memcpy(&fn_data.host_enabled_map, &config.enabled_map, sizeof(config.enabled_map)); + + aem_test_aem_api_helper(ep, &config, 2); + + aem_test_aem_disable_helper(ep, &fn_data); +} + #define DEFINE_TEST(name) { #name, test_ ## name } struct test { const char *name; @@ -737,6 +1414,15 @@ struct test { DEFINE_TEST(mpr_timeouts), DEFINE_TEST(mpr_timeout_clamp), DEFINE_TEST(mpr_mprt_zero), + DEFINE_TEST(mi_aem_api_simple), + DEFINE_TEST(mi_aem_api_w_ack_events), + DEFINE_TEST(mi_aem_disable_no_enable), + DEFINE_TEST(mi_aem_process_invalid_usage), + DEFINE_TEST(mi_aem_enable_invalid_usage), + DEFINE_TEST(mi_aem_disable_invalid_usage), + DEFINE_TEST(mi_aem_get_enabled), + DEFINE_TEST(mi_aem_get_enabled_invalid_usage), + DEFINE_TEST(mi_aem_ep_based_failure_conditions), }; static void run_test(struct test *test, FILE *logfd, nvme_mi_ep_t ep, diff --git a/test/mi.c b/test/mi.c index b6e152bb1..acf01cb77 100644 --- a/test/mi.c +++ b/test/mi.c @@ -95,6 +95,10 @@ static const struct nvme_mi_transport test_transport = { .submit = test_transport_submit, .close = test_transport_close, .desc_ep = test_transport_desc_ep, + //The following aren't actually used by the test_transport + .aem_fd = NULL, + .aem_purge = NULL, + .aem_read = NULL, }; static void test_set_transport_callback(nvme_mi_ep_t ep, test_submit_cb cb,