diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 10055c7e4a9f..2244dfe5e96f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,6 +73,9 @@
 #include "mpt3sas_trigger_diag.h"
 #include "mpt3sas_trigger_pages.h"
 
+#include <linux/nvme.h>
+#include <uapi/linux/nvme_ioctl.h>
+
 /* driver versioning info */
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR	"Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
@@ -1844,6 +1847,9 @@ mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
 struct _sas_device *
 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, struct sas_rphy *rphy);
+struct _pcie_device *
+__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+	struct MPT3SAS_TARGET *tgt_priv);
 struct virtual_phy *
 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
 	struct hba_port *port, u32 phy);
@@ -2052,6 +2058,10 @@ void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_init_debugfs(void);
 void mpt3sas_exit_debugfs(void);
+int mpt3_nvme_user_cmd64(struct scsi_device *sdev,
+	struct nvme_passthru_cmd64 __user *ucmd);
+int mpt3_nvme_user_cmd(struct scsi_device *sdev,
+	struct nvme_passthru_cmd __user *ucmd);
 
 /**
  * _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 0d8b1e942ded..b92b3069f51a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -660,7 +660,7 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
  */
 static long
 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
-	void __user *mf)
+	void __user *mf, int is_user)
 {
 	MPI2RequestHeader_t *mpi_request = NULL, *request;
 	MPI2DefaultReply_t *mpi_reply;
@@ -708,7 +708,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	}
 
 	/* copy in request message frame from user */
-	if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
+	if (!is_user) {
+		memcpy(mpi_request, mf, karg.data_sge_offset*4);
+	} else if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
 		pr_err("failure at %s:%d/%s()!\n", __FILE__,
 		    __LINE__, __func__);
 		ret = -EFAULT;
@@ -1074,7 +1076,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	/* copy out reply message frame to user */
 	if (karg.max_reply_bytes) {
 		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
-		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+		if (!is_user) {
+			memcpy(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz);
+		} else if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
 		    sz)) {
 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
 			    __LINE__, __func__);
@@ -1096,7 +1100,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		    MPI2_FUNCTION_NVME_ENCAPSULATED) ?
 		    NVME_ERROR_RESPONSE_SIZE : SCSI_SENSE_BUFFERSIZE;
 		sz = min_t(u32, karg.max_sense_bytes, sz_arg);
-		if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
+		if (!is_user) {
+			memcpy(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz);
+		} else if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
 		    sz)) {
 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
 			    __LINE__, __func__);
@@ -2585,7 +2591,7 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
 	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
 	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
 	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
-	return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+	return _ctl_do_mpt_command(ioc, karg, &uarg->mf, 1);
 }
 #endif
 
@@ -2672,7 +2678,7 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
 		}
 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
 			uarg = arg;
-			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf, 1);
 		}
 		break;
 	}
@@ -4187,3 +4193,131 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
 	if (hbas_to_enumerate != 2)
 		misc_deregister(&gen2_ctl_dev);
 }
+
+static int
+mpt3_process_nvme_cmd(struct scsi_device *sdev,
+	struct nvme_common_command encap_cmd, u64 ubuf, u32 ubuf_len,
+	u64 *result)
+{
+	struct Scsi_Host *shost = sdev->host;
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+	struct MPT3SAS_TARGET *sas_target_priv_data;
+	struct _pcie_device *pcie_device;
+	struct mpt3_ioctl_command karg;
+	struct nvme_completion cqe;
+	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request;
+	unsigned long flags;
+	int ret;
+
+	if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
+		return -ENXIO;
+	sas_target_priv_data = sas_device_priv_data->sas_target;
+
+	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+	pcie_device = __mpt3sas_get_pdev_from_target(ioc, sas_target_priv_data);
+	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+	if (!pcie_device)
+		return -ENXIO;
+
+	/*
+	 * Prepare the IOCTL: wrap the NVMe SQE in an NVMe Encapsulated
+	 * request and hand it to _ctl_do_mpt_command() as a kernel-space
+	 * caller (is_user == 0).
+	 */
+	nvme_encap_request = kzalloc(sizeof(encap_cmd) +
+	    sizeof(Mpi26NVMeEncapsulatedRequest_t), GFP_KERNEL);
+	if (!nvme_encap_request)
+		return -ENOMEM;
+	memset(&karg, 0, sizeof(karg));
+	memset(&cqe, 0, sizeof(cqe));
+	nvme_encap_request->DevHandle = cpu_to_le16(pcie_device->handle);
+	nvme_encap_request->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
+	nvme_encap_request->EncapsulatedCommandLength =
+	    cpu_to_le16(sizeof(encap_cmd));
+	nvme_encap_request->DataLength = cpu_to_le32(ubuf_len);
+	karg.timeout = 120;
+
+	/* Admin SQE, and always return a CQE */
+	nvme_encap_request->Flags = cpu_to_le16(0x30);
+	if (ubuf && ubuf_len) {
+		if (encap_cmd.opcode & 0x1) {
+			karg.data_out_buf_ptr = u64_to_user_ptr(ubuf);
+			karg.data_out_size = ubuf_len;
+			/* write (host to controller) flag */
+			nvme_encap_request->Flags |= cpu_to_le16(0x1);
+		} else if (encap_cmd.opcode & 0x2) {
+			karg.data_in_buf_ptr = u64_to_user_ptr(ubuf);
+			karg.data_in_size = ubuf_len;
+			/* read (controller to host) flag */
+			nvme_encap_request->Flags |= cpu_to_le16(0x2);
+		}
+	}
+	/*
+	 * Could be pointed at an MPI26_NVME_ENCAPSULATED_ERROR_REPLY for the
+	 * reply descriptor; only the CQE (via the sense buffer) is used here.
+	 */
+	karg.reply_frame_buf_ptr = NULL;
+	karg.max_reply_bytes = 0;
+	karg.sense_data_ptr = (void __user *)&cqe;
+	karg.max_sense_bytes = sizeof(cqe);
+	karg.data_sge_offset =
+	    (sizeof(encap_cmd) + sizeof(*nvme_encap_request)) / 4;
+	karg.hdr.port_number = pcie_device->port_num;
+	memcpy(nvme_encap_request->NVMe_Command, &encap_cmd, sizeof(encap_cmd));
+	ret = _ctl_do_mpt_command(ioc, karg,
+	    nvme_encap_request, 0);
+	if (ret)
+		goto out;
+	ret = le16_to_cpu(cqe.status) >> 1;
+	if (result)
+		*result = le64_to_cpu(cqe.result.u64);
+out:
+	kfree(nvme_encap_request);
+	return ret;
+}
+
+int mpt3_nvme_user_cmd(struct scsi_device *sdev,
+	struct nvme_passthru_cmd __user *ucmd)
+{
+	struct nvme_passthru_cmd cmd;
+	struct nvme_common_command encap_cmd;
+	u64 result = 0;
+	int ret = 0;
+
+	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+		return -EFAULT;
+	if (cmd.flags)
+		return -EINVAL;
+	memset(&encap_cmd, 0, sizeof(encap_cmd));
+	encap_cmd.opcode = cmd.opcode;
+	encap_cmd.flags = cmd.flags;
+	encap_cmd.nsid = cpu_to_le32(cmd.nsid);
+	encap_cmd.cdw2[0] = cpu_to_le32(cmd.cdw2);
+	encap_cmd.cdw2[1] = cpu_to_le32(cmd.cdw3);
+	encap_cmd.cdw10 = cpu_to_le32(cmd.cdw10);
+	encap_cmd.cdw11 = cpu_to_le32(cmd.cdw11);
+	encap_cmd.cdw12 = cpu_to_le32(cmd.cdw12);
+	encap_cmd.cdw13 = cpu_to_le32(cmd.cdw13);
+	encap_cmd.cdw14 = cpu_to_le32(cmd.cdw14);
+	encap_cmd.cdw15 = cpu_to_le32(cmd.cdw15);
+	ret = mpt3_process_nvme_cmd(sdev, encap_cmd, cmd.addr, cmd.data_len,
+	    &result);
+	if (ret >= 0) {
+		if (put_user(result, &ucmd->result))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+int mpt3_nvme_user_cmd64(struct scsi_device *sdev,
+	struct nvme_passthru_cmd64 __user *ucmd)
+{
+	struct nvme_passthru_cmd64 cmd;
+	struct nvme_common_command encap_cmd;
+	u64 result = 0;
+	int ret = 0;
+
+	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+		return -EFAULT;
+	if (cmd.flags)
+		return -EINVAL;
+	memset(&encap_cmd, 0, sizeof(encap_cmd));
+	encap_cmd.opcode = cmd.opcode;
+	encap_cmd.flags = cmd.flags;
+	encap_cmd.nsid = cpu_to_le32(cmd.nsid);
+	encap_cmd.cdw2[0] = cpu_to_le32(cmd.cdw2);
+	encap_cmd.cdw2[1] = cpu_to_le32(cmd.cdw3);
+	encap_cmd.cdw10 = cpu_to_le32(cmd.cdw10);
+	encap_cmd.cdw11 = cpu_to_le32(cmd.cdw11);
+	encap_cmd.cdw12 = cpu_to_le32(cmd.cdw12);
+	encap_cmd.cdw13 = cpu_to_le32(cmd.cdw13);
+	encap_cmd.cdw14 = cpu_to_le32(cmd.cdw14);
+	encap_cmd.cdw15 = cpu_to_le32(cmd.cdw15);
+	ret = mpt3_process_nvme_cmd(sdev, encap_cmd, cmd.addr, cmd.data_len,
+	    &result);
+	if (ret >= 0) {
+		if (put_user(result, &ucmd->result))
+			return -EFAULT;
+	}
+	return ret;
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 2ea3bdc63817..57118ce1548e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -672,7 +672,7 @@ mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
 	return ret;
 }
 
-static struct _pcie_device *
+struct _pcie_device *
 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
 	struct MPT3SAS_TARGET *tgt_priv)
 {
@@ -11963,12 +11963,41 @@ static struct raid_function_template mpt2sas_raid_functions = {
 	.get_state		= scsih_get_state,
 };
 
+static int
+scsih_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
+{
+	if (scsih_is_nvme(&sdev->sdev_gendev)) {
+		switch (cmd) {
+		case NVME_IOCTL_ID:
+			/* TODO: how to detect which namespace this device maps to? */
+			return 1;
+		case NVME_IOCTL_ADMIN_CMD:
+			return mpt3_nvme_user_cmd(sdev, arg);
+		case NVME_IOCTL_ADMIN64_CMD:
+			return mpt3_nvme_user_cmd64(sdev, arg);
+		/*
+		 * TODO: needed for reservation (resv) register support
+		 */
+		case NVME_IOCTL_IO_CMD:
+			break;
+		default:
+			printk(KERN_ERR "%s(): unhandled NVMe ioctl\n",
+			    __func__);
+			break;
+		}
+	} else {
+		printk(KERN_ERR "%s(): ioctl on a non-NVMe device\n",
+		    __func__);
+	}
+	return -ENOTTY;
+}
+
 /* shost template for SAS 3.0 HBA devices */
 static struct scsi_host_template mpt3sas_driver_template = {
 	.module				= THIS_MODULE,
 	.name				= "Fusion MPT SAS Host",
 	.proc_name			= MPT3SAS_DRIVER_NAME,
 	.queuecommand			= scsih_qcmd,
+	.ioctl				= scsih_ioctl,
+	/* TODO: compat_ioctl */
 	.target_alloc			= scsih_target_alloc,
 	.slave_alloc			= scsih_slave_alloc,
 	.slave_configure		= scsih_slave_configure,
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index 2f76cba67166..1c659f6fc97a 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -46,28 +46,28 @@ struct nvme_passthru_cmd {
 };
 
 struct nvme_passthru_cmd64 {
-	__u8	opcode;
-	__u8	flags;
-	__u16	rsvd1;
-	__u32	nsid;
-	__u32	cdw2;
-	__u32	cdw3;
-	__u64	metadata;
-	__u64	addr;
-	__u32	metadata_len;
+	__u8	opcode;		/* byte offset  0 */
+	__u8	flags;		/* byte offset  1 */
+	__u16	rsvd1;		/* byte offset  2 */
+	__u32	nsid;		/* byte offset  4 */
+	__u32	cdw2;		/* byte offset  8 */
+	__u32	cdw3;		/* byte offset 12 */
+	__u64	metadata;	/* byte offset 16 */
+	__u64	addr;		/* byte offset 24 */
+	__u32	metadata_len;	/* byte offset 32 */
 	union {
-		__u32 data_len; /* for non-vectored io */
+		__u32 data_len; /* for non-vectored io, byte offset 36 */
 		__u32 vec_cnt; /* for vectored io */
 	};
-	__u32	cdw10;
-	__u32	cdw11;
-	__u32	cdw12;
-	__u32	cdw13;
-	__u32	cdw14;
-	__u32	cdw15;
-	__u32	timeout_ms;
-	__u32	rsvd2;
-	__u64	result;
+	__u32	cdw10;		/* byte offset 40 */
+	__u32	cdw11;		/* byte offset 44 */
+	__u32	cdw12;		/* byte offset 48 */
+	__u32	cdw13;		/* byte offset 52 */
+	__u32	cdw14;		/* byte offset 56 */
+	__u32	cdw15;		/* byte offset 60 */
+	__u32	timeout_ms;	/* byte offset 64 */
+	__u32	rsvd2;		/* byte offset 68 */
+	__u64	result;		/* byte offset 72 */
 };
 
 /* same as struct nvme_passthru_cmd64, minus the 8b result field */
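
For reference, below is a minimal userspace sketch (not part of the patch) of how the new passthrough path could be exercised once it is applied. It issues an NVMe Identify Controller admin command through the new scsih_ioctl() hook via NVME_IOCTL_ADMIN_CMD. The /dev/sdX node name (the NVMe drive as exposed by the HBA) and the Identify data offset used for the model string are assumptions about the test setup.

/*
 * Hypothetical test tool: send Identify Controller to a drive behind the
 * mpt3sas HBA through the SCSI disk node (assumed to appear as /dev/sdX).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(int argc, char **argv)
{
	struct nvme_passthru_cmd cmd;
	unsigned char id[4096];
	int fd, ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /dev/sdX\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cmd, 0, sizeof(cmd));
	memset(id, 0, sizeof(id));
	cmd.opcode = 0x06;			/* Identify (admin opcode) */
	cmd.nsid = 0;
	cmd.addr = (uint64_t)(uintptr_t)id;	/* 4 KiB Identify data buffer */
	cmd.data_len = sizeof(id);
	cmd.cdw10 = 1;				/* CNS = 1: Identify Controller */

	/* With this patch, >= 0 is the NVMe status code, < 0 is an errno */
	ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
	if (ret < 0)
		perror("NVME_IOCTL_ADMIN_CMD");
	else
		printf("status 0x%x, model: %.40s\n", ret, (char *)id + 24);

	close(fd);
	return ret != 0;
}

On the driver side this request travels through mpt3_nvme_user_cmd() -> mpt3_process_nvme_cmd() -> _ctl_do_mpt_command() with is_user == 0 for the kernel-built encapsulated request frame and the CQE copy-out, while the data buffer itself is still a user pointer handled by the existing copy paths.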