Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull target updates from Nicholas Bellinger:
 "It has been a very busy development cycle this time around in target
  land, with the highlights including:

   - Kill struct se_subsystem_dev, in favor of direct se_device usage
     (hch)
   - Simplify reservations code by combining SPC-3 + SCSI-2 support for
     virtual backends only (hch)
   - Simplify ALUA code for virtual only backends, and remove left over
     abstractions (hch)
   - Pass sense_reason_t as return value for I/O submission path (hch)
   - Refactor MODE_SENSE emulation to allow for easier addition of new
     mode pages.  (roland)
   - Add emulation of MODE_SELECT (roland)
   - Fix bug in handling of ExpStatSN wrap-around (steve)
   - Fix bug in TMR ABORT_TASK lookup in qla2xxx target (steve)
   - Add WRITE_SAME w/ UNMAP=0 support for IBLOCK backends (nab)
   - Convert ib_srpt to use modern target_submit_cmd caller + drop
     legacy ioctx->kref usage (nab)
   - Convert ib_srpt to use modern target_submit_tmr caller (nab)
   - Add link_magic for fabric allow_link destination target_items for
     symlinks within target_core_fabric_configfs.c code (nab)
   - Allocate pointers instead of full structs for
     config_group->default_groups (sebastian)
   - Fix 32-bit highmem breakage for FILEIO (sebastian)

  All told, hch was able to shave off another ~1K LOC by killing the
  se_subsystem_dev abstraction, along with a number of PR + ALUA
  simplifications.  Another nice patch from Roland refactors MODE_SENSE
  handling and adds initial MODE_SELECT emulation support for virtual
  backends.

  Sebastian found a long-standing issue with the allocation of full
  config_group structs, instead of pointers, for
  config_group->default_groups[] setup in a number of areas; fixing it
  ends up saving memory with big configurations.  He also managed to fix
  another long-standing BUG with broken 32-bit highmem support in the
  FILEIO backend driver.

  Thank you again to everyone who contributed this round!"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (50 commits)
  target/iscsi_target: Add NodeACL tags for initiator group support
  target/tcm_fc: fix the lockdep warning due to inconsistent lock state
  sbp-target: fix error path in sbp_make_tpg()
  sbp-target: use simple assignment in tgt_agent_rw_agent_state()
  iscsi-target: use kstrdup() for iscsi_param
  target/file: merge fd_do_readv() and fd_do_writev()
  target/file: Fix 32-bit highmem breakage for SGL -> iovec mapping
  target: Add link_magic for fabric allow_link destination target_items
  ib_srpt: Convert TMR path to target_submit_tmr
  ib_srpt: Convert I/O path to target_submit_cmd + drop legacy ioctx->kref
  target: Make spc_get_write_same_sectors return sector_t
  target/configfs: use kmalloc() instead of kzalloc() for default groups
  target/configfs: allocate only 6 slots for dev_cg->default_groups
  target/configfs: allocate pointers instead of full struct for default_groups
  target: update error handling for sbc_setup_write_same()
  iscsit: use GFP_ATOMIC under spin lock
  iscsi_target: Remove redundant null check before kfree
  target/iblock: Forward declare bio helpers
  target: Clean up flow in transport_check_aborted_status()
  target: Clean up logic in transport_put_cmd()
  ...
commit 5bd665f28d
Linus Torvalds	2012-12-15 14:25:10 -08:00
49 changed files with 2859 additions and 4003 deletions


@@ -1269,7 +1269,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 		return ioctx;
 
 	BUG_ON(ioctx->ch != ch);
-	kref_init(&ioctx->kref);
 	spin_lock_init(&ioctx->spinlock);
 	ioctx->state = SRPT_STATE_NEW;
 	ioctx->n_rbuf = 0;
@@ -1290,39 +1289,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 	return ioctx;
 }
 
-/**
- * srpt_put_send_ioctx() - Free up resources.
- */
-static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
-{
-	struct srpt_rdma_ch *ch;
-	unsigned long flags;
-
-	BUG_ON(!ioctx);
-	ch = ioctx->ch;
-	BUG_ON(!ch);
-
-	WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
-
-	srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-	transport_generic_free_cmd(&ioctx->cmd, 0);
-
-	if (ioctx->n_rbuf > 1) {
-		kfree(ioctx->rbufs);
-		ioctx->rbufs = NULL;
-		ioctx->n_rbuf = 0;
-	}
-
-	spin_lock_irqsave(&ch->spinlock, flags);
-	list_add(&ioctx->free_list, &ch->free_list);
-	spin_unlock_irqrestore(&ch->spinlock, flags);
-}
-
-static void srpt_put_send_ioctx_kref(struct kref *kref)
-{
-	srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
-}
-
 /**
  * srpt_abort_cmd() - Abort a SCSI command.
  * @ioctx:   I/O context associated with the SCSI command.
@@ -1359,8 +1325,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 	}
 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
 
-	if (state == SRPT_STATE_DONE)
+	if (state == SRPT_STATE_DONE) {
+		struct srpt_rdma_ch *ch = ioctx->ch;
+
+		BUG_ON(ch->sess == NULL);
+
+		target_put_sess_cmd(ch->sess, &ioctx->cmd);
 		goto out;
+	}
 
 	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
 		 ioctx->tag);
@@ -1395,11 +1367,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
 		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
 		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 		break;
 	case SRPT_STATE_MGMT_RSP_SENT:
 		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 		break;
 	default:
 		WARN_ON("ERROR: unexpected command state");
@@ -1457,11 +1429,13 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
 		    && state != SRPT_STATE_DONE))
 		pr_debug("state = %d\n", state);
 
-	if (state != SRPT_STATE_DONE)
-		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
-	else
+	if (state != SRPT_STATE_DONE) {
+		srpt_unmap_sg_to_ib_sge(ch, ioctx);
+		transport_generic_free_cmd(&ioctx->cmd, 0);
+	} else {
 		printk(KERN_ERR "IB completion has been received too late for"
 		       " wr_id = %u.\n", ioctx->ioctx.index);
+	}
 }
 
 /**
@@ -1712,10 +1686,10 @@ out_err:
 static int srpt_check_stop_free(struct se_cmd *cmd)
 {
-	struct srpt_send_ioctx *ioctx;
+	struct srpt_send_ioctx *ioctx = container_of(cmd,
+				struct srpt_send_ioctx, cmd);
 
-	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
-	return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+	return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 }
 
 /**
@@ -1730,12 +1704,12 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
 	uint64_t unpacked_lun;
 	u64 data_len;
 	enum dma_data_direction dir;
-	int ret;
+	sense_reason_t ret;
+	int rc;
 
 	BUG_ON(!send_ioctx);
 
 	srp_cmd = recv_ioctx->ioctx.buf;
-	kref_get(&send_ioctx->kref);
 	cmd = &send_ioctx->cmd;
 	send_ioctx->tag = srp_cmd->tag;
@@ -1755,40 +1729,26 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
 		break;
 	}
 
-	ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
-	if (ret) {
+	if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
 		printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
 		       srp_cmd->tag);
-		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+		ret = TCM_INVALID_CDB_FIELD;
 		goto send_sense;
 	}
 
-	cmd->data_length = data_len;
-	cmd->data_direction = dir;
 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
 				       sizeof(srp_cmd->lun));
-	if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
-		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+	rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
+			&send_ioctx->sense_data[0], unpacked_lun, data_len,
+			MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+	if (rc != 0) {
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		goto send_sense;
 	}
-
-	ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
-	if (ret < 0) {
-		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
-		if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
-			srpt_queue_status(cmd);
-			return 0;
-		} else
-			goto send_sense;
-	}
-
-	transport_handle_cdb_direct(cmd);
 	return 0;
 
 send_sense:
-	transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
-						 0);
+	transport_send_check_condition_and_sense(cmd, ret, 0);
 	return -1;
 }
@@ -1865,9 +1825,11 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
 {
 	struct srp_tsk_mgmt *srp_tsk;
 	struct se_cmd *cmd;
+	struct se_session *sess = ch->sess;
 	uint64_t unpacked_lun;
+	uint32_t tag = 0;
 	int tcm_tmr;
-	int res;
+	int rc;
 
 	BUG_ON(!send_ioctx);
@@ -1882,39 +1844,32 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
 	send_ioctx->tag = srp_tsk->tag;
 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
 	if (tcm_tmr < 0) {
-		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		send_ioctx->cmd.se_tmr_req->response =
 			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
-		goto process_tmr;
+		goto fail;
 	}
-
-	res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
-	if (res < 0) {
-		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
-		goto process_tmr;
-	}
-
 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
 				       sizeof(srp_tsk->lun));
-	res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
-	if (res) {
-		pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
-		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-		send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
-		goto process_tmr;
+
+	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+		rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+		if (rc < 0) {
+			send_ioctx->cmd.se_tmr_req->response =
+					TMR_TASK_DOES_NOT_EXIST;
+			goto fail;
+		}
+		tag = srp_tsk->task_tag;
 	}
-
-	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
-		srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
-
-process_tmr:
-	kref_get(&send_ioctx->kref);
-	if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
-		transport_generic_handle_tmr(&send_ioctx->cmd);
-	else
-		transport_send_check_condition_and_sense(cmd,
-				cmd->scsi_sense_reason, 0);
+	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+				srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+				TARGET_SCF_ACK_KREF);
+	if (rc != 0) {
+		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+		goto fail;
+	}
+	return;
+fail:
+	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
 }
 
 /**
@@ -1956,10 +1911,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
 		}
 	}
 
-	transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
-			      0, DMA_NONE, MSG_SIMPLE_TAG,
-			      send_ioctx->sense_data);
-
 	switch (srp_cmd->opcode) {
 	case SRP_CMD:
 		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
@@ -2365,6 +2316,7 @@ static void srpt_release_channel_work(struct work_struct *w)
 {
 	struct srpt_rdma_ch *ch;
 	struct srpt_device *sdev;
+	struct se_session *se_sess;
 
 	ch = container_of(w, struct srpt_rdma_ch, release_work);
 	pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
@@ -2373,8 +2325,13 @@ static void srpt_release_channel_work(struct work_struct *w)
 	sdev = ch->sport->sdev;
 	BUG_ON(!sdev);
 
-	transport_deregister_session_configfs(ch->sess);
-	transport_deregister_session(ch->sess);
+	se_sess = ch->sess;
+	BUG_ON(!se_sess);
+
+	target_wait_for_sess_cmds(se_sess, 0);
+
+	transport_deregister_session_configfs(se_sess);
+	transport_deregister_session(se_sess);
 	ch->sess = NULL;
 
 	srpt_destroy_ch_ib(ch);
@@ -3099,7 +3056,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
 			  ioctx->tag);
 		srpt_unmap_sg_to_ib_sge(ch, ioctx);
 		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 	}
 
 out:
@@ -3490,6 +3447,23 @@ static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
 
 static void srpt_release_cmd(struct se_cmd *se_cmd)
 {
+	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
+				struct srpt_send_ioctx, cmd);
+	struct srpt_rdma_ch *ch = ioctx->ch;
+	unsigned long flags;
+
+	WARN_ON(ioctx->state != SRPT_STATE_DONE);
+	WARN_ON(ioctx->mapped_sg_count != 0);
+
+	if (ioctx->n_rbuf > 1) {
+		kfree(ioctx->rbufs);
+		ioctx->rbufs = NULL;
+		ioctx->n_rbuf = 0;
+	}
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	list_add(&ioctx->free_list, &ch->free_list);
+	spin_unlock_irqrestore(&ch->spinlock, flags);
 }
 
 /**


@@ -228,7 +228,6 @@ struct srpt_recv_ioctx {
 struct srpt_send_ioctx {
 	struct srpt_ioctx	ioctx;
 	struct srpt_rdma_ch	*ch;
-	struct kref		 kref;
 	struct rdma_iu		*rdma_ius;
 	struct srp_direct_buf	*rbufs;
 	struct srp_direct_buf	single_rbuf;


@@ -1264,8 +1264,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
 {
 	struct qla_hw_data *ha = vha->hw;
+	struct se_session *se_sess = sess->se_sess;
 	struct qla_tgt_mgmt_cmd *mcmd;
+	struct se_cmd *se_cmd;
+	u32 lun = 0;
 	int rc;
+	bool found_lun = false;
+
+	spin_lock(&se_sess->sess_cmd_lock);
+	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+		struct qla_tgt_cmd *cmd =
+			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+		if (cmd->tag == abts->exchange_addr_to_abort) {
+			lun = cmd->unpacked_lun;
+			found_lun = true;
+			break;
+		}
+	}
+	spin_unlock(&se_sess->sess_cmd_lock);
+
+	if (!found_lun)
+		return -ENOENT;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
 	    "qla_target(%d): task abort (tag=%d)\n",
@@ -1283,7 +1302,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 	mcmd->sess = sess;
 	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
 
-	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
 	    abts->exchange_addr_to_abort);
 	if (rc != 0) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,


@@ -620,8 +620,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 			return;
 		}
 
-		cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
-		transport_generic_request_failure(&cmd->se_cmd);
+		transport_generic_request_failure(&cmd->se_cmd,
+				TCM_CHECK_CONDITION_ABORT_CMD);
 		return;
 	}


@@ -735,7 +735,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		spin_lock(&cmd->istate_lock);
 		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
-		    (cmd->stat_sn < exp_statsn)) {
+		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
 			cmd->i_state = ISTATE_REMOVE;
 			spin_unlock(&cmd->istate_lock);
 			iscsit_add_cmd_to_immediate_queue(cmd, conn,
@@ -767,9 +767,8 @@ static int iscsit_handle_scsi_cmd(
 	struct iscsi_conn *conn,
 	unsigned char *buf)
 {
-	int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
-	int dump_immediate_data = 0, send_check_condition = 0, payload_length;
+	int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
 	struct iscsi_cmd *cmd = NULL;
 	struct iscsi_scsi_req *hdr;
 	int iscsi_task_attr;
 	int sam_task_attr;
@@ -956,38 +955,26 @@ done:
 		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
 		hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
 
-	/*
-	 * The CDB is going to an se_device_t.
-	 */
-	ret = transport_lookup_cmd_lun(&cmd->se_cmd,
-				       scsilun_to_int(&hdr->lun));
-	if (ret < 0) {
-		if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
-			pr_debug("Responding to non-acl'ed,"
-				" non-existent or non-exported iSCSI LUN:"
-				" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+						     scsilun_to_int(&hdr->lun));
+	if (cmd->sense_reason)
+		goto attach_cmd;
+
+	cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
+	if (cmd->sense_reason) {
+		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+			return iscsit_add_reject_from_cmd(
+					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					1, 1, buf, cmd);
 		}
-		send_check_condition = 1;
+
 		goto attach_cmd;
 	}
 
-	transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
-	if (transport_ret == -ENOMEM) {
+	if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
 		return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
 				1, 1, buf, cmd);
-	} else if (transport_ret < 0) {
-		/*
-		 * Unsupported SAM Opcode.  CHECK_CONDITION will be sent
-		 * in iscsit_execute_cmd() during the CmdSN OOO Execution
-		 * Mechinism.
-		 */
-		send_check_condition = 1;
-	} else {
-		if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
-			return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-				1, 1, buf, cmd);
 	}
 
 attach_cmd:
@@ -1000,11 +987,12 @@ attach_cmd:
 	 */
 	core_alua_check_nonop_delay(&cmd->se_cmd);
 
-	ret = iscsit_allocate_iovecs(cmd);
-	if (ret < 0)
+	if (iscsit_allocate_iovecs(cmd) < 0) {
 		return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
 				1, 0, buf, cmd);
+	}
+
 	/*
 	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
 	 * the Immediate Bit is not set, and no Immediate
@@ -1031,10 +1019,7 @@ attach_cmd:
 	 * If no Immediate Data is attached, it's OK to return now.
 	 */
 	if (!cmd->immediate_data) {
-		if (send_check_condition)
-			return 0;
-
-		if (cmd->unsolicited_data) {
+		if (!cmd->sense_reason && cmd->unsolicited_data) {
 			iscsit_set_dataout_sequence_values(cmd);
 
 			spin_lock_bh(&cmd->dataout_timeout_lock);
@@ -1050,19 +1035,17 @@ attach_cmd:
 	 * thread.  They are processed in CmdSN order by
 	 * iscsit_check_received_cmdsn() below.
 	 */
-	if (send_check_condition) {
+	if (cmd->sense_reason) {
 		immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
-		dump_immediate_data = 1;
 		goto after_immediate_data;
 	}
 	/*
 	 * Call directly into transport_generic_new_cmd() to perform
 	 * the backend memory allocation.
 	 */
-	ret = transport_generic_new_cmd(&cmd->se_cmd);
-	if (ret < 0) {
+	cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
+	if (cmd->sense_reason) {
 		immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
-		dump_immediate_data = 1;
 		goto after_immediate_data;
 	}
@@ -1079,7 +1062,7 @@ after_immediate_data:
 		 * Special case for Unsupported SAM WRITE Opcodes
 		 * and ImmediateData=Yes.
 		 */
-		if (dump_immediate_data) {
+		if (cmd->sense_reason) {
 			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
 				return -1;
 		} else if (cmd->unsolicited_data) {
@@ -1272,8 +1255,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-	if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-	    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+	if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
 		dump_unsolicited_data = 1;
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -1742,7 +1724,6 @@ static int iscsit_handle_task_mgt_cmd(
 		ret = transport_lookup_tmr_lun(&cmd->se_cmd,
 					       scsilun_to_int(&hdr->lun));
 		if (ret < 0) {
-			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
 			goto attach;
 		}
@@ -1751,10 +1732,8 @@ static int iscsit_handle_task_mgt_cmd(
 	switch (function) {
 	case ISCSI_TM_FUNC_ABORT_TASK:
 		se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
-		if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
-			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		if (se_tmr->response)
 			goto attach;
-		}
 		break;
 	case ISCSI_TM_FUNC_ABORT_TASK_SET:
 	case ISCSI_TM_FUNC_CLEAR_ACA:
@@ -1763,14 +1742,12 @@ static int iscsit_handle_task_mgt_cmd(
 		break;
 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
 		if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
-			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
 			goto attach;
 		}
 		break;
 	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
 		if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
-			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
 			goto attach;
 		}
@@ -1781,7 +1758,7 @@ static int iscsit_handle_task_mgt_cmd(
 		 * Perform sanity checks on the ExpDataSN only if the
 		 * TASK_REASSIGN was successful.
 		 */
-		if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+		if (se_tmr->response)
 			break;
 
 		if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
@@ -1792,7 +1769,6 @@ static int iscsit_handle_task_mgt_cmd(
 	default:
 		pr_err("Unknown TMR function: 0x%02x, protocol"
 			" error.\n", function);
-		cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
 		goto attach;
 	}
@@ -2360,7 +2336,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 	if (!conn_p)
 		return;
 
-	cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+	cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
 	if (!cmd) {
 		iscsit_dec_conn_usage_count(conn_p);
 		return;


@@ -754,9 +754,33 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
 
 TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
 
+static ssize_t lio_target_nacl_show_tag(
+	struct se_node_acl *se_nacl,
+	char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%s", se_nacl->acl_tag);
+}
+
+static ssize_t lio_target_nacl_store_tag(
+	struct se_node_acl *se_nacl,
+	const char *page,
+	size_t count)
+{
+	int ret;
+
+	ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+
+	if (ret < 0)
+		return ret;
+	return count;
+}
+
+TF_NACL_BASE_ATTR(lio_target, tag, S_IRUGO | S_IWUSR);
+
 static struct configfs_attribute *lio_target_initiator_attrs[] = {
 	&lio_target_nacl_info.attr,
 	&lio_target_nacl_cmdsn_depth.attr,
+	&lio_target_nacl_tag.attr,
 	NULL,
 };
 
@@ -803,7 +827,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
 	acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
 	stats_cg = &se_nacl->acl_fabric_stat_group;
 
-	stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+	stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
 				GFP_KERNEL);
 	if (!stats_cg->default_groups) {
 		pr_err("Unable to allocate memory for"
@@ -1268,7 +1292,7 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
 	 */
 	stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
 
-	stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+	stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
 				GFP_KERNEL);
 	if (!stats_cg->default_groups) {
 		pr_err("Unable to allocate memory for"


@@ -474,7 +474,7 @@ struct iscsi_cmd {
 	struct scatterlist	*first_data_sg;
 	u32			first_data_sg_off;
 	u32			kmapped_nents;
+	sense_reason_t		sense_reason;
 }  ____cacheline_aligned;
 
 struct iscsi_tmr_req {


@@ -929,11 +929,10 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 	case ISCSI_OP_SCSI_CMD:
 		/*
 		 * Go ahead and send the CHECK_CONDITION status for
-		 * any SCSI CDB exceptions that may have occurred, also
-		 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+		 * any SCSI CDB exceptions that may have occurred.
 		 */
-		if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-			if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
+		if (cmd->sense_reason) {
+			if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
 				cmd->i_state = ISTATE_SEND_STATUS;
 				spin_unlock_bh(&cmd->istate_lock);
 				iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
@@ -956,7 +955,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 			 * exception
 			 */
 			return transport_send_check_condition_and_sense(se_cmd,
-					se_cmd->scsi_sense_reason, 0);
+					cmd->sense_reason, 0);
 		}
 		/*
 		 * Special case for delayed CmdSN with Immediate
@@ -1013,7 +1012,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+		if (cmd->se_cmd.se_tmr_req->response) {
 			spin_unlock_bh(&cmd->istate_lock);
 			iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
 					cmd->i_state);


@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 		 * made generic here.
 		 */
 		if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
-		    (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+		    iscsi_sna_gte(cmd->stat_sn, conn->sess->exp_cmd_sn)) {
 			list_del(&cmd->i_conn_node);
 			spin_unlock_bh(&conn->cmd_lock);
 			iscsit_free_cmd(cmd);


@@ -127,13 +127,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
 
 	initiatorname_param = iscsi_find_param_from_key(
 			INITIATORNAME, conn->param_list);
-	if (!initiatorname_param)
-		return -1;
-
 	sessiontype_param = iscsi_find_param_from_key(
 			SESSIONTYPE, conn->param_list);
-	if (!sessiontype_param)
+
+	if (!initiatorname_param || !sessiontype_param) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
 		return -1;
+	}
 
 	sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
@@ -254,9 +254,9 @@ static int iscsi_login_zero_tsih_s1(
 		kfree(sess);
 		return -ENOMEM;
 	}
-	spin_lock(&sess_idr_lock);
+	spin_lock_bh(&sess_idr_lock);
 	ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
-	spin_unlock(&sess_idr_lock);
+	spin_unlock_bh(&sess_idr_lock);
 
 	if (ret < 0) {
 		pr_err("idr_get_new() for sess_idr failed\n");
@@ -1118,10 +1118,8 @@ new_sess_out:
 		idr_remove(&sess_idr, conn->sess->session_index);
 		spin_unlock_bh(&sess_idr_lock);
 	}
-	if (conn->sess->sess_ops)
-		kfree(conn->sess->sess_ops);
-	if (conn->sess)
-		kfree(conn->sess);
+	kfree(conn->sess->sess_ops);
+	kfree(conn->sess);
 old_sess_out:
 	iscsi_stop_login_thread_timer(np);
 	/*


@@ -620,8 +620,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
 			login->req_buf,
 			payload_length,
 			conn);
-	if (ret < 0)
+	if (ret < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
 		return -1;
+	}
 
 	if (login->first_request)
 		if (iscsi_target_check_first_request(conn, login) < 0)
@@ -636,8 +639,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
 			login->rsp_buf,
 			&login->rsp_length,
 			conn->param_list);
-	if (ret < 0)
+	if (ret < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
 		return -1;
+	}
 
 	if (!login->auth_complete &&
 	     ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {


@@ -154,22 +154,18 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
 	}
 	INIT_LIST_HEAD(&param->p_list);
 
-	param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+	param->name = kstrdup(name, GFP_KERNEL);
 	if (!param->name) {
 		pr_err("Unable to allocate memory for parameter name.\n");
 		goto out;
 	}
 
-	param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+	param->value = kstrdup(value, GFP_KERNEL);
 	if (!param->value) {
 		pr_err("Unable to allocate memory for parameter value.\n");
 		goto out;
 	}
 
-	memcpy(param->name, name, strlen(name));
-	param->name[strlen(name)] = '\0';
-	memcpy(param->value, value, strlen(value));
-	param->value[strlen(value)] = '\0';
 	param->phase = phase;
 	param->scope = scope;
 	param->sender = sender;
@@ -635,11 +631,8 @@ void iscsi_release_param_list(struct iscsi_param_list *param_list)
 		list_del(&param->p_list);
 
 		kfree(param->name);
-		param->name = NULL;
 		kfree(param->value);
-		param->value = NULL;
 		kfree(param);
-		param = NULL;
 	}
 
 	iscsi_release_extra_responses(param_list);
@@ -687,15 +680,12 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
 {
 	kfree(param->value);
 
-	param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+	param->value = kstrdup(value, GFP_KERNEL);
 	if (!param->value) {
 		pr_err("Unable to allocate memory for value.\n");
 		return -ENOMEM;
 	}
 
-	memcpy(param->value, value, strlen(value));
-	param->value[strlen(value)] = '\0';
-
 	pr_debug("iSCSI Parameter updated to %s=%s\n",
 			param->name, param->value);
 	return 0;


@@ -50,8 +50,8 @@ u8 iscsit_tmr_abort_task(
 	if (!ref_cmd) {
 		pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
 			" %hu.\n", hdr->rtt, conn->cid);
-		return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
-			be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
+		return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
+			iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
 			ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
 	}
 	if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {


@@ -66,8 +66,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 		return NULL;
 	}
 
-	list_for_each_entry(ts, &inactive_ts_list, ts_list)
-		break;
+	ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
 	list_del(&ts->ts_list);
 	iscsit_global->inactive_ts--;


@@ -500,8 +500,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
 		spin_unlock_bh(&conn->immed_queue_lock);
 		return NULL;
 	}
-	list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
-		break;
+	qr = list_first_entry(&conn->immed_queue_list,
+			struct iscsi_queue_req, qr_list);
 
 	list_del(&qr->qr_list);
 	if (qr->cmd)
@@ -575,8 +575,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
 		return NULL;
 	}
 
-	list_for_each_entry(qr, &conn->response_queue_list, qr_list)
-		break;
+	qr = list_first_entry(&conn->response_queue_list,
+			struct iscsi_queue_req, qr_list);
 
 	list_del(&qr->qr_list);
 	if (qr->cmd)


@@ -53,7 +53,6 @@ struct tcm_loop_hba {
 	struct se_hba_s *se_hba;
 	struct se_lun *tl_hba_lun;
 	struct se_port *tl_hba_lun_sep;
-	struct se_device_s *se_dev_hba_ptr;
 	struct tcm_loop_nexus *tl_nexus;
 	struct device dev;
 	struct Scsi_Host *sh;


@@ -1,6 +1,6 @@
 config SBP_TARGET
 	tristate "FireWire SBP-2 fabric module"
-	depends on FIREWIRE && EXPERIMENTAL
+	depends on FIREWIRE
 	help
 	Say Y or M here to enable SCSI target functionality over FireWire.
 	This enables you to expose SCSI devices to other nodes on the FireWire


@@ -704,16 +704,17 @@ static void session_maintenance_work(struct work_struct *work)
 static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
 		struct sbp_target_agent *agent)
 {
-	__be32 state;
+	int state;
 
 	switch (tcode) {
 	case TCODE_READ_QUADLET_REQUEST:
 		pr_debug("tgt_agent AGENT_STATE READ\n");
 
 		spin_lock_bh(&agent->lock);
-		state = cpu_to_be32(agent->state);
+		state = agent->state;
 		spin_unlock_bh(&agent->lock);
-		memcpy(data, &state, sizeof(state));
+
+		*(__be32 *)data = cpu_to_be32(state);
 
 		return RCODE_COMPLETE;
@@ -2207,20 +2208,23 @@ static struct se_portal_group *sbp_make_tpg(
 	tport->mgt_agt = sbp_management_agent_register(tport);
 	if (IS_ERR(tport->mgt_agt)) {
 		ret = PTR_ERR(tport->mgt_agt);
-		kfree(tpg);
-		return ERR_PTR(ret);
+		goto out_free_tpg;
 	}
 
 	ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
 			&tpg->se_tpg, (void *)tpg,
 			TRANSPORT_TPG_TYPE_NORMAL);
-	if (ret < 0) {
-		sbp_management_agent_unregister(tport->mgt_agt);
-		kfree(tpg);
-		return ERR_PTR(ret);
-	}
+	if (ret < 0)
+		goto out_unreg_mgt_agt;
 
 	return &tpg->se_tpg;
+
+out_unreg_mgt_agt:
+	sbp_management_agent_unregister(tport->mgt_agt);
+out_free_tpg:
+	tport->tpg = NULL;
+	kfree(tpg);
+	return ERR_PTR(ret);
 }
 
 static void sbp_drop_tpg(struct se_portal_group *se_tpg)


@@ -3,8 +3,7 @@
 *
 * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA)
 *
- * Copyright (c) 2009-2010 Rising Tide Systems
- * Copyright (c) 2009-2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
@@ -41,7 +40,7 @@
 #include "target_core_alua.h"
 #include "target_core_ua.h"
 
-static int core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int *primary);
 static int core_alua_set_tg_pt_secondary_state(
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 	struct se_port *port, int explict, int offline);
@@ -59,15 +58,17 @@ struct t10_alua_lu_gp *default_lu_gp;
 *
 * See spc4r17 section 6.27
 */
-int target_emulate_report_target_port_groups(struct se_cmd *cmd)
+sense_reason_t
+target_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	unsigned char *buf;
 	u32 rd_len = 0, off;
 	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
+
 	/*
 	 * Skip over RESERVED area to first Target port group descriptor
 	 * depending on the PARAMETER DATA FORMAT type..
@@ -81,13 +82,14 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
 			" small for %s header\n", cmd->data_length,
 			(ext_hdr) ? "extended" : "normal");
-		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-		return -EINVAL;
+		return TCM_INVALID_CDB_FIELD;
 	}
 
 	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		/*
 		 * Check if the Target port group and Target port descriptor list
@@ -160,7 +162,7 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 		}
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	}
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 	 */
@@ -200,32 +202,33 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 *
 * See spc4r17 section 6.35
 */
-int target_emulate_set_target_port_groups(struct se_cmd *cmd)
+sense_reason_t
+target_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
 	unsigned char *buf;
 	unsigned char *ptr;
+	sense_reason_t rc;
 	u32 len = 4; /* Skip over RESERVED area in header */
-	int alua_access_state, primary = 0, rc;
+	int alua_access_state, primary = 0;
 	u16 tg_pt_id, rtpi;
 
-	if (!l_port) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return -EINVAL;
-	}
+	if (!l_port)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
 	if (cmd->data_length < 4) {
 		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
 			" small\n", cmd->data_length);
-		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-		return -EINVAL;
+		return TCM_INVALID_PARAMETER_LIST;
 	}
 
 	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

 	/*
 	 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
@@ -234,8 +237,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
 	if (!l_tg_pt_gp_mem) {
 		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-		rc = -EINVAL;
+		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 		goto out;
 	}
 	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -243,24 +245,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 	if (!l_tg_pt_gp) {
 		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-		rc = -EINVAL;
+		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 		goto out;
 	}
-	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
 	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
-	if (!rc) {
+	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
 		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 				" while TPGS_EXPLICT_ALUA is disabled\n");
-		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-		rc = -EINVAL;
+		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 		goto out;
 	}
 
 	ptr = &buf[4]; /* Skip over RESERVED area in header */
 
 	while (len < cmd->data_length) {
+		bool found = false;
 		alua_access_state = (ptr[0] & 0x0f);
 		/*
 		 * Check the received ALUA access state, and determine if
@@ -268,7 +268,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		 * access state.
 		 */
 		rc = core_alua_check_transition(alua_access_state, &primary);
-		if (rc != 0) {
+		if (rc) {
 			/*
 			 * If the SET TARGET PORT GROUPS attempts to establish
 			 * an invalid combination of target port asymmetric
@@ -279,11 +279,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 			 * REQUEST, and the additional sense code set to INVALID
 			 * FIELD IN PARAMETER LIST.
 			 */
-			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-			rc = -EINVAL;
 			goto out;
 		}
-		rc = -1;
+
 		/*
 		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
 		 * specifies a primary target port asymmetric access state,
@@ -303,9 +301,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 			 * Locate the matching target port group ID from
 			 * the global tg_pt_gp list
 			 */
-			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			list_for_each_entry(tg_pt_gp,
-					&su_dev->t10_alua.tg_pt_gps_list,
+					&dev->t10_alua.tg_pt_gps_list,
 					tg_pt_gp_list) {
 				if (!tg_pt_gp->tg_pt_gp_valid_id)
 					continue;
@@ -315,27 +313,20 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 				smp_mb__after_atomic_inc();
-				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
-				rc = core_alua_do_port_transition(tg_pt_gp,
+				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+				if (!core_alua_do_port_transition(tg_pt_gp,
 						dev, l_port, nacl,
-						alua_access_state, 1);
+						alua_access_state, 1))
+					found = true;
 
-				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 				smp_mb__after_atomic_dec();
 				break;
 			}
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
-			/*
-			 * If not matching target port group ID can be located
-			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
-			 */
-			if (rc != 0) {
-				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-				rc = -EINVAL;
-				goto out;
-			}
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 		} else {
 			/*
 			 * Extact the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -354,25 +345,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 					continue;
 
 				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
 				spin_unlock(&dev->se_port_lock);
 
-				rc = core_alua_set_tg_pt_secondary_state(
-						tg_pt_gp_mem, port, 1, 1);
+				if (!core_alua_set_tg_pt_secondary_state(
+						tg_pt_gp_mem, port, 1, 1))
+					found = true;
 
 				spin_lock(&dev->se_port_lock);
 				break;
 			}
 			spin_unlock(&dev->se_port_lock);
-			/*
-			 * If not matching relative target port identifier can
-			 * be located, throw an exception with ASCQ:
-			 * INVALID_PARAMETER_LIST
-			 */
-			if (rc != 0) {
-				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-				rc = -EINVAL;
-				goto out;
-			}
+		}
+
+		if (!found) {
+			rc = TCM_INVALID_PARAMETER_LIST;
+			goto out;
 		}
 
 		ptr += 4;
@@ -523,40 +511,27 @@ static inline int core_alua_state_transition(
 }
 
 /*
- * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
- * in transport_cmd_sequencer().  This function is assigned to
- * struct t10_alua *->state_check() in core_setup_alua()
- */
-static int core_alua_state_check_nop(
-	struct se_cmd *cmd,
-	unsigned char *cdb,
-	u8 *alua_ascq)
-{
-	return 0;
-}
-
-/*
- * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
- * This function is assigned to struct t10_alua *->state_check() in
- * core_setup_alua()
- *
- * Also, this function can return three different return codes to
- * signal transport_generic_cmd_sequencer()
- *
 * return 1: Is used to signal LUN not accecsable, and check condition/not ready
 * return 0: Used to signal success
 * reutrn -1: Used to signal failure, and invalid cdb field
 */
-static int core_alua_state_check(
-	struct se_cmd *cmd,
-	unsigned char *cdb,
-	u8 *alua_ascq)
+sense_reason_t
+target_alua_state_check(struct se_cmd *cmd)
 {
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *cdb = cmd->t_task_cdb;
 	struct se_lun *lun = cmd->se_lun;
 	struct se_port *port = lun->lun_sep;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	int out_alua_state, nonop_delay_msecs;
+	u8 alua_ascq;
+	int ret;
+
+	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+		return 0;
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+		return 0;
 
 	if (!port)
 		return 0;
@@ -565,11 +540,11 @@ static int core_alua_state_check(
 	 * access state: OFFLINE
 	 */
 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
-		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
 		pr_debug("ALUA: Got secondary offline status for local"
 				" target port\n");
-		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-		return 1;
+		alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+		ret = 1;
+		goto out;
 	}
 	/*
 	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -594,14 +569,18 @@ static int core_alua_state_check(
 
 	switch (out_alua_state) {
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
-		return core_alua_state_nonoptimized(cmd, cdb,
-					nonop_delay_msecs, alua_ascq);
+		ret = core_alua_state_nonoptimized(cmd, cdb,
+					nonop_delay_msecs, &alua_ascq);
+		break;
 	case ALUA_ACCESS_STATE_STANDBY:
-		return core_alua_state_standby(cmd, cdb, alua_ascq);
+		ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+		break;
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
-		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+		ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+		break;
 	case ALUA_ACCESS_STATE_TRANSITION:
-		return core_alua_state_transition(cmd, cdb, alua_ascq);
+		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+		break;
 	/*
 	 * OFFLINE is a secondary ALUA target port group access state, that is
 	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -610,7 +589,24 @@ static int core_alua_state_check(
 	default:
 		pr_err("Unknown ALUA access state: 0x%02x\n",
 				out_alua_state);
-		return -EINVAL;
+		return TCM_INVALID_CDB_FIELD;
 	}
 
+out:
+	if (ret > 0) {
+		/*
+		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+		 * The ALUA additional sense code qualifier (ASCQ) is determined
+		 * by the ALUA primary or secondary access state..
+		 */
+		pr_debug("[%s]: ALUA TG Port not available, "
+			"SenseKey: NOT_READY, ASC/ASCQ: "
+			"0x04/0x%02x\n",
+			cmd->se_tfo->get_fabric_name(), alua_ascq);
+		cmd->scsi_asc = 0x04;
+		cmd->scsi_ascq = alua_ascq;
+		return TCM_CHECK_CONDITION_NOT_READY;
+	}
 
 	return 0;
@@ -619,7 +615,8 @@ static int core_alua_state_check(
 /*
 * Check implict and explict ALUA state change request.
 */
-static int core_alua_check_transition(int state, int *primary)
+static sense_reason_t
+core_alua_check_transition(int state, int *primary)
 {
 	switch (state) {
 	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
@@ -641,7 +638,7 @@ static int core_alua_check_transition(int state, int *primary)
 		break;
 	default:
 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
-		return -EINVAL;
+		return TCM_INVALID_PARAMETER_LIST;
 	}
 
 	return 0;
@@ -758,8 +755,7 @@ static int core_alua_update_tpg_primary_metadata(
 	int primary_state,
 	unsigned char *md_buf)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
-	struct t10_wwn *wwn = &su_dev->t10_wwn;
+	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 	char path[ALUA_METADATA_PATH_LEN];
 	int len;
@@ -899,7 +895,6 @@ int core_alua_do_port_transition(
 {
 	struct se_device *dev;
 	struct se_port *port;
-	struct se_subsystem_dev *su_dev;
 	struct se_node_acl *nacl;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
@@ -949,14 +944,13 @@ int core_alua_do_port_transition(
 			    lu_gp_mem_list) {
 
 		dev = lu_gp_mem->lu_gp_mem_dev;
-		su_dev = dev->se_sub_dev;
 		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
 		smp_mb__after_atomic_inc();
 		spin_unlock(&lu_gp->lu_gp_lock);
 
-		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 		list_for_each_entry(tg_pt_gp,
-				&su_dev->t10_alua.tg_pt_gps_list,
+				&dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 
 			if (!tg_pt_gp->tg_pt_gp_valid_id)
@@ -981,7 +975,7 @@ int core_alua_do_port_transition(
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
 			 * success.
@@ -989,11 +983,11 @@ int core_alua_do_port_transition(
 			core_alua_do_transition_tg_pt(tg_pt_gp, port,
 					nacl, md_buf, new_state, explict);
 
-			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
 		}
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 		spin_lock(&lu_gp->lu_gp_lock);
 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -1268,14 +1262,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 
 void core_alua_free_lu_gp_mem(struct se_device *dev)
 {
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 
-	if (alua->alua_type != SPC3_ALUA_EMULATED)
-		return;
-
 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
 	if (!lu_gp_mem)
 		return;
@@ -1358,10 +1347,8 @@ void __core_alua_drop_lu_gp_mem(
 	spin_unlock(&lu_gp->lu_gp_lock);
 }
 
-struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-	struct se_subsystem_dev *su_dev,
-	const char *name,
-	int def_group)
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
+		const char *name, int def_group)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 
@@ -1375,7 +1362,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+	tg_pt_gp->tg_pt_gp_dev = dev;
 	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 			ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
@@ -1392,14 +1379,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
 
 	if (def_group) {
-		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 		tg_pt_gp->tg_pt_gp_id =
su_dev->t10_alua.alua_tg_pt_gps_counter++; dev->t10_alua.alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1; tg_pt_gp->tg_pt_gp_valid_id = 1;
su_dev->t10_alua.alua_tg_pt_gps_count++; dev->t10_alua.alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list, list_add_tail(&tg_pt_gp->tg_pt_gp_list,
&su_dev->t10_alua.tg_pt_gps_list); &dev->t10_alua.tg_pt_gps_list);
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} }
return tg_pt_gp; return tg_pt_gp;
@ -1409,9 +1396,10 @@ int core_alua_set_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp, struct t10_alua_tg_pt_gp *tg_pt_gp,
u16 tg_pt_gp_id) u16 tg_pt_gp_id)
{ {
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
u16 tg_pt_gp_id_tmp; u16 tg_pt_gp_id_tmp;
/* /*
* The tg_pt_gp->tg_pt_gp_id may only be set once.. * The tg_pt_gp->tg_pt_gp_id may only be set once..
*/ */
@ -1421,19 +1409,19 @@ int core_alua_set_tg_pt_gp_id(
return -EINVAL; return -EINVAL;
} }
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
pr_err("Maximum ALUA alua_tg_pt_gps_count:" pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n"); " 0x0000ffff reached\n");
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
return -ENOSPC; return -ENOSPC;
} }
again: again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
su_dev->t10_alua.alua_tg_pt_gps_counter++; dev->t10_alua.alua_tg_pt_gps_counter++;
list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list, list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) { tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
if (!tg_pt_gp_id) if (!tg_pt_gp_id)
@ -1441,7 +1429,7 @@ again:
pr_err("ALUA Target Port Group ID: %hu already" pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id); " exists, ignoring request\n", tg_pt_gp_id);
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return -EINVAL; return -EINVAL;
} }
} }
@ -1449,9 +1437,9 @@ again:
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1; tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list, list_add_tail(&tg_pt_gp->tg_pt_gp_list,
&su_dev->t10_alua.tg_pt_gps_list); &dev->t10_alua.tg_pt_gps_list);
su_dev->t10_alua.alua_tg_pt_gps_count++; dev->t10_alua.alua_tg_pt_gps_count++;
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return 0; return 0;
} }
@ -1480,8 +1468,9 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
void core_alua_free_tg_pt_gp( void core_alua_free_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp) struct t10_alua_tg_pt_gp *tg_pt_gp)
{ {
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
/* /*
* Once we have reached this point, config_item_put() has already * Once we have reached this point, config_item_put() has already
* been called from target_core_alua_drop_tg_pt_gp(). * been called from target_core_alua_drop_tg_pt_gp().
@ -1490,10 +1479,11 @@ void core_alua_free_tg_pt_gp(
* no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp. * can be made while we are releasing struct t10_alua_tg_pt_gp.
*/ */
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_del(&tg_pt_gp->tg_pt_gp_list); list_del(&tg_pt_gp->tg_pt_gp_list);
su_dev->t10_alua.alua_tg_pt_gps_counter--; dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/* /*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by * Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in * core_alua_get_tg_pt_gp_by_name() in
@ -1502,6 +1492,7 @@ void core_alua_free_tg_pt_gp(
*/ */
while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
cpu_relax(); cpu_relax();
/* /*
* Release reference to struct t10_alua_tg_pt_gp from all associated * Release reference to struct t10_alua_tg_pt_gp from all associated
* struct se_port. * struct se_port.
@ -1525,9 +1516,9 @@ void core_alua_free_tg_pt_gp(
* default_tg_pt_gp. * default_tg_pt_gp.
*/ */
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) { if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
su_dev->t10_alua.default_tg_pt_gp); dev->t10_alua.default_tg_pt_gp);
} else } else
tg_pt_gp_mem->tg_pt_gp = NULL; tg_pt_gp_mem->tg_pt_gp = NULL;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@ -1541,14 +1532,9 @@ void core_alua_free_tg_pt_gp(
void core_alua_free_tg_pt_gp_mem(struct se_port *port) void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{ {
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
if (alua->alua_type != SPC3_ALUA_EMULATED)
return;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem) if (!tg_pt_gp_mem)
return; return;
@ -1574,25 +1560,24 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
} }
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
struct se_subsystem_dev *su_dev, struct se_device *dev, const char *name)
const char *name)
{ {
struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci; struct config_item *ci;
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) { tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id) if (!tg_pt_gp->tg_pt_gp_valid_id)
continue; continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item; ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
if (!strcmp(config_item_name(ci), name)) { if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp; return tg_pt_gp;
} }
} }
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return NULL; return NULL;
} }
@ -1600,11 +1585,11 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
static void core_alua_put_tg_pt_gp_from_name( static void core_alua_put_tg_pt_gp_from_name(
struct t10_alua_tg_pt_gp *tg_pt_gp) struct t10_alua_tg_pt_gp *tg_pt_gp)
{ {
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} }
/* /*
@ -1640,16 +1625,11 @@ static void __core_alua_drop_tg_pt_gp_mem(
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{ {
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct config_item *tg_pt_ci; struct config_item *tg_pt_ci;
struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0; ssize_t len = 0;
if (alua->alua_type != SPC3_ALUA_EMULATED)
return len;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem) if (!tg_pt_gp_mem)
return len; return len;
@ -1683,7 +1663,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
{ {
struct se_portal_group *tpg; struct se_portal_group *tpg;
struct se_lun *lun; struct se_lun *lun;
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; struct se_device *dev = port->sep_lun->lun_se_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char buf[TG_PT_GROUP_NAME_BUF]; unsigned char buf[TG_PT_GROUP_NAME_BUF];
@ -1692,13 +1672,9 @@ ssize_t core_alua_store_tg_pt_gp_info(
tpg = port->sep_tpg; tpg = port->sep_tpg;
lun = port->sep_lun; lun = port->sep_lun;
if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
pr_warn("SPC3_ALUA_EMULATED not enabled for" if (!tg_pt_gp_mem)
" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), return 0;
tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
return -EINVAL;
}
if (count > TG_PT_GROUP_NAME_BUF) { if (count > TG_PT_GROUP_NAME_BUF) {
pr_err("ALUA Target Port Group alias too large!\n"); pr_err("ALUA Target Port Group alias too large!\n");
@ -1716,18 +1692,11 @@ ssize_t core_alua_store_tg_pt_gp_info(
* struct t10_alua_tg_pt_gp. This reference is released with * struct t10_alua_tg_pt_gp. This reference is released with
* core_alua_put_tg_pt_gp_from_name() below. * core_alua_put_tg_pt_gp_from_name() below.
*/ */
tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
strstrip(buf)); strstrip(buf));
if (!tg_pt_gp_new) if (!tg_pt_gp_new)
return -ENODEV; return -ENODEV;
} }
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem) {
if (tg_pt_gp_new)
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
return -EINVAL;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
@ -1750,7 +1719,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
su_dev->t10_alua.default_tg_pt_gp); dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return count; return count;
@ -2054,32 +2023,12 @@ ssize_t core_alua_store_secondary_write_metadata(
return count; return count;
} }
int core_setup_alua(struct se_device *dev, int force_pt) int core_setup_alua(struct se_device *dev)
{ {
struct se_subsystem_dev *su_dev = dev->se_sub_dev; if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
struct t10_alua *alua = &su_dev->t10_alua; !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem; struct t10_alua_lu_gp_member *lu_gp_mem;
/*
* If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
* of the Underlying SCSI hardware. In Linux/SCSI terms, this can
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but emulate SCSI logic themselves.
*/
if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
!(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
alua->alua_type = SPC_ALUA_PASSTHROUGH;
alua->alua_state_check = &core_alua_state_check_nop;
pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
" emulation\n", dev->transport->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated ALUA.
*/
if (dev->transport->get_device_rev(dev) >= SCSI_3) {
pr_debug("%s: Enabling ALUA Emulation for SPC-3"
" device\n", dev->transport->name);
/* /*
* Associate this struct se_device with the default ALUA * Associate this struct se_device with the default ALUA
* LUN Group. * LUN Group.
@ -2088,8 +2037,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
if (IS_ERR(lu_gp_mem)) if (IS_ERR(lu_gp_mem))
return PTR_ERR(lu_gp_mem); return PTR_ERR(lu_gp_mem);
alua->alua_type = SPC3_ALUA_EMULATED;
alua->alua_state_check = &core_alua_state_check;
spin_lock(&lu_gp_mem->lu_gp_mem_lock); spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem, __core_alua_attach_lu_gp_mem(lu_gp_mem,
default_lu_gp); default_lu_gp);
@ -2098,11 +2045,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
pr_debug("%s: Adding to default ALUA LU Group:" pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n", " core/alua/lu_gps/default_lu_gp\n",
dev->transport->name); dev->transport->name);
} else {
alua->alua_type = SPC2_ALUA_DISABLED;
alua->alua_state_check = &core_alua_state_check_nop;
pr_debug("%s: Disabling ALUA Emulation for SPC-2"
" device\n", dev->transport->name);
} }
return 0; return 0;


@@ -72,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

-extern int target_emulate_report_target_port_groups(struct se_cmd *);
-extern int target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
 				struct se_device *, struct se_port *,
@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
 				struct t10_alua_lu_gp *);
 extern void core_alua_drop_lu_gp_dev(struct se_device *);
 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-			struct se_subsystem_dev *, const char *, int);
+			struct se_device *, const char *, int);
 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
 extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 			struct se_port *);
@@ -131,6 +131,7 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
 			char *);
 extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
 			const char *, size_t);
-extern int core_setup_alua(struct se_device *, int);
+extern int core_setup_alua(struct se_device *);
+extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);

 #endif /* TARGET_CORE_ALUA_H */
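
Note on the sense_reason_t conversion visible throughout this header and the hunks above: it replaces the old two-channel convention of returning -errno while separately stashing a code in cmd->scsi_sense_reason. A minimal sketch of the pattern, not taken from the tree (op_check() is a hypothetical backend helper; the TCM_* codes are the ones used above):

    /*
     * Sketch only: report SCSI sense directly through the return value
     * instead of -errno plus a cmd->scsi_sense_reason side channel.
     */
    static sense_reason_t op_check(struct se_cmd *cmd)
    {
    	if (cmd->data_length < 8)
    		return TCM_INVALID_PARAMETER_LIST; /* core builds CHECK CONDITION */
    	if (cmd->t_task_cdb[1] & 0x01)
    		return TCM_INVALID_CDB_FIELD;
    	return 0; /* 0 == no sense data, command may proceed */
    }

The caller in the core can then translate a nonzero sense_reason_t into sense data in one place, rather than every backend setting status fields by hand.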

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -4,10 +4,9 @@
  * This file contains generic fabric module configfs infrastructure for
  * TCM v4.x code
  *
- * Copyright (c) 2010,2011 Rising Tide Systems
- * Copyright (c) 2010,2011 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
  *
- * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -71,6 +70,12 @@ static int target_fabric_mappedlun_link(
 	struct se_portal_group *se_tpg;
 	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
 	int ret = 0, lun_access;
+
+	if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
+		pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
+			" %p to struct lun: %p\n", lun_ci, lun);
+		return -EFAULT;
+	}
 	/*
 	 * Ensure that the source port exists
 	 */
@@ -358,7 +363,7 @@ static struct config_group *target_fabric_make_mappedlun(
 	}

 	lacl_cg = &lacl->se_lun_group;
-	lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+	lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
 				GFP_KERNEL);
 	if (!lacl_cg->default_groups) {
 		pr_err("Unable to allocate lacl_cg->default_groups\n");
@@ -374,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun(
 	lacl_cg->default_groups[1] = NULL;

 	ml_stat_grp = &lacl->ml_stat_grps.stat_group;
-	ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+	ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
 				GFP_KERNEL);
 	if (!ml_stat_grp->default_groups) {
 		pr_err("Unable to allocate ml_stat_grp->default_groups\n");
@@ -734,17 +739,21 @@ static int target_fabric_port_link(
 	struct config_item *se_dev_ci)
 {
 	struct config_item *tpg_ci;
-	struct se_device *dev;
 	struct se_lun *lun = container_of(to_config_group(lun_ci),
 				struct se_lun, lun_group);
 	struct se_lun *lun_p;
 	struct se_portal_group *se_tpg;
-	struct se_subsystem_dev *se_dev = container_of(
-				to_config_group(se_dev_ci), struct se_subsystem_dev,
-				se_dev_group);
+	struct se_device *dev =
+		container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
 	struct target_fabric_configfs *tf;
 	int ret;

+	if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
+		pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
+			" %p to struct se_device: %p\n", se_dev_ci, dev);
+		return -EFAULT;
+	}
+
 	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
 	se_tpg = container_of(to_config_group(tpg_ci),
 				struct se_portal_group, tpg_group);
@@ -755,14 +764,6 @@ static int target_fabric_port_link(
 		return -EEXIST;
 	}

-	dev = se_dev->se_dev_ptr;
-	if (!dev) {
-		pr_err("Unable to locate struct se_device pointer from"
-			" %s\n", config_item_name(se_dev_ci));
-		ret = -ENODEV;
-		goto out;
-	}
-
 	lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
 	if (IS_ERR(lun_p)) {
 		pr_err("core_dev_add_lun() failed\n");
@@ -869,7 +870,7 @@ static struct config_group *target_fabric_make_lun(
 		return ERR_PTR(-EINVAL);

 	lun_cg = &lun->lun_group;
-	lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+	lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
 				GFP_KERNEL);
 	if (!lun_cg->default_groups) {
 		pr_err("Unable to allocate lun_cg->default_groups\n");


@@ -4,8 +4,7 @@
  * This file contains generic high level protocol identifier and PR
  * handlers for TCM fabric modules
  *
- * Copyright (c) 2010 Rising Tide Systems, Inc.
- * Copyright (c) 2010 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@linux-iscsi.org>
  *


@@ -3,10 +3,7 @@
  *
  * This file contains the Storage Engine <-> FILEIO transport specific functions
  *
- * Copyright (c) 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2005-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -41,7 +38,10 @@
 #include "target_core_file.h"

-static struct se_subsystem_api fileio_template;
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct fd_dev, dev);
+}

 /* fd_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -82,7 +82,7 @@ static void fd_detach_hba(struct se_hba *hba)
 	hba->hba_ptr = NULL;
 }

-static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct fd_dev *fd_dev;
 	struct fd_host *fd_host = hba->hba_ptr;
@@ -97,34 +97,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
 	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

-	return fd_dev;
+	return &fd_dev->dev;
 }

-/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static struct se_device *fd_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
+static int fd_configure_device(struct se_device *dev)
 {
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct queue_limits *limits;
-	struct fd_dev *fd_dev = p;
-	struct fd_host *fd_host = hba->hba_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct fd_host *fd_host = dev->se_hba->hba_ptr;
 	struct file *file;
 	struct inode *inode = NULL;
-	int dev_flags = 0, flags, ret = -EINVAL;
+	int flags, ret = -EINVAL;

-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		pr_err("Missing fd_dev_name=\n");
+		return -EINVAL;
+	}

 	/*
 	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
 	 * of pure timestamp updates.
 	 */
 	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

 	/*
 	 * Optionally allow fd_buffered_io=1 to be enabled for people
 	 * who want use the fs buffer cache as an WriteCache mechanism.
@@ -154,22 +148,17 @@ static struct se_device *fd_create_virtdevice(
 	 */
 	inode = file->f_mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
-		struct request_queue *q;
+		struct request_queue *q = bdev_get_queue(inode->i_bdev);
 		unsigned long long dev_size;
-		/*
-		 * Setup the local scope queue_limits from struct request_queue->limits
-		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-		 */
-		q = bdev_get_queue(inode->i_bdev);
-		limits = &dev_limits.limits;
-		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
-		limits->max_hw_sectors = queue_max_hw_sectors(q);
-		limits->max_sectors = queue_max_sectors(q);
+
+		dev->dev_attrib.hw_block_size =
+			bdev_logical_block_size(inode->i_bdev);
+		dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+
 		/*
 		 * Determine the number of bytes from i_size_read() minus
 		 * one (1) logical sector from underlying struct block_device
 		 */
-		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
 		dev_size = (i_size_read(file->f_mapping->host) -
 				fd_dev->fd_block_size);
@@ -185,26 +174,18 @@ static struct se_device *fd_create_virtdevice(
 			goto fail;
 		}

-		limits = &dev_limits.limits;
-		limits->logical_block_size = FD_BLOCKSIZE;
-		limits->max_hw_sectors = FD_MAX_SECTORS;
-		limits->max_sectors = FD_MAX_SECTORS;
-		fd_dev->fd_block_size = FD_BLOCKSIZE;
+		dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
+		dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
 	}

-	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
-	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+	fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;

-	dev = transport_add_device_to_core_hba(hba, &fileio_template,
-				se_dev, dev_flags, fd_dev,
-				&dev_limits, "FILEIO", FD_VERSION);
-	if (!dev)
-		goto fail;
+	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

 	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
 		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
 			" with FDBD_HAS_BUFFERED_IO_WCE\n");
-		dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
+		dev->dev_attrib.emulate_write_cache = 1;
 	}

 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
@@ -214,22 +195,18 @@ static struct se_device *fd_create_virtdevice(
 		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
 			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

-	return dev;
+	return 0;
 fail:
 	if (fd_dev->fd_file) {
 		filp_close(fd_dev->fd_file, NULL);
 		fd_dev->fd_file = NULL;
 	}
-	return ERR_PTR(ret);
+	return ret;
 }

-/* fd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_device(void *p)
+static void fd_free_device(struct se_device *dev)
 {
-	struct fd_dev *fd_dev = p;
+	struct fd_dev *fd_dev = FD_DEV(dev);

 	if (fd_dev->fd_file) {
 		filp_close(fd_dev->fd_file, NULL);
@@ -239,17 +216,16 @@ static void fd_free_device(void *p)
 	kfree(fd_dev);
 }

-static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
-		u32 sgl_nents)
+static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, int is_write)
 {
 	struct se_device *se_dev = cmd->se_dev;
-	struct fd_dev *dev = se_dev->dev_ptr;
+	struct fd_dev *dev = FD_DEV(se_dev);
 	struct file *fd = dev->fd_file;
 	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (cmd->t_task_lba *
-		      se_dev->se_sub_dev->se_dev_attrib.block_size);
+	loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
 	int ret = 0, i;

 	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -260,81 +236,58 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
 	for_each_sg(sgl, sg, sgl_nents, i) {
 		iov[i].iov_len = sg->length;
-		iov[i].iov_base = sg_virt(sg);
+		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
 	}

 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+	if (is_write)
+		ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+	else
+		ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
 	set_fs(old_fs);

+	for_each_sg(sgl, sg, sgl_nents, i)
+		kunmap(sg_page(sg));
+
 	kfree(iov);
-	/*
-	 * Return zeros and GOOD status even if the READ did not return
-	 * the expected virt_size for struct file w/o a backing struct
-	 * block_device.
-	 */
-	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+
+	if (is_write) {
 		if (ret < 0 || ret != cmd->data_length) {
-			pr_err("vfs_readv() returned %d,"
-				" expecting %d for S_ISBLK\n", ret,
-				(int)cmd->data_length);
+			pr_err("%s() write returned %d\n", __func__, ret);
 			return (ret < 0 ? ret : -EINVAL);
 		}
 	} else {
-		if (ret < 0) {
-			pr_err("vfs_readv() returned %d for non"
-				" S_ISBLK\n", ret);
-			return ret;
+		/*
+		 * Return zeros and GOOD status even if the READ did not return
+		 * the expected virt_size for struct file w/o a backing struct
+		 * block_device.
+		 */
+		if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+			if (ret < 0 || ret != cmd->data_length) {
+				pr_err("%s() returned %d, expecting %u for "
+						"S_ISBLK\n", __func__, ret,
+						cmd->data_length);
+				return (ret < 0 ? ret : -EINVAL);
+			}
+		} else {
+			if (ret < 0) {
+				pr_err("%s() returned %d for non S_ISBLK\n",
+						__func__, ret);
+				return ret;
+			}
 		}
 	}
 	return 1;
 }

-static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
-		u32 sgl_nents)
-{
-	struct se_device *se_dev = cmd->se_dev;
-	struct fd_dev *dev = se_dev->dev_ptr;
-	struct file *fd = dev->fd_file;
-	struct scatterlist *sg;
-	struct iovec *iov;
-	mm_segment_t old_fs;
-	loff_t pos = (cmd->t_task_lba *
-		      se_dev->se_sub_dev->se_dev_attrib.block_size);
-	int ret, i = 0;
-
-	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
-	if (!iov) {
-		pr_err("Unable to allocate fd_do_writev iov[]\n");
-		return -ENOMEM;
-	}
-
-	for_each_sg(sgl, sg, sgl_nents, i) {
-		iov[i].iov_len = sg->length;
-		iov[i].iov_base = sg_virt(sg);
-	}
-
-	old_fs = get_fs();
-	set_fs(get_ds());
-	ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
-	set_fs(old_fs);
-
-	kfree(iov);
-
-	if (ret < 0 || ret != cmd->data_length) {
-		pr_err("vfs_writev() returned %d\n", ret);
-		return (ret < 0 ? ret : -EINVAL);
-	}
-
-	return 1;
-}
-
-static int fd_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_sync_cache(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct fd_dev *fd_dev = dev->dev_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	int immed = (cmd->t_task_cdb[1] & 0x2);
 	loff_t start, end;
 	int ret;
@@ -353,7 +306,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
 		start = 0;
 		end = LLONG_MAX;
 	} else {
-		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		start = cmd->t_task_lba * dev->dev_attrib.block_size;
 		if (cmd->data_length)
 			end = start + cmd->data_length;
 		else
@@ -367,17 +320,16 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
 	if (immed)
 		return 0;

-	if (ret) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	if (ret)
 		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
-	} else {
+	else
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
-	}

 	return 0;
 }

-static int fd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd)
 {
 	struct scatterlist *sgl = cmd->t_data_sg;
 	u32 sgl_nents = cmd->t_data_nents;
@@ -390,30 +342,29 @@ static int fd_execute_rw(struct se_cmd *cmd)
 	 * physical memory addresses to struct iovec virtual memory.
 	 */
 	if (data_direction == DMA_FROM_DEVICE) {
-		ret = fd_do_readv(cmd, sgl, sgl_nents);
+		ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
 	} else {
-		ret = fd_do_writev(cmd, sgl, sgl_nents);
+		ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
 		/*
 		 * Perform implict vfs_fsync_range() for fd_do_writev() ops
 		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
 		 * Allow this to happen independent of WCE=0 setting.
 		 */
 		if (ret > 0 &&
-		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		    dev->dev_attrib.emulate_fua_write > 0 &&
 		    (cmd->se_cmd_flags & SCF_FUA)) {
-			struct fd_dev *fd_dev = dev->dev_ptr;
+			struct fd_dev *fd_dev = FD_DEV(dev);
 			loff_t start = cmd->t_task_lba *
-				dev->se_sub_dev->se_dev_attrib.block_size;
+				dev->dev_attrib.block_size;
 			loff_t end = start + cmd->data_length;

 			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 		}
 	}

-	if (ret < 0) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return ret;
-	}
+	if (ret < 0)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
 	if (ret)
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
@@ -430,12 +381,10 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };

-static ssize_t fd_set_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	const char *page, ssize_t count)
+static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, arg, token;
@@ -502,24 +451,9 @@ out:
 	return (!ret) ? count : ret;
 }

-static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
-
-	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
-		pr_err("Missing fd_dev_name=\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t fd_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
-{
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	ssize_t bl = 0;

 	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
@@ -530,27 +464,9 @@ static ssize_t fd_show_configfs_dev_params(
 	return bl;
 }

-/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_rev(struct se_device *dev)
-{
-	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-/* fd_get_device_type(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_type(struct se_device *dev)
-{
-	return TYPE_DISK;
-}
-
 static sector_t fd_get_blocks(struct se_device *dev)
 {
-	struct fd_dev *fd_dev = dev->dev_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	struct file *f = fd_dev->fd_file;
 	struct inode *i = f->f_mapping->host;
 	unsigned long long dev_size;
@@ -564,34 +480,35 @@ static sector_t fd_get_blocks(struct se_device *dev)
 	else
 		dev_size = fd_dev->fd_dev_size;

-	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
+	return div_u64(dev_size, dev->dev_attrib.block_size);
 }

-static struct spc_ops fd_spc_ops = {
+static struct sbc_ops fd_sbc_ops = {
 	.execute_rw		= fd_execute_rw,
 	.execute_sync_cache	= fd_execute_sync_cache,
 };

-static int fd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+fd_parse_cdb(struct se_cmd *cmd)
 {
-	return sbc_parse_cdb(cmd, &fd_spc_ops);
+	return sbc_parse_cdb(cmd, &fd_sbc_ops);
 }

 static struct se_subsystem_api fileio_template = {
 	.name			= "fileio",
+	.inquiry_prod		= "FILEIO",
+	.inquiry_rev		= FD_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
 	.attach_hba		= fd_attach_hba,
 	.detach_hba		= fd_detach_hba,
-	.allocate_virtdevice	= fd_allocate_virtdevice,
-	.create_virtdevice	= fd_create_virtdevice,
+	.alloc_device		= fd_alloc_device,
+	.configure_device	= fd_configure_device,
 	.free_device		= fd_free_device,
 	.parse_cdb		= fd_parse_cdb,
-	.check_configfs_dev_params = fd_check_configfs_dev_params,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
-	.get_device_rev		= fd_get_device_rev,
-	.get_device_type	= fd_get_device_type,
+	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= fd_get_blocks,
 };
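
The merged fd_do_rw() above is also where the 32-bit highmem fix from the pull summary lands: sg_virt() is only valid for pages with a permanent kernel mapping, so each scatterlist page is now mapped with kmap() before building the iovec and unmapped afterwards. The essence of the change, reduced to a sketch (variable names as in the function above):

    /*
     * Sketch of the highmem-safe SGL -> iovec mapping: kmap() creates a
     * temporary kernel mapping for highmem pages, where sg_virt() would
     * hand back a bogus address on 32-bit configurations.
     */
    for_each_sg(sgl, sg, sgl_nents, i) {
    	iov[i].iov_len  = sg->length;
    	iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
    }
    /* ... vfs_readv()/vfs_writev() on iov ... */
    for_each_sg(sgl, sg, sgl_nents, i)
    	kunmap(sg_page(sg));

Folding fd_do_readv() and fd_do_writev() into one helper with an is_write flag also means the fix only had to be made in one place.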


@@ -17,6 +17,8 @@
 #define FDBD_HAS_BUFFERED_IO_WCE 0x04

 struct fd_dev {
+	struct se_device dev;
+
 	u32		fbd_flags;
 	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
 	/* Unique Ramdisk Device ID in Ramdisk HBA */
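
With struct se_device embedded at the start of fd_dev, the FD_DEV() helper added in target_core_file.c is plain container_of() arithmetic. Roughly what that macro expands to, as a sketch rather than the actual preprocessor output:

    /*
     * Sketch: container_of() subtracts the member offset to recover the
     * wrapping backend structure from the embedded se_device pointer.
     */
    static inline struct fd_dev *FD_DEV(struct se_device *dev)
    {
    	return (struct fd_dev *)((char *)dev - offsetof(struct fd_dev, dev));
    }

This is what lets the core hand backends a bare struct se_device * while each backend keeps its private state in the surrounding structure, replacing the old se_subsystem_dev indirection.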


@@ -3,10 +3,7 @@
 *
 * This file contains the TCM HBA Transport related functions.
 *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
@@ -113,7 +110,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
 		return ERR_PTR(-ENOMEM);
 	}

-	INIT_LIST_HEAD(&hba->hba_dev_list);
 	spin_lock_init(&hba->device_lock);
 	mutex_init(&hba->hba_access_mutex);
@@ -152,8 +148,7 @@ out_free_hba:
 int
 core_delete_hba(struct se_hba *hba)
 {
-	if (!list_empty(&hba->hba_dev_list))
-		dump_stack();
+	WARN_ON(hba->dev_count);

 	hba->transport->detach_hba(hba);


@@ -4,10 +4,7 @@
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
@@ -50,9 +47,13 @@
 #define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
 #define IBLOCK_BIO_POOL_SIZE	128

-static struct se_subsystem_api iblock_template;
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct iblock_dev, dev);
+}

-static void iblock_bio_done(struct bio *, int);
+static struct se_subsystem_api iblock_template;

 /* iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
@@ -70,7 +71,7 @@ static void iblock_detach_hba(struct se_hba *hba)
 {
 }

-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct iblock_dev *ib_dev = NULL;
@@ -82,40 +83,28 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
 	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);

-	return ib_dev;
+	return &ib_dev->dev;
 }

-static struct se_device *iblock_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
+static int iblock_configure_device(struct se_device *dev)
 {
-	struct iblock_dev *ib_dev = p;
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct block_device *bd = NULL;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	struct request_queue *q;
-	struct queue_limits *limits;
-	u32 dev_flags = 0;
+	struct block_device *bd = NULL;
 	fmode_t mode;
-	int ret = -EINVAL;
+	int ret = -ENOMEM;

-	if (!ib_dev) {
-		pr_err("Unable to locate struct iblock_dev parameter\n");
-		return ERR_PTR(ret);
+	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		pr_err("Missing udev_path= parameters for IBLOCK\n");
+		return -EINVAL;
 	}
-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

 	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
 	if (!ib_dev->ibd_bio_set) {
-		pr_err("IBLOCK: Unable to create bioset()\n");
-		return ERR_PTR(-ENOMEM);
+		pr_err("IBLOCK: Unable to create bioset\n");
+		goto out;
 	}
-	pr_debug("IBLOCK: Created bio_set()\n");
-	/*
-	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
-	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
-	 */
+
 	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
 			ib_dev->ibd_udev_path);
@@ -126,27 +115,15 @@ static struct se_device *iblock_create_virtdevice(
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
 	if (IS_ERR(bd)) {
 		ret = PTR_ERR(bd);
-		goto failed;
+		goto out_free_bioset;
 	}
-	/*
-	 * Setup the local scope queue_limits from struct request_queue->limits
-	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-	 */
-	q = bdev_get_queue(bd);
-	limits = &dev_limits.limits;
-	limits->logical_block_size = bdev_logical_block_size(bd);
-	limits->max_hw_sectors = UINT_MAX;
-	limits->max_sectors = UINT_MAX;
-	dev_limits.hw_queue_depth = q->nr_requests;
-	dev_limits.queue_depth = q->nr_requests;
-
 	ib_dev->ibd_bd = bd;

-	dev = transport_add_device_to_core_hba(hba,
-			&iblock_template, se_dev, dev_flags, ib_dev,
-			&dev_limits, "IBLOCK", IBLOCK_VERSION);
-	if (!dev)
-		goto failed;
+	q = bdev_get_queue(bd);
+
+	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+	dev->dev_attrib.hw_max_sectors = UINT_MAX;
+	dev->dev_attrib.hw_queue_depth = q->nr_requests;

 	/*
 	 * Check if the underlying struct block_device request_queue supports
@@ -154,38 +131,41 @@ static struct se_device *iblock_create_virtdevice(
 	 * in ATA and we need to set TPE=1
 	 */
 	if (blk_queue_discard(q)) {
-		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+		dev->dev_attrib.max_unmap_lba_count =
 				q->limits.max_discard_sectors;
+
 		/*
 		 * Currently hardcoded to 1 in Linux/SCSI code..
 		 */
-		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
-		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+		dev->dev_attrib.max_unmap_block_desc_count = 1;
+		dev->dev_attrib.unmap_granularity =
 				q->limits.discard_granularity >> 9;
-		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+		dev->dev_attrib.unmap_granularity_alignment =
 				q->limits.discard_alignment;

 		pr_debug("IBLOCK: BLOCK Discard support available,"
 				" disabled by default\n");
 	}
+	/*
+	 * Enable write same emulation for IBLOCK and use 0xFFFF as
+	 * the smaller WRITE_SAME(10) only has a two-byte block count.
+	 */
+	dev->dev_attrib.max_write_same_len = 0xFFFF;

 	if (blk_queue_nonrot(q))
-		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
-
-	return dev;
+		dev->dev_attrib.is_nonrot = 1;
+	return 0;

-failed:
-	if (ib_dev->ibd_bio_set) {
-		bioset_free(ib_dev->ibd_bio_set);
-		ib_dev->ibd_bio_set = NULL;
-	}
-	ib_dev->ibd_bd = NULL;
-	return ERR_PTR(ret);
+out_free_bioset:
+	bioset_free(ib_dev->ibd_bio_set);
+	ib_dev->ibd_bio_set = NULL;
+out:
+	return ret;
 }

-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
 {
-	struct iblock_dev *ib_dev = p;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

 	if (ib_dev->ibd_bd != NULL)
 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
@@ -203,12 +183,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 			bdev_logical_block_size(bd)) - 1);
 	u32 block_size = bdev_logical_block_size(bd);

-	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+	if (block_size == dev->dev_attrib.block_size)
 		return blocks_long;

 	switch (block_size) {
 	case 4096:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 2048:
 			blocks_long <<= 1;
 			break;
@@ -222,7 +202,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 2048:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 1;
 			break;
@@ -237,7 +217,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 1024:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 2;
 			break;
@@ -252,7 +232,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 512:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 3;
 			break;
@@ -273,6 +253,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 	return blocks_long;
 }

+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr = cmd->priv;
+	u8 status;
+
+	if (!atomic_dec_and_test(&ibr->pending))
+		return;
+
+	if (atomic_read(&ibr->ib_bio_err_cnt))
+		status = SAM_STAT_CHECK_CONDITION;
+	else
+		status = SAM_STAT_GOOD;
+
+	target_complete_cmd(cmd, status);
+	kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_req *ibr = cmd->priv;
+
+	/*
+	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
+	 */
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+		err = -EIO;
+
+	if (err != 0) {
+		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+			" err: %d\n", bio, err);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic_inc();
+	}
+
+	bio_put(bio);
+
+	iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	struct bio *bio;
+
+	/*
+	 * Only allocate as many vector entries as the bio code allows us to,
+	 * we'll loop later on until we have handled the whole request.
+	 */
+	if (sg_num > BIO_MAX_PAGES)
+		sg_num = BIO_MAX_PAGES;
+
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!bio) {
+		pr_err("Unable to allocate memory for bio\n");
+		return NULL;
+	}
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = cmd;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_sector = lba;
+
+	return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+	struct blk_plug plug;
+	struct bio *bio;
+
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+}
+
 static void iblock_end_io_flush(struct bio *bio, int err)
 {
 	struct se_cmd *cmd = bio->bi_private;
@@ -281,13 +342,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
 		pr_err("IBLOCK: cache flush failed: %d\n", err);

 	if (cmd) {
-		if (err) {
-			cmd->scsi_sense_reason =
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		if (err)
 			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
-		} else {
+		else
 			target_complete_cmd(cmd, SAM_STAT_GOOD);
-		}
 	}

 	bio_put(bio);
@@ -297,9 +355,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
 * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
-static int iblock_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
 {
-	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
 	int immed = (cmd->t_task_cdb[1] & 0x2);
 	struct bio *bio;
@@ -319,25 +378,27 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
 	return 0;
 }

-static int iblock_execute_unmap(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct iblock_dev *ibd = dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	unsigned char *buf, *ptr = NULL;
 	sector_t lba;
 	int size;
 	u32 range;
-	int ret = 0;
-	int dl, bd_dl;
+	sense_reason_t ret = 0;
+	int dl, bd_dl, err;

 	if (cmd->data_length < 8) {
 		pr_warn("UNMAP parameter list length %u too small\n",
 			cmd->data_length);
-		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-		return -EINVAL;
+		return TCM_INVALID_PARAMETER_LIST;
 	}

 	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

 	dl = get_unaligned_be16(&buf[0]);
 	bd_dl = get_unaligned_be16(&buf[2]);
@@ -349,9 +410,8 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
 	else
 		size = bd_dl;

-	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
-		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-		ret = -EINVAL;
+	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+		ret = TCM_INVALID_PARAMETER_LIST;
 		goto err;
 	}
@@ -366,23 +426,22 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
 		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
 				 (unsigned long long)lba, range);

-		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
-			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-			ret = -EINVAL;
+		if (range > dev->dev_attrib.max_unmap_lba_count) {
+			ret = TCM_INVALID_PARAMETER_LIST;
 			goto err;
 		}

 		if (lba + range > dev->transport->get_blocks(dev) + 1) {
-			cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
-			ret = -EINVAL;
+			ret = TCM_ADDRESS_OUT_OF_RANGE;
 			goto err;
 		}

-		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
+		err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
 					   GFP_KERNEL, 0);
-		if (ret < 0) {
+		if (err < 0) {
 			pr_err("blkdev_issue_discard() failed: %d\n",
-					ret);
+					err);
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			goto err;
 		}
@@ -397,23 +456,86 @@ err:
 	return ret;
 }

-static int iblock_execute_write_same(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_write_same_unmap(struct se_cmd *cmd)
 {
-	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
-	int ret;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	int rc;

-	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
-				   spc_get_write_same_sectors(cmd), GFP_KERNEL,
-				   0);
-	if (ret < 0) {
-		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
-		return ret;
+	rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
+			spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+	if (rc < 0) {
+		pr_warn("blkdev_issue_discard() failed: %d\n", rc);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}

 	target_complete_cmd(cmd, GOOD);
 	return 0;
 }

+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr;
+	struct scatterlist *sg;
+	struct bio *bio;
+	struct bio_list list;
+	sector_t block_lba = cmd->t_task_lba;
+	sector_t sectors = spc_get_write_same_sectors(cmd);
+
+	sg = &cmd->t_data_sg[0];
+
+	if (cmd->t_data_nents > 1 ||
+	    sg->length != cmd->se_dev->dev_attrib.block_size) {
+		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+			" block_size: %u\n", cmd->t_data_nents, sg->length,
+			cmd->se_dev->dev_attrib.block_size);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ibr)
+		goto fail;
+	cmd->priv = ibr;
+
+	bio = iblock_get_bio(cmd, block_lba, 1);
+	if (!bio)
+		goto fail_free_ibr;
+
+	bio_list_init(&list);
+	bio_list_add(&list, bio);
+
+	atomic_set(&ibr->pending, 1);
+
+	while (sectors) {
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+
+			bio = iblock_get_bio(cmd, block_lba, 1);
+			if (!bio)
+				goto fail_put_bios;
+
+			atomic_inc(&ibr->pending);
+			bio_list_add(&list, bio);
+		}
+
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sectors -= 1;
+	}
+
+	iblock_submit_bios(&list, WRITE);
+	return 0;
+
+fail_put_bios:
+	while ((bio = bio_list_pop(&list)))
+		bio_put(bio);
+fail_free_ibr:
+	kfree(ibr);
+fail:
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
 enum {
 	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
 };
@@ -425,11 +547,10 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };

-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
-					       struct se_subsystem_dev *se_dev,
-					       const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, token;
@@ -491,43 +612,26 @@ out:
 	return (!ret) ? count : ret;
 }

-static ssize_t iblock_check_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
-	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
-		pr_err("Missing udev_path= parameters for IBLOCK\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
-{
-	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-	struct block_device *bd = ibd->ibd_bd;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
char buf[BDEVNAME_SIZE]; char buf[BDEVNAME_SIZE];
ssize_t bl = 0; ssize_t bl = 0;
if (bd) if (bd)
bl += sprintf(b + bl, "iBlock device: %s", bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf)); bdevname(bd, buf));
if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
bl += sprintf(b + bl, " UDEV PATH: %s", bl += sprintf(b + bl, " UDEV PATH: %s",
ibd->ibd_udev_path); ib_dev->ibd_udev_path);
bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly); bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
bl += sprintf(b + bl, " "); bl += sprintf(b + bl, " ");
if (bd) { if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
"" : (bd->bd_holder == ibd) ? "" : (bd->bd_holder == ib_dev) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS"); "CLAIMED: IBLOCK" : "CLAIMED: OS");
} else { } else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@ -536,61 +640,8 @@ static ssize_t iblock_show_configfs_dev_params(
return bl; return bl;
} }
static void iblock_complete_cmd(struct se_cmd *cmd) static sense_reason_t
{ iblock_execute_rw(struct se_cmd *cmd)
struct iblock_req *ibr = cmd->priv;
u8 status;
if (!atomic_dec_and_test(&ibr->pending))
return;
if (atomic_read(&ibr->ib_bio_err_cnt))
status = SAM_STAT_CHECK_CONDITION;
else
status = SAM_STAT_GOOD;
target_complete_cmd(cmd, status);
kfree(ibr);
}
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
struct bio *bio;
/*
* Only allocate as many vector entries as the bio code allows us to,
* we'll loop later on until we have handled the whole request.
*/
if (sg_num > BIO_MAX_PAGES)
sg_num = BIO_MAX_PAGES;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
}
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
return bio;
}
static void iblock_submit_bios(struct bio_list *list, int rw)
{
struct blk_plug plug;
struct bio *bio;
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
submit_bio(rw, bio);
blk_finish_plug(&plug);
}
static int iblock_execute_rw(struct se_cmd *cmd)
{ {
struct scatterlist *sgl = cmd->t_data_sg; struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents; u32 sgl_nents = cmd->t_data_nents;
@ -611,8 +662,8 @@ static int iblock_execute_rw(struct se_cmd *cmd)
* Force data to disk if we pretend to not have a volatile * Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit. * write cache, or the initiator set the Force Unit Access bit.
*/ */
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || if (dev->dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && (dev->dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA))) (cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA; rw = WRITE_FUA;
else else
@ -625,19 +676,18 @@ static int iblock_execute_rw(struct se_cmd *cmd)
* Convert the blocksize advertised to the initiator to the 512 byte * Convert the blocksize advertised to the initiator to the 512 byte
* units unconditionally used by the Linux block layer. * units unconditionally used by the Linux block layer.
*/ */
if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) if (dev->dev_attrib.block_size == 4096)
block_lba = (cmd->t_task_lba << 3); block_lba = (cmd->t_task_lba << 3);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) else if (dev->dev_attrib.block_size == 2048)
block_lba = (cmd->t_task_lba << 2); block_lba = (cmd->t_task_lba << 2);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) else if (dev->dev_attrib.block_size == 1024)
block_lba = (cmd->t_task_lba << 1); block_lba = (cmd->t_task_lba << 1);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) else if (dev->dev_attrib.block_size == 512)
block_lba = cmd->t_task_lba; block_lba = cmd->t_task_lba;
else { else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:" pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size); " %u\n", dev->dev_attrib.block_size);
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOSYS;
} }
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@ -697,83 +747,48 @@ fail_put_bios:
bio_put(bio); bio_put(bio);
fail_free_ibr: fail_free_ibr:
kfree(ibr); kfree(ibr);
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail: fail:
return -ENOMEM; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
static u32 iblock_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
static u32 iblock_get_device_type(struct se_device *dev)
{
return TYPE_DISK;
} }
static sector_t iblock_get_blocks(struct se_device *dev) static sector_t iblock_get_blocks(struct se_device *dev)
{ {
struct iblock_dev *ibd = dev->dev_ptr; struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ibd->ibd_bd; struct block_device *bd = ib_dev->ibd_bd;
struct request_queue *q = bdev_get_queue(bd); struct request_queue *q = bdev_get_queue(bd);
return iblock_emulate_read_cap_with_block_size(dev, bd, q); return iblock_emulate_read_cap_with_block_size(dev, bd, q);
} }
static void iblock_bio_done(struct bio *bio, int err) static struct sbc_ops iblock_sbc_ops = {
{
struct se_cmd *cmd = bio->bi_private;
struct iblock_req *ibr = cmd->priv;
/*
* Set -EIO if !BIO_UPTODATE and the passed is still err=0
*/
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
err = -EIO;
if (err != 0) {
pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
*/
atomic_inc(&ibr->ib_bio_err_cnt);
smp_mb__after_atomic_inc();
}
bio_put(bio);
iblock_complete_cmd(cmd);
}
static struct spc_ops iblock_spc_ops = {
.execute_rw = iblock_execute_rw, .execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache, .execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same, .execute_write_same = iblock_execute_write_same,
.execute_write_same_unmap = iblock_execute_write_same_unmap,
.execute_unmap = iblock_execute_unmap, .execute_unmap = iblock_execute_unmap,
}; };
static int iblock_parse_cdb(struct se_cmd *cmd) static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{ {
return sbc_parse_cdb(cmd, &iblock_spc_ops); return sbc_parse_cdb(cmd, &iblock_sbc_ops);
} }
static struct se_subsystem_api iblock_template = { static struct se_subsystem_api iblock_template = {
.name = "iblock", .name = "iblock",
.inquiry_prod = "IBLOCK",
.inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = iblock_attach_hba, .attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba, .detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice, .alloc_device = iblock_alloc_device,
.create_virtdevice = iblock_create_virtdevice, .configure_device = iblock_configure_device,
.free_device = iblock_free_device, .free_device = iblock_free_device,
.parse_cdb = iblock_parse_cdb, .parse_cdb = iblock_parse_cdb,
.check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params, .set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params, .show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_device_rev = iblock_get_device_rev, .get_device_type = sbc_get_device_type,
.get_device_type = iblock_get_device_type,
.get_blocks = iblock_get_blocks, .get_blocks = iblock_get_blocks,
}; };


@@ -14,6 +14,7 @@ struct iblock_req {
 #define IBDF_HAS_UDEV_PATH		0x01
 
 struct iblock_dev {
+	struct se_device dev;
 	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
 	u32 ibd_flags;
 	struct bio_set *ibd_bio_set;
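Note: the IBLOCK_DEV() accessor used throughout the hunks above is defined in a part of target_core_iblock.c that this diff does not show. Judging from the struct se_device member added to struct iblock_dev here, and from the RD_DEV()/PSCSI_DEV() helpers visible later in this series, it is presumably the same container_of() pattern:

    /* Sketch of the presumed helper; its definition is outside this excerpt. */
    static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
    {
            return container_of(dev, struct iblock_dev, dev);
    }

This is what lets the backend drop the old se_device->dev_ptr indirection: the core hands back a struct se_device * and the backend recovers its private struct with container_of(), with no extra allocation or back-pointer.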


@@ -19,18 +19,12 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
 		struct se_lun *);
 void core_dev_unexport(struct se_device *, struct se_portal_group *,
 		struct se_lun *);
-int target_report_luns(struct se_cmd *);
-void se_release_device_for_hba(struct se_device *);
-void se_release_vpd_for_dev(struct se_device *);
-int se_free_virtual_device(struct se_device *, struct se_hba *);
-int se_dev_check_online(struct se_device *);
-int se_dev_check_shutdown(struct se_device *);
-void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
 int se_dev_set_task_timeout(struct se_device *, u32);
 int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
 int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
 int se_dev_set_unmap_granularity(struct se_device *, u32);
 int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_max_write_same_len(struct se_device *, u32);
 int se_dev_set_emulate_dpo(struct se_device *, int);
 int se_dev_set_emulate_fua_write(struct se_device *, int);
 int se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -60,6 +54,9 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
 		struct se_lun_acl *lacl);
 int core_dev_setup_virtual_lun0(void);
 void core_dev_release_virtual_lun0(void);
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
+int target_configure_device(struct se_device *dev);
+void target_free_device(struct se_device *);
 
 /* target_core_hba.c */
 struct se_hba *core_alloc_hba(const char *, u32, u32);
@@ -105,10 +102,11 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 int transport_clear_lun_from_sessions(struct se_lun *);
 void transport_send_task_abort(struct se_cmd *);
-int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+void target_qf_do_work(struct work_struct *work);
 
 /* target_core_stat.c */
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void target_stat_setup_dev_default_groups(struct se_device *);
 void target_stat_setup_port_default_groups(struct se_lun *);
 void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
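The three prototypes added above replace the old allocate_virtdevice/create_virtdevice pair with an alloc/configure/free lifecycle. A rough sketch of the intended call order, under the assumption that the core routes these through the matching se_subsystem_api hooks (the real callers live in files not shown in this excerpt, and this helper is purely hypothetical):

    /* Hypothetical caller, for illustration only. */
    static struct se_device *example_backend_setup(struct se_hba *hba,
                                                   const char *name,
                                                   const char *params)
    {
            struct se_device *dev;

            dev = target_alloc_device(hba, name);   /* -> api->alloc_device() */
            if (!dev)
                    return NULL;

            /* configfs attribute writes, e.g. "udev_path=/dev/sdc" */
            dev->transport->set_configfs_dev_params(dev, params, strlen(params));

            if (target_configure_device(dev) != 0) {  /* -> api->configure_device() */
                    target_free_device(dev);          /* -> api->free_device() */
                    return NULL;
            }
            return dev;
    }

Note there is no separate check_configfs_dev_params step any more; as the backend diffs below show, that validation moved into each configure_device implementation.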

File diff suppressed because it is too large

@@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
 
 extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
 			char *, u32);
-extern int target_scsi2_reservation_release(struct se_cmd *);
-extern int target_scsi2_reservation_reserve(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
 extern int core_scsi3_alloc_aptpl_registration(
 			struct t10_reservation *, u64,
 			unsigned char *, unsigned char *, u32,
@@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
 extern void core_scsi3_free_all_registrations(struct se_device *);
 extern unsigned char *core_scsi3_pr_dump_type(int);
 
-extern int target_scsi3_emulate_pr_in(struct se_cmd *);
-extern int target_scsi3_emulate_pr_out(struct se_cmd *);
-extern int core_setup_reservations(struct se_device *, int);
+extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
+extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
+extern sense_reason_t target_check_reservation(struct se_cmd *);
 
 #endif /* TARGET_CORE_PR_H */
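These prototype changes illustrate the error-handling convention adopted across the series: instead of stashing a sense code in cmd->scsi_sense_reason and returning a negative errno, a handler now returns the sense_reason_t directly, with 0 meaning success. Schematically (hypothetical handler, condensed from the patterns in the diffs above):

    /* Old convention: errno return plus a side-channel sense field. */
    static int old_style(struct se_cmd *cmd)
    {
            if (cmd->data_length < 8) {
                    cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
                    return -EINVAL;
            }
            return 0;
    }

    /* New convention: the sense code is the return value itself. */
    static sense_reason_t new_style(struct se_cmd *cmd)
    {
            if (cmd->data_length < 8)
                    return TCM_INVALID_PARAMETER_LIST;
            return 0;
    }

The caller no longer has to keep the errno and the sense field consistent by hand, which removes the risk of the two getting out of sync.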


@@ -3,10 +3,7 @@
  *
  * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -53,9 +50,14 @@
 
 #define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
 
+static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct pscsi_dev_virt, dev);
+}
+
 static struct se_subsystem_api pscsi_template;
 
-static int pscsi_execute_cmd(struct se_cmd *cmd);
+static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
 static void pscsi_req_done(struct request *, int);
 
 /*	pscsi_attach_hba():
@@ -219,7 +221,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
 
 	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
 
-	wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+	wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
 
 	kfree(buf);
 	return 0;
@@ -299,23 +301,13 @@ out:
 	kfree(buf);
 }
 
-/*	pscsi_add_device_to_list():
- *
- *
- */
-static struct se_device *pscsi_add_device_to_list(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	struct pscsi_dev_virt *pdv,
-	struct scsi_device *sd,
-	int dev_flags)
+static int pscsi_add_device_to_list(struct se_device *dev,
+		struct scsi_device *sd)
 {
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct request_queue *q;
-	struct queue_limits *limits;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct request_queue *q = sd->request_queue;
 
-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	pdv->pdv_sd = sd;
 
 	if (!sd->queue_depth) {
 		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
@@ -324,54 +316,27 @@ static struct se_device *pscsi_add_device_to_list(
 			" queue_depth to %d\n", sd->channel, sd->id,
 				sd->lun, sd->queue_depth);
 	}
-	/*
-	 * Setup the local scope queue_limits from struct request_queue->limits
-	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-	 */
-	q = sd->request_queue;
-	limits = &dev_limits.limits;
-	limits->logical_block_size = sd->sector_size;
-	limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
-	limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
-	dev_limits.hw_queue_depth = sd->queue_depth;
-	dev_limits.queue_depth = sd->queue_depth;
+
+	dev->dev_attrib.hw_block_size = sd->sector_size;
+	dev->dev_attrib.hw_max_sectors =
+		min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
+
 	/*
 	 * Setup our standard INQUIRY info into se_dev->t10_wwn
 	 */
-	pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
-
-	/*
-	 * Set the pointer pdv->pdv_sd to from passed struct scsi_device,
-	 * which has already been referenced with Linux SCSI code with
-	 * scsi_device_get() in this file's pscsi_create_virtdevice().
-	 *
-	 * The passthrough operations called by the transport_add_device_*
-	 * function below will require this pointer to be set for passthroug
-	 * ops.
-	 *
-	 * For the shutdown case in pscsi_free_device(), this struct
-	 * scsi_device reference is released with Linux SCSI code
-	 * scsi_device_put() and the pdv->pdv_sd cleared.
-	 */
-	pdv->pdv_sd = sd;
-
-	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
-				se_dev, dev_flags, pdv,
-				&dev_limits, NULL, NULL);
-	if (!dev) {
-		pdv->pdv_sd = NULL;
-		return NULL;
-	}
+	pscsi_set_inquiry_info(sd, &dev->t10_wwn);
 
 	/*
 	 * Locate VPD WWN Information used for various purposes within
 	 * the Storage Engine.
 	 */
-	if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+	if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
 		/*
 		 * If VPD Unit Serial returned GOOD status, try
 		 * VPD Device Identification page (0x83).
 		 */
-		pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+		pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
 	}
 
 	/*
@@ -379,10 +344,11 @@ static struct se_device *pscsi_add_device_to_list(
 	 */
 	if (sd->type == TYPE_TAPE)
 		pscsi_tape_read_blocksize(dev, sd);
-
-	return dev;
+	return 0;
 }
 
-static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *pscsi_alloc_device(struct se_hba *hba,
+		const char *name)
 {
 	struct pscsi_dev_virt *pdv;
 
@@ -391,139 +357,125 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
 		pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
 		return NULL;
 	}
-	pdv->pdv_se_hba = hba;
 
 	pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
-	return pdv;
+	return &pdv->dev;
 }
 
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static struct se_device *pscsi_create_type_disk(
-	struct scsi_device *sd,
-	struct pscsi_dev_virt *pdv,
-	struct se_subsystem_dev *se_dev,
-	struct se_hba *hba)
+static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
 	__releases(sh->host_lock)
 {
-	struct se_device *dev;
-	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
 	struct Scsi_Host *sh = sd->host;
 	struct block_device *bd;
-	u32 dev_flags = 0;
+	int ret;
 
 	if (scsi_device_get(sd)) {
 		pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
 			sh->host_no, sd->channel, sd->id, sd->lun);
 		spin_unlock_irq(sh->host_lock);
-		return NULL;
+		return -EIO;
 	}
 	spin_unlock_irq(sh->host_lock);
 	/*
 	 * Claim exclusive struct block_device access to struct scsi_device
 	 * for TYPE_DISK using supplied udev_path
 	 */
-	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+	bd = blkdev_get_by_path(dev->udev_path,
 				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
 	if (IS_ERR(bd)) {
 		pr_err("pSCSI: blkdev_get_by_path() failed\n");
 		scsi_device_put(sd);
-		return NULL;
+		return PTR_ERR(bd);
 	}
 	pdv->pdv_bd = bd;
 
-	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-	if (!dev) {
+	ret = pscsi_add_device_to_list(dev, sd);
+	if (ret) {
 		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
 		scsi_device_put(sd);
-		return NULL;
+		return ret;
 	}
+
 	pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
 		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
-
-	return dev;
+	return 0;
 }
 
 /*
  * Called with struct Scsi_Host->host_lock called.
 */
-static struct se_device *pscsi_create_type_rom(
-	struct scsi_device *sd,
-	struct pscsi_dev_virt *pdv,
-	struct se_subsystem_dev *se_dev,
-	struct se_hba *hba)
+static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
 	__releases(sh->host_lock)
 {
-	struct se_device *dev;
-	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
 	struct Scsi_Host *sh = sd->host;
-	u32 dev_flags = 0;
+	int ret;
 
 	if (scsi_device_get(sd)) {
 		pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
 			sh->host_no, sd->channel, sd->id, sd->lun);
 		spin_unlock_irq(sh->host_lock);
-		return NULL;
+		return -EIO;
 	}
 	spin_unlock_irq(sh->host_lock);
 
-	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-	if (!dev) {
+	ret = pscsi_add_device_to_list(dev, sd);
+	if (ret) {
 		scsi_device_put(sd);
-		return NULL;
+		return ret;
 	}
 	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
 		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
 		sd->channel, sd->id, sd->lun);
-
-	return dev;
+	return 0;
 }
 
 /*
- *Called with struct Scsi_Host->host_lock called.
+ * Called with struct Scsi_Host->host_lock called.
  */
-static struct se_device *pscsi_create_type_other(
-	struct scsi_device *sd,
-	struct pscsi_dev_virt *pdv,
-	struct se_subsystem_dev *se_dev,
-	struct se_hba *hba)
+static int pscsi_create_type_other(struct se_device *dev,
+		struct scsi_device *sd)
 	__releases(sh->host_lock)
 {
-	struct se_device *dev;
-	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
 	struct Scsi_Host *sh = sd->host;
-	u32 dev_flags = 0;
+	int ret;
 
 	spin_unlock_irq(sh->host_lock);
-	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-	if (!dev)
-		return NULL;
+	ret = pscsi_add_device_to_list(dev, sd);
+	if (ret)
+		return ret;
 
 	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
 		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
 		sd->channel, sd->id, sd->lun);
-
-	return dev;
+	return 0;
 }
 
-static struct se_device *pscsi_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
+static int pscsi_configure_device(struct se_device *dev)
 {
-	struct pscsi_dev_virt *pdv = p;
-	struct se_device *dev;
+	struct se_hba *hba = dev->se_hba;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
 	struct scsi_device *sd;
-	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
 	struct Scsi_Host *sh = phv->phv_lld_host;
 	int legacy_mode_enable = 0;
+	int ret;
 
-	if (!pdv) {
-		pr_err("Unable to locate struct pscsi_dev_virt"
-				" parameter\n");
-		return ERR_PTR(-EINVAL);
+	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
+			" scsi_lun_id= parameters\n");
+		return -EINVAL;
 	}
+
 	/*
 	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
 	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
@@ -532,16 +484,16 @@ static struct se_device *pscsi_create_virtdevice(
 		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
 			pr_err("pSCSI: Unable to locate struct"
 				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
-			return ERR_PTR(-ENODEV);
+			return -ENODEV;
 		}
 		/*
 		 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
 		 * reference, we enforce that udev_path has been set
 		 */
-		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+		if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
 			pr_err("pSCSI: udev_path attribute has not"
 				" been set before ENABLE=1\n");
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 		}
 		/*
 		 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
@@ -549,17 +501,14 @@ static struct se_device *pscsi_create_virtdevice(
 		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
 		 */
 		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
-			spin_lock(&hba->device_lock);
-			if (!list_empty(&hba->hba_dev_list)) {
+			if (hba->dev_count) {
 				pr_err("pSCSI: Unable to set hba_mode"
 					" with active devices\n");
-				spin_unlock(&hba->device_lock);
-				return ERR_PTR(-EEXIST);
+				return -EEXIST;
 			}
-			spin_unlock(&hba->device_lock);
 
 			if (pscsi_pmode_enable_hba(hba, 1) != 1)
-				return ERR_PTR(-ENODEV);
+				return -ENODEV;
 
 			legacy_mode_enable = 1;
 			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
@@ -569,14 +518,14 @@ static struct se_device *pscsi_create_virtdevice(
 			if (IS_ERR(sh)) {
 				pr_err("pSCSI: Unable to locate"
 					" pdv_host_id: %d\n", pdv->pdv_host_id);
-				return ERR_CAST(sh);
+				return PTR_ERR(sh);
 			}
 		}
 	} else {
 		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
 			pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
 				" struct Scsi_Host exists\n");
-			return ERR_PTR(-EEXIST);
+			return -EEXIST;
 		}
 	}
@@ -593,17 +542,17 @@ static struct se_device *pscsi_create_virtdevice(
 		 */
 		switch (sd->type) {
 		case TYPE_DISK:
-			dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+			ret = pscsi_create_type_disk(dev, sd);
 			break;
 		case TYPE_ROM:
-			dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+			ret = pscsi_create_type_rom(dev, sd);
 			break;
 		default:
-			dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+			ret = pscsi_create_type_other(dev, sd);
 			break;
 		}
 
-		if (!dev) {
+		if (ret) {
 			if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
 				scsi_host_put(sh);
 			else if (legacy_mode_enable) {
@@ -611,9 +560,9 @@ static struct se_device *pscsi_create_virtdevice(
 				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
 			}
 			pdv->pdv_sd = NULL;
-			return ERR_PTR(-ENODEV);
+			return ret;
 		}
-		return dev;
+		return 0;
 	}
 	spin_unlock_irq(sh->host_lock);
@@ -627,17 +576,13 @@ static struct se_device *pscsi_create_virtdevice(
 		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
 	}
 
-	return ERR_PTR(-ENODEV);
+	return -ENODEV;
 }
 
-/*	pscsi_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void pscsi_free_device(void *p)
+static void pscsi_free_device(struct se_device *dev)
 {
-	struct pscsi_dev_virt *pdv = p;
-	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
 	struct scsi_device *sd = pdv->pdv_sd;
 
 	if (sd) {
@@ -670,7 +615,7 @@ static void pscsi_free_device(void *p)
 static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
 				     unsigned char *sense_buffer)
 {
-	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
 	struct scsi_device *sd = pdv->pdv_sd;
 	int result;
 	struct pscsi_plugin_task *pt = cmd->priv;
@@ -694,7 +639,11 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
 	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
 	    (status_byte(result) << 1) == SAM_STAT_GOOD) {
 		if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
-			unsigned char *buf = transport_kmap_data_sg(cmd);
+			unsigned char *buf;
+
+			buf = transport_kmap_data_sg(cmd);
+			if (!buf)
+				; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
 
 			if (cdb[0] == MODE_SENSE_10) {
 				if (!(buf[3] & 0x80))
@@ -770,13 +719,11 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };
 
-static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	const char *page,
-	ssize_t count)
+static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
-	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
 	char *orig, *ptr, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, arg, token;
@@ -841,29 +788,10 @@ out:
 	return (!ret) ? count : ret;
 }
 
-static ssize_t pscsi_check_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev)
+static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
-
-	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
-	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
-	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
-		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
-			" scsi_lun_id= parameters\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
-					      struct se_subsystem_dev *se_dev,
-					      char *b)
-{
-	struct pscsi_hba_virt *phv = hba->hba_ptr;
-	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
 	struct scsi_device *sd = pdv->pdv_sd;
 	unsigned char host_id[16];
 	ssize_t bl;
@@ -929,11 +857,11 @@ static inline struct bio *pscsi_get_bio(int sg_num)
 	return bio;
 }
 
-static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
-		u32 sgl_nents, enum dma_data_direction data_direction,
-		struct bio **hbio)
+static sense_reason_t
+pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+	enum dma_data_direction data_direction, struct bio **hbio)
 {
-	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
 	struct bio *bio = NULL, *tbio = NULL;
 	struct page *page;
 	struct scatterlist *sg;
@@ -1019,7 +947,7 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
 		}
 	}
 
-	return sgl_nents;
+	return 0;
 fail:
 	while (*hbio) {
 		bio = *hbio;
@@ -1027,8 +955,7 @@ fail:
 		bio->bi_next = NULL;
 		bio_endio(bio, 0);	/* XXX: should be error */
 	}
-	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-	return -ENOMEM;
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
 /*
@@ -1055,17 +982,13 @@ static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
 	}
 }
 
-static int pscsi_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+pscsi_parse_cdb(struct se_cmd *cmd)
 {
 	unsigned char *cdb = cmd->t_task_cdb;
-	unsigned int dummy_size;
-	int ret;
 
-	if (cmd->se_cmd_flags & SCF_BIDI) {
-		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-		return -EINVAL;
-	}
+	if (cmd->se_cmd_flags & SCF_BIDI)
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 	pscsi_clear_cdb_lun(cdb);
@@ -1076,10 +999,8 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
 	 */
 	switch (cdb[0]) {
 	case REPORT_LUNS:
-		ret = spc_parse_cdb(cmd, &dummy_size);
-		if (ret)
-			return ret;
-		break;
+		cmd->execute_cmd = spc_emulate_report_luns;
+		return 0;
 	case READ_6:
 	case READ_10:
 	case READ_12:
@@ -1093,22 +1014,21 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
 		/* FALLTHROUGH*/
 	default:
 		cmd->execute_cmd = pscsi_execute_cmd;
-		break;
+		return 0;
 	}
-
-	return 0;
 }
 
-static int pscsi_execute_cmd(struct se_cmd *cmd)
+static sense_reason_t
+pscsi_execute_cmd(struct se_cmd *cmd)
 {
 	struct scatterlist *sgl = cmd->t_data_sg;
 	u32 sgl_nents = cmd->t_data_nents;
 	enum dma_data_direction data_direction = cmd->data_direction;
-	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
 	struct pscsi_plugin_task *pt;
 	struct request *req;
 	struct bio *hbio;
-	int ret;
+	sense_reason_t ret;
 
 	/*
 	 * Dynamically alloc cdb space, since it may be larger than
@@ -1116,8 +1036,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
 	 */
 	pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
 	if (!pt) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return -ENOMEM;
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 	cmd->priv = pt;
@@ -1131,24 +1050,21 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
 		if (!req || IS_ERR(req)) {
 			pr_err("PSCSI: blk_get_request() failed: %ld\n",
 				req ? IS_ERR(req) : -ENOMEM);
-			cmd->scsi_sense_reason =
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			goto fail;
 		}
 	} else {
 		BUG_ON(!cmd->data_length);
 
 		ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
-		if (ret < 0) {
-			cmd->scsi_sense_reason =
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		if (ret)
 			goto fail;
-		}
 
 		req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
 				       GFP_KERNEL);
 		if (IS_ERR(req)) {
 			pr_err("pSCSI: blk_make_request() failed\n");
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			goto fail_free_bio;
 		}
 	}
@@ -1179,22 +1095,10 @@ fail_free_bio:
 		bio->bi_next = NULL;
 		bio_endio(bio, 0);	/* XXX: should be error */
 	}
-	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 fail:
 	kfree(pt);
-	return -ENOMEM;
-}
-
-/*	pscsi_get_device_rev():
- *
- *
- */
-static u32 pscsi_get_device_rev(struct se_device *dev)
-{
-	struct pscsi_dev_virt *pdv = dev->dev_ptr;
-	struct scsi_device *sd = pdv->pdv_sd;
-
-	return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+	return ret;
 }
 
 /*	pscsi_get_device_type():
@@ -1203,7 +1107,7 @@ static u32 pscsi_get_device_rev(struct se_device *dev)
 */
 static u32 pscsi_get_device_type(struct se_device *dev)
 {
-	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
 	struct scsi_device *sd = pdv->pdv_sd;
 
 	return sd->type;
@@ -1211,7 +1115,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
 
 static sector_t pscsi_get_blocks(struct se_device *dev)
 {
-	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
 
 	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
 		return pdv->pdv_bd->bd_part->nr_sects;
@@ -1243,7 +1147,6 @@ static void pscsi_req_done(struct request *req, int uptodate)
 		pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
 			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
 			pt->pscsi_result);
-		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 		break;
 	}
@@ -1259,15 +1162,13 @@ static struct se_subsystem_api pscsi_template = {
 	.attach_hba = pscsi_attach_hba,
 	.detach_hba = pscsi_detach_hba,
 	.pmode_enable_hba = pscsi_pmode_enable_hba,
-	.allocate_virtdevice = pscsi_allocate_virtdevice,
-	.create_virtdevice = pscsi_create_virtdevice,
+	.alloc_device = pscsi_alloc_device,
+	.configure_device = pscsi_configure_device,
 	.free_device = pscsi_free_device,
 	.transport_complete = pscsi_transport_complete,
 	.parse_cdb = pscsi_parse_cdb,
-	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
 	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
 	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
-	.get_device_rev = pscsi_get_device_rev,
 	.get_device_type = pscsi_get_device_type,
 	.get_blocks = pscsi_get_blocks,
 };


@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
 #define PDF_HAS_VIRT_HOST_ID	0x20
 
 struct pscsi_dev_virt {
+	struct se_device dev;
 	int pdv_flags;
 	int pdv_host_id;
 	int pdv_channel_id;
@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
 	int pdv_lun_id;
 	struct block_device *pdv_bd;
 	struct scsi_device *pdv_sd;
-	struct se_hba *pdv_se_hba;
} ____cacheline_aligned;
 
 typedef enum phv_modes {


@@ -4,10 +4,7 @@
  * This file contains the Storage Engine <-> Ramdisk transport
  * specific functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -41,7 +38,10 @@
 
 #include "target_core_rd.h"
 
-static struct se_subsystem_api rd_mcp_template;
+static inline struct rd_dev *RD_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct rd_dev, dev);
+}
 
 /*	rd_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -196,7 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
 	return 0;
 }
 
-static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct rd_dev *rd_dev;
 	struct rd_host *rd_host = hba->hba_ptr;
@@ -209,39 +209,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
 
 	rd_dev->rd_host = rd_host;
 
-	return rd_dev;
+	return &rd_dev->dev;
 }
 
-static struct se_device *rd_create_virtdevice(struct se_hba *hba,
-		struct se_subsystem_dev *se_dev, void *p)
+static int rd_configure_device(struct se_device *dev)
 {
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct rd_dev *rd_dev = p;
-	struct rd_host *rd_host = hba->hba_ptr;
-	int dev_flags = 0, ret;
-	char prod[16], rev[4];
+	struct rd_dev *rd_dev = RD_DEV(dev);
+	struct rd_host *rd_host = dev->se_hba->hba_ptr;
+	int ret;
 
-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		pr_debug("Missing rd_pages= parameter\n");
+		return -EINVAL;
+	}
 
 	ret = rd_build_device_space(rd_dev);
 	if (ret < 0)
 		goto fail;
 
-	snprintf(prod, 16, "RAMDISK-MCP");
-	snprintf(rev, 4, "%s", RD_MCP_VERSION);
-
-	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
-	dev_limits.limits.max_hw_sectors = UINT_MAX;
-	dev_limits.limits.max_sectors = UINT_MAX;
-	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
-	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
-
-	dev = transport_add_device_to_core_hba(hba,
-			&rd_mcp_template, se_dev, dev_flags, rd_dev,
-			&dev_limits, prod, rev);
-	if (!dev)
-		goto fail;
+	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
+	dev->dev_attrib.hw_max_sectors = UINT_MAX;
+	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
 
 	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
@@ -251,16 +239,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
 		rd_dev->sg_table_count,
 		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
 
-	return dev;
+	return 0;
 
 fail:
 	rd_release_device_space(rd_dev);
-	return ERR_PTR(ret);
+	return ret;
 }
 
-static void rd_free_device(void *p)
+static void rd_free_device(struct se_device *dev)
 {
-	struct rd_dev *rd_dev = p;
+	struct rd_dev *rd_dev = RD_DEV(dev);
 
 	rd_release_device_space(rd_dev);
 	kfree(rd_dev);
@@ -284,13 +272,14 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 	return NULL;
 }
 
-static int rd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+rd_execute_rw(struct se_cmd *cmd)
 {
 	struct scatterlist *sgl = cmd->t_data_sg;
 	u32 sgl_nents = cmd->t_data_nents;
 	enum dma_data_direction data_direction = cmd->data_direction;
 	struct se_device *se_dev = cmd->se_dev;
-	struct rd_dev *dev = se_dev->dev_ptr;
+	struct rd_dev *dev = RD_DEV(se_dev);
 	struct rd_dev_sg_table *table;
 	struct scatterlist *rd_sg;
 	struct sg_mapping_iter m;
@@ -300,14 +289,14 @@ static int rd_execute_rw(struct se_cmd *cmd)
 	u32 src_len;
 	u64 tmp;
 
-	tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
+	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
 	rd_offset = do_div(tmp, PAGE_SIZE);
 	rd_page = tmp;
 	rd_size = cmd->data_length;
 
 	table = rd_get_sg_table(dev, rd_page);
 	if (!table)
-		return -EINVAL;
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
 	rd_sg = &table->sg_table[rd_page - table->page_start_offset];
@@ -357,7 +346,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
 			table = rd_get_sg_table(dev, rd_page);
 			if (!table) {
 				sg_miter_stop(&m);
-				return -EINVAL;
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			}
 
 			/* since we increment, the first sg entry is correct */
@@ -378,13 +367,10 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };
 
-static ssize_t rd_set_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	const char *page,
-	ssize_t count)
+static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	struct rd_dev *rd_dev = RD_DEV(dev);
 	char *orig, *ptr, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, arg, token;
@@ -417,24 +403,10 @@ static ssize_t rd_set_configfs_dev_params(
 	return (!ret) ? count : ret;
 }
 
-static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
-
-	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
-		pr_debug("Missing rd_pages= parameter\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t rd_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
-{
-	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	struct rd_dev *rd_dev = RD_DEV(dev);
 	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
 			rd_dev->rd_dev_id);
 	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
@@ -443,48 +415,40 @@ static ssize_t rd_show_configfs_dev_params(
 	return bl;
 }
 
-static u32 rd_get_device_rev(struct se_device *dev)
-{
-	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 rd_get_device_type(struct se_device *dev)
-{
-	return TYPE_DISK;
-}
-
 static sector_t rd_get_blocks(struct se_device *dev)
 {
-	struct rd_dev *rd_dev = dev->dev_ptr;
+	struct rd_dev *rd_dev = RD_DEV(dev);
 	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
-			dev->se_sub_dev->se_dev_attrib.block_size) - 1;
+			dev->dev_attrib.block_size) - 1;
 
 	return blocks_long;
 }
 
-static struct spc_ops rd_spc_ops = {
+static struct sbc_ops rd_sbc_ops = {
 	.execute_rw = rd_execute_rw,
 };
 
-static int rd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+rd_parse_cdb(struct se_cmd *cmd)
 {
-	return sbc_parse_cdb(cmd, &rd_spc_ops);
+	return sbc_parse_cdb(cmd, &rd_sbc_ops);
 }
 
 static struct se_subsystem_api rd_mcp_template = {
 	.name = "rd_mcp",
+	.inquiry_prod = "RAMDISK-MCP",
+	.inquiry_rev = RD_MCP_VERSION,
 	.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
 	.attach_hba = rd_attach_hba,
 	.detach_hba = rd_detach_hba,
-	.allocate_virtdevice = rd_allocate_virtdevice,
-	.create_virtdevice = rd_create_virtdevice,
+	.alloc_device = rd_alloc_device,
+	.configure_device = rd_configure_device,
 	.free_device = rd_free_device,
 	.parse_cdb = rd_parse_cdb,
-	.check_configfs_dev_params = rd_check_configfs_dev_params,
 	.set_configfs_dev_params = rd_set_configfs_dev_params,
 	.show_configfs_dev_params = rd_show_configfs_dev_params,
-	.get_device_rev = rd_get_device_rev,
-	.get_device_type = rd_get_device_type,
+	.get_device_type = sbc_get_device_type,
 	.get_blocks = rd_get_blocks,
 };
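The ramdisk conversion also shows how small a backend's SBC surface can now be: rd_sbc_ops wires up only execute_rw, and sbc_parse_cdb() fails CDBs that would need a missing callback (compare sbc_setup_write_same() below, which returns an error when ops->execute_write_same_unmap is absent). A hypothetical minimal backend would register in the same shape:

    /* Hypothetical backend: plain READ/WRITE only; WRITE_SAME/UNMAP
     * CDBs are rejected in sbc_parse_cdb() because the corresponding
     * sbc_ops callbacks are left NULL. */
    static sense_reason_t foo_execute_rw(struct se_cmd *cmd)
    {
            /* ... move data between cmd->t_data_sg and the backing store ... */
            target_complete_cmd(cmd, GOOD);
            return 0;
    }

    static struct sbc_ops foo_sbc_ops = {
            .execute_rw = foo_execute_rw,
    };

    static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
    {
            return sbc_parse_cdb(cmd, &foo_sbc_ops);
    }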


@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
 #define RDF_HAS_PAGE_COUNT	0x01
 
 struct rd_dev {
+	struct se_device dev;
 	u32 rd_flags;
 	/* Unique Ramdisk Device ID in Ramdisk HBA */
 	u32 rd_dev_id;


@@ -1,10 +1,7 @@
 /*
  * SCSI Block Commands (SBC) parsing and emulation.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -37,7 +34,8 @@
 #include "target_core_ua.h"
 
-static int sbc_emulate_readcapacity(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_readcapacity(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
@@ -54,22 +52,24 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
 	buf[1] = (blocks >> 16) & 0xff;
 	buf[2] = (blocks >> 8) & 0xff;
 	buf[3] = blocks & 0xff;
-	buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
-	buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
-	buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
-	buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
+	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
+	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
+	buf[7] = dev->dev_attrib.block_size & 0xff;
 
 	rbuf = transport_kmap_data_sg(cmd);
-	if (rbuf) {
-		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-		transport_kunmap_data_sg(cmd);
-	}
+	if (!rbuf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+	transport_kunmap_data_sg(cmd);
 
 	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
-static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *rbuf;
@@ -85,28 +85,29 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 	buf[5] = (blocks >> 16) & 0xff;
 	buf[6] = (blocks >> 8) & 0xff;
 	buf[7] = blocks & 0xff;
-	buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
-	buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
-	buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
-	buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
+	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
+	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
+	buf[11] = dev->dev_attrib.block_size & 0xff;
 	/*
 	 * Set Thin Provisioning Enable bit following sbc3r22 in section
 	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
 		buf[14] = 0x80;
 
 	rbuf = transport_kmap_data_sg(cmd);
-	if (rbuf) {
-		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-		transport_kunmap_data_sg(cmd);
-	}
+	if (!rbuf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+	transport_kunmap_data_sg(cmd);
 
 	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
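
Both READ CAPACITY handlers above serialize the returned LBA and the logical block length as big-endian byte strings with the same shift-and-mask scheme. A minimal userspace sketch of the READ CAPACITY (10) encoding, assuming an illustrative 1 GiB device with 512-byte blocks:

    #include <stdint.h>
    #include <stdio.h>

    /* Encode the 8-byte READ CAPACITY (10) parameter data: last addressable
     * LBA (capped at 0xffffffff) followed by block length, both big-endian,
     * mirroring the shift-and-mask pattern above. */
    static void encode_readcap10(uint8_t buf[8], uint64_t last_lba, uint32_t block_size)
    {
            uint32_t blocks = (last_lba >= 0xffffffff) ? 0xffffffff : (uint32_t)last_lba;

            buf[0] = (blocks >> 24) & 0xff;
            buf[1] = (blocks >> 16) & 0xff;
            buf[2] = (blocks >> 8) & 0xff;
            buf[3] = blocks & 0xff;
            buf[4] = (block_size >> 24) & 0xff;
            buf[5] = (block_size >> 16) & 0xff;
            buf[6] = (block_size >> 8) & 0xff;
            buf[7] = block_size & 0xff;
    }

    int main(void)
    {
            uint8_t buf[8];

            encode_readcap10(buf, 2097152 - 1, 512);  /* 1 GiB, 512-byte blocks */
            for (int i = 0; i < 8; i++)
                    printf("%02x ", buf[i]);
            printf("\n");
            return 0;
    }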
-int spc_get_write_same_sectors(struct se_cmd *cmd)
+sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
 {
 	u32 num_blocks;
 
@@ -129,13 +130,8 @@ sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(spc_get_write_same_sectors);
 
-static int sbc_emulate_verify(struct se_cmd *cmd)
-{
-	target_complete_cmd(cmd, GOOD);
-	return 0;
-}
-
-static int sbc_emulate_noop(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_noop(struct se_cmd *cmd)
 {
 	target_complete_cmd(cmd, GOOD);
 	return 0;
@@ -143,7 +139,7 @@ sbc_emulate_noop(struct se_cmd *cmd)
 
 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
 {
-	return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+	return cmd->se_dev->dev_attrib.block_size * sectors;
 }
 
 static int sbc_check_valid_sectors(struct se_cmd *cmd)
@@ -152,7 +148,7 @@ static int sbc_check_valid_sectors(struct se_cmd *cmd)
 	unsigned long long end_lba;
 	u32 sectors;
 
-	sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
+	sectors = cmd->data_length / dev->dev_attrib.block_size;
 	end_lba = dev->transport->get_blocks(dev) + 1;
 
 	if (cmd->t_task_lba + sectors > end_lba) {
@@ -236,26 +232,37 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
 }
 
-static int sbc_write_same_supported(struct se_device *dev,
-	unsigned char *flags)
+static sense_reason_t
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
+	unsigned int sectors = spc_get_write_same_sectors(cmd);
+
 	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
 		pr_err("WRITE_SAME PBDATA and LBDATA"
 			" bits not supported for Block Discard"
 			" Emulation\n");
-		return -ENOSYS;
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
 	}
-
+	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
+		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
+			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+		return TCM_INVALID_CDB_FIELD;
+	}
 	/*
-	 * Currently for the emulated case we only accept
-	 * tpws with the UNMAP=1 bit set.
+	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
+	 * translated into block discard requests within backend code.
 	 */
-	if (!(flags[0] & 0x08)) {
-		pr_err("WRITE_SAME w/o UNMAP bit not"
-			" supported for Block Discard Emulation\n");
-		return -ENOSYS;
+	if (flags[0] & 0x08) {
+		if (!ops->execute_write_same_unmap)
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+		cmd->execute_cmd = ops->execute_write_same_unmap;
+		return 0;
 	}
 
+	if (!ops->execute_write_same)
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+	cmd->execute_cmd = ops->execute_write_same;
 	return 0;
 }
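
Note how sbc_setup_write_same() now both validates the WRITE_SAME flag bits and binds cmd->execute_cmd to the matching sbc_ops callback, so the per-opcode cases in sbc_parse_cdb() no longer duplicate the capability checks. A standalone sketch of this validate-and-bind dispatch follows; the types and names are simplified stand-ins, not the kernel's:

    #include <stdio.h>

    typedef int sense_reason_t;        /* stand-in for the kernel type */
    enum { TCM_OK = 0, TCM_UNSUPPORTED = 1 };

    struct cmd;
    struct ops_example {               /* hypothetical analogue of struct sbc_ops */
            sense_reason_t (*write_same)(struct cmd *);
            sense_reason_t (*write_same_unmap)(struct cmd *);
    };

    struct cmd {
            sense_reason_t (*execute)(struct cmd *);
    };

    /* Pick the handler based on the UNMAP bit (flags bit 3), failing with a
     * "sense" code when the backend does not provide one. */
    static sense_reason_t setup_write_same(struct cmd *c, unsigned char flags,
                                           const struct ops_example *ops)
    {
            if (flags & 0x08) {
                    if (!ops->write_same_unmap)
                            return TCM_UNSUPPORTED;
                    c->execute = ops->write_same_unmap;
                    return TCM_OK;
            }
            if (!ops->write_same)
                    return TCM_UNSUPPORTED;
            c->execute = ops->write_same;
            return TCM_OK;
    }

    static sense_reason_t do_ws(struct cmd *c) { (void)c; puts("WRITE_SAME"); return TCM_OK; }

    int main(void)
    {
            struct ops_example ops = { .write_same = do_ws };  /* no UNMAP support */
            struct cmd c = { 0 };

            if (setup_write_same(&c, 0x00, &ops) == TCM_OK)
                    c.execute(&c);
            if (setup_write_same(&c, 0x08, &ops) != TCM_OK)
                    puts("UNMAP=1 rejected: backend lacks execute_write_same_unmap");
            return 0;
    }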
@@ -313,14 +320,14 @@ out:
 	kfree(buf);
 }
 
-int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
+sense_reason_t
+sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 {
-	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *cdb = cmd->t_task_cdb;
 	unsigned int size;
 	u32 sectors = 0;
-	int ret;
+	sense_reason_t ret;
 
 	switch (cdb[0]) {
 	case READ_6:
@@ -379,9 +386,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		cmd->execute_cmd = ops->execute_rw;
 		break;
 	case XDWRITEREAD_10:
-		if ((cmd->data_direction != DMA_TO_DEVICE) ||
+		if (cmd->data_direction != DMA_TO_DEVICE ||
 		    !(cmd->se_cmd_flags & SCF_BIDI))
-			goto out_invalid_cdb_field;
+			return TCM_INVALID_CDB_FIELD;
 		sectors = transport_get_sectors_10(cdb);
 
 		cmd->t_task_lba = transport_lba_32(cdb);
@@ -419,27 +426,24 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 			cmd->se_cmd_flags |= SCF_FUA;
 			break;
 		case WRITE_SAME_32:
-			if (!ops->execute_write_same)
-				goto out_unsupported_cdb;
-
 			sectors = transport_get_sectors_32(cdb);
 			if (!sectors) {
 				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
 				       " supported\n");
-				goto out_invalid_cdb_field;
+				return TCM_INVALID_CDB_FIELD;
 			}
 
 			size = sbc_get_size(cmd, 1);
 			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 
-			if (sbc_write_same_supported(dev, &cdb[10]) < 0)
-				goto out_unsupported_cdb;
-			cmd->execute_cmd = ops->execute_write_same;
+			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+			if (ret)
+				return ret;
 			break;
 		default:
 			pr_err("VARIABLE_LENGTH_CMD service action"
 				" 0x%04x not supported\n", service_action);
-			goto out_unsupported_cdb;
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
 		}
 		break;
 	}
@@ -455,7 +459,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		default:
 			pr_err("Unsupported SA: 0x%02x\n",
 				cmd->t_task_cdb[1] & 0x1f);
-			goto out_invalid_cdb_field;
+			return TCM_INVALID_CDB_FIELD;
 		}
 		size = (cdb[10] << 24) | (cdb[11] << 16) |
 		       (cdb[12] << 8) | cdb[13];
@@ -463,7 +467,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 	case SYNCHRONIZE_CACHE:
 	case SYNCHRONIZE_CACHE_16:
 		if (!ops->execute_sync_cache)
-			goto out_unsupported_cdb;
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		/*
 		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
@@ -484,42 +488,36 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		 */
 		if (cmd->t_task_lba || sectors) {
 			if (sbc_check_valid_sectors(cmd) < 0)
-				goto out_invalid_cdb_field;
+				return TCM_INVALID_CDB_FIELD;
 		}
 		cmd->execute_cmd = ops->execute_sync_cache;
 		break;
 	case UNMAP:
 		if (!ops->execute_unmap)
-			goto out_unsupported_cdb;
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		size = get_unaligned_be16(&cdb[7]);
 		cmd->execute_cmd = ops->execute_unmap;
 		break;
 	case WRITE_SAME_16:
-		if (!ops->execute_write_same)
-			goto out_unsupported_cdb;
-
 		sectors = transport_get_sectors_16(cdb);
 		if (!sectors) {
 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
-			goto out_invalid_cdb_field;
+			return TCM_INVALID_CDB_FIELD;
 		}
 
 		size = sbc_get_size(cmd, 1);
 		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
 
-		if (sbc_write_same_supported(dev, &cdb[1]) < 0)
-			goto out_unsupported_cdb;
-		cmd->execute_cmd = ops->execute_write_same;
+		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+		if (ret)
+			return ret;
 		break;
 	case WRITE_SAME:
-		if (!ops->execute_write_same)
-			goto out_unsupported_cdb;
-
 		sectors = transport_get_sectors_10(cdb);
 		if (!sectors) {
 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
-			goto out_invalid_cdb_field;
+			return TCM_INVALID_CDB_FIELD;
 		}
 
 		size = sbc_get_size(cmd, 1);
@@ -529,13 +527,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
 		 * of byte 1 bit 3 UNMAP instead of original reserved field
 		 */
-		if (sbc_write_same_supported(dev, &cdb[1]) < 0)
-			goto out_unsupported_cdb;
-		cmd->execute_cmd = ops->execute_write_same;
+		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+		if (ret)
+			return ret;
 		break;
 	case VERIFY:
 		size = 0;
-		cmd->execute_cmd = sbc_emulate_verify;
+		cmd->execute_cmd = sbc_emulate_noop;
 		break;
 	case REZERO_UNIT:
 	case SEEK_6:
@@ -557,24 +555,24 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 
 	/* reject any command that we don't have a handler for */
 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
-		goto out_unsupported_cdb;
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
 		unsigned long long end_lba;
 
-		if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
+		if (sectors > dev->dev_attrib.fabric_max_sectors) {
 			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
 				" big sectors %u exceeds fabric_max_sectors:"
 				" %u\n", cdb[0], sectors,
-				su_dev->se_dev_attrib.fabric_max_sectors);
-			goto out_invalid_cdb_field;
+				dev->dev_attrib.fabric_max_sectors);
+			return TCM_INVALID_CDB_FIELD;
 		}
-		if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
+		if (sectors > dev->dev_attrib.hw_max_sectors) {
 			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
 				" big sectors %u exceeds backend hw_max_sectors:"
 				" %u\n", cdb[0], sectors,
-				su_dev->se_dev_attrib.hw_max_sectors);
-			goto out_invalid_cdb_field;
+				dev->dev_attrib.hw_max_sectors);
+			return TCM_INVALID_CDB_FIELD;
 		}
 
 		end_lba = dev->transport->get_blocks(dev) + 1;
@@ -582,25 +580,18 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 			pr_err("cmd exceeds last lba %llu "
 				"(lba %llu, sectors %u)\n",
 				end_lba, cmd->t_task_lba, sectors);
-			goto out_invalid_cdb_field;
+			return TCM_INVALID_CDB_FIELD;
 		}
 
 		size = sbc_get_size(cmd, sectors);
 	}
 
-	ret = target_cmd_size_check(cmd, size);
-	if (ret < 0)
-		return ret;
-	return 0;
-
-out_unsupported_cdb:
-	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-	return -EINVAL;
-out_invalid_cdb_field:
-	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-	return -EINVAL;
+	return target_cmd_size_check(cmd, size);
 }
 EXPORT_SYMBOL(sbc_parse_cdb);
+
+u32 sbc_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+EXPORT_SYMBOL(sbc_get_device_type);
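
With the out_unsupported_cdb/out_invalid_cdb_field labels gone, a parse routine's return value is itself the sense reason: callers propagate it upward instead of testing for -EINVAL and reading cmd->scsi_sense_reason behind a flag. A compact sketch of the new convention, using an illustrative enum in place of the kernel's sense_reason_t:

    #include <stdio.h>

    typedef enum {                     /* stand-in for the kernel's sense_reason_t */
            TCM_NO_SENSE = 0,
            TCM_INVALID_CDB_FIELD,
            TCM_UNSUPPORTED_SCSI_OPCODE,
    } sense_reason_t;

    /* New style: the return value *is* the failure reason; 0 means success,
     * so call sites simply bubble it up with "if (ret) return ret;". */
    static sense_reason_t parse_one(unsigned char opcode)
    {
            if (opcode == 0xff)
                    return TCM_UNSUPPORTED_SCSI_OPCODE;
            return TCM_NO_SENSE;
    }

    int main(void)
    {
            sense_reason_t ret = parse_one(0xff);

            if (ret)
                    printf("failed with sense reason %d\n", ret);
            return 0;
    }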


@@ -1,10 +1,7 @@
 /*
  * SCSI Primary Commands (SPC) parsing and emulation.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
@@ -69,7 +66,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 }
 
-static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+static sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
@@ -78,7 +76,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
 		buf[1] = 0x80;
 
-	buf[2] = dev->transport->get_device_rev(dev);
+	buf[2] = 0x05; /* SPC-3 */
 
 	/*
 	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
@@ -95,34 +93,32 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
 	/*
 	 * Enable SCCS and TPGS fields for Emulated ALUA
 	 */
-	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
-		spc_fill_alua_data(lun->lun_sep, buf);
+	spc_fill_alua_data(lun->lun_sep, buf);
 
 	buf[7] = 0x2; /* CmdQue=1 */
 
 	snprintf(&buf[8], 8, "LIO-ORG");
-	snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
-	snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
+	snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
+	snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
 	buf[4] = 31; /* Set additional length to 31 */
 
 	return 0;
 }
 
 /* unit serial number */
-static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
 	u16 len = 0;
 
-	if (dev->se_sub_dev->su_dev_flags &
-			SDF_EMULATED_VPD_UNIT_SERIAL) {
+	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
 		u32 unit_serial_len;
 
-		unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
+		unit_serial_len = strlen(dev->t10_wwn.unit_serial);
 		unit_serial_len++; /* For NULL Terminator */
 
-		len += sprintf(&buf[4], "%s",
-			dev->se_sub_dev->t10_wwn.unit_serial);
+		len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
 		len++; /* Extra Byte for NULL Terminator */
 		buf[3] = len;
 	}
@@ -132,7 +128,7 @@ spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
 		unsigned char *buf)
 {
-	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+	unsigned char *p = &dev->t10_wwn.unit_serial[0];
 	int cnt;
 	bool next = true;
 
@@ -164,7 +160,8 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
  * Device identification VPD, for a complete list of
  * DESIGNATOR TYPEs see spc4r17 Table 459.
  */
-static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_lun *lun = cmd->se_lun;
@@ -173,7 +170,7 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
+	unsigned char *prod = &dev->t10_wwn.model[0];
 	u32 prod_len;
 	u32 unit_serial_len, off = 0;
 	u16 len = 0, id_len;
@@ -188,7 +185,7 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
 	 * value in order to return the NAA id.
 	 */
-	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
 		goto check_t10_vend_desc;
 
 	/* CODE SET == Binary */
@@ -236,14 +233,12 @@ check_t10_vend_desc:
 	prod_len += strlen(prod);
 	prod_len++; /* For : */
 
-	if (dev->se_sub_dev->su_dev_flags &
-			SDF_EMULATED_VPD_UNIT_SERIAL) {
-		unit_serial_len =
-			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
 		unit_serial_len++; /* For NULL Terminator */
 
 		id_len += sprintf(&buf[off+12], "%s:%s", prod,
-				&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+				&dev->t10_wwn.unit_serial[0]);
 	}
 	buf[off] = 0x2; /* ASCII */
 	buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -298,10 +293,6 @@ check_t10_vend_desc:
 		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
 		 * section 7.5.1 Table 362
 		 */
-		if (dev->se_sub_dev->t10_alua.alua_type !=
-				SPC3_ALUA_EMULATED)
-			goto check_scsi_name;
-
 		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
 		if (!tg_pt_gp_mem)
 			goto check_lu_gp;
@@ -415,20 +406,22 @@ check_scsi_name:
 }
 
 /* Extended INQUIRY Data VPD Page */
-static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
 	buf[3] = 0x3c;
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
-	if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+	if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
 		buf[6] = 0x01;
 	return 0;
 }
 
 /* Block Limits VPD page */
-static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
 	u32 max_sectors;
@@ -439,7 +432,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
 	 * different page length for Thin Provisioning.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
 		have_tp = 1;
 
 	buf[0] = dev->transport->get_device_type(dev);
@@ -456,62 +449,70 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
 	 */
-	max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
-			  dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+	max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+			  dev->dev_attrib.hw_max_sectors);
 	put_unaligned_be32(max_sectors, &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
+	put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
 
 	/*
 	 * Exit now if we don't support TP.
 	 */
 	if (!have_tp)
-		return 0;
+		goto max_write_same;
 
 	/*
 	 * Set MAXIMUM UNMAP LBA COUNT
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
+	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
 
 	/*
 	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
+	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
 			   &buf[24]);
 
 	/*
 	 * Set OPTIMAL UNMAP GRANULARITY
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
+	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
 
 	/*
 	 * UNMAP GRANULARITY ALIGNMENT
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
+	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
 			   &buf[32]);
-	if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
+	if (dev->dev_attrib.unmap_granularity_alignment != 0)
 		buf[32] |= 0x80; /* Set the UGAVALID bit */
 
+	/*
+	 * MAXIMUM WRITE SAME LENGTH
+	 */
+max_write_same:
+	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+
 	return 0;
 }
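
All of the Block Limits fields above live at fixed byte offsets in the B0h page and are stored big-endian via the put_unaligned_be32()/put_unaligned_be64() helpers. A userspace sketch of the same layout, with a hand-rolled big-endian store and made-up attribute values:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static void put_be32(uint32_t v, uint8_t *p)
    {
            p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    int main(void)
    {
            uint8_t vpd[64];

            memset(vpd, 0, sizeof(vpd));
            /* Offsets from the B0h page layout used above (illustrative values). */
            put_be32(8192, &vpd[8]);       /* MAXIMUM TRANSFER LENGTH */
            put_be32(1024, &vpd[12]);      /* OPTIMAL TRANSFER LENGTH */
            put_be32(4194304, &vpd[20]);   /* MAXIMUM UNMAP LBA COUNT */
            put_be32(1, &vpd[24]);         /* MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT */
            put_be32(1, &vpd[28]);         /* OPTIMAL UNMAP GRANULARITY */

            printf("max transfer length = %u blocks\n",
                   (vpd[8] << 24) | (vpd[9] << 16) | (vpd[10] << 8) | vpd[11]);
            return 0;
    }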
 /* Block Device Characteristics VPD page */
-static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
 
 	buf[0] = dev->transport->get_device_type(dev);
 	buf[3] = 0x3c;
-	buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0;
+	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
 
 	return 0;
 }
 
 /* Thin Provisioning VPD */
-static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
 
@@ -546,7 +547,7 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
 	 * that the device server does not support the UNMAP command.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
+	if (dev->dev_attrib.emulate_tpu != 0)
 		buf[5] = 0x80;
 
 	/*
@@ -555,17 +556,18 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * A TPWS bit set to zero indicates that the device server does not
 	 * support the use of the WRITE SAME (16) command to unmap LBAs.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
+	if (dev->dev_attrib.emulate_tpws != 0)
 		buf[5] |= 0x40;
 
 	return 0;
 }
 
-static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
 
 static struct {
 	uint8_t		page;
-	int		(*emulate)(struct se_cmd *, unsigned char *);
+	sense_reason_t	(*emulate)(struct se_cmd *, unsigned char *);
 } evpd_handlers[] = {
 	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
 	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
@@ -577,7 +579,8 @@ static struct {
 };
 
 /* supported vital product data pages */
-static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 {
 	int p;
 
@@ -586,8 +589,7 @@ spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 	 * Registered Extended LUN WWN has been set via ConfigFS
 	 * during device creation/restart.
 	 */
-	if (cmd->se_dev->se_sub_dev->su_dev_flags &
-			SDF_EMULATED_VPD_UNIT_SERIAL) {
+	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
 		buf[3] = ARRAY_SIZE(evpd_handlers);
 		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
 			buf[p + 4] = evpd_handlers[p].page;
@@ -596,14 +598,16 @@ spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 	return 0;
 }
 
-static int spc_emulate_inquiry(struct se_cmd *cmd)
+static sense_reason_t
+spc_emulate_inquiry(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
 	unsigned char *rbuf;
 	unsigned char *cdb = cmd->t_task_cdb;
 	unsigned char buf[SE_INQUIRY_BUF];
-	int p, ret;
+	sense_reason_t ret;
+	int p;
 
 	memset(buf, 0, SE_INQUIRY_BUF);
 
@@ -616,8 +620,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 		if (cdb[2]) {
 			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
 			       cdb[2]);
-			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-			ret = -EINVAL;
+			ret = TCM_INVALID_CDB_FIELD;
 			goto out;
 		}
 
@@ -634,33 +637,43 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 	}
 
 	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
-	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-	ret = -EINVAL;
+	ret = TCM_INVALID_CDB_FIELD;
 
 out:
 	rbuf = transport_kmap_data_sg(cmd);
-	if (rbuf) {
-		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-		transport_kunmap_data_sg(cmd);
-	}
+	if (!rbuf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+	transport_kunmap_data_sg(cmd);
 
 	if (!ret)
 		target_complete_cmd(cmd, GOOD);
 	return ret;
 }
-static int spc_modesense_rwrecovery(unsigned char *p)
+static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
 {
 	p[0] = 0x01;
 	p[1] = 0x0a;
 
+	/* No changeable values for now */
+	if (pc == 1)
+		goto out;
+
+out:
 	return 12;
 }
 
-static int spc_modesense_control(struct se_device *dev, unsigned char *p)
+static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
 {
 	p[0] = 0x0a;
 	p[1] = 0x0a;
+
+	/* No changeable values for now */
+	if (pc == 1)
+		goto out;
+
 	p[2] = 2;
 	/*
 	 * From spc4r23, 7.4.7 Control mode page
@@ -690,7 +703,7 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
 	 * command sequence order shall be explicitly handled by the application client
 	 * through the selection of appropriate ommands and task attributes.
 	 */
-	p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
 	/*
 	 * From spc4r17, section 7.4.6 Control mode Page
 	 *
@@ -720,8 +733,8 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
 	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
 	 * to the number of commands completed with one of those status codes.
 	 */
-	p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
-	       (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
 	/*
 	 * From spc4r17, section 7.4.6 Control mode Page
 	 *
@@ -734,25 +747,56 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
 	 * which the command was received shall be completed with TASK ABORTED
 	 * status (see SAM-4).
 	 */
-	p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
+	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
 	p[8] = 0xff;
 	p[9] = 0xff;
 	p[11] = 30;
 
+out:
 	return 12;
 }
 
-static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
+static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
 {
 	p[0] = 0x08;
 	p[1] = 0x12;
-	if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+
+	/* No changeable values for now */
+	if (pc == 1)
+		goto out;
+
+	if (dev->dev_attrib.emulate_write_cache > 0)
 		p[2] = 0x04; /* Write Cache Enable */
 	p[12] = 0x20; /* Disabled Read Ahead */
+
+out:
 	return 20;
 }
 
+static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+{
+	p[0] = 0x1c;
+	p[1] = 0x0a;
+
+	/* No changeable values for now */
+	if (pc == 1)
+		goto out;
+
+out:
+	return 12;
+}
+
+static struct {
+	uint8_t		page;
+	uint8_t		subpage;
+	int		(*emulate)(struct se_device *, u8, unsigned char *);
+} modesense_handlers[] = {
+	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
+	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
+	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
+	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
+};
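
The new modesense_handlers[] table is what makes this refactoring extensible: supporting another mode page is one more table entry, and lookup is a linear scan keyed on (page, subpage). A self-contained sketch of that table-driven lookup, with two illustrative handlers:

    #include <stdint.h>
    #include <stdio.h>

    struct page_handler {              /* analogue of the modesense_handlers[] slots */
            uint8_t page;
            uint8_t subpage;
            int (*emulate)(uint8_t pc, unsigned char *p);
    };

    static int caching(uint8_t pc, unsigned char *p) { (void)pc; p[0] = 0x08; return 20; }
    static int control(uint8_t pc, unsigned char *p) { (void)pc; p[0] = 0x0a; return 12; }

    static const struct page_handler handlers[] = {
            { 0x08, 0x00, caching },
            { 0x0a, 0x00, control },
    };

    static int emulate_page(uint8_t page, uint8_t subpage, uint8_t pc, unsigned char *out)
    {
            for (unsigned i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
                    if (handlers[i].page == page && handlers[i].subpage == subpage)
                            return handlers[i].emulate(pc, out);
            return -1;                 /* caller maps this to UNKNOWN MODE PAGE */
    }

    int main(void)
    {
            unsigned char buf[32] = { 0 };
            int len = emulate_page(0x08, 0x00, 0, buf);

            printf("caching page: %d bytes, page code 0x%02x\n", len, buf[0]);
            return 0;
    }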
 static void spc_modesense_write_protect(unsigned char *buf, int type)
 {
 	/*
@@ -779,82 +823,224 @@ static void spc_modesense_dpofua(unsigned char *buf, int type)
 	}
 }
 
+static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+	*buf++ = 8;
+	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
+	buf += 4;
+	put_unaligned_be32(block_size, buf);
+	return 9;
+}
+
+static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+	if (blocks <= 0xffffffff)
+		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
+
+	*buf++ = 1;		/* LONGLBA */
+	buf += 2;
+	*buf++ = 16;
+	put_unaligned_be64(blocks, buf);
+	buf += 12;
+	put_unaligned_be32(block_size, buf);
+
+	return 17;
+}
+
-static int spc_emulate_modesense(struct se_cmd *cmd)
+static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	char *cdb = cmd->t_task_cdb;
-	unsigned char *rbuf;
+	unsigned char *buf, *map_buf;
 	int type = dev->transport->get_device_type(dev);
 	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
-	u32 offset = ten ? 8 : 4;
+	bool dbd = !!(cdb[1] & 0x08);
+	bool llba = ten ? !!(cdb[1] & 0x10) : false;
+	u8 pc = cdb[2] >> 6;
+	u8 page = cdb[2] & 0x3f;
+	u8 subpage = cdb[3];
 	int length = 0;
-	unsigned char buf[SE_MODE_PAGE_BUF];
-
-	memset(buf, 0, SE_MODE_PAGE_BUF);
-
-	switch (cdb[2] & 0x3f) {
-	case 0x01:
-		length = spc_modesense_rwrecovery(&buf[offset]);
-		break;
-	case 0x08:
-		length = spc_modesense_caching(dev, &buf[offset]);
-		break;
-	case 0x0a:
-		length = spc_modesense_control(dev, &buf[offset]);
-		break;
-	case 0x3f:
-		length = spc_modesense_rwrecovery(&buf[offset]);
-		length += spc_modesense_caching(dev, &buf[offset+length]);
-		length += spc_modesense_control(dev, &buf[offset+length]);
-		break;
-	default:
-		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
-		       cdb[2] & 0x3f, cdb[3]);
-		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-		return -EINVAL;
-	}
-	offset += length;
-
-	if (ten) {
-		offset -= 2;
-		buf[0] = (offset >> 8) & 0xff;
-		buf[1] = offset & 0xff;
-		offset += 2;
-
-		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
-		    (cmd->se_deve &&
-		     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
-			spc_modesense_write_protect(&buf[3], type);
-
-		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
-			spc_modesense_dpofua(&buf[3], type);
-	} else {
-		offset -= 1;
-		buf[0] = offset & 0xff;
-		offset += 1;
-
-		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
-		    (cmd->se_deve &&
-		     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
-			spc_modesense_write_protect(&buf[2], type);
-
-		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
-			spc_modesense_dpofua(&buf[2], type);
-	}
-
-	rbuf = transport_kmap_data_sg(cmd);
-	if (rbuf) {
-		memcpy(rbuf, buf, min(offset, cmd->data_length));
-		transport_kunmap_data_sg(cmd);
-	}
+	int ret;
+	int i;
+
+	map_buf = transport_kmap_data_sg(cmd);
+	if (!map_buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	/*
+	 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
+	 * know we actually allocated a full page.  Otherwise, if the
+	 * data buffer is too small, allocate a temporary buffer so we
+	 * don't have to worry about overruns in all our INQUIRY
+	 * emulation handling.
+	 */
+	if (cmd->data_length < SE_MODE_PAGE_BUF &&
+	    (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+		buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL);
+		if (!buf) {
+			transport_kunmap_data_sg(cmd);
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+	} else {
+		buf = map_buf;
+	}
+	/*
+	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
+	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
+	 */
+	length = ten ? 3 : 2;
+
+	/* DEVICE-SPECIFIC PARAMETER */
+	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+	    (cmd->se_deve &&
+	     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+		spc_modesense_write_protect(&buf[length], type);
+
+	if ((dev->dev_attrib.emulate_write_cache > 0) &&
+	    (dev->dev_attrib.emulate_fua_write > 0))
+		spc_modesense_dpofua(&buf[length], type);
+
+	++length;
+
+	/* BLOCK DESCRIPTOR */
+
+	/*
+	 * For now we only include a block descriptor for disk (SBC)
+	 * devices; other command sets use a slightly different format.
+	 */
+	if (!dbd && type == TYPE_DISK) {
+		u64 blocks = dev->transport->get_blocks(dev);
+		u32 block_size = dev->dev_attrib.block_size;
+
+		if (ten) {
+			if (llba) {
+				length += spc_modesense_long_blockdesc(&buf[length],
+						blocks, block_size);
+			} else {
+				length += 3;
+				length += spc_modesense_blockdesc(&buf[length],
+						blocks, block_size);
+			}
+		} else {
+			length += spc_modesense_blockdesc(&buf[length], blocks,
+					block_size);
+		}
+	} else {
+		if (ten)
+			length += 4;
+		else
+			length += 1;
+	}
+
+	if (page == 0x3f) {
+		if (subpage != 0x00 && subpage != 0xff) {
+			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
+			kfree(buf);
+			transport_kunmap_data_sg(cmd);
+			return TCM_INVALID_CDB_FIELD;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
+			/*
+			 * Tricky way to say all subpage 00h for
+			 * subpage==0, all subpages for subpage==0xff
+			 * (and we just checked above that those are
+			 * the only two possibilities).
+			 */
+			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
+				ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+				if (!ten && length + ret >= 255)
+					break;
+				length += ret;
+			}
+		}
+
+		goto set_length;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+		if (modesense_handlers[i].page == page &&
+		    modesense_handlers[i].subpage == subpage) {
+			length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+			goto set_length;
+		}
+
+	/*
+	 * We don't intend to implement:
+	 *  - obsolete page 03h "format parameters" (checked by Solaris)
+	 */
+	if (page != 0x03)
+		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+		       page, subpage);
+
+	transport_kunmap_data_sg(cmd);
+	return TCM_UNKNOWN_MODE_PAGE;
+
+set_length:
+	if (ten)
+		put_unaligned_be16(length - 2, buf);
+	else
+		buf[0] = length - 1;
+
+	if (buf != map_buf) {
+		memcpy(map_buf, buf, cmd->data_length);
+		kfree(buf);
+	}
+
+	transport_kunmap_data_sg(cmd);
 	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
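
The set_length back-fill above writes MODE DATA LENGTH only after the whole response is assembled: a 16-bit big-endian value excluding its own two bytes for MODE_SENSE_10, and an 8-bit value excluding its single byte for MODE_SENSE (6). A minimal sketch of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Back-fill MODE DATA LENGTH once 'length' bytes (header included) have
     * been written, mirroring the set_length logic above. */
    static void set_mode_data_length(unsigned char *buf, int length, int ten)
    {
            if (ten) {
                    uint16_t v = (uint16_t)(length - 2);  /* field excludes itself */
                    buf[0] = v >> 8;
                    buf[1] = v & 0xff;
            } else {
                    buf[0] = (unsigned char)(length - 1);
            }
    }

    int main(void)
    {
            unsigned char b10[64] = { 0 }, b6[64] = { 0 };

            set_mode_data_length(b10, 44, 1);
            set_mode_data_length(b6, 36, 0);
            printf("MODE_SENSE_10 header: %02x %02x, MODE_SENSE_6 header: %02x\n",
                   b10[0], b10[1], b6[0]);
            return 0;
    }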
-static int spc_emulate_request_sense(struct se_cmd *cmd)
+static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	char *cdb = cmd->t_task_cdb;
+	bool ten = cdb[0] == MODE_SELECT_10;
+	int off = ten ? 8 : 4;
+	bool pf = !!(cdb[1] & 0x10);
+	u8 page, subpage;
+	unsigned char *buf;
+	unsigned char tbuf[SE_MODE_PAGE_BUF];
+	int length;
+	int ret = 0;
+	int i;
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	if (!pf) {
+		ret = TCM_INVALID_CDB_FIELD;
+		goto out;
+	}
+
+	page = buf[off] & 0x3f;
+	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
+
+	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+		if (modesense_handlers[i].page == page &&
+		    modesense_handlers[i].subpage == subpage) {
+			memset(tbuf, 0, SE_MODE_PAGE_BUF);
+			length = modesense_handlers[i].emulate(dev, 0, tbuf);
+			goto check_contents;
+		}
+
+	ret = TCM_UNKNOWN_MODE_PAGE;
+	goto out;
+
+check_contents:
+	if (memcmp(buf + off, tbuf, length))
+		ret = TCM_INVALID_PARAMETER_LIST;
+
+out:
+	transport_kunmap_data_sg(cmd);
+
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
+	return ret;
+}
+
+static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
 {
 	unsigned char *cdb = cmd->t_task_cdb;
 	unsigned char *rbuf;
@@ -866,19 +1052,14 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
 	if (cdb[1] & 0x01) {
 		pr_err("REQUEST_SENSE description emulation not"
 			" supported\n");
-		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-		return -ENOSYS;
+		return TCM_INVALID_CDB_FIELD;
 	}
 
 	rbuf = transport_kmap_data_sg(cmd);
-	if (cmd->scsi_sense_reason != 0) {
-		/*
-		 * Out of memory.  We will fail with CHECK CONDITION, so
-		 * we must not clear the unit attention condition.
-		 */
-		target_complete_cmd(cmd, CHECK_CONDITION);
-		return 0;
-	} else if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+	if (!rbuf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
 		/*
 		 * CURRENT ERROR, UNIT ATTENTION
 		 */
@@ -905,33 +1086,97 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
 		buf[7] = 0x0A;
 	}
 
-	if (rbuf) {
-		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-		transport_kunmap_data_sg(cmd);
-	}
+	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+	transport_kunmap_data_sg(cmd);
 
 	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
+sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	unsigned char *buf;
+	u32 lun_count = 0, offset = 8, i;
+
+	if (cmd->data_length < 16) {
+		pr_warn("REPORT LUNS allocation length %u too small\n",
+			cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	/*
+	 * If no struct se_session pointer is present, this struct se_cmd is
+	 * coming via a target_core_mod PASSTHROUGH op, and not through
+	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
+	 */
+	if (!sess) {
+		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
+		lun_count = 1;
+		goto done;
+	}
+
+	spin_lock_irq(&sess->se_node_acl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = sess->se_node_acl->device_list[i];
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		/*
+		 * We determine the correct LUN LIST LENGTH even once we
+		 * have reached the initial allocation length.
+		 * See SPC2-R20 7.19.
+		 */
+		lun_count++;
+		if ((offset + 8) > cmd->data_length)
+			continue;
+
+		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+		offset += 8;
+	}
+	spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+
+	/*
+	 * See SPC3 r07, page 159.
+	 */
+done:
+	lun_count *= 8;
+	buf[0] = ((lun_count >> 24) & 0xff);
+	buf[1] = ((lun_count >> 16) & 0xff);
+	buf[2] = ((lun_count >> 8) & 0xff);
+	buf[3] = (lun_count & 0xff);
+	transport_kunmap_data_sg(cmd);
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+EXPORT_SYMBOL(spc_emulate_report_luns);
+
-static int spc_emulate_testunitready(struct se_cmd *cmd)
+static sense_reason_t
+spc_emulate_testunitready(struct se_cmd *cmd)
 {
 	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
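
spc_emulate_report_luns() keeps counting mapped LUNs even after the initiator's allocation length is exhausted, because LUN LIST LENGTH (bytes 0-3, eight bytes per LUN) is how the initiator learns how large a buffer to retry with. A userspace sketch of that header encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Encode the REPORT LUNS header: LUN LIST LENGTH is 8 bytes per LUN,
     * stored big-endian in bytes 0-3, independent of how many entries fit. */
    static void encode_report_luns_header(unsigned char *buf, uint32_t lun_count)
    {
            uint32_t len = lun_count * 8;

            buf[0] = (len >> 24) & 0xff;
            buf[1] = (len >> 16) & 0xff;
            buf[2] = (len >> 8) & 0xff;
            buf[3] = len & 0xff;
    }

    int main(void)
    {
            unsigned char buf[8] = { 0 };

            encode_report_luns_header(buf, 5);  /* 5 LUNs -> list length 40 */
            printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
            return 0;
    }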
-int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
+sense_reason_t
+spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	unsigned char *cdb = cmd->t_task_cdb;
 
 	switch (cdb[0]) {
 	case MODE_SELECT:
 		*size = cdb[4];
+		cmd->execute_cmd = spc_emulate_modeselect;
 		break;
 	case MODE_SELECT_10:
 		*size = (cdb[7] << 8) + cdb[8];
+		cmd->execute_cmd = spc_emulate_modeselect;
 		break;
 	case MODE_SENSE:
 		*size = cdb[4];
@@ -946,14 +1191,12 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		*size = (cdb[7] << 8) + cdb[8];
 		break;
 	case PERSISTENT_RESERVE_IN:
-		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
-			cmd->execute_cmd = target_scsi3_emulate_pr_in;
 		*size = (cdb[7] << 8) + cdb[8];
+		cmd->execute_cmd = target_scsi3_emulate_pr_in;
 		break;
 	case PERSISTENT_RESERVE_OUT:
-		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
-			cmd->execute_cmd = target_scsi3_emulate_pr_out;
 		*size = (cdb[7] << 8) + cdb[8];
+		cmd->execute_cmd = target_scsi3_emulate_pr_out;
 		break;
 	case RELEASE:
 	case RELEASE_10:
@@ -962,8 +1205,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		else
 			*size = cmd->data_length;
 
-		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
-			cmd->execute_cmd = target_scsi2_reservation_release;
+		cmd->execute_cmd = target_scsi2_reservation_release;
 		break;
 	case RESERVE:
 	case RESERVE_10:
@@ -976,15 +1218,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		else
 			*size = cmd->data_length;
 
-		/*
-		 * Setup the legacy emulated handler for SPC-2 and
-		 * >= SPC-3 compatible reservation handling (CRH=1)
-		 * Otherwise, we assume the underlying SCSI logic is
-		 * is running in SPC_PASSTHROUGH, and wants reservations
-		 * emulation disabled.
-		 */
-		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
-			cmd->execute_cmd = target_scsi2_reservation_reserve;
+		cmd->execute_cmd = target_scsi2_reservation_reserve;
 		break;
 	case REQUEST_SENSE:
 		*size = cdb[4];
@@ -997,8 +1231,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * Do implict HEAD_OF_QUEUE processing for INQUIRY.
 		 * See spc4r17 section 5.3
 		 */
-		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-			cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->sam_task_attr = MSG_HEAD_TAG;
 		cmd->execute_cmd = spc_emulate_inquiry;
 		break;
 	case SECURITY_PROTOCOL_IN:
@@ -1020,14 +1253,13 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
 		break;
 	case REPORT_LUNS:
-		cmd->execute_cmd = target_report_luns;
+		cmd->execute_cmd = spc_emulate_report_luns;
 		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
 		/*
 		 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
 		 * See spc4r17 section 5.3
 		 */
-		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-			cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->sam_task_attr = MSG_HEAD_TAG;
 		break;
 	case TEST_UNIT_READY:
 		cmd->execute_cmd = spc_emulate_testunitready;
@@ -1039,8 +1271,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * MAINTENANCE_IN from SCC-2
 		 * Check for emulated MI_REPORT_TARGET_PGS
 		 */
-		if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
-		    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+		if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
 			cmd->execute_cmd =
 				target_emulate_report_target_port_groups;
 		}
@@ -1058,8 +1289,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * MAINTENANCE_OUT from SCC-2
 		 * Check for emulated MO_SET_TARGET_PGS.
 		 */
-		if (cdb[1] == MO_SET_TARGET_PGS &&
-		    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+		if (cdb[1] == MO_SET_TARGET_PGS) {
 			cmd->execute_cmd =
 				target_emulate_set_target_port_groups;
 		}
@@ -1075,9 +1305,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
 			" 0x%02x, sending CHECK_CONDITION.\n",
 			cmd->se_tfo->get_fabric_name(), cdb[0]);
-		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-		return -EINVAL;
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
 	}
 
 	return 0;


@@ -1,13 +1,10 @@
 /*******************************************************************************
 * Filename:  target_core_stat.c
 *
- * Copyright (c) 2011 Rising Tide Systems
- * Copyright (c) 2011 Linux-iSCSI.org
- *
 * Modern ConfigFS group context specific statistics based on original
 * target_core_mib.c code
 *
- * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
+ * (c) Copyright 2006-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
@@ -80,13 +77,9 @@ static struct target_stat_scsi_dev_attribute \
 static ssize_t target_stat_scsi_dev_show_attr_inst(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_hba *hba = se_subdev->se_dev_hba;
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
+	struct se_hba *hba = dev->se_hba;
 
 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
@@ -95,12 +88,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(inst);
 static ssize_t target_stat_scsi_dev_show_attr_indx(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -109,13 +98,6 @@ DEV_STAT_SCSI_DEV_ATTR_RO(indx);
 static ssize_t target_stat_scsi_dev_show_attr_role(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	return snprintf(page, PAGE_SIZE, "Target\n");
 }
 DEV_STAT_SCSI_DEV_ATTR_RO(role);
@@ -123,12 +105,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(role);
 static ssize_t target_stat_scsi_dev_show_attr_ports(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
 }
@@ -176,13 +154,9 @@ static struct target_stat_scsi_tgt_dev_attribute \
 static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_hba *hba = se_subdev->se_dev_hba;
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
+	struct se_hba *hba = dev->se_hba;
 
 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
@@ -191,12 +165,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -205,13 +175,6 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
 }
 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
@@ -219,60 +182,27 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-	char status[16];
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
-	if (!dev)
-		return -ENODEV;
-
-	switch (dev->dev_status) {
-	case TRANSPORT_DEVICE_ACTIVATED:
-		strcpy(status, "activated");
-		break;
-	case TRANSPORT_DEVICE_DEACTIVATED:
-		strcpy(status, "deactivated");
-		break;
-	case TRANSPORT_DEVICE_SHUTDOWN:
-		strcpy(status, "shutdown");
-		break;
-	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-		strcpy(status, "offline");
-		break;
-	default:
-		sprintf(status, "unknown(%d)", dev->dev_status);
-		break;
-	}
-
-	return snprintf(page, PAGE_SIZE, "%s\n", status);
+	if (dev->export_count)
+		return snprintf(page, PAGE_SIZE, "activated");
+	else
+		return snprintf(page, PAGE_SIZE, "deactivated");
 }
 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
 
 static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 	int non_accessible_lus;
 
-	if (!dev)
-		return -ENODEV;
-
-	switch (dev->dev_status) {
-	case TRANSPORT_DEVICE_ACTIVATED:
+	if (dev->export_count)
 		non_accessible_lus = 0;
-		break;
-	case TRANSPORT_DEVICE_DEACTIVATED:
-	case TRANSPORT_DEVICE_SHUTDOWN:
-	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-	default:
+	else
 		non_accessible_lus = 1;
-		break;
-	}
 
 	return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
@@ -281,12 +211,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
 }
@@ -335,13 +261,9 @@ static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
 static ssize_t target_stat_scsi_lu_show_attr_inst(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_hba *hba = se_subdev->se_dev_hba;
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
+	struct se_hba *hba = dev->se_hba;
 
 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
@@ -350,12 +272,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(inst);
 static ssize_t target_stat_scsi_lu_show_attr_dev(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -364,13 +282,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev);
 static ssize_t target_stat_scsi_lu_show_attr_indx(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
 }
 DEV_STAT_SCSI_LU_ATTR_RO(indx);
@@ -378,12 +289,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(indx);
 static ssize_t target_stat_scsi_lu_show_attr_lun(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	/* FIXME: scsiLuDefaultLun */
 	return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
 }
@@ -392,35 +297,28 @@ DEV_STAT_SCSI_LU_ATTR_RO(lun);
 static ssize_t target_stat_scsi_lu_show_attr_lu_name(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	/* scsiLuWwnName */
 	return snprintf(page, PAGE_SIZE, "%s\n",
-			(strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
-			dev->se_sub_dev->t10_wwn.unit_serial : "None");
+			(strlen(dev->t10_wwn.unit_serial)) ?
+			dev->t10_wwn.unit_serial : "None");
 }
 DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
 
 static ssize_t target_stat_scsi_lu_show_attr_vend(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 	int i;
-	char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
-
-	if (!dev)
-		return -ENODEV;
+	char str[sizeof(dev->t10_wwn.vendor)+1];
 
 	/* scsiLuVendorId */
-	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
-		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
-			dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+		str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
+			dev->t10_wwn.vendor[i] : ' ';
 	str[i] = '\0';
 	return snprintf(page, PAGE_SIZE, "%s\n", str);
 }
@@ -429,19 +327,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(vend);
 static ssize_t target_stat_scsi_lu_show_attr_prod(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 	int i;
-	char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
-
-	if (!dev)
-		return -ENODEV;
+	char str[sizeof(dev->t10_wwn.model)+1];
 
 	/* scsiLuProductId */
-	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
+	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ? str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
dev->se_sub_dev->t10_wwn.model[i] : ' '; dev->t10_wwn.model[i] : ' ';
str[i] = '\0'; str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str); return snprintf(page, PAGE_SIZE, "%s\n", str);
} }
@ -450,19 +344,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(prod);
static ssize_t target_stat_scsi_lu_show_attr_rev( static ssize_t target_stat_scsi_lu_show_attr_rev(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_device *dev =
struct se_subsystem_dev, dev_stat_grps); container_of(sgrps, struct se_device, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
int i; int i;
char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1]; char str[sizeof(dev->t10_wwn.revision)+1];
if (!dev)
return -ENODEV;
/* scsiLuRevisionId */ /* scsiLuRevisionId */
for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++) for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ? str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
dev->se_sub_dev->t10_wwn.revision[i] : ' '; dev->t10_wwn.revision[i] : ' ';
str[i] = '\0'; str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str); return snprintf(page, PAGE_SIZE, "%s\n", str);
} }
@ -471,12 +361,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(rev);
static ssize_t target_stat_scsi_lu_show_attr_dev_type( static ssize_t target_stat_scsi_lu_show_attr_dev_type(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_device *dev =
struct se_subsystem_dev, dev_stat_grps); container_of(sgrps, struct se_device, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* scsiLuPeripheralType */ /* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n", return snprintf(page, PAGE_SIZE, "%u\n",
@ -487,30 +373,18 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
static ssize_t target_stat_scsi_lu_show_attr_status( static ssize_t target_stat_scsi_lu_show_attr_status(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_device *dev =
struct se_subsystem_dev, dev_stat_grps); container_of(sgrps, struct se_device, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* scsiLuStatus */ /* scsiLuStatus */
return snprintf(page, PAGE_SIZE, "%s\n", return snprintf(page, PAGE_SIZE, "%s\n",
(dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? (dev->export_count) ? "available" : "notavailable");
"available" : "notavailable");
} }
DEV_STAT_SCSI_LU_ATTR_RO(status); DEV_STAT_SCSI_LU_ATTR_RO(status);
static ssize_t target_stat_scsi_lu_show_attr_state_bit( static ssize_t target_stat_scsi_lu_show_attr_state_bit(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* scsiLuState */ /* scsiLuState */
return snprintf(page, PAGE_SIZE, "exposed\n"); return snprintf(page, PAGE_SIZE, "exposed\n");
} }
@ -519,12 +393,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
static ssize_t target_stat_scsi_lu_show_attr_num_cmds( static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_device *dev =
struct se_subsystem_dev, dev_stat_grps); container_of(sgrps, struct se_device, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* scsiLuNumCommands */ /* scsiLuNumCommands */
return snprintf(page, PAGE_SIZE, "%llu\n", return snprintf(page, PAGE_SIZE, "%llu\n",
@ -535,12 +405,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_device *dev =
struct se_subsystem_dev, dev_stat_grps); container_of(sgrps, struct se_device, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* scsiLuReadMegaBytes */ /* scsiLuReadMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
@ -550,12 +416,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_device *dev =
struct se_subsystem_dev, dev_stat_grps); container_of(sgrps, struct se_device, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* scsiLuWrittenMegaBytes */ /* scsiLuWrittenMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
@ -565,12 +427,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
static ssize_t target_stat_scsi_lu_show_attr_resets( static ssize_t target_stat_scsi_lu_show_attr_resets(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_device *dev =
struct se_subsystem_dev, dev_stat_grps); container_of(sgrps, struct se_device, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* scsiLuInResets */ /* scsiLuInResets */
return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
@ -580,13 +438,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(resets);
static ssize_t target_stat_scsi_lu_show_attr_full_stat( static ssize_t target_stat_scsi_lu_show_attr_full_stat(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* FIXME: scsiLuOutTaskSetFullStatus */ /* FIXME: scsiLuOutTaskSetFullStatus */
return snprintf(page, PAGE_SIZE, "%u\n", 0); return snprintf(page, PAGE_SIZE, "%u\n", 0);
} }
@ -595,13 +446,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds( static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* FIXME: scsiLuHSInCommands */ /* FIXME: scsiLuHSInCommands */
return snprintf(page, PAGE_SIZE, "%u\n", 0); return snprintf(page, PAGE_SIZE, "%u\n", 0);
} }
@ -610,12 +454,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
static ssize_t target_stat_scsi_lu_show_attr_creation_time( static ssize_t target_stat_scsi_lu_show_attr_creation_time(
struct se_dev_stat_grps *sgrps, char *page) struct se_dev_stat_grps *sgrps, char *page)
{ {
struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_device *dev =
struct se_subsystem_dev, dev_stat_grps); container_of(sgrps, struct se_device, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
if (!dev)
return -ENODEV;
/* scsiLuCreationTime */ /* scsiLuCreationTime */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
@ -662,20 +502,20 @@ static struct config_item_type target_stat_scsi_lu_cit = {
* Called from target_core_configfs.c:target_core_make_subdev() to setup * Called from target_core_configfs.c:target_core_make_subdev() to setup
* the target statistics groups + configfs CITs located in target_core_stat.c * the target statistics groups + configfs CITs located in target_core_stat.c
*/ */
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) void target_stat_setup_dev_default_groups(struct se_device *dev)
{ {
struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group; struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group, config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit); "scsi_dev", &target_stat_scsi_dev_cit);
config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group, config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group, config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit); "scsi_lu", &target_stat_scsi_lu_cit);
dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group; dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group; dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group; dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
dev_stat_grp->default_groups[3] = NULL; dev_stat_grp->default_groups[3] = NULL;
} }
@ -1161,7 +1001,7 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
return -ENODEV; return -ENODEV;
} }
tpg = sep->sep_tpg; tpg = sep->sep_tpg;
wwn = &dev->se_sub_dev->t10_wwn; wwn = &dev->t10_wwn;
/* scsiTransportDevName */ /* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n", ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpg->se_tpg_tfo->tpg_get_wwn(tpg),
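Aside: target_stat_setup_dev_default_groups() above relies on the configfs convention that config_group->default_groups is a NULL-terminated array of struct config_group pointers, which is why the terminator slot is assigned last. A minimal sketch of the same pattern with hypothetical my_* names (none of these are from this series):

    /* Hypothetical configfs parent with two default child groups; the
     * pointer array is sized for both children plus the NULL terminator.
     */
    static struct config_item_type my_a_cit, my_b_cit;

    struct my_dev {
            struct config_group top_group;
            struct config_group a_group;
            struct config_group b_group;
            struct config_group *default_groups[3];
    };

    static void my_setup_default_groups(struct my_dev *d)
    {
            config_group_init_type_name(&d->a_group, "a", &my_a_cit);
            config_group_init_type_name(&d->b_group, "b", &my_b_cit);

            d->top_group.default_groups = d->default_groups;
            d->default_groups[0] = &d->a_group;
            d->default_groups[1] = &d->b_group;
            d->default_groups[2] = NULL;    /* terminator */
    }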

@@ -3,8 +3,7 @@
  *
  * This file contains SPC-3 task management infrastructure
  *
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -371,7 +370,7 @@ int core_tmr_lun_reset(
 	 * which the command was received shall be completed with TASK ABORTED
 	 * status (see SAM-4).
 	 */
-	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+	tas = dev->dev_attrib.emulate_tas;
 	/*
 	 * Determine if this se_tmr is coming from a $FABRIC_MOD
 	 * or struct se_device passthrough..
@@ -399,10 +398,10 @@ int core_tmr_lun_reset(
 	 * LOGICAL UNIT RESET
 	 */
 	if (!preempt_and_abort_list &&
-	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+	     (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
 		spin_lock(&dev->dev_reservation_lock);
 		dev->dev_reserved_node_acl = NULL;
-		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+		dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
 		spin_unlock(&dev->dev_reservation_lock);
 		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
 	}

@@ -3,10 +3,7 @@
  *
  * This file contains generic Target Portal Group related functions.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -619,6 +616,29 @@ int core_tpg_set_initiator_node_queue_depth(
 }
 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

+/*	core_tpg_set_initiator_node_tag():
+ *
+ *	Initiator nodeacl tags are not used internally, but may be used by
+ *	userspace to emulate aliases or groups.
+ *	Returns length of newly-set tag or -EINVAL.
+ */
+int core_tpg_set_initiator_node_tag(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl,
+	const char *new_tag)
+{
+	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
+		return -EINVAL;
+
+	if (!strncmp("NULL", new_tag, 4)) {
+		acl->acl_tag[0] = '\0';
+		return 0;
+	}
+
+	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+
 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
 {
 	/* Set in core_dev_setup_virtual_lun0() */
@@ -672,6 +692,7 @@ int core_tpg_register(
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 		lun = se_tpg->tpg_lun_list[i];
 		lun->unpacked_lun = i;
+		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
 		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
 		atomic_set(&lun->lun_acl_count, 0);
 		init_completion(&lun->lun_shutdown_comp);
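To illustrate where the new core_tpg_set_initiator_node_tag() export is aimed, here is a hedged sketch of a fabric module's configfs store handler driving it; the handler itself and its input handling are illustrative, only the core_tpg_*() call is from this series:

    /* Hypothetical ->store() handler for a NodeACL "tag" attribute. */
    static ssize_t my_nacl_store_tag(struct se_node_acl *acl,
                                     const char *page, size_t count)
    {
            int ret;

            /* Writing "NULL" clears the tag, per the helper above */
            ret = core_tpg_set_initiator_node_tag(acl->se_tpg, acl, page);
            if (ret < 0)
                    return ret;
            return count;
    }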

(diff for one file suppressed because it is too large)

@@ -3,8 +3,7 @@
  *
  * This file contains logic for SPC-3 Unit Attention emulation
  *
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -38,9 +37,8 @@
 #include "target_core_pr.h"
 #include "target_core_ua.h"

-int core_scsi3_ua_check(
-	struct se_cmd *cmd,
-	unsigned char *cdb)
+sense_reason_t
+target_scsi3_ua_check(struct se_cmd *cmd)
 {
 	struct se_dev_entry *deve;
 	struct se_session *sess = cmd->se_sess;
@@ -71,16 +69,14 @@ int core_scsi3_ua_check(
 	 * was received, then the device server shall process the command
 	 * and either:
 	 */
-	switch (cdb[0]) {
+	switch (cmd->t_task_cdb[0]) {
 	case INQUIRY:
 	case REPORT_LUNS:
 	case REQUEST_SENSE:
 		return 0;
 	default:
-		return -EINVAL;
+		return TCM_CHECK_CONDITION_UNIT_ATTENTION;
 	}
-
-	return -EINVAL;
 }

 int core_scsi3_ua_allocate(
@@ -237,7 +233,7 @@ void core_scsi3_ua_for_check_condition(
 	 * highest priority UNIT_ATTENTION and ASC/ASCQ without
 	 * clearing it.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
+	if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
 		*asc = ua->ua_asc;
 		*ascq = ua->ua_ascq;
 		break;
@@ -265,8 +261,8 @@ void core_scsi3_ua_for_check_condition(
 		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
 		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
-		(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
-		"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+		(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+		"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
 		cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
 }
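Because target_scsi3_ua_check() now returns a sense_reason_t, a caller can hand the value straight to the sense-building path instead of remapping -EINVAL by hand. A minimal caller sketch, assuming only the API shown in this series (the wrapper function itself is illustrative):

    /* Illustrative caller: a pending Unit Attention is surfaced as a
     * CHECK CONDITION payload rather than a bare errno.
     */
    static void my_handle_cdb(struct se_cmd *cmd)
    {
            sense_reason_t ret;

            ret = target_scsi3_ua_check(cmd);
            if (ret) {
                    /* here ret == TCM_CHECK_CONDITION_UNIT_ATTENTION */
                    transport_send_check_condition_and_sense(cmd, ret, 0);
                    return;
            }
            /* ... continue with normal command submission ... */
    }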

@@ -26,7 +26,7 @@
 extern struct kmem_cache *se_ua_cache;

-extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
 extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
 extern void core_scsi3_ua_release_all(struct se_dev_entry *);
 extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);

@@ -430,7 +430,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
 {
 	struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);

-	transport_deregister_session(sess->se_sess);
 	kfree(sess);
 }

@@ -438,6 +437,7 @@ static void ft_sess_free(struct kref *kref)
 {
 	struct ft_sess *sess = container_of(kref, struct ft_sess, kref);

+	transport_deregister_session(sess->se_sess);
 	call_rcu(&sess->rcu, ft_sess_rcu_free);
 }

@@ -538,10 +538,6 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 	if (tv_cmd->tvc_sgl_count) {
 		sg_ptr = tv_cmd->tvc_sgl;
-		/*
-		 * For BIDI commands, pass in the extra READ buffer
-		 * to transport_generic_map_mem_to_cmd() below..
-		 */
 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
 #if 0
 		if (se_cmd->se_cmd_flags & SCF_BIDI) {

@@ -9,6 +9,8 @@ struct se_subsystem_api {
 	struct list_head sub_api_list;

 	char name[16];
+	char inquiry_prod[16];
+	char inquiry_rev[4];
 	struct module *owner;

 	u8 transport_type;
@@ -16,46 +18,45 @@ struct se_subsystem_api {
 	int (*attach_hba)(struct se_hba *, u32);
 	void (*detach_hba)(struct se_hba *);
 	int (*pmode_enable_hba)(struct se_hba *, unsigned long);
-	void *(*allocate_virtdevice)(struct se_hba *, const char *);
-	struct se_device *(*create_virtdevice)(struct se_hba *,
-				struct se_subsystem_dev *, void *);
-	void (*free_device)(void *);
+	struct se_device *(*alloc_device)(struct se_hba *, const char *);
+	int (*configure_device)(struct se_device *);
+	void (*free_device)(struct se_device *device);
+	ssize_t (*set_configfs_dev_params)(struct se_device *,
+					   const char *, ssize_t);
+	ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
 	void (*transport_complete)(struct se_cmd *cmd,
 				   struct scatterlist *,
 				   unsigned char *);
-	int (*parse_cdb)(struct se_cmd *cmd);
-	ssize_t (*check_configfs_dev_params)(struct se_hba *,
-			struct se_subsystem_dev *);
-	ssize_t (*set_configfs_dev_params)(struct se_hba *,
-			struct se_subsystem_dev *, const char *, ssize_t);
-	ssize_t (*show_configfs_dev_params)(struct se_hba *,
-			struct se_subsystem_dev *, char *);
-	u32 (*get_device_rev)(struct se_device *);
+	sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
 	u32 (*get_device_type)(struct se_device *);
 	sector_t (*get_blocks)(struct se_device *);
 	unsigned char *(*get_sense_buffer)(struct se_cmd *);
 };

-struct spc_ops {
-	int (*execute_rw)(struct se_cmd *cmd);
-	int (*execute_sync_cache)(struct se_cmd *cmd);
-	int (*execute_write_same)(struct se_cmd *cmd);
-	int (*execute_unmap)(struct se_cmd *cmd);
+struct sbc_ops {
+	sense_reason_t (*execute_rw)(struct se_cmd *cmd);
+	sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
+	sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
+	sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd);
+	sense_reason_t (*execute_unmap)(struct se_cmd *cmd);
 };

 int	transport_subsystem_register(struct se_subsystem_api *);
 void	transport_subsystem_release(struct se_subsystem_api *);

-struct se_device *transport_add_device_to_core_hba(struct se_hba *,
-		struct se_subsystem_api *, struct se_subsystem_dev *, u32,
-		void *, struct se_dev_limits *, const char *, const char *);
-
 void	target_complete_cmd(struct se_cmd *, u8);

-int	sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops);
-int	spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
-int	spc_get_write_same_sectors(struct se_cmd *cmd);
+sense_reason_t	spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
+sense_reason_t	spc_emulate_report_luns(struct se_cmd *cmd);
+sector_t	spc_get_write_same_sectors(struct se_cmd *cmd);
+sense_reason_t	sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
+u32	sbc_get_device_rev(struct se_device *dev);
+u32	sbc_get_device_type(struct se_device *dev);

 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
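The spc_ops -> sbc_ops rename above pairs with the sense_reason_t conversion: a virtual backend fills in an sbc_ops table and hands it to sbc_parse_cdb() from its own parse_cdb() method. A minimal sketch assuming only the declarations above; the my_* backend names are hypothetical:

    /* Hypothetical block backend wiring for sbc_parse_cdb(). */
    static sense_reason_t my_execute_rw(struct se_cmd *cmd)
    {
            /* submit the I/O; target_complete_cmd() runs on completion */
            return 0;
    }

    static struct sbc_ops my_sbc_ops = {
            .execute_rw     = my_execute_rw,
    };

    static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
    {
            return sbc_parse_cdb(cmd, &my_sbc_ops);
    }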

@@ -62,20 +62,6 @@
 #define PYX_TRANSPORT_STATUS_INTERVAL		5 /* In seconds */

-/*
- * struct se_subsystem_dev->su_dev_flags
- */
-#define SDF_FIRMWARE_VPD_UNIT_SERIAL		0x00000001
-#define SDF_EMULATED_VPD_UNIT_SERIAL		0x00000002
-#define SDF_USING_UDEV_PATH			0x00000004
-#define SDF_USING_ALIAS				0x00000008
-
-/*
- * struct se_device->dev_flags
- */
-#define DF_SPC2_RESERVATIONS			0x00000001
-#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000002
-
 /* struct se_dev_attrib sanity values */
 /* Default max_unmap_lba_count */
 #define DA_MAX_UNMAP_LBA_COUNT			0
@@ -85,6 +71,8 @@
 #define DA_UNMAP_GRANULARITY_DEFAULT		0
 /* Default unmap_granularity_alignment */
 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
+/* Default max_write_same_len, disabled by default */
+#define DA_MAX_WRITE_SAME_LEN			0
 /* Default max transfer length */
 #define DA_FABRIC_MAX_SECTORS			8192
 /* Emulation for Direct Page Out */
@@ -107,8 +95,6 @@
  */
 #define DA_EMULATE_TPWS				0
 /* No Emulation for PSCSI by default */
-#define DA_EMULATE_RESERVATIONS			0
-/* No Emulation for PSCSI by default */
 #define DA_EMULATE_ALUA				0
 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
 #define DA_ENFORCE_PR_ISIDS			1
@@ -160,8 +146,6 @@ enum se_cmd_flags_table {
 	SCF_EMULATED_TASK_SENSE		= 0x00000004,
 	SCF_SCSI_DATA_CDB		= 0x00000008,
 	SCF_SCSI_TMR_CDB		= 0x00000010,
-	SCF_SCSI_CDB_EXCEPTION		= 0x00000020,
-	SCF_SCSI_RESERVATION_CONFLICT	= 0x00000040,
 	SCF_FUA				= 0x00000080,
 	SCF_SE_LUN_CMD			= 0x00000100,
 	SCF_BIDI			= 0x00000400,
@@ -182,38 +166,33 @@ enum transport_lunflags_table {
 	TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
 };

-/* struct se_device->dev_status */
-enum transport_device_status_table {
-	TRANSPORT_DEVICE_ACTIVATED = 0x01,
-	TRANSPORT_DEVICE_DEACTIVATED = 0x02,
-	TRANSPORT_DEVICE_QUEUE_FULL = 0x04,
-	TRANSPORT_DEVICE_SHUTDOWN = 0x08,
-	TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10,
-	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20,
-};
-
 /*
- * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
+ * Used by transport_send_check_condition_and_sense()
  * to signal which ASC/ASCQ sense payload should be built.
  */
+typedef unsigned __bitwise__ sense_reason_t;
+
 enum tcm_sense_reason_table {
-	TCM_NON_EXISTENT_LUN			= 0x01,
-	TCM_UNSUPPORTED_SCSI_OPCODE		= 0x02,
-	TCM_INCORRECT_AMOUNT_OF_DATA		= 0x03,
-	TCM_UNEXPECTED_UNSOLICITED_DATA		= 0x04,
-	TCM_SERVICE_CRC_ERROR			= 0x05,
-	TCM_SNACK_REJECTED			= 0x06,
-	TCM_SECTOR_COUNT_TOO_MANY		= 0x07,
-	TCM_INVALID_CDB_FIELD			= 0x08,
-	TCM_INVALID_PARAMETER_LIST		= 0x09,
-	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE	= 0x0a,
-	TCM_UNKNOWN_MODE_PAGE			= 0x0b,
-	TCM_WRITE_PROTECTED			= 0x0c,
-	TCM_CHECK_CONDITION_ABORT_CMD		= 0x0d,
-	TCM_CHECK_CONDITION_UNIT_ATTENTION	= 0x0e,
-	TCM_CHECK_CONDITION_NOT_READY		= 0x0f,
-	TCM_RESERVATION_CONFLICT		= 0x10,
-	TCM_ADDRESS_OUT_OF_RANGE		= 0x11,
+#define R(x)	(__force sense_reason_t )(x)
+	TCM_NON_EXISTENT_LUN			= R(0x01),
+	TCM_UNSUPPORTED_SCSI_OPCODE		= R(0x02),
+	TCM_INCORRECT_AMOUNT_OF_DATA		= R(0x03),
+	TCM_UNEXPECTED_UNSOLICITED_DATA		= R(0x04),
+	TCM_SERVICE_CRC_ERROR			= R(0x05),
+	TCM_SNACK_REJECTED			= R(0x06),
+	TCM_SECTOR_COUNT_TOO_MANY		= R(0x07),
+	TCM_INVALID_CDB_FIELD			= R(0x08),
+	TCM_INVALID_PARAMETER_LIST		= R(0x09),
+	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE	= R(0x0a),
+	TCM_UNKNOWN_MODE_PAGE			= R(0x0b),
+	TCM_WRITE_PROTECTED			= R(0x0c),
+	TCM_CHECK_CONDITION_ABORT_CMD		= R(0x0d),
+	TCM_CHECK_CONDITION_UNIT_ATTENTION	= R(0x0e),
+	TCM_CHECK_CONDITION_NOT_READY		= R(0x0f),
+	TCM_RESERVATION_CONFLICT		= R(0x10),
+	TCM_ADDRESS_OUT_OF_RANGE		= R(0x11),
+	TCM_OUT_OF_RESOURCES			= R(0x12),
+#undef R
 };

 enum target_sc_flags_table {
@@ -246,30 +225,6 @@ enum tcm_tmrsp_table {
 	TMR_FUNCTION_REJECTED		= 255,
 };

-struct se_obj {
-	atomic_t obj_access_count;
-};
-
-/*
- * Used by TCM Core internally to signal if ALUA emulation is enabled or
- * disabled, or running in with TCM/pSCSI passthrough mode
- */
-typedef enum {
-	SPC_ALUA_PASSTHROUGH,
-	SPC2_ALUA_DISABLED,
-	SPC3_ALUA_EMULATED
-} t10_alua_index_t;
-
-/*
- * Used by TCM Core internally to signal if SAM Task Attribute emulation
- * is enabled or disabled, or running in with TCM/pSCSI passthrough mode
- */
-typedef enum {
-	SAM_TASK_ATTR_PASSTHROUGH,
-	SAM_TASK_ATTR_UNTAGGED,
-	SAM_TASK_ATTR_EMULATED
-} t10_task_attr_index_t;
-
 /*
  * Used for target SCSI statistics
  */
@@ -283,17 +238,15 @@ typedef enum {
 struct se_cmd;

 struct t10_alua {
-	t10_alua_index_t alua_type;
 	/* ALUA Target Port Group ID */
 	u16	alua_tg_pt_gps_counter;
 	u32	alua_tg_pt_gps_count;
 	spinlock_t tg_pt_gps_lock;
-	struct se_subsystem_dev *t10_sub_dev;
+	struct se_device *t10_dev;
 	/* Used for default ALUA Target Port Group */
 	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
 	/* Used for default ALUA Target Port Group ConfigFS group */
 	struct config_group alua_tg_pt_gps_group;
-	int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
 	struct list_head tg_pt_gps_list;
 };
@@ -335,7 +288,7 @@ struct t10_alua_tg_pt_gp {
 	atomic_t tg_pt_gp_ref_cnt;
 	spinlock_t tg_pt_gp_lock;
 	struct mutex tg_pt_gp_md_mutex;
-	struct se_subsystem_dev *tg_pt_gp_su_dev;
+	struct se_device *tg_pt_gp_dev;
 	struct config_group tg_pt_gp_group;
 	struct list_head tg_pt_gp_list;
 	struct list_head tg_pt_gp_mem_list;
@@ -366,23 +319,11 @@ struct t10_wwn {
 	char revision[4];
 	char unit_serial[INQUIRY_VPD_SERIAL_LEN];
 	spinlock_t t10_vpd_lock;
-	struct se_subsystem_dev *t10_sub_dev;
+	struct se_device *t10_dev;
 	struct config_group t10_wwn_group;
 	struct list_head t10_vpd_list;
 };

-/*
- * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
- * emulation is enabled or disabled, or running in with TCM/pSCSI passthrough
- * mode
- */
-typedef enum {
-	SPC_PASSTHROUGH,
-	SPC2_RESERVATIONS,
-	SPC3_PERSISTENT_RESERVATIONS
-} t10_reservations_index_t;
-
 struct t10_pr_registration {
 	/* Used for fabrics that contain WWN+ISID */
 #define PR_REG_ISID_LEN				16
@@ -424,18 +365,6 @@ struct t10_pr_registration {
 	struct list_head pr_reg_atp_mem_list;
 };

-/*
- * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
- * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
- * core_setup_reservations()
- */
-struct t10_reservation_ops {
-	int (*t10_reservation_check)(struct se_cmd *, u32 *);
-	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
-	int (*t10_pr_register)(struct se_cmd *);
-	int (*t10_pr_clear)(struct se_cmd *);
-};
-
 struct t10_reservation {
 	/* Reservation effects all target ports */
 	int pr_all_tg_pt;
@@ -446,7 +375,6 @@ struct t10_reservation {
 #define PR_APTPL_BUF_LEN			8192
 	u32 pr_aptpl_buf_len;
 	u32 pr_generation;
-	t10_reservations_index_t res_type;
 	spinlock_t registration_lock;
 	spinlock_t aptpl_reg_lock;
 	/*
@@ -462,7 +390,6 @@ struct t10_reservation {
 	struct se_node_acl *pr_res_holder;
 	struct list_head registration_list;
 	struct list_head aptpl_reg_list;
-	struct t10_reservation_ops pr_ops;
 };

 struct se_tmr_req {
@@ -485,7 +412,6 @@ struct se_cmd {
 	u8			scsi_status;
 	u8			scsi_asc;
 	u8			scsi_ascq;
-	u8			scsi_sense_reason;
 	u16			scsi_sense_length;
 	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
 	int			alua_nonop_delay;
@@ -523,7 +449,7 @@ struct se_cmd {
 	struct completion	cmd_wait_comp;
 	struct kref		cmd_kref;
 	struct target_core_fabric_ops *se_tfo;
-	int (*execute_cmd)(struct se_cmd *);
+	sense_reason_t (*execute_cmd)(struct se_cmd *);
 	void (*transport_complete_callback)(struct se_cmd *);

 	unsigned char		*t_task_cdb;
@@ -581,6 +507,8 @@ struct se_node_acl {
 	bool			acl_stop:1;
 	u32			queue_depth;
 	u32			acl_index;
+#define MAX_ACL_TAG_SIZE 64
+	char			acl_tag[MAX_ACL_TAG_SIZE];
 	u64			num_cmds;
 	u64			read_bytes;
 	u64			write_bytes;
@@ -662,15 +590,6 @@ struct se_dev_entry {
 	struct list_head	ua_list;
 };

-struct se_dev_limits {
-	/* Max supported HW queue depth */
-	u32		hw_queue_depth;
-	/* Max supported virtual queue depth */
-	u32		queue_depth;
-	/* From include/linux/blkdev.h for the other HW/SW limits. */
-	struct queue_limits limits;
-};
-
 struct se_dev_attrib {
 	int		emulate_dpo;
 	int		emulate_fua_write;
@@ -680,8 +599,6 @@ struct se_dev_attrib {
 	int		emulate_tas;
 	int		emulate_tpu;
 	int		emulate_tpws;
-	int		emulate_reservations;
-	int		emulate_alua;
 	int		enforce_pr_isids;
 	int		is_nonrot;
 	int		emulate_rest_reord;
@@ -696,7 +613,8 @@ struct se_dev_attrib {
 	u32		max_unmap_block_desc_count;
 	u32		unmap_granularity;
 	u32		unmap_granularity_alignment;
-	struct se_subsystem_dev *da_sub_dev;
+	u32		max_write_same_len;
+	struct se_device *da_dev;
 	struct config_group da_group;
 };
@@ -707,48 +625,25 @@ struct se_dev_stat_grps {
 	struct config_group scsi_lu_group;
 };

-struct se_subsystem_dev {
-/* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */
-#define SE_DEV_ALIAS_LEN		512
-	unsigned char	se_dev_alias[SE_DEV_ALIAS_LEN];
-/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
-#define SE_UDEV_PATH_LEN		512
-	unsigned char	se_dev_udev_path[SE_UDEV_PATH_LEN];
-	u32		su_dev_flags;
-	struct se_hba *se_dev_hba;
-	struct se_device *se_dev_ptr;
-	struct se_dev_attrib se_dev_attrib;
-	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
-	struct t10_alua	t10_alua;
-	/* T10 Inquiry and VPD WWN Information */
-	struct t10_wwn	t10_wwn;
-	/* T10 SPC-2 + SPC-3 Reservations */
-	struct t10_reservation t10_pr;
-	spinlock_t      se_dev_lock;
-	void            *se_dev_su_ptr;
-	struct config_group se_dev_group;
-	/* For T10 Reservations */
-	struct config_group se_dev_pr_group;
-	/* For target_core_stat.c groups */
-	struct se_dev_stat_grps dev_stat_grps;
-};
-
 struct se_device {
+#define SE_DEV_LINK_MAGIC			0xfeeddeef
+	u32			dev_link_magic;
 	/* RELATIVE TARGET PORT IDENTIFER Counter */
 	u16			dev_rpti_counter;
 	/* Used for SAM Task Attribute ordering */
 	u32			dev_cur_ordered_id;
 	u32			dev_flags;
+#define DF_CONFIGURED				0x00000001
+#define DF_FIRMWARE_VPD_UNIT_SERIAL		0x00000002
+#define DF_EMULATED_VPD_UNIT_SERIAL		0x00000004
+#define DF_USING_UDEV_PATH			0x00000008
+#define DF_USING_ALIAS				0x00000010
 	u32			dev_port_count;
-	/* See transport_device_status_table */
-	u32			dev_status;
 	/* Physical device queue depth */
 	u32			queue_depth;
 	/* Used for SPC-2 reservations enforce of ISIDs */
 	u64			dev_res_bin_isid;
-	t10_task_attr_index_t dev_task_attr_type;
 	/* Pointer to transport specific device structure */
-	void 			*dev_ptr;
 	u32			dev_index;
 	u64			creation_time;
 	u32			num_resets;
@@ -761,13 +656,13 @@ struct se_device {
 	atomic_t		dev_ordered_id;
 	atomic_t		dev_ordered_sync;
 	atomic_t		dev_qf_count;
-	struct se_obj		dev_obj;
-	struct se_obj		dev_access_obj;
-	struct se_obj		dev_export_obj;
+	int			export_count;
 	spinlock_t		delayed_cmd_lock;
 	spinlock_t		execute_task_lock;
 	spinlock_t		dev_reservation_lock;
-	spinlock_t		dev_status_lock;
+	unsigned int		dev_reservation_flags;
+#define DRF_SPC2_RESERVATIONS			0x00000001
+#define DRF_SPC2_RESERVATIONS_WITH_ISID		0x00000002
 	spinlock_t		se_port_lock;
 	spinlock_t		se_tmr_lock;
 	spinlock_t		qf_cmd_lock;
@@ -786,7 +681,20 @@ struct se_device {
 	struct list_head	qf_cmd_list;
 	/* Pointer to associated SE HBA */
 	struct se_hba		*se_hba;
-	struct se_subsystem_dev *se_sub_dev;
+	/* T10 Inquiry and VPD WWN Information */
+	struct t10_wwn		t10_wwn;
+	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
+	struct t10_alua		t10_alua;
+	/* T10 SPC-2 + SPC-3 Reservations */
+	struct t10_reservation	t10_pr;
+	struct se_dev_attrib	dev_attrib;
+	struct config_group	dev_group;
+	struct config_group	dev_pr_group;
+	struct se_dev_stat_grps dev_stat_grps;
+#define SE_DEV_ALIAS_LEN		512	/* must be less than PAGE_SIZE */
+	unsigned char		dev_alias[SE_DEV_ALIAS_LEN];
+#define SE_UDEV_PATH_LEN		512	/* must be less than PAGE_SIZE */
+	unsigned char		udev_path[SE_UDEV_PATH_LEN];
 	/* Pointer to template of function pointers for transport */
 	struct se_subsystem_api *transport;
 	/* Linked list for struct se_hba struct se_device list */
@@ -803,8 +711,6 @@ struct se_hba {
 	u32			hba_index;
 	/* Pointer to transport specific host structure. */
 	void			*hba_ptr;
-	/* Linked list for struct se_device */
-	struct list_head	hba_dev_list;
 	struct list_head	hba_node;
 	spinlock_t		device_lock;
 	struct config_group	hba_group;
@@ -820,6 +726,8 @@ struct se_port_stat_grps {
 };

 struct se_lun {
+#define SE_LUN_LINK_MAGIC			0xffff7771
+	u32			lun_link_magic;
 	/* See transport_lun_status_table */
 	enum transport_lun_status_table lun_status;
 	u32			lun_access;
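The sense_reason_t introduced above uses the kernel's __bitwise__ annotation, so sparse (make C=1) treats it as a type distinct from plain integers and warns on implicit mixing; the R() macro applies the required __force cast once, at enum definition time. A sketch of the same pattern with hypothetical names:

    /* Illustrative __bitwise__ typedef: sparse flags any implicit
     * mixing of my_status_t with ordinary ints or errnos.
     */
    typedef unsigned __bitwise__ my_status_t;

    #define MY_STATUS_GOOD  ((__force my_status_t)0x0)
    #define MY_STATUS_BAD   ((__force my_status_t)0x1)

    static my_status_t my_op(int fail)
    {
            if (fail)
                    return MY_STATUS_BAD;
            return MY_STATUS_GOOD;  /* returning -EINVAL here would warn */
    }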

@@ -98,8 +98,8 @@ void transport_deregister_session(struct se_session *);
 void	transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
 		struct se_session *, u32, int, int, unsigned char *);
-int	transport_lookup_cmd_lun(struct se_cmd *, u32);
-int	target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
+sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
+sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 int	target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
 		unsigned char *, unsigned char *, u32, u32, int, int, int,
 		struct scatterlist *, u32, struct scatterlist *, u32);
@@ -110,9 +110,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 		void *fabric_tmr_ptr, unsigned char tm_type,
 		gfp_t, unsigned int, int);
 int	transport_handle_cdb_direct(struct se_cmd *);
-int	transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
-		struct scatterlist *, u32, struct scatterlist *, u32);
-int	transport_generic_new_cmd(struct se_cmd *);
+sense_reason_t	transport_generic_new_cmd(struct se_cmd *);
 void	target_execute_cmd(struct se_cmd *cmd);
@@ -120,7 +118,8 @@ void transport_generic_free_cmd(struct se_cmd *, int);
 bool	transport_wait_for_tasks(struct se_cmd *);
 int	transport_check_aborted_status(struct se_cmd *, int);
-int	transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
+int	transport_send_check_condition_and_sense(struct se_cmd *,
+		sense_reason_t, int);
 int	target_put_sess_cmd(struct se_session *, struct se_cmd *);
 void	target_sess_cmd_list_set_waiting(struct se_session *);
@@ -131,7 +130,7 @@ int core_alua_check_nonop_delay(struct se_cmd *);
 int	core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
 void	core_tmr_release_req(struct se_tmr_req *);
 int	transport_generic_handle_tmr(struct se_cmd *);
-void	transport_generic_request_failure(struct se_cmd *);
+void	transport_generic_request_failure(struct se_cmd *, sense_reason_t);
 int	transport_lookup_tmr_lun(struct se_cmd *, u32);

 struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
@@ -143,6 +142,8 @@ int core_tpg_del_initiator_node_acl(struct se_portal_group *,
 		struct se_node_acl *, int);
 int	core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
 		unsigned char *, u32, int);
+int	core_tpg_set_initiator_node_tag(struct se_portal_group *,
+		struct se_node_acl *, const char *);
 int	core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
 		struct se_portal_group *, void *, int);
 int	core_tpg_deregister(struct se_portal_group *);