Revert "Revert "BACKPORT: FROMGIT: scsi: ufs: Optimize host lock on transfer requests send/compl paths""
This reverts commit 46575badbb.
We need to go back to the upstream version with the right fix.
Bug: 192095860
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
Change-Id: I3dd1eb638bb3a95b3c8d40673f0821afdeb74f96
@@ -730,7 +730,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
*/
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
__clear_bit(tag, &hba->outstanding_reqs);
clear_bit(tag, &hba->outstanding_reqs);
}

/**
@@ -1956,15 +1956,19 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
unsigned long flags;

if (!ufshcd_is_clkscaling_supported(hba))
return;

spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;

if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
return;
}

if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -1980,21 +1984,26 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags;

if (!ufshcd_is_clkscaling_supported(hba))
return;

spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_scaling.active_reqs--;
if (!hba->outstanding_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
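Note: the diff viewer dropped the +/- markers, so removed and added lines run together above. For orientation, a minimal sketch of how ufshcd_clk_scaling_update_busy() likely reads with this hunk applied; the indentation and exact layout are an assumption based on the lines shown above:

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	/* The helper now takes the host lock itself, so callers no longer hold it. */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.active_reqs--;
	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
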
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
@@ -2020,15 +2029,20 @@ static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;

spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
hba->monitor.busy_start_ts[dir] = ktime_get();
spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;

spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
struct request *req = lrbp->cmd->request;
struct ufs_hba_monitor *m = &hba->monitor;
@@ -2052,6 +2066,7 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/* Push forward the busy start of monitor */
m->busy_start_ts[dir] = now;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
}

/**
@@ -2063,6 +2078,7 @@ static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;

lrbp->issue_time_stamp = ktime_get();
lrbp->compl_time_stamp = ktime_set(0, 0);
@@ -2070,10 +2086,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
trace_android_vh_ufs_send_command(hba, lrbp);
ufshcd_add_command_trace(hba, task_tag, "send");
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
spin_lock_irqsave(hba->host->host_lock, flags);
set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Make sure that doorbell is committed immediately */
wmb();
}
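As above, the removed and added lines of this hunk are interleaved without markers. A sketch of the likely post-patch ufshcd_send_command(): the host lock is now held only while the outstanding bit is set and the doorbell is rung, which is the point of the re-applied optimization. Layout is assumed from the hunk; lines hidden between the two hunk headers are elided:

static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	unsigned long flags;

	lrbp->issue_time_stamp = ktime_get();
	lrbp->compl_time_stamp = ktime_set(0, 0);
	/* ... (lines hidden between the two hunks above) ... */
	trace_android_vh_ufs_send_command(hba, lrbp);
	ufshcd_add_command_trace(hba, task_tag, "send");
	ufshcd_clk_scaling_start_busy(hba);
	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
		ufshcd_start_monitor(hba, lrbp);

	/* Lock only around the outstanding bit and the doorbell write. */
	spin_lock_irqsave(hba->host->host_lock, flags);
	set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	/* Make sure that doorbell is committed immediately */
	wmb();
}
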
@@ -2637,7 +2655,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufshcd_lrb *lrbp;
struct ufs_hba *hba;
unsigned long flags;
int tag;
int err = 0;

@@ -2654,6 +2671,43 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;

switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
break;
case UFSHCD_STATE_EH_SCHEDULED_FATAL:
/*
* pm_runtime_get_sync() is used at error handling preparation
* stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
* PM ops, it can never be finished if we let SCSI layer keep
* retrying it, which gets err handler stuck forever. Neither
* can we let the scsi cmd pass through, because UFS is in bad
* state, the scsi cmd may eventually time out, which will get
* err handler blocked for too long. So, just fail the scsi cmd
* sent from PM ops, err handler can recover PM error anyways.
*/
if (hba->pm_op_in_progress) {
hba->force_reset = true;
set_host_byte(cmd, DID_BAD_TARGET);
cmd->scsi_done(cmd);
goto out;
}
fallthrough;
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out;
case UFSHCD_STATE_ERROR:
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
goto out;
default:
dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
__func__, hba->ufshcd_state);
set_host_byte(cmd, DID_BAD_TARGET);
cmd->scsi_done(cmd);
goto out;
}

hba->req_abort_count = 0;

err = ufshcd_hold(hba, true);
@@ -2664,8 +2718,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
(hba->clk_gating.state != CLKS_ON));

lrbp = &hba->lrb[tag];
if (unlikely(lrbp->in_use)) {
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
if (hba->pm_op_in_progress)
set_host_byte(cmd, DID_BAD_TARGET);
else
@@ -2674,6 +2727,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}

lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
lrbp->sense_bufflen = UFS_SENSE_SIZE;
@@ -2704,51 +2758,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Make sure descriptors are ready before ringing the doorbell */
wmb();

spin_lock_irqsave(hba->host->host_lock, flags);
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
break;
case UFSHCD_STATE_EH_SCHEDULED_FATAL:
/*
* pm_runtime_get_sync() is used at error handling preparation
* stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
* PM ops, it can never be finished if we let SCSI layer keep
* retrying it, which gets err handler stuck forever. Neither
* can we let the scsi cmd pass through, because UFS is in bad
* state, the scsi cmd may eventually time out, which will get
* err handler blocked for too long. So, just fail the scsi cmd
* sent from PM ops, err handler can recover PM error anyways.
*/
if (hba->pm_op_in_progress) {
hba->force_reset = true;
set_host_byte(cmd, DID_BAD_TARGET);
goto out_compl_cmd;
}
fallthrough;
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_compl_cmd;
case UFSHCD_STATE_ERROR:
set_host_byte(cmd, DID_ERROR);
goto out_compl_cmd;
default:
dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
__func__, hba->ufshcd_state);
set_host_byte(cmd, DID_BAD_TARGET);
goto out_compl_cmd;
}
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
goto out;

out_compl_cmd:
scsi_dma_unmap(lrbp->cmd);
lrbp->cmd = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
if (!err)
cmd->scsi_done(cmd);
out:
up_read(&hba->clk_scaling_lock);
return err;
@@ -2903,7 +2913,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
int err;
int tag;
struct completion wait;
unsigned long flags;

down_read(&hba->clk_scaling_lock);

@@ -2923,34 +2932,30 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
req->timeout = msecs_to_jiffies(2 * timeout);
blk_mq_start_request(req);

init_completion(&wait);
lrbp = &hba->lrb[tag];
if (unlikely(lrbp->in_use)) {
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}

init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
goto out_put_tag;
goto out;

hba->dev_cmd.complete = &wait;

ufshcd_add_query_upiu_trace(hba, tag, "query_send");
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);

ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);

err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out:
ufshcd_add_query_upiu_trace(hba, tag,
err ? "query_complete_err" : "query_complete");

out_put_tag:
out:
blk_put_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
@@ -5083,6 +5088,24 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return result;
}

static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
u32 intr_mask)
{
if (!ufshcd_is_auto_hibern8_supported(hba) ||
!ufshcd_is_auto_hibern8_enabled(hba))
return false;

if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
return false;

if (hba->active_uic_cmd &&
(hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
return false;

return true;
}

/**
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
@@ -5096,6 +5119,10 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;

spin_lock(hba->host->host_lock);
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
@@ -5116,6 +5143,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
"complete");
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -5134,8 +5162,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
bool update_scaling = false;

for_each_set_bit(index, &completed_reqs, hba->nutrs) {
if (!test_and_clear_bit(index, &hba->outstanding_reqs))
continue;
lrbp = &hba->lrb[index];
lrbp->in_use = false;
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
@@ -5151,7 +5180,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->cmd = NULL;
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
ufshcd_release(hba);
update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
@@ -5163,14 +5192,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
update_scaling = true;
}
}
if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
hba->clk_scaling.active_reqs--;
if (update_scaling)
ufshcd_clk_scaling_update_busy(hba);
}

/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;

ufshcd_clk_scaling_update_busy(hba);
}
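For the completion side, a sketch of how the loop in __ufshcd_transfer_req_compl() likely ends up once these hunks are applied: each tag is claimed with test_and_clear_bit(), so the trailing "outstanding_reqs ^= completed_reqs" under the host lock goes away, and the clock-scaling bookkeeping moves into ufshcd_clk_scaling_update_busy(). Elided parts are marked with /* ... */; the exact layout is an assumption:

for_each_set_bit(index, &completed_reqs, hba->nutrs) {
	/* Claim the tag; skip it if another path already completed it. */
	if (!test_and_clear_bit(index, &hba->outstanding_reqs))
		continue;
	lrbp = &hba->lrb[index];
	lrbp->compl_time_stamp = ktime_get();
	cmd = lrbp->cmd;
	if (cmd) {
		/* ... result handling ... */
		lrbp->cmd = NULL;
		/* Do not touch lrbp after scsi done */
		cmd->scsi_done(cmd);
		ufshcd_release(hba);
		update_scaling = true;
	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
		   lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
		/* ... device management command completion ... */
		update_scaling = true;
	}
	if (update_scaling)
		ufshcd_clk_scaling_update_busy(hba);
}
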
/**
@@ -5183,7 +5207,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
*/
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs;
unsigned long completed_reqs, flags;
u32 tr_doorbell;

/* Resetting interrupt aggregation counters first and reading the
@@ -5197,8 +5221,10 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);

spin_lock_irqsave(hba->host->host_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
spin_unlock_irqrestore(hba->host->host_lock, flags);

if (completed_reqs) {
__ufshcd_transfer_req_compl(hba, completed_reqs);
@@ -5956,13 +5982,11 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_prepare(hba);
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_RESET;

/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);

/*
* A full reset and restore might have happened after preparation
* is finished, double check whether we should stop.
@@ -6045,12 +6069,11 @@ static void ufshcd_err_handler(struct work_struct *work)
}

lock_skip_pending_xfer_clear:
spin_lock_irqsave(hba->host->host_lock, flags);

/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;

spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = false;
if (err_xfer || err_tm) {
needs_reset = true;
goto do_reset;
@@ -6200,37 +6223,23 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
return retval;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
u32 intr_mask)
{
if (!ufshcd_is_auto_hibern8_supported(hba) ||
!ufshcd_is_auto_hibern8_enabled(hba))
return false;

if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
return false;

if (hba->active_uic_cmd &&
(hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
return false;

return true;
}

/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
* @intr_status: interrupt status generated by the controller
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;

spin_lock(hba->host->host_lock);
hba->errors |= UFSHCD_ERROR_MASK & intr_status;

if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
hba->errors);
@@ -6287,6 +6296,9 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
hba->errors = 0;
hba->uic_error = 0;
spin_unlock(hba->host->host_lock);
return retval;
}

@@ -6321,13 +6333,17 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
unsigned long flags;
struct request_queue *q = hba->tmf_queue;
struct ctm_info ci = {
.hba = hba,
.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
};

spin_lock_irqsave(hba->host->host_lock, flags);
ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
spin_unlock_irqrestore(hba->host->host_lock, flags);

return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}

@@ -6344,17 +6360,12 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;

hba->errors = UFSHCD_ERROR_MASK & intr_status;

if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

if (hba->errors)
retval |= ufshcd_check_errors(hba);

if (intr_status & UFSHCD_UIC_MASK)
retval |= ufshcd_uic_cmd_compl(hba, intr_status);

if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
retval |= ufshcd_check_errors(hba, intr_status);

if (intr_status & UTP_TASK_REQ_COMPL)
retval |= ufshcd_tmc_handler(hba);
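A sketch of the likely post-patch ufshcd_sl_intr(): the error masking moves into ufshcd_check_errors(), which now receives intr_status and takes the host lock itself, so the top-level handler no longer runs under the lock. The trailing transfer-completion check is outside the hunk above and is assumed here:

static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	if (intr_status & UFSHCD_UIC_MASK)
		retval |= ufshcd_uic_cmd_compl(hba, intr_status);

	/* ufshcd_check_errors() does its own locking and error masking now. */
	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
		retval |= ufshcd_check_errors(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		retval |= ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	return retval;
}
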
@@ -6380,7 +6391,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;

spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba->ufs_stats.last_intr_status = intr_status;
hba->ufs_stats.last_intr_ts = ktime_get();
@@ -6412,7 +6422,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}

spin_unlock(hba->host->host_lock);
return retval;
}

@@ -6589,7 +6598,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
int err = 0;
int tag;
struct completion wait;
unsigned long flags;
u8 upiu_flags;

down_read(&hba->clk_scaling_lock);
@@ -6602,13 +6610,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));

init_completion(&wait);
lrbp = &hba->lrb[tag];
if (unlikely(lrbp->in_use)) {
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}

init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
lrbp->sense_bufflen = 0;
@@ -6646,10 +6654,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,

/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);

ufshcd_send_command(hba, tag);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -6768,7 +6774,6 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
u32 pos;
int err;
u8 resp = 0xF, lun;
unsigned long flags;

host = cmd->device->host;
hba = shost_priv(host);
@@ -6787,11 +6792,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
__ufshcd_transfer_req_compl(hba, pos);
}
}
spin_lock_irqsave(host->host_lock, flags);
ufshcd_transfer_req_compl(hba);
spin_unlock_irqrestore(host->host_lock, flags);

out:
hba->req_abort_count = 0;
@@ -6967,20 +6970,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will fail, due to spec violation, scsi err handling next step
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
* the lrb taken by this cmd and mark the lrb as in_use, then
* queue the eh_work and bail.
* the lrb taken by this cmd and re-set it in outstanding_reqs,
* then queue the eh_work and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
__ufshcd_transfer_req_compl(hba, (1UL << tag));
set_bit(tag, &hba->outstanding_reqs);
spin_lock_irqsave(host->host_lock, flags);
if (lrbp->cmd) {
__ufshcd_transfer_req_compl(hba, (1UL << tag));
__set_bit(tag, &hba->outstanding_reqs);
lrbp->in_use = true;
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
}

hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
goto out;
}
@@ -6993,9 +6992,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)

if (!err) {
cleanup:
spin_lock_irqsave(host->host_lock, flags);
__ufshcd_transfer_req_compl(hba, (1UL << tag));
spin_unlock_irqrestore(host->host_lock, flags);
out:
err = SUCCESS;
} else {
@@ -7025,19 +7022,15 @@ out:
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
int err;
unsigned long flags;

/*
* Stop the host controller and complete the requests
* cleared by h/w
*/
ufshcd_hba_stop(hba);

spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);

/* scale up clocks to max frequency before full reinitialization */
ufshcd_set_clk_freq(hba, true);

@@ -188,7 +188,6 @@ struct ufs_pm_lvl_states {
* @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
* @data_unit_num: the data unit number for the first block for inline crypto
* @req_abort_skip: skip request abort task flag
* @in_use: indicates that this lrb is still in use
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -218,7 +217,6 @@ struct ufshcd_lrb {
#endif

bool req_abort_skip;
bool in_use;
};

/**