ide: use queue lock instead of ide_lock when possible
This is just a preparation for future changes and there should be no functional changes caused by this patch, since ide_lock is currently also used as the queue lock.

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
This commit is contained in:
parent 3c8a2cce47
commit 6ea52226ca
2 changed files with 18 additions and 15 deletions
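The "no functional changes" claim rests on how the per-drive queue is set up: at this point in the tree the IDE probe code creates the request queue with &ide_lock as its queue lock, so q->queue_lock and &ide_lock name the same spinlock. Below is a minimal sketch of that relationship, assuming the blk_init_queue_node() based initialization used by the IDE probe code of this era; the function name, do_ide_request callback, and error handling are simplified and should be read as illustrative, not as the exact probe code.

#include <linux/blkdev.h>
#include <linux/ide.h>

/*
 * Sketch only: the per-drive request queue is created with &ide_lock
 * as its queue lock, which is what makes the &ide_lock ->
 * q->queue_lock conversion in this patch a no-op.  Simplified from
 * the IDE probe code of this period.
 */
static int ide_init_queue_sketch(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request_queue *q;

	/* blk_init_queue_node() stores the passed lock in q->queue_lock */
	q = blk_init_queue_node(do_ide_request, &ide_lock,
				hwif_to_node(hwif));
	if (q == NULL)
		return 1;

	drive->queue = q;
	/* from here on, drive->queue->queue_lock == &ide_lock */
	return 0;
}

Because the lock behind q->queue_lock is still ide_lock itself, switching the call sites in the hunks below to q->queue_lock changes only which name the code uses, not which lock it takes.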
@@ -424,16 +424,17 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 			if (time_after(jiffies, info->write_timeout))
 				do_end_request = 1;
 			else {
+				struct request_queue *q = drive->queue;
 				unsigned long flags;
 
 				/*
 				 * take a breather relying on the unplug
 				 * timer to kick us again
 				 */
-				spin_lock_irqsave(&ide_lock, flags);
-				blk_plug_device(drive->queue);
-				spin_unlock_irqrestore(&ide_lock,
-						       flags);
+				spin_lock_irqsave(q->queue_lock, flags);
+				blk_plug_device(q);
+				spin_unlock_irqrestore(q->queue_lock, flags);
 				return 1;
 			}
 		}
@@ -502,11 +503,12 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 
 end_request:
 	if (stat & ATA_ERR) {
+		struct request_queue *q = drive->queue;
 		unsigned long flags;
 
-		spin_lock_irqsave(&ide_lock, flags);
+		spin_lock_irqsave(q->queue_lock, flags);
 		blkdev_dequeue_request(rq);
-		spin_unlock_irqrestore(&ide_lock, flags);
+		spin_unlock_irqrestore(q->queue_lock, flags);
 
 		hwgroup->rq = NULL;
@@ -247,20 +247,21 @@ EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
  */
 static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 {
+	struct request_queue *q = drive->queue;
 	unsigned long flags;
 
 #ifdef DEBUG_PM
 	printk("%s: completing PM request, %s\n", drive->name,
 	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
 #endif
-	spin_lock_irqsave(&ide_lock, flags);
+	spin_lock_irqsave(q->queue_lock, flags);
 	if (blk_pm_suspend_request(rq)) {
-		blk_stop_queue(drive->queue);
+		blk_stop_queue(q);
 	} else {
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-		blk_start_queue(drive->queue);
+		blk_start_queue(q);
 	}
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	drive->hwif->hwgroup->rq = NULL;
@@ -1469,16 +1470,16 @@ out:
 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
 	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request_queue *q = drive->queue;
 	unsigned long flags;
 
 	hwgroup->rq = NULL;
 
-	spin_lock_irqsave(&ide_lock, flags);
-	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-	blk_start_queueing(drive->queue);
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	blk_start_queueing(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 EXPORT_SYMBOL(ide_do_drive_cmd);
 
 void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)