Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-24 07:31:41 +00:00)
Orangefs: Don't wait the old-fashioned way.

Get rid of add_wait_queue, set_current_state, etc., and use the
wait_event() model.

Signed-off-by: Mike Marshall <hubcap@omnibond.com>
parent 97f100277c
commit ce6c414e17

3 changed files with 26 additions and 43 deletions
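For context on the change itself: the patch replaces hand-rolled wait loops (DECLARE_WAITQUEUE + add_wait_queue + set_current_state + remove_wait_queue) with the prepare_to_wait()/finish_wait() helpers that the wait_event() family is built on. The sketch below is not code from this commit; it is a minimal illustration of the two idioms, where my_waitq and my_condition are hypothetical names standing in for the Orangefs wait queues and completion flags.

	#include <linux/wait.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_waitq);	/* hypothetical wait queue */
	static int my_condition;			/* hypothetical wake-up condition */

	/* Old-fashioned style removed by this patch: open-coded sleep. */
	static int wait_old_style(void)
	{
		DECLARE_WAITQUEUE(wait_entry, current);
		int ret = 0;

		add_wait_queue(&my_waitq, &wait_entry);
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (my_condition)
				break;
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&my_waitq, &wait_entry);
		return ret;
	}

	/*
	 * Style introduced by this patch: prepare_to_wait()/finish_wait(),
	 * the same primitives the wait_event_interruptible() macro expands to.
	 */
	static int wait_new_style(void)
	{
		DEFINE_WAIT(wait_entry);
		int ret = 0;

		while (1) {
			prepare_to_wait(&my_waitq, &wait_entry, TASK_INTERRUPTIBLE);
			if (my_condition)
				break;
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}
		finish_wait(&my_waitq, &wait_entry);
		return ret;
	}

When nothing has to happen under a lock between setting the task state and checking the condition, the whole loop collapses to wait_event_interruptible(my_waitq, my_condition); the Orangefs code keeps the explicit loop, presumably because each condition check happens under op->lock or slot_lock, as the hunks below show.
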
@@ -432,7 +432,6 @@ static ssize_t orangefs_devreq_writev(struct file *file,
 			return -EIO;
 		}
 	} else {
-		/* Change downcall status */
 		gossip_err("writev: could not vmalloc for trailer!\n");
 		dev_req_release(buffer);
 		put_op(op);
@@ -453,7 +452,7 @@ no_trailer:
 	 */
 	if (op->upcall.type == ORANGEFS_VFS_OP_FILE_IO) {
 		int timed_out = 0;
-		DECLARE_WAITQUEUE(wait_entry, current);
+		DEFINE_WAIT(wait_entry);
 
 		/*
 		 * tell the vfs op waiting on a waitqueue
@@ -463,14 +462,14 @@ no_trailer:
 		set_op_state_serviced(op);
 		spin_unlock(&op->lock);
 
-		add_wait_queue_exclusive(&op->io_completion_waitq,
-					 &wait_entry);
 		wake_up_interruptible(&op->waitq);
 
 		while (1) {
-			set_current_state(TASK_INTERRUPTIBLE);
-
 			spin_lock(&op->lock);
+			prepare_to_wait_exclusive(
+				&op->io_completion_waitq,
+				&wait_entry,
+				TASK_INTERRUPTIBLE);
 			if (op->io_completed) {
 				spin_unlock(&op->lock);
 				break;
@@ -497,9 +496,9 @@ no_trailer:
 			break;
 		}
 
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&op->io_completion_waitq,
-				  &wait_entry);
+		spin_lock(&op->lock);
+		finish_wait(&op->io_completion_waitq, &wait_entry);
+		spin_unlock(&op->lock);
 
 		/* NOTE: for I/O operations we handle releasing the op
 		 * object except in the case of timeout. the reason we

@@ -333,19 +333,17 @@ static int wait_for_a_slot(struct slot_args *slargs, int *buffer_index)
 {
 	int ret = -1;
 	int i = 0;
-	DECLARE_WAITQUEUE(my_wait, current);
-
-
-	add_wait_queue_exclusive(slargs->slot_wq, &my_wait);
+	DEFINE_WAIT(wait_entry);
 
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		/*
 		 * check for available desc, slot_lock is the appropriate
 		 * index_lock
 		 */
 		spin_lock(slargs->slot_lock);
+		prepare_to_wait_exclusive(slargs->slot_wq,
+					  &wait_entry,
+					  TASK_INTERRUPTIBLE);
 		for (i = 0; i < slargs->slot_count; i++)
 			if (slargs->slot_array[i] == 0) {
 				slargs->slot_array[i] = 1;
@@ -383,8 +381,9 @@ static int wait_for_a_slot(struct slot_args *slargs, int *buffer_index)
 			break;
 	}
 
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(slargs->slot_wq, &my_wait);
+	spin_lock(slargs->slot_lock);
+	finish_wait(slargs->slot_wq, &wait_entry);
+	spin_unlock(slargs->slot_lock);
 	return ret;
 }
 

@@ -62,7 +62,7 @@ int service_operation(struct orangefs_kernel_op_s *op,
 	/* irqflags and wait_entry are only used IF the client-core aborts */
 	unsigned long irqflags;
 
-	DECLARE_WAITQUEUE(wait_entry, current);
+	DEFINE_WAIT(wait_entry);
 
 	op->upcall.tgid = current->tgid;
 	op->upcall.pid = current->pid;
@@ -204,11 +204,11 @@ retry_servicing:
 		 * memory system can be initialized.
 		 */
 		spin_lock_irqsave(&op->lock, irqflags);
-		add_wait_queue(&orangefs_bufmap_init_waitq, &wait_entry);
+		prepare_to_wait(&orangefs_bufmap_init_waitq,
+				&wait_entry,
+				TASK_INTERRUPTIBLE);
 		spin_unlock_irqrestore(&op->lock, irqflags);
 
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		/*
 		 * Wait for orangefs_bufmap_initialize() to wake me up
 		 * within the allotted time.
@@ -225,8 +225,7 @@ retry_servicing:
 			     get_bufmap_init());
 
 		spin_lock_irqsave(&op->lock, irqflags);
-		remove_wait_queue(&orangefs_bufmap_init_waitq,
-				  &wait_entry);
+		finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);
 		spin_unlock_irqrestore(&op->lock, irqflags);
 
 		if (get_bufmap_init() == 0) {
@@ -342,16 +341,11 @@ void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
 int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
 {
 	int ret = -EINVAL;
-	DECLARE_WAITQUEUE(wait_entry, current);
-
-	spin_lock(&op->lock);
-	add_wait_queue(&op->waitq, &wait_entry);
-	spin_unlock(&op->lock);
+	DEFINE_WAIT(wait_entry);
 
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		spin_lock(&op->lock);
+		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
 		if (op_state_serviced(op)) {
 			spin_unlock(&op->lock);
 			ret = 0;
@@ -434,10 +428,8 @@ int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
 			break;
 	}
 
-	set_current_state(TASK_RUNNING);
-
 	spin_lock(&op->lock);
-	remove_wait_queue(&op->waitq, &wait_entry);
+	finish_wait(&op->waitq, &wait_entry);
 	spin_unlock(&op->lock);
 
 	return ret;
@@ -455,16 +447,11 @@ int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
 int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
 {
 	int ret = -EINVAL;
-	DECLARE_WAITQUEUE(wait_entry, current);
-
-	spin_lock(&op->lock);
-	add_wait_queue(&op->waitq, &wait_entry);
-	spin_unlock(&op->lock);
+	DEFINE_WAIT(wait_entry);
 
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		spin_lock(&op->lock);
+		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
 		if (op_state_serviced(op)) {
 			gossip_debug(GOSSIP_WAIT_DEBUG,
 				     "%s:op-state is SERVICED.\n",
@@ -514,10 +501,8 @@ int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
 			break;
 	}
 
-	set_current_state(TASK_RUNNING);
-
 	spin_lock(&op->lock);
-	remove_wait_queue(&op->waitq, &wait_entry);
+	finish_wait(&op->waitq, &wait_entry);
 	spin_unlock(&op->lock);
 
 	gossip_debug(GOSSIP_WAIT_DEBUG,