[PATCH] Fix semundo lock leakage
semundo->lock can leak if semundo->refcount goes from 2 to 1 while
another thread has it locked.  This causes major problems for PREEMPT
kernels.

The simplest fix for now is to undo the single-thread optimization.

This bug was found via relentless testing by Dominik Karall.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 00a5dfdb93
parent ba02508248
1 changed file with 3 additions and 7 deletions

 ipc/sem.c | 10 +++-------
diff --git a/ipc/sem.c b/ipc/sem.c
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -895,7 +895,7 @@ static inline void lock_semundo(void)
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_lock(&undo_list->lock);
 }
 
@@ -915,7 +915,7 @@ static inline void unlock_semundo(void)
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_unlock(&undo_list->lock);
 }
 
@@ -943,9 +943,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 		if (undo_list == NULL)
 			return -ENOMEM;
 		memset(undo_list, 0, size);
-		/* don't initialize unodhd->lock here.  It's done
-		 * in copy_semundo() instead.
-		 */
+		spin_lock_init(&undo_list->lock);
 		atomic_set(&undo_list->refcnt, 1);
 		current->sysvsem.undo_list = undo_list;
 	}
@@ -1231,8 +1229,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 		error = get_undo_list(&undo_list);
 		if (error)
 			return error;
-		if (atomic_read(&undo_list->refcnt) == 1)
-			spin_lock_init(&undo_list->lock);
 		atomic_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
 	} else
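To make the failure mode from the commit message concrete, here is a minimal
user-space sketch of the pre-patch logic.  It is an illustration only, not
kernel code: a pthread spinlock stands in for undo_list->lock, an atomic int
for undo_list->refcnt, buggy_lock()/buggy_unlock() are hypothetical stand-ins
for lock_semundo()/unlock_semundo(), and the two-task interleaving is replayed
sequentially in one thread for clarity.

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_spinlock_t undo_lock;	/* stands in for undo_list->lock   */
static atomic_int refcnt;		/* stands in for undo_list->refcnt */

static void buggy_lock(void)
{
	if (atomic_load(&refcnt) != 1)	/* the single-thread "optimization" */
		pthread_spin_lock(&undo_lock);
}

static void buggy_unlock(void)
{
	if (atomic_load(&refcnt) != 1)	/* refcnt may have changed meanwhile */
		pthread_spin_unlock(&undo_lock);
}

int main(void)
{
	pthread_spin_init(&undo_lock, PTHREAD_PROCESS_PRIVATE);
	atomic_store(&refcnt, 2);	/* two tasks share the undo list */

	buggy_lock();			/* refcnt == 2: lock is taken        */
	atomic_fetch_sub(&refcnt, 1);	/* the other task exits concurrently */
	buggy_unlock();			/* refcnt == 1: unlock is skipped!   */

	/* The lock has leaked: it is still held, so the next contended
	 * locker spins forever (and in the kernel, where spin_lock disables
	 * preemption, a PREEMPT kernel is left in a broken state). */
	printf("lock leaked: %s\n",
	       pthread_spin_trylock(&undo_lock) == EBUSY ? "yes" : "no");
	return 0;
}

With the patch applied, the lock is taken and released unconditionally and is
initialized exactly once in get_undo_list(), so no refcnt transition can
unbalance a lock/unlock pair.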