mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-06 06:21:31 +00:00
[PATCH] spin/rwlock init cleanups
locking init cleanups: convert " = SPIN_LOCK_UNLOCKED" to spin_lock_init() or DEFINE_SPINLOCK(), and convert rwlocks in a similar manner. This patch was generated automatically. Motivation: cleanliness; lockdep needs control of lock initialization, which the open-coded variants do not give; it is also useful for -rt and for lock debugging in general. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Arjan van de Ven <arjan@linux.intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
b6cd0b772d
commit
34af946a22
51 changed files with 59 additions and 59 deletions
|
@@ -117,7 +117,7 @@ struct bclink {
|
|||
static struct bcbearer *bcbearer = NULL;
|
||||
static struct bclink *bclink = NULL;
|
||||
static struct link *bcl = NULL;
|
||||
static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(bc_lock);
|
||||
|
||||
char tipc_bclink_name[] = "multicast-link";
|
||||
|
||||
|
@@ -796,7 +796,7 @@ int tipc_bclink_init(void)
|
|||
memset(bclink, 0, sizeof(struct bclink));
|
||||
INIT_LIST_HEAD(&bcl->waiting_ports);
|
||||
bcl->next_out_no = 1;
|
||||
bclink->node.lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&bclink->node.lock);
|
||||
bcl->owner = &bclink->node;
|
||||
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
|
||||
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
|
||||
|
|
|
@@ -566,7 +566,7 @@ restart:
|
|||
b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
|
||||
bcast_scope, 2);
|
||||
}
|
||||
b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&b_ptr->publ.lock);
|
||||
write_unlock_bh(&tipc_net_lock);
|
||||
info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
|
||||
name, addr_string_fill(addr_string, bcast_scope), priority);
|
||||
|
|
|
@@ -63,7 +63,7 @@ struct manager {
|
|||
|
||||
static struct manager mng = { 0};
|
||||
|
||||
static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(config_lock);
|
||||
|
||||
static const void *req_tlv_area; /* request message TLV area */
|
||||
static int req_tlv_space; /* request message TLV area size */
|
||||
|
|
|
@@ -41,7 +41,7 @@
|
|||
#define MAX_STRING 512
|
||||
|
||||
static char print_string[MAX_STRING];
|
||||
static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(print_lock);
|
||||
|
||||
static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
|
||||
struct print_buf *TIPC_CONS = &cons_buf;
|
||||
|
|
|
@@ -44,7 +44,7 @@ struct queue_item {
|
|||
|
||||
static kmem_cache_t *tipc_queue_item_cache;
|
||||
static struct list_head signal_queue_head;
|
||||
static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(qitem_lock);
|
||||
static int handler_enabled = 0;
|
||||
|
||||
static void process_signal_queue(unsigned long dummy);
|
||||
|
|
|
@@ -101,7 +101,7 @@ struct name_table {
|
|||
|
||||
static struct name_table table = { NULL } ;
|
||||
static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
|
||||
rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED;
|
||||
DEFINE_RWLOCK(tipc_nametbl_lock);
|
||||
|
||||
|
||||
static int hash(int x)
|
||||
|
@@ -172,7 +172,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
|
|||
}
|
||||
|
||||
memset(nseq, 0, sizeof(*nseq));
|
||||
nseq->lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&nseq->lock);
|
||||
nseq->type = type;
|
||||
nseq->sseqs = sseq;
|
||||
dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
|
||||
|
|
|
@@ -115,7 +115,7 @@
|
|||
* - A local spin_lock protecting the queue of subscriber events.
|
||||
*/
|
||||
|
||||
rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
|
||||
DEFINE_RWLOCK(tipc_net_lock);
|
||||
struct network tipc_net = { NULL };
|
||||
|
||||
struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
|
||||
|
|
|
@@ -77,7 +77,7 @@ struct node *tipc_node_create(u32 addr)
|
|||
|
||||
memset(n_ptr, 0, sizeof(*n_ptr));
|
||||
n_ptr->addr = addr;
|
||||
n_ptr->lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&n_ptr->lock);
|
||||
INIT_LIST_HEAD(&n_ptr->nsub);
|
||||
n_ptr->owner = c_ptr;
|
||||
tipc_cltr_attach_node(c_ptr, n_ptr);
|
||||
|
|
|
@@ -57,8 +57,8 @@
|
|||
static struct sk_buff *msg_queue_head = NULL;
|
||||
static struct sk_buff *msg_queue_tail = NULL;
|
||||
|
||||
spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
|
||||
static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
|
||||
DEFINE_SPINLOCK(tipc_port_list_lock);
|
||||
static DEFINE_SPINLOCK(queue_lock);
|
||||
|
||||
static LIST_HEAD(ports);
|
||||
static void port_handle_node_down(unsigned long ref);
|
||||
|
|
|
@@ -63,7 +63,7 @@
|
|||
|
||||
struct ref_table tipc_ref_table = { NULL };
|
||||
|
||||
static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
|
||||
static DEFINE_RWLOCK(ref_table_lock);
|
||||
|
||||
/**
|
||||
* tipc_ref_table_init - create reference table for objects
|
||||
|
@@ -87,7 +87,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
|
|||
index_mask = sz - 1;
|
||||
for (i = sz - 1; i >= 0; i--) {
|
||||
table[i].object = NULL;
|
||||
table[i].lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&table[i].lock);
|
||||
table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
|
||||
}
|
||||
tipc_ref_table.entries = table;
|
||||
|
|
|
@@ -457,7 +457,7 @@ int tipc_subscr_start(void)
|
|||
int res = -1;
|
||||
|
||||
memset(&topsrv, 0, sizeof (topsrv));
|
||||
topsrv.lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&topsrv.lock);
|
||||
INIT_LIST_HEAD(&topsrv.subscriber_list);
|
||||
|
||||
spin_lock_bh(&topsrv.lock);
|
||||
|
|
|
@@ -67,7 +67,7 @@ struct tipc_user {
|
|||
|
||||
static struct tipc_user *users = NULL;
|
||||
static u32 next_free_user = MAX_USERID + 1;
|
||||
static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(reg_lock);
|
||||
|
||||
/**
|
||||
* reg_init - create TIPC user registry (but don't activate it)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue