ocfs2/trivial: Remove trailing whitespaces
Patch removes trailing whitespaces.

Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Joel Becker <joel.becker@oracle.com>

commit 2bd632165c (parent e5f2cb2b1a)
20 changed files with 72 additions and 72 deletions
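The change itself is purely mechanical: every hunk below differs from its predecessor only in trailing spaces and tabs at end of line. As a minimal, illustrative sketch of the same transformation (not part of this commit, and with generic names chosen here for illustration), a small filter like the following strips the class of whitespace this patch removes:

/*
 * Illustrative sketch, not from the commit: strip trailing spaces
 * and tabs from each line of stdin, writing the result to stdout.
 * Assumes input lines shorter than 4096 bytes.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[4096];

        while (fgets(line, sizeof(line), stdin)) {
                size_t len = strlen(line);
                int had_newline = len > 0 && line[len - 1] == '\n';

                if (had_newline)
                        line[--len] = '\0';
                /* trim trailing spaces and tabs */
                while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
                        line[--len] = '\0';
                fputs(line, stdout);
                if (had_newline)
                        fputc('\n', stdout);
        }
        return 0;
}

In practice this kind of cleanup is usually caught before it lands: both `git diff --check` and the kernel's scripts/checkpatch.pl flag trailing whitespace.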
@@ -599,7 +599,7 @@ bail:
        return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
 * particularly interested in the aio/dio case. Like the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from

@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,

        ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                                            inode->i_sb->s_bdev, iov, offset,
                                            nr_segs,
                                            ocfs2_direct_IO_get_blocks,
                                            ocfs2_dio_end_io);

@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
        }
        ocfs2_metadata_cache_io_unlock(ci);

        mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
             (unsigned long long)block, nr,
             ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
             flags);

@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);

unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;

/* Only sets a new threshold if there are no active regions.
 *
 * No locking or otherwise interesting code is required for reading
 * o2hb_dead_threshold as it can't change once regions are active and

@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)

        mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
             "milliseconds\n", reg->hr_dev_name,
             jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
        o2quo_disk_timeout();
}

@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
             "seq %llu last %llu changed %u equal %u\n",
             slot->ds_node_num, (long long)slot->ds_last_generation,
             le32_to_cpu(hb_block->hb_cksum),
             (unsigned long long)le64_to_cpu(hb_block->hb_seq),
             (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
             slot->ds_equal_samples);

@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
                        cond_resched();
                        continue;
                }
                mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
                     " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
                o2net_ensure_shutdown(nn, sc, 0);
                break;

@@ -1483,7 +1483,7 @@ static void o2net_idle_timer(unsigned long data)
        mlog(ML_NOTICE, "here are some times that might help debug the "
             "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
             "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
             sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
             now.tv_sec, (long) now.tv_usec,
             sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
             sc->sc_tv_advance_start.tv_sec,

@@ -32,10 +32,10 @@
 * on their number */
#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)

/*
 * This version number represents quite a lot, unfortunately. It not
 * only represents the raw network message protocol on the wire but also
 * locking semantics of the file system using the protocol. It should
 * be somewhere else, I'm sure, but right now it isn't.
 *
 * With version 11, we separate out the filesystem locking portion. The
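As a worked example of the O2NET_QUORUM_DELAY_MS macro above (the constants are assumed from the o2cb defaults rather than shown in this hunk): with o2hb_dead_threshold = 31 and O2HB_REGION_TIMEOUT_MS = 2000, the quorum delay works out to (31 + 2) * 2000 = 66000 ms, or 66 seconds.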
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
        mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st)));        \
} while (0)

#define DLM_LKSB_UNUSED1        0x01
#define DLM_LKSB_PUT_LVB        0x02
#define DLM_LKSB_GET_LVB        0x04
#define DLM_LKSB_UNUSED2        0x08

@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
                dlm_lock_put(lock);
                /* free up the reserved bast that we are cancelling.
                 * guaranteed that this will not be the last reserved
                 * ast because *both* an ast and a bast were reserved
                 * to get to this point. the res->spinlock will not be
                 * taken here */
                dlm_lockres_release_ast(dlm, res);

@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
                        /* instead of logging the same network error over
                         * and over, sleep here and wait for the heartbeat
                         * to notice the node is dead. times out after 5s. */
                        dlm_wait_for_node_death(dlm, res->owner,
                                                DLM_NODE_DEATH_WAIT_MAX);
                        ret = DLM_RECOVERING;
                        mlog(0, "node %u died so returning DLM_RECOVERING "

@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
        }

        /* Once the dlm ctxt is marked as leaving then we don't want
         * to be put in someone's domain map.
         * Also, explicitly disallow joining at certain troublesome
         * times (ie. during recovery). */
        if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {

@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
                }
                dlm_revert_pending_lock(res, lock);
                dlm_lock_put(lock);
        } else if (dlm_is_recovery_lock(res->lockname.name,
                                        res->lockname.len)) {
                /* special case for the $RECOVERY lock.
                 * there will never be an AST delivered to put

@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
        struct dlm_master_list_entry *mle;

        assert_spin_locked(&dlm->spinlock);

        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                if (node_up)
                        dlm_mle_node_up(dlm, mle, NULL, idx);

@@ -833,7 +833,7 @@ lookup:
                __dlm_insert_mle(dlm, mle);

                /* still holding the dlm spinlock, check the recovery map
                 * to see if there are any nodes that still need to be
                 * considered. these will not appear in the mle nodemap
                 * but they might own this lockres. wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);

@@ -883,7 +883,7 @@ redo_request:
                                msleep(500);
                        }
                        continue;
                }

                dlm_kick_recovery_thread(dlm);
                msleep(1000);

@@ -939,8 +939,8 @@ wait:
                         res->lockname.name, blocked);
                if (++tries > 20) {
                        mlog(ML_ERROR, "%s:%.*s: spinning on "
                             "dlm_wait_for_lock_mastery, blocked=%d\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
                        dlm_print_one_mle(mle);

@@ -1029,7 +1029,7 @@ recheck:
                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
                b = (mle->type == DLM_MLE_BLOCK);
                if ((*blocked && !b) || (!*blocked && b)) {
                        mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
                             dlm->name, res->lockname.len, res->lockname.name,
                             *blocked, b);
                        *blocked = b;

@@ -1602,7 +1602,7 @@ send_response:
        }
        mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
             dlm->node_num, res->lockname.len, res->lockname.name);
        ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
                                         DLM_ASSERT_MASTER_MLE_CLEANUP);
        if (ret < 0) {
                mlog(ML_ERROR, "failed to dispatch assert master work\n");

@@ -1701,7 +1701,7 @@ again:

                if (r & DLM_ASSERT_RESPONSE_REASSERT) {
                        mlog(0, "%.*s: node %u create mles on other "
                             "nodes and requests a re-assert\n",
                             namelen, lockname, to);
                        reassert = 1;
                }

@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
                                spin_unlock(&dlm->master_lock);
                                spin_unlock(&dlm->spinlock);
                                goto done;
                        }
                }
        }
        spin_unlock(&dlm->master_lock);

@@ -1883,7 +1883,7 @@ ok:
                int extra_ref = 0;
                int nn = -1;
                int rr, err = 0;

                spin_lock(&mle->spinlock);
                if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
                        extra_ref = 1;

@@ -1891,7 +1891,7 @@ ok:
                /* MASTER mle: if any bits set in the response map
                 * then the calling node needs to re-assert to clear
                 * up nodes that this node contacted */
                while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
                                            nn+1)) < O2NM_MAX_NODES) {
                        if (nn != dlm->node_num && nn != assert->node_idx)
                                master_request = 1;

@@ -2002,7 +2002,7 @@ kill:
        __dlm_print_one_lock_resource(res);
        spin_unlock(&res->spinlock);
        spin_unlock(&dlm->spinlock);
        *ret_data = (void *)res;
        dlm_put(dlm);
        return -EINVAL;
}

@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
        item->u.am.request_from = request_from;
        item->u.am.flags = flags;

        if (ignore_higher)
                mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
                     res->lockname.name);

        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);

@@ -2133,7 +2133,7 @@ put:
 * think that $RECOVERY is currently mastered by a dead node. If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again. All $RECOVERY lock resources
 * mastered by dead nodes are purged when the hearbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node. */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,

@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
                        ret = -EAGAIN;
                }
                spin_unlock(&dlm->spinlock);
                mlog(0, "%s: reco lock master is %u\n", dlm->name,
                     master);
                break;
        }

@@ -2602,7 +2602,7 @@ fail:

                mlog(0, "%s:%.*s: timed out during migration\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                /* avoid hang during shutdown when migrating lockres
                 * to a node which also goes down */
                if (dlm_is_node_dead(dlm, target)) {
                        mlog(0, "%s:%.*s: expected migration "

@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
        can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
        spin_unlock(&res->spinlock);

        /* target has died, so make the caller break out of the
         * wait_event, but caller must recheck the domain_map */
        spin_lock(&dlm->spinlock);
        if (!test_bit(mig_target, dlm->domain_map))

@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                        if (lock->ml.node == dead_node) {
                                mlog(0, "AHA! there was "
                                     "a $RECOVERY lock for dead "
                                     "node %u (%s)!\n",
                                     dead_node, dlm->name);
                                list_del_init(&lock->list);
                                dlm_lock_put(lock);

@@ -1839,7 +1839,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                         * the lvb. */
                        memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                } else {
                        /* otherwise, the node is sending its
                         * most recent valid lvb info */
                        BUG_ON(ml->type != LKM_EXMODE &&
                               ml->type != LKM_PRMODE);

@@ -2114,7 +2114,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
        assert_spin_locked(&res->spinlock);

        if (res->owner == dlm->node_num)
                /* if this node owned the lockres, and if the dead node
                 * had an EX when he died, blank out the lvb */
                search_node = dead_node;
        else {

@@ -2152,7 +2152,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,

        /* this node is the lockres master:
         * 1) remove any stale locks for the dead node
         * 2) if the dead node had an EX when he died, blank out the lvb
         */
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);

@@ -2260,7 +2260,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
                        }
                        spin_unlock(&res->spinlock);
                        continue;
                }
                spin_lock(&res->spinlock);
                /* zero the lvb if necessary */
                dlm_revalidate_lvb(dlm, res, dead_node);

@@ -2411,7 +2411,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point. if the new master dies, that will

@@ -2424,7 +2424,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)

        mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
             dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
        memset(&lksb, 0, sizeof(lksb));

        ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,

@@ -2437,8 +2437,8 @@ again:
        if (ret == DLM_NORMAL) {
                mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
                     dlm->name, dlm->node_num);

                /* got the EX lock. check to see if another node
                 * just became the reco master */
                if (dlm_reco_master_ready(dlm)) {
                        mlog(0, "%s: got reco EX lock, but %u will "

@@ -2451,12 +2451,12 @@ again:
                /* see if recovery was already finished elsewhere */
                spin_lock(&dlm->spinlock);
                if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                        status = -EINVAL;
                        mlog(0, "%s: got reco EX lock, but "
                             "node got recovered already\n", dlm->name);
                        if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
                                mlog(ML_ERROR, "%s: new master is %u "
                                     "but no dead node!\n",
                                     dlm->name, dlm->reco.new_master);
                                BUG();
                        }

@@ -2468,7 +2468,7 @@ again:
                 * set the master and send the messages to begin recovery */
                if (!status) {
                        mlog(0, "%s: dead=%u, this=%u, sending "
                             "begin_reco now\n", dlm->name,
                             dlm->reco.dead_node, dlm->node_num);
                        status = dlm_send_begin_reco_message(dlm,
                                                             dlm->reco.dead_node);

@@ -2501,7 +2501,7 @@ again:
                mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
                     dlm->name, dlm->node_num);
                /* another node is master. wait on
                 * reco.new_master != O2NM_INVALID_NODE_NUM
                 * for at most one second */
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_reco_master_ready(dlm),

@@ -2599,7 +2599,7 @@ retry:
        }
        if (ret < 0) {
                struct dlm_lock_resource *res;
                /* this is now a serious problem, possibly ENOMEM
                 * in the network stack. must retry */
                mlog_errno(ret);
                mlog(ML_ERROR, "begin reco of dlm %s to node %u "

@@ -2612,7 +2612,7 @@ retry:
                } else {
                        mlog(ML_ERROR, "recovery lock not found\n");
                }
                /* sleep for a bit in hopes that we can avoid
                 * another ENOMEM */
                msleep(100);
                goto retry;

@@ -2664,7 +2664,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
        }
        if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
                mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
                     "node %u changing it to %u\n", dlm->name,
                     dlm->reco.dead_node, br->node_idx, br->dead_node);
        }
        dlm_set_reco_master(dlm, br->node_idx);

@@ -2730,8 +2730,8 @@ stage2:
                if (ret < 0) {
                        mlog_errno(ret);
                        if (dlm_is_host_down(ret)) {
                                /* this has no effect on this recovery
                                 * session, so set the status to zero to
                                 * finish out the last recovery */
                                mlog(ML_ERROR, "node %u went down after this "
                                     "node finished recovery.\n", nodenum);

@@ -2768,7 +2768,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
        mlog(0, "%s: node %u finalizing recovery stage%d of "
             "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
             fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

        spin_lock(&dlm->spinlock);

        if (dlm->reco.new_master != fr->node_idx) {

@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
                             DLM_UNLOCK_REGRANT_LOCK|
                             DLM_UNLOCK_CLEAR_CONVERT_TYPE);
        } else if (status == DLM_RECOVERING ||
                   status == DLM_MIGRATING ||
                   status == DLM_FORWARD) {
                /* must clear the actions because this unlock
                 * is about to be retried. cannot free or do

@@ -661,14 +661,14 @@ retry:
        if (call_ast) {
                mlog(0, "calling unlockast(%p, %d)\n", data, status);
                if (is_master) {
                        /* it is possible that there is one last bast
                         * pending. make sure it is flushed, then
                         * call the unlockast.
                         * not an issue if this is a mastered remotely,
                         * since this lock has been removed from the
                         * lockres queues and cannot be found. */
                        dlm_kick_thread(dlm, NULL);
                        wait_event(dlm->ast_wq,
                                   dlm_lock_basts_flushed(dlm, lock));
                }
                (*unlockast)(data, status);

@@ -3155,7 +3155,7 @@ out:
/* Mark the lockres as being dropped. It will no longer be
 * queued if blocking, but we still may have to wait on it
 * being dequeued from the downconvert thread before we can consider
 * it safe to drop.
 *
 * You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)

@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
                mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
                     (unsigned long long)blkno, generation);
        }

        *max_len = len;

bail:

@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
        int ret;

        offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
        /* ugh. in prepare/commit_write, if from==to==start of block, we
        ** skip the prepare. make sure we never send an offset for the start
        ** of a block
        */
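The offset computation in the ocfs2_write_zero_page hunk above is a power-of-two modulo: masking with (PAGE_CACHE_SIZE-1) keeps only the within-page bits of the size. As a worked example, assuming a 4096-byte page, a size of 10000 gives offset = 10000 & 4095 = 1808, the position within the file's last page.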
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
        struct inode *inode = dentry->d_inode;
        loff_t saved_pos, end;

        /*
         * We start with a read level meta lock and only jump to an ex
         * if we need to make modifications here.
         */

@@ -2033,7 +2033,7 @@ out_dio:
                                            pos + count - 1);
        }

        /*
         * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
         * function pointer which is called when o_direct io completes so that
         * it can unlock our rw lock. (it's the clustered equivalent of

@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
                goto bail;
        }

        /*
         * buffered reads protect themselves in ->readpage(). O_DIRECT reads
         * need locks to protect pending reads from racing with truncate.
         */

@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
         * We're fine letting folks race truncates and extending
         * writes with read across the cluster, just like they can
         * locally. Hence no rw_lock during read.
         *
         * Take and drop the meta data lock to update inode fields
         * like i_size. This allows the checks down below
         * generic_file_aio_read() a chance of actually working.
         */
        ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
        if (ret < 0) {

@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
bail:
        if (have_alloc_sem)
                up_read(&inode->i_alloc_sem);
        if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);
        mlog_exit(ret);

@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
        if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
                status = ocfs2_try_open_lock(inode, 0);
                if (status) {
                        make_bad_inode(inode);
                        return status;
                }
        }

@@ -684,7 +684,7 @@ bail:
        return status;
}

/*
 * Serialize with orphan dir recovery. If the process doing
 * recovery on this orphan dir does an iget() with the dir
 * i_mutex held, we'll deadlock here. Instead we detect this

@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
                status = -ENOENT;
                mlog_errno(status);
                return status;
        }

        mutex_lock(&orphan_dir_inode->i_mutex);
        status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);

@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
                             "file system, but write access is "
                             "unavailable.\n");
                else
                        mlog_errno(status);
                goto read_super_error;
        }

@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
}

/* Warning: even if it returns true, this does *not* guarantee that
 * the block is stored in our inode metadata cache.
 *
 * This can be called under lock_buffer()
 */
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,