drbd: reduce number of spinlock drop/re-acquire cycles
Instead of dropping and re-acquiring the spinlock around the submit, just remember that we want to submit, and do that only once we have dropped the spinlock for good.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 28995af5cf
commit 35b5ed5bba

1 changed file with 13 additions and 5 deletions
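The idea in the message, in miniature: decide under the lock, act after it. Below is a minimal userspace sketch of that pattern, using a pthread spinlock in place of the kernel's spin_lock_irq(); the struct and the helper names (send_and_submit, submit_io) are illustrative stand-ins, not DRBD's actual code.

/*
 * Sketch of the deferred-submit pattern: remember the decision while
 * the lock is held, perform the work only after the final unlock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t req_lock;

struct request {
        int id;
        bool has_private_bio;
};

/* Stand-in for drbd_submit_req_private_bio(): must run unlocked. */
static void submit_io(struct request *req)
{
        printf("submitting request %d\n", req->id);
}

static void send_and_submit(struct request *req)
{
        bool submit_after_unlock = false;

        pthread_spin_lock(&req_lock);

        /* ... state transitions that require the lock ... */

        /*
         * Old scheme: unlock, submit_io(req), re-lock, continue.
         * New scheme: capture the decision while the lock is held
         * and the request is known to be valid.
         */
        if (req->has_private_bio)
                submit_after_unlock = true;

        /* ... more locked bookkeeping, reference handling ... */

        pthread_spin_unlock(&req_lock);

        /* The lock is dropped exactly once; now do the deferred work. */
        if (submit_after_unlock)
                submit_io(req);
}

int main(void)
{
        struct request r = { .id = 1, .has_private_bio = true };

        pthread_spin_init(&req_lock, PTHREAD_PROCESS_PRIVATE);
        send_and_submit(&r);
        pthread_spin_destroy(&req_lock);
        return 0;
}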
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1086,11 +1086,13 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 
 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
 {
+        struct drbd_resource *resource = device->resource;
         const int rw = bio_rw(req->master_bio);
         struct bio_and_error m = { NULL, };
         bool no_remote = false;
+        bool submit_private_bio = false;
 
-        spin_lock_irq(&device->resource->req_lock);
+        spin_lock_irq(&resource->req_lock);
         if (rw == WRITE) {
                 /* This may temporarily give up the req_lock,
                  * but will re-aquire it before it returns here.
@@ -1152,9 +1154,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
                 /* needs to be marked within the same spinlock */
                 _req_mod(req, TO_BE_SUBMITTED);
                 /* but we need to give up the spinlock to submit */
-                spin_unlock_irq(&device->resource->req_lock);
-                drbd_submit_req_private_bio(req);
-                spin_lock_irq(&device->resource->req_lock);
+                submit_private_bio = true;
         } else if (no_remote) {
 nodata:
                 if (__ratelimit(&drbd_ratelimit_state))
@@ -1167,8 +1167,16 @@ nodata:
 out:
         if (drbd_req_put_completion_ref(req, &m, 1))
                 kref_put(&req->kref, drbd_req_destroy);
-        spin_unlock_irq(&device->resource->req_lock);
+        spin_unlock_irq(&resource->req_lock);
 
+        /* Even though above is a kref_put(), this is safe.
+         * As long as we still need to submit our private bio,
+         * we hold a completion ref, and the request cannot disappear.
+         * If however this request did not even have a private bio to submit
+         * (e.g. remote read), req may already be invalid now.
+         * That's why we cannot check on req->private_bio. */
+        if (submit_private_bio)
+                drbd_submit_req_private_bio(req);
         if (m.bio)
                 complete_master_bio(device, &m);
 }
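A note on the comment block the last hunk adds: the submit decision is captured in a local flag while req_lock is held and the request is known to be alive. Checking req->private_bio after the unlock would not work, because a request with no private bio to submit (e.g. a remote read) may already have been destroyed by the kref_put() above; the local submit_private_bio flag avoids touching req at all unless a completion reference is still held that keeps it valid.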