mirror of https://github.com/Fishwaldo/build.git (synced 2025-04-15 18:41:26 +00:00)
diff --git a/Makefile b/Makefile
index 03bd927522f7..9df630a513b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 56
+SUBLEVEL = 57
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 9e3f441e7e84..9c37f3d896a2 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -514,6 +514,12 @@ void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
 	struct task_struct *opa;
 
 	kref_get(&tconn->kref);
+	/* We may just have force_sig()'ed this thread
+	 * to get it out of some blocking network function.
+	 * Clear signals; otherwise kthread_run(), which internally uses
+	 * wait_for_completion_killable(), will mistake our pending signal
+	 * for a new fatal signal and fail. */
+	flush_signals(current);
 	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
 	if (IS_ERR(opa)) {
 		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
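Note on the drbd change: conn_try_outdate_peer_async() can run right after the caller force_sig()'ed this thread out of a blocking network call, and a signal still pending at that point makes the killable wait inside kthread_run() fail immediately. The sketch below is a userspace analogue of the "drain stale pending signals before blocking" pattern; names and signal choice are illustrative, not kernel API:

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec zero = { 0, 0 };

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* hold SIGUSR1 pending */
        raise(SIGUSR1);                         /* now one is pending */

        /* Drain anything pending, much as flush_signals() does in the
         * kernel, so a later blocking wait is not aborted by stale state. */
        while (sigtimedwait(&set, &info, &zero) > 0)
                printf("drained pending signal %d\n", info.si_signo);

        puts("no stale signals left; safe to block");
        return 0;
}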
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 28a0b32c73b3..27b0e2a295ea 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -97,7 +97,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
 	policy = cdbs->cur_policy;
 
-	/* Get Absolute Load (in terms of freq for ondemand gov) */
+	/* Get Absolute Load */
 	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_common_info *j_cdbs;
 		u64 cur_wall_time, cur_idle_time;
@@ -148,14 +148,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
 		load = 100 * (wall_time - idle_time) / wall_time;
 
-		if (dbs_data->cdata->governor == GOV_ONDEMAND) {
-			int freq_avg = __cpufreq_driver_getavg(policy, j);
-			if (freq_avg <= 0)
-				freq_avg = policy->cur;
-
-			load *= freq_avg;
-		}
-
 		if (load > max_load)
 			max_load = load;
 	}
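With the ondemand-only frequency weighting removed, every governor now sees the same "absolute load" value: busy time over wall time, as a percentage. A standalone restatement of the arithmetic, with invented sample numbers:

#include <stdio.h>

int main(void)
{
        unsigned long long wall_time = 100000;  /* us since last sample */
        unsigned long long idle_time = 25000;   /* idle us in that window */
        unsigned int load = 100 * (wall_time - idle_time) / wall_time;

        printf("load = %u%%\n", load);          /* 75%% for these inputs */
        return 0;
}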
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 0d9e6befe1d5..4a9058aeb57e 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -169,7 +169,6 @@ struct od_dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
-	unsigned int adj_up_threshold;
 	unsigned int powersave_bias;
 	unsigned int io_is_busy;
 };
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c087347d6688..25438bbf96bb 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -29,11 +29,9 @@
 #include "cpufreq_governor.h"
 
 /* On-demand governor macros */
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(100000)
-#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
 #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
@@ -161,14 +159,10 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
 
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency. Every sampling_rate, we look
- * for the lowest frequency which can sustain the load while keeping idle time
- * over 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of current frequency
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
 */
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,29 +172,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	dbs_info->freq_lo = 0;
 
 	/* Check for frequency increase */
-	if (load_freq > od_tuners->up_threshold * policy->cur) {
+	if (load > od_tuners->up_threshold) {
 		/* If switching to max speed, apply sampling_down_factor */
 		if (policy->cur < policy->max)
 			dbs_info->rate_mult =
 				od_tuners->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 		return;
-	}
-
-	/* Check for frequency decrease */
-	/* if we cannot reduce the frequency anymore, break out early */
-	if (policy->cur == policy->min)
-		return;
-
-	/*
-	 * The optimal frequency is the frequency that is the lowest that can
-	 * support the current CPU usage without triggering the up policy. To be
-	 * safe, we focus 10 points under the threshold.
-	 */
-	if (load_freq < od_tuners->adj_up_threshold
-			* policy->cur) {
+	} else {
+		/* Calculate the next frequency proportional to load */
 		unsigned int freq_next;
-		freq_next = load_freq / od_tuners->adj_up_threshold;
+		freq_next = load * policy->cpuinfo.max_freq / 100;
 
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
@@ -374,9 +356,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		return -EINVAL;
 	}
-	/* Calculate the new adj_up_threshold */
-	od_tuners->adj_up_threshold += input;
-	od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
 	od_tuners->up_threshold = input;
 	return count;
@@ -525,8 +504,6 @@ static int od_init(struct dbs_data *dbs_data)
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
 		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
 		/*
 		 * In nohz/micro accounting case we set the minimum frequency
 		 * not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +512,6 @@ static int od_init(struct dbs_data *dbs_data)
 		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
 	} else {
 		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-			DEF_FREQUENCY_DOWN_DIFFERENTIAL;
 
 		/* For correct statistics, we need 10 ticks for each measure */
 		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
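The heart of the ondemand rework is that the old down-threshold machinery (adj_up_threshold and the *_DOWN_DIFFERENTIAL macros) disappears in favour of a single proportional mapping. A minimal sketch of the resulting decision rule, with made-up frequencies, the default 80% threshold, and an explicit min clamp that the cpufreq core normally applies (this is an illustration, not the kernel code):

#include <stdio.h>

static unsigned int next_freq(unsigned int load, unsigned int min,
                              unsigned int max, unsigned int up_threshold)
{
        unsigned int freq;

        if (load > up_threshold)
                return max;               /* burst straight to max */

        freq = load * max / 100;          /* proportional to load */
        if (freq < min)
                freq = min;
        return freq;
}

int main(void)
{
        unsigned int loads[] = { 5, 40, 81, 95 };

        for (int i = 0; i < 4; i++)
                printf("load %3u%% -> %u kHz\n", loads[i],
                       next_freq(loads[i], 200000, 1600000, 80));
        return 0;
}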
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index bfd6273fd873..7fb600239059 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -81,7 +81,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	for (i = 0; i < stat->state_num; i++) {
 		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
 			(unsigned long long)
-			cputime64_to_clock_t(stat->time_in_state[i]));
+			jiffies_64_to_clock_t(stat->time_in_state[i]));
 	}
 	return len;
 }
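time_in_state is accounted in jiffies here, so exporting it through cputime64_to_clock_t(), which expects cputime units, misreports the values whenever cputime and jiffies granularity diverge. jiffies_64_to_clock_t() scales by USER_HZ over HZ instead; roughly, as illustrative userspace arithmetic with an assumed HZ:

#include <stdio.h>

int main(void)
{
        unsigned long long jiffies = 250000; /* time in state, in jiffies */
        unsigned int hz = 1000;              /* kernel tick rate, assumed */
        unsigned int user_hz = 100;          /* clock_t rate userspace sees */

        printf("%llu jiffies -> %llu clock ticks\n",
               jiffies, jiffies * user_hz / hz);
        return 0;
}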
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 774f81423d78..2332b5ced0dd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -60,6 +60,10 @@
 #include "raid0.h"
 #include "bitmap.h"
 
+static bool devices_handle_discard_safely = false;
+module_param(devices_handle_discard_safely, bool, 0644);
+MODULE_PARM_DESC(devices_handle_discard_safely,
+		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
 /*
  * Stripe cache
  */
@@ -5611,7 +5615,7 @@ static int run(struct mddev *mddev)
 		mddev->queue->limits.discard_granularity = stripe;
 		/*
 		 * unaligned part of discard request will be ignored, so can't
-		 * guarantee discard_zerors_data
+		 * guarantee discard_zeroes_data
 		 */
 		mddev->queue->limits.discard_zeroes_data = 0;
 
@@ -5636,6 +5640,18 @@ static int run(struct mddev *mddev)
 			    !bdev_get_queue(rdev->bdev)->
 						limits.discard_zeroes_data)
 				discard_supported = false;
+			/* Unfortunately, discard_zeroes_data is not currently
+			 * a guarantee - just a hint.  So we only allow DISCARD
+			 * if the sysadmin has confirmed that only safe devices
+			 * are in use by setting a module parameter.
+			 */
+			if (!devices_handle_discard_safely) {
+				if (discard_supported) {
+					pr_info("md/raid456: discard support disabled due to uncertainty.\n");
+					pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
+				}
+				discard_supported = false;
+			}
 		}
 
 		if (discard_supported &&
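The raid5 hunk gates DISCARD behind an explicit opt-in because discard_zeroes_data is only a hint, and RAID4/5/6 parity is corrupted if a discarded region does not actually read back as zeroes. Since the parameter is declared with mode 0644, it can also be flipped at runtime through /sys/module/raid456/parameters/devices_handle_discard_safely. A plain-C restatement of the gating shape, not the kernel code itself:

#include <stdbool.h>
#include <stdio.h>

static bool devices_handle_discard_safely = false; /* the module param */

static bool discard_allowed(bool all_devices_zero_discards)
{
        if (!all_devices_zero_discards)
                return false;
        if (!devices_handle_discard_safely) {
                /* devices look safe, but the admin has not vouched for them */
                puts("md/raid456: discard support disabled due to uncertainty.");
                puts("Set raid456.devices_handle_discard_safely=Y to override.");
                return false;
        }
        return true;
}

int main(void)
{
        printf("discard enabled: %d\n", discard_allowed(true)); /* 0 */
        return 0;
}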
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index e3bdc3be91e1..5e47ba479e53 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -666,6 +666,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
 	 * to the userspace.
 	 */
 	req->count = allocated_buffers;
+	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 
 	return 0;
 }
@@ -714,6 +715,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
 		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
 		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
 		q->memory = create->memory;
+		q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 	}
 
 	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
@@ -1355,6 +1357,7 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
 	 * dequeued in dqbuf.
 	 */
 	list_add_tail(&vb->queued_entry, &q->queued_list);
+	q->waiting_for_buffers = false;
 	vb->state = VB2_BUF_STATE_QUEUED;
 
 	/*
@@ -1724,6 +1727,7 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
 	 * and videobuf, effectively returning control over them to userspace.
 	 */
 	__vb2_queue_cancel(q);
+	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 
 	dprintk(3, "Streamoff successful\n");
 	return 0;
@@ -2009,9 +2013,16 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
 	}
 
 	/*
-	 * There is nothing to wait for if no buffers have already been queued.
+	 * There is nothing to wait for if the queue isn't streaming.
 	 */
-	if (list_empty(&q->queued_list))
+	if (!vb2_is_streaming(q))
+		return res | POLLERR;
+	/*
+	 * For compatibility with vb1: if QBUF hasn't been called yet, then
+	 * return POLLERR as well. This only affects capture queues, output
+	 * queues will always initialize waiting_for_buffers to false.
+	 */
+	if (q->waiting_for_buffers)
 		return res | POLLERR;
 
 	if (list_empty(&q->done_list))
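The vb2_poll() change replaces the single "has any buffer ever been queued?" test with two explicit conditions, tracked by the new waiting_for_buffers flag. For a capture queue the decision order now is: not streaming yields POLLERR; streaming but no QBUF yet also yields POLLERR (vb1 compatibility); otherwise wait on the done list. A condensed sketch with stand-in types and locally defined poll bits, not the driver code:

#include <stdbool.h>
#include <stdio.h>

#define POLLIN  0x001
#define POLLERR 0x008

static unsigned int capture_poll(bool streaming, bool waiting_for_buffers,
                                 bool have_done_buffer)
{
        if (!streaming)
                return POLLERR;
        if (waiting_for_buffers)        /* REQBUFS done, but no QBUF yet */
                return POLLERR;
        return have_done_buffer ? POLLIN : 0;
}

int main(void)
{
        printf("0x%x\n", capture_poll(true, true, false));  /* POLLERR */
        printf("0x%x\n", capture_poll(true, false, true));  /* POLLIN */
        return 0;
}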
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index b6d15d349810..aa023283cc8a 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1270,13 +1270,22 @@ update_time:
 	return 0;
 }
 
+/*
+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
+ * arbitrary - just that we hopefully don't limit any real use of rewritten
+ * inode on write-once media but avoid looping for too long on corrupted media.
+ */
+#define UDF_MAX_ICB_NESTING 1024
+
 static void __udf_read_inode(struct inode *inode)
 {
 	struct buffer_head *bh = NULL;
 	struct fileEntry *fe;
 	uint16_t ident;
 	struct udf_inode_info *iinfo = UDF_I(inode);
+	unsigned int indirections = 0;
 
+reread:
 	/*
 	 * Set defaults, but the inode is still incomplete!
 	 * Note: get_new_inode() sets the following on a new inode:
@@ -1313,28 +1322,26 @@ static void __udf_read_inode(struct inode *inode)
 		ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
 					&ident);
 		if (ident == TAG_IDENT_IE && ibh) {
-			struct buffer_head *nbh = NULL;
 			struct kernel_lb_addr loc;
 			struct indirectEntry *ie;
 
 			ie = (struct indirectEntry *)ibh->b_data;
 			loc = lelb_to_cpu(ie->indirectICB.extLocation);
 
-			if (ie->indirectICB.extLength &&
-				(nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
-							&ident))) {
-				if (ident == TAG_IDENT_FE ||
-					ident == TAG_IDENT_EFE) {
-					memcpy(&iinfo->i_location,
-						&loc,
-						sizeof(struct kernel_lb_addr));
-					brelse(bh);
-					brelse(ibh);
-					brelse(nbh);
-					__udf_read_inode(inode);
+			if (ie->indirectICB.extLength) {
+				brelse(bh);
+				brelse(ibh);
+				memcpy(&iinfo->i_location, &loc,
+				       sizeof(struct kernel_lb_addr));
+				if (++indirections > UDF_MAX_ICB_NESTING) {
+					udf_err(inode->i_sb,
+						"too many ICBs in ICB hierarchy"
+						" (max %d supported)\n",
+						UDF_MAX_ICB_NESTING);
+					make_bad_inode(inode);
 					return;
 				}
-				brelse(nbh);
+				goto reread;
 			}
 		}
 		brelse(ibh);
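The udf change converts tail recursion into a bounded goto loop, so a crafted or corrupted ICB chain can no longer drive __udf_read_inode() arbitrarily deep into the stack. The control-flow skeleton, reduced to a standalone toy with an invented five-entry chain:

#include <stdio.h>

#define UDF_MAX_ICB_NESTING 1024

int main(void)
{
        unsigned int indirections = 0;
        int icb = 0;                      /* pretend current ICB block */

reread:
        /* ... parse the ICB at 'icb' ... */
        if (icb < 5) {                    /* found another indirect entry */
                icb++;
                if (++indirections > UDF_MAX_ICB_NESTING) {
                        fprintf(stderr, "too many ICBs (max %d)\n",
                                UDF_MAX_ICB_NESTING);
                        return 1;         /* the kernel marks the inode bad */
                }
                goto reread;
        }
        printf("inode resolved after %u indirections\n", indirections);
        return 0;
}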
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 7b5d4a8ab199..c039fe1315eb 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -254,23 +254,11 @@ extern unsigned long preset_lpj;
 #define SEC_JIFFIE_SC (32 - SHIFT_HZ)
 #endif
 #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
 #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
                                 TICK_NSEC -1) / (u64)TICK_NSEC))
 
 #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
                                 TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION  \
-                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
-                                        TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion.  See there
- * for more details.  It is the scaled resolution rounding value.  Note
- * that it is a 64-bit value.  Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
 /*
  * The maximum jiffie value is (MAX_INT >> 1).  Here we translate that
  * into seconds.  The 64-bit case will overflow if we are not careful,
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index d88a098d1aff..2cc4e0df9c5d 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -318,6 +318,9 @@ struct v4l2_fh;
  * @done_wq:	waitqueue for processes waiting for buffers ready to be dequeued
  * @alloc_ctx:	memory type/allocator-specific contexts for each plane
  * @streaming:	current streaming state
+ * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
+ *		buffers. Only set for capture queues if qbuf has not yet been
+ *		called since poll() needs to return POLLERR in that situation.
  * @fileio:	file io emulator internal data, used only if emulator is active
  */
 struct vb2_queue {
@@ -350,6 +353,7 @@ struct vb2_queue {
 	unsigned int			plane_sizes[VIDEO_MAX_PLANES];
 
 	unsigned int			streaming:1;
+	unsigned int			waiting_for_buffers:1;
 
 	struct vb2_fileio_data		*fileio;
 };
diff --git a/init/Kconfig b/init/Kconfig
index 5d6febaea56d..8fa4f758821a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1367,6 +1367,7 @@ config FUTEX
 
 config HAVE_FUTEX_CMPXCHG
 	bool
+	depends on FUTEX
 	help
 	  Architectures should select this if futex_atomic_cmpxchg_inatomic()
 	  is implemented and always working. This removes a couple of runtime
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6bf387a60399..0b4733447151 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7482,8 +7482,10 @@ int perf_event_init_task(struct task_struct *child)
 
 	for_each_task_context_nr(ctxn) {
 		ret = perf_event_init_context(child, ctxn);
-		if (ret)
+		if (ret) {
+			perf_event_free_task(child);
 			return ret;
+		}
 	}
 
 	return 0;
diff --git a/kernel/fork.c b/kernel/fork.c
index 2c76e11ba939..514dbc40f98f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1324,7 +1324,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_policy;
 	retval = audit_alloc(p);
 	if (retval)
-		goto bad_fork_cleanup_policy;
+		goto bad_fork_cleanup_perf;
 	/* copy all the process information */
 	retval = copy_semundo(clone_flags, p);
 	if (retval)
@@ -1522,8 +1522,9 @@ bad_fork_cleanup_semundo:
 	exit_sem(p);
 bad_fork_cleanup_audit:
 	audit_free(p);
-bad_fork_cleanup_policy:
+bad_fork_cleanup_perf:
 	perf_event_free_task(p);
+bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
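Taken together, the kernel/events and kernel/fork hunks fix one leak: a failing audit_alloc() used to jump to a label that skipped perf_event_free_task(), and a failure partway through perf_event_init_task() freed nothing at all. The general shape of the goto-unwind discipline being restored, with hypothetical resources standing in for the perf and audit state:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
        void *a, *b;

        a = malloc(16);                 /* stands in for perf state */
        if (!a)
                goto fail;
        b = malloc(16);                 /* stands in for audit state */
        if (!b)
                goto fail_free_a;       /* must unwind a, not skip it */

        /* ... use a and b ... */
        free(b);
        free(a);
        return 0;

fail_free_a:
        free(a);
fail:
        return -1;
}

int main(void)
{
        return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}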
diff --git a/kernel/time.c b/kernel/time.c
index d3617dbd3dca..d21398e6da87 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -496,17 +496,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
  * that a remainder subtract here would not do the right thing as the
  * resolution values don't fall on second boundries.  I.e. the line:
  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
  *
  * Rather, we just shift the bits off the right.
  *
  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
  * value to a scaled second value.
  */
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-	unsigned long sec = value->tv_sec;
-	long nsec = value->tv_nsec + TICK_NSEC - 1;
+	nsec = nsec + TICK_NSEC - 1;
 
 	if (sec >= MAX_SEC_IN_JIFFIES){
 		sec = MAX_SEC_IN_JIFFIES;
@@ -517,6 +520,13 @@ timespec_to_jiffies(const struct timespec *value)
 	       (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+	return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
 EXPORT_SYMBOL(timespec_to_jiffies);
 
 void
@@ -533,31 +543,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
 
-/* Same for "timeval"
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
  *
- * Well, almost.  The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
 */
 unsigned long
 timeval_to_jiffies(const struct timeval *value)
 {
-	unsigned long sec = value->tv_sec;
-	long usec = value->tv_usec;
-
-	if (sec >= MAX_SEC_IN_JIFFIES){
-		sec = MAX_SEC_IN_JIFFIES;
-		usec = 0;
-	}
-	return (((u64)sec * SEC_CONVERSION) +
-		(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
-		 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+	return __timespec_to_jiffies(value->tv_sec,
+				     value->tv_usec * NSEC_PER_USEC);
 }
 EXPORT_SYMBOL(timeval_to_jiffies);
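Context for the jiffies.h and kernel/time.c hunks: the old timeval path rounded in the scaled domain with USEC_ROUND, and the small positive error in USEC_CONVERSION could push the result over a jiffy boundary, overestimating timeouts passed through select()/poll(). Routing timevals through the timespec conversion keeps a single rounding rule. Below is a simplified standalone model of the shared helper; HZ is chosen arbitrarily, overflow clamping is omitted, and the real kernel uses scaled 64-bit constants rather than a direct division:

#include <stdio.h>

#define HZ            1000
#define NSEC_PER_SEC  1000000000L
#define NSEC_PER_USEC 1000L
#define TICK_NSEC     (NSEC_PER_SEC / HZ)

/* Round nanoseconds up to the next full tick, then convert. */
static unsigned long ts_to_jiffies(unsigned long sec, long nsec)
{
        nsec = nsec + TICK_NSEC - 1;
        return sec * HZ + nsec / TICK_NSEC;
}

/* One extra multiply, but rounding identical to the timespec path,
 * which is the whole point of the fix. */
static unsigned long tv_to_jiffies(unsigned long sec, long usec)
{
        return ts_to_jiffies(sec, usec * NSEC_PER_USEC);
}

int main(void)
{
        printf("1.5 ms  -> %lu jiffies\n", tv_to_jiffies(0, 1500)); /* 2 */
        printf("1 tick  -> %lu jiffies\n", ts_to_jiffies(0, TICK_NSEC)); /* 1 */
        return 0;
}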
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0bc181b0524c..3d9fee3a80b3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3371,7 +3371,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 		iter->head = cpu_buffer->reader_page->read;
 
 	iter->cache_reader_page = iter->head_page;
-	iter->cache_read = iter->head;
+	iter->cache_read = cpu_buffer->read;
 
 	if (iter->head)
 		iter->read_stamp = cpu_buffer->read_stamp;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index eb00e81601a5..d21c9ef0943c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1733,21 +1733,24 @@ static int __split_huge_page_map(struct page *page,
 	if (pmd) {
 		pgtable = pgtable_trans_huge_withdraw(mm);
 		pmd_populate(mm, &_pmd, pgtable);
+		if (pmd_write(*pmd))
+			BUG_ON(page_mapcount(page) != 1);
 
 		haddr = address;
 		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 			pte_t *pte, entry;
 			BUG_ON(PageCompound(page+i));
+			/*
+			 * Note that pmd_numa is not transferred deliberately
+			 * to avoid any possibility that pte_numa leaks to
+			 * a PROT_NONE VMA by accident.
+			 */
 			entry = mk_pte(page + i, vma->vm_page_prot);
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 			if (!pmd_write(*pmd))
 				entry = pte_wrprotect(entry);
-			else
-				BUG_ON(page_mapcount(page) != 1);
 			if (!pmd_young(*pmd))
 				entry = pte_mkold(entry);
-			if (pmd_numa(*pmd))
-				entry = pte_mknuma(entry);
 			pte = pte_offset_map(&_pmd, haddr);
 			BUG_ON(!pte_none(*pte));
 			set_pte_at(mm, haddr, pte, entry);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 448c034184e2..62aebed7c6e2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6568,6 +6568,9 @@ int cfg80211_testmode_reply(struct sk_buff *skb)
 	void *hdr = ((void **)skb->cb)[1];
 	struct nlattr *data = ((void **)skb->cb)[2];
 
+	/* clear CB data for netlink core to own from now on */
+	memset(skb->cb, 0, sizeof(skb->cb));
+
 	if (WARN_ON(!rdev->testmode_info)) {
 		kfree_skb(skb);
 		return -EINVAL;
@@ -6594,6 +6597,9 @@ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
 	void *hdr = ((void **)skb->cb)[1];
 	struct nlattr *data = ((void **)skb->cb)[2];
 
+	/* clear CB data for netlink core to own from now on */
+	memset(skb->cb, 0, sizeof(skb->cb));
+
 	nla_nest_end(skb, data);
 	genlmsg_end(skb, hdr);
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
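The two nl80211 hunks enforce an ownership rule: skb->cb is scratch space belonging to whoever currently holds the skb, so the testmode pointers stashed there must be wiped before the buffer is handed to the netlink core, which reuses cb for its own bookkeeping. A userspace restatement of the rule, with a made-up struct rather than sk_buff:

#include <stdio.h>
#include <string.h>

struct fake_skb {
        char cb[48];                    /* scratch area, like sk_buff.cb */
};

static void hand_off(struct fake_skb *skb)
{
        /* the new owner assumes cb is zeroed; stale pointers left here
         * would be misinterpreted as its own state */
        memset(skb->cb, 0, sizeof(skb->cb));
}

int main(void)
{
        struct fake_skb skb;
        void *stale = &skb;

        memcpy(skb.cb, &stale, sizeof(stale)); /* old owner's state */
        hand_off(&skb);
        printf("cb[0] after handoff: %d\n", skb.cb[0]); /* 0 */
        return 0;
}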