Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller 2017-05-18 16:11:32 -04:00
commit c6cd850d65
85 changed files with 664 additions and 368 deletions


@@ -1172,7 +1172,7 @@ headers_check_all: headers_install_all
 PHONY += headers_check
 headers_check: headers_install
 $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
-$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/ $(hdr-dst) HDRCHECK=1
+$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst) HDRCHECK=1
 # ---------------------------------------------------------------------------
 # Kernel selftest


@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/time.h>
+#include <linux/refcount.h>
 #include <uapi/asm/debug.h>
 #define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
@@ -31,7 +32,7 @@ struct debug_view;
 typedef struct debug_info {
 struct debug_info* next;
 struct debug_info* prev;
-atomic_t ref_count;
+refcount_t ref_count;
 spinlock_t lock;
 int level;
 int nr_areas;


@@ -40,6 +40,8 @@ static inline int insn_length(unsigned char code)
 return ((((int) code + 64) >> 7) + 1) << 1;
 }
+struct pt_regs;
+
 void show_code(struct pt_regs *regs);
 void print_fn_code(unsigned char *code, unsigned long len);
 int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);


@@ -27,12 +27,21 @@
  * 2005-Dec Used as a template for s390 by Mike Grundy
  * <grundym@us.ibm.com>
  */
+#include <linux/types.h>
 #include <asm-generic/kprobes.h>
 #define BREAKPOINT_INSTRUCTION 0x0002
+#define FIXUP_PSW_NORMAL 0x08
+#define FIXUP_BRANCH_NOT_TAKEN 0x04
+#define FIXUP_RETURN_REGISTER 0x02
+#define FIXUP_NOT_REQUIRED 0x01
+
+int probe_is_prohibited_opcode(u16 *insn);
+int probe_get_fixup_type(u16 *insn);
+int probe_is_insn_relative_long(u16 *insn);
+
 #ifdef CONFIG_KPROBES
-#include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 #include <linux/sched/task_stack.h>
@@ -56,11 +65,6 @@ typedef u16 kprobe_opcode_t;
 #define KPROBE_SWAP_INST 0x10
-#define FIXUP_PSW_NORMAL 0x08
-#define FIXUP_BRANCH_NOT_TAKEN 0x04
-#define FIXUP_RETURN_REGISTER 0x02
-#define FIXUP_NOT_REQUIRED 0x01
-
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 /* copy of original instruction */
@@ -90,10 +94,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 unsigned long val, void *data);
-int probe_is_prohibited_opcode(u16 *insn);
-int probe_get_fixup_type(u16 *insn);
-int probe_is_insn_relative_long(u16 *insn);
-
 #define flush_insn_slot(p) do { } while (0)
 #endif /* CONFIG_KPROBES */


@@ -146,7 +146,7 @@ extern int topology_max_mnest;
  * Returns the maximum nesting level supported by the cpu topology code.
  * The current maximum level is 4 which is the drawer level.
  */
-static inline int topology_mnest_limit(void)
+static inline unsigned char topology_mnest_limit(void)
 {
 return min(topology_max_mnest, 4);
 }


@@ -277,7 +277,7 @@ debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
 memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
 memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
 sizeof(struct dentry*));
-atomic_set(&(rc->ref_count), 0);
+refcount_set(&(rc->ref_count), 0);
 return rc;
@@ -361,7 +361,7 @@ debug_info_create(const char *name, int pages_per_area, int nr_areas,
 debug_area_last = rc;
 rc->next = NULL;
-debug_info_get(rc);
+refcount_set(&rc->ref_count, 1);
 out:
 return rc;
 }
@@ -416,7 +416,7 @@ static void
 debug_info_get(debug_info_t * db_info)
 {
 if (db_info)
-atomic_inc(&db_info->ref_count);
+refcount_inc(&db_info->ref_count);
 }
 /*
@@ -431,7 +431,7 @@ debug_info_put(debug_info_t *db_info)
 if (!db_info)
 return;
-if (atomic_dec_and_test(&db_info->ref_count)) {
+if (refcount_dec_and_test(&db_info->ref_count)) {
 for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
 if (!db_info->views[i])
 continue;


@@ -312,6 +312,7 @@ ENTRY(system_call)
 lg %r14,__LC_VDSO_PER_CPU
 lmg %r0,%r10,__PT_R0(%r11)
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lsysc_exit_timer:
 stpt __LC_EXIT_TIMER
 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 lmg %r11,%r15,__PT_R11(%r11)
@@ -623,6 +624,7 @@ ENTRY(io_int_handler)
 lg %r14,__LC_VDSO_PER_CPU
 lmg %r0,%r10,__PT_R0(%r11)
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lio_exit_timer:
 stpt __LC_EXIT_TIMER
 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 lmg %r11,%r15,__PT_R11(%r11)
@@ -1174,15 +1176,23 @@ cleanup_critical:
 br %r14
 .Lcleanup_sysc_restore:
+# check if stpt has been executed
 clg %r9,BASED(.Lcleanup_sysc_restore_insn)
+jh 0f
+mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+cghi %r11,__LC_SAVE_AREA_ASYNC
 je 0f
+mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
+je 1f
 lg %r9,24(%r11) # get saved pointer to pt_regs
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
 mvc 0(64,%r11),__PT_R8(%r9)
 lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
+1: lmg %r8,%r9,__LC_RETURN_PSW
 br %r14
 .Lcleanup_sysc_restore_insn:
+.quad .Lsysc_exit_timer
 .quad .Lsysc_done - 4
 .Lcleanup_io_tif:
@@ -1190,15 +1200,20 @@ cleanup_critical:
 br %r14
 .Lcleanup_io_restore:
+# check if stpt has been executed
 clg %r9,BASED(.Lcleanup_io_restore_insn)
-je 0f
+jh 0f
+mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
+je 1f
 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
 mvc 0(64,%r11),__PT_R8(%r9)
 lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
+1: lmg %r8,%r9,__LC_RETURN_PSW
 br %r14
 .Lcleanup_io_restore_insn:
+.quad .Lio_exit_timer
 .quad .Lio_done - 4
 .Lcleanup_idle:


@@ -173,6 +173,8 @@ int __init ftrace_dyn_arch_init(void)
 return 0;
 }
+#ifdef CONFIG_MODULES
+
 static int __init ftrace_plt_init(void)
 {
 unsigned int *ip;
@@ -191,6 +193,8 @@ static int __init ftrace_plt_init(void)
 }
 device_initcall(ftrace_plt_init);
+#endif /* CONFIG_MODULES */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * Hook the return address and push it in the stack of return addresses


@@ -31,8 +31,14 @@ SECTIONS
 {
 . = 0x00000000;
 .text : {
-_text = .; /* Text and read-only data */
+/* Text and read-only data */
 HEAD_TEXT
+/*
+ * E.g. perf doesn't like symbols starting at address zero,
+ * therefore skip the initial PSW and channel program located
+ * at address zero and let _text start at 0x200.
+ */
+_text = 0x200;
 TEXT_TEXT
 SCHED_TEXT
 CPUIDLE_TEXT


@@ -4,6 +4,7 @@
  * Copyright IBM Corp. 2014
  */
+#include <linux/errno.h>
 #include <asm/kprobes.h>
 #include <asm/dis.h>


@@ -337,8 +337,8 @@ long __strncpy_from_user(char *dst, const char __user *src, long size)
 return 0;
 done = 0;
 do {
-offset = (size_t)src & ~PAGE_MASK;
-len = min(size - done, PAGE_SIZE - offset);
+offset = (size_t)src & (L1_CACHE_BYTES - 1);
+len = min(size - done, L1_CACHE_BYTES - offset);
 if (copy_from_user(dst, src, len))
 return -EFAULT;
 len_str = strnlen(dst, len);


@@ -24,9 +24,11 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 static inline int prepare_hugepage_range(struct file *file,
 unsigned long addr, unsigned long len)
 {
-if (len & ~HPAGE_MASK)
+struct hstate *h = hstate_file(file);
+
+if (len & ~huge_page_mask(h))
 return -EINVAL;
-if (addr & ~HPAGE_MASK)
+if (addr & ~huge_page_mask(h))
 return -EINVAL;
 return 0;
 }


@@ -91,9 +91,9 @@ extern unsigned long pfn_base;
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 /*
  * In general all page table modifications should use the V8 atomic


@@ -16,7 +16,7 @@ extern char reboot_command[];
  */
 extern unsigned char boot_cpu_id;
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 extern int serial_console;
 static inline int con_is_present(void)


@@ -130,18 +130,17 @@ unsigned long prepare_ftrace_return(unsigned long parent,
 if (unlikely(atomic_read(&current->tracing_graph_pause)))
 return parent + 8UL;
-trace.func = self_addr;
-trace.depth = current->curr_ret_stack + 1;
-
-/* Only trace if the calling function expects to */
-if (!ftrace_graph_entry(&trace))
-return parent + 8UL;
-
 if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
 frame_pointer, NULL) == -EBUSY)
 return parent + 8UL;
+trace.func = self_addr;
+
+/* Only trace if the calling function expects to */
+if (!ftrace_graph_entry(&trace)) {
+current->curr_ret_stack--;
+return parent + 8UL;
+}
+
 return return_hooker;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */


@@ -290,7 +290,7 @@ void __init mem_init(void)
 /* Saves us work later. */
-memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+memset((void *)empty_zero_page, 0, PAGE_SIZE);
 i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
 i += 1;


@@ -782,24 +782,26 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
 {
-u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
-int dimm, size0, size1;
+int dimm, size0, size1, cs0, cs1;
 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
 for (dimm = 0; dimm < 4; dimm++) {
 size0 = 0;
+cs0 = dimm * 2;
-if (dcsb[dimm*2] & DCSB_CS_ENABLE)
-size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
+if (csrow_enabled(cs0, ctrl, pvt))
+size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
 size1 = 0;
+cs1 = dimm * 2 + 1;
-if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
-size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
+if (csrow_enabled(cs1, ctrl, pvt))
+size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
-dimm * 2, size0,
-dimm * 2 + 1, size1);
+cs0, size0,
+cs1, size1);
 }
 }
@@ -2756,26 +2758,22 @@ skip:
 * encompasses
 *
 */
-static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
+static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
 {
-u32 cs_mode, nr_pages;
 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
+int csrow_nr = csrow_nr_orig;
+u32 cs_mode, nr_pages;
+
+if (!pvt->umc)
+csrow_nr >>= 1;
-/*
- * The math on this doesn't look right on the surface because x/2*4 can
- * be simplified to x*2 but this expression makes use of the fact that
- * it is integral math where 1/2=0. This intermediate value becomes the
- * number of bits to shift the DBAM register to extract the proper CSROW
- * field.
- */
-cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
+cs_mode = DBAM_DIMM(csrow_nr, dbam);
-nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
-<< (20 - PAGE_SHIFT);
+nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
+nr_pages <<= 20 - PAGE_SHIFT;
 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
-csrow_nr, dct, cs_mode);
+csrow_nr_orig, dct, cs_mode);
 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
 return nr_pages;


@@ -155,19 +155,14 @@ static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
 * efi_pstore_sysfs_entry_iter
 *
 * @record: pstore record to pass to callback
- * @pos: entry to begin iterating from
 *
 * You MUST call efivar_enter_iter_begin() before this function, and
 * efivar_entry_iter_end() afterwards.
 *
- * It is possible to begin iteration from an arbitrary entry within
- * the list by passing @pos. @pos is updated on return to point to
- * the next entry of the last one passed to efi_pstore_read_func().
- * To begin iterating from the beginning of the list @pos must be %NULL.
 */
-static int efi_pstore_sysfs_entry_iter(struct pstore_record *record,
-struct efivar_entry **pos)
+static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
 {
+struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
 struct efivar_entry *entry, *n;
 struct list_head *head = &efivar_sysfs_list;
 int size = 0;
@@ -218,7 +213,6 @@ static int efi_pstore_sysfs_entry_iter(struct pstore_record *record,
 */
 static ssize_t efi_pstore_read(struct pstore_record *record)
 {
-struct efivar_entry *entry = (struct efivar_entry *)record->psi->data;
 ssize_t size;
 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
@@ -229,7 +223,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
 size = -EINTR;
 goto out;
 }
-size = efi_pstore_sysfs_entry_iter(record, &entry);
+size = efi_pstore_sysfs_entry_iter(record);
 efivar_entry_iter_end();
 out:


@@ -604,6 +604,13 @@ static int coretemp_cpu_online(unsigned int cpu)
 struct cpuinfo_x86 *c = &cpu_data(cpu);
 struct platform_data *pdata;
+/*
+ * Don't execute this on resume as the offline callback did
+ * not get executed on suspend.
+ */
+if (cpuhp_tasks_frozen)
+return 0;
+
 /*
  * CPUID.06H.EAX[0] indicates whether the CPU has thermal
  * sensors. We check this bit only, all the early CPUs
@@ -654,6 +661,13 @@ static int coretemp_cpu_offline(unsigned int cpu)
 struct temp_data *tdata;
 int indx, target;
+/*
+ * Don't execute this on suspend as the device remove locks
+ * up the machine.
+ */
+if (cpuhp_tasks_frozen)
+return 0;
+
 /* If the physical CPU device does not exist, just return */
 if (!pdev)
 return 0;


@@ -819,7 +819,6 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
 rc = -EINVAL;
 goto out;
 }
-drv_data->irq = irq_of_parse_and_map(np, 0);
 drv_data->rstc = devm_reset_control_get_optional(dev, NULL);
 if (IS_ERR(drv_data->rstc)) {
@@ -902,10 +901,11 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 if (!IS_ERR(drv_data->clk))
 clk_prepare_enable(drv_data->clk);
+drv_data->irq = platform_get_irq(pd, 0);
+
 if (pdata) {
 drv_data->freq_m = pdata->freq_m;
 drv_data->freq_n = pdata->freq_n;
-drv_data->irq = platform_get_irq(pd, 0);
 drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout);
 drv_data->offload_enabled = false;
 memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets));
@@ -915,7 +915,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 goto exit_clk;
 }
 if (drv_data->irq < 0) {
-rc = -ENXIO;
+rc = drv_data->irq;
 goto exit_reset;
 }


@@ -416,6 +416,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
 adapter->class = I2C_CLASS_HWMON;
 adapter->dev.parent = &pdev->dev;
 adapter->dev.of_node = pdev->dev.of_node;
+ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev));
 i2c_set_adapdata(adapter, ctx);
 rc = i2c_add_adapter(adapter);
 if (rc) {


@@ -395,18 +395,20 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
 if (force_nr) {
 priv->adap.nr = force_nr;
 ret = i2c_add_numbered_adapter(&priv->adap);
-dev_err(&parent->dev,
-"failed to add mux-adapter %u as bus %u (error=%d)\n",
-chan_id, force_nr, ret);
+if (ret < 0) {
+dev_err(&parent->dev,
+"failed to add mux-adapter %u as bus %u (error=%d)\n",
+chan_id, force_nr, ret);
+goto err_free_priv;
+}
 } else {
 ret = i2c_add_adapter(&priv->adap);
-dev_err(&parent->dev,
-"failed to add mux-adapter %u (error=%d)\n",
-chan_id, ret);
-}
-if (ret < 0) {
-kfree(priv);
-return ret;
+if (ret < 0) {
+dev_err(&parent->dev,
+"failed to add mux-adapter %u (error=%d)\n",
+chan_id, ret);
+goto err_free_priv;
+}
 }
 WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj,
@@ -422,6 +424,10 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
 muxc->adapter[muxc->num_adapters++] = &priv->adap;
 return 0;
+
+err_free_priv:
+kfree(priv);
+return ret;
 }
 EXPORT_SYMBOL_GPL(i2c_mux_add_adapter);


@@ -196,20 +196,25 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 mux->data.reg_size = resource_size(res);
 mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
-if (IS_ERR(mux->data.reg))
-return PTR_ERR(mux->data.reg);
+if (IS_ERR(mux->data.reg)) {
+ret = PTR_ERR(mux->data.reg);
+goto err_put_parent;
+}
 }
 if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
 mux->data.reg_size != 1) {
 dev_err(&pdev->dev, "Invalid register size\n");
-return -EINVAL;
+ret = -EINVAL;
+goto err_put_parent;
 }
 muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
 i2c_mux_reg_select, NULL);
-if (!muxc)
-return -ENOMEM;
+if (!muxc) {
+ret = -ENOMEM;
+goto err_put_parent;
+}
+
 muxc->priv = mux;
 platform_set_drvdata(pdev, muxc);
@@ -223,7 +228,7 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
 ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
 if (ret)
-goto add_adapter_failed;
+goto err_del_mux_adapters;
 }
 dev_dbg(&pdev->dev, "%d port mux on %s adapter\n",
@@ -231,8 +236,10 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
 return 0;
-add_adapter_failed:
+err_del_mux_adapters:
 i2c_mux_del_adapters(muxc);
+err_put_parent:
+i2c_put_adapter(parent);
 return ret;
 }


@@ -218,7 +218,7 @@ static DEFINE_SPINLOCK(param_spinlock);
 * Buffers are freed after this timeout
 */
 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 static unsigned long dm_bufio_peak_allocated;
 static unsigned long dm_bufio_allocated_kmem_cache;
@@ -1558,10 +1558,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 return true;
 }
-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
-return retain_bytes / c->block_size;
+unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }
 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1571,7 +1571,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
 struct dm_buffer *b, *tmp;
 unsigned long freed = 0;
 unsigned long count = nr_to_scan;
-unsigned retain_target = get_retain_buffers(c);
+unsigned long retain_target = get_retain_buffers(c);
 for (l = 0; l < LIST_SIZE; l++) {
 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1794,8 +1794,8 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 {
 struct dm_buffer *b, *tmp;
-unsigned retain_target = get_retain_buffers(c);
-unsigned count;
+unsigned long retain_target = get_retain_buffers(c);
+unsigned long count;
 LIST_HEAD(write_list);
 dm_bufio_lock(c);
@@ -1955,7 +1955,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);


@@ -33,6 +33,11 @@ struct background_tracker *btracker_create(unsigned max_work)
 {
 struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
+if (!b) {
+DMERR("couldn't create background_tracker");
+return NULL;
+}
+
 b->max_work = max_work;
 atomic_set(&b->pending_promotes, 0);
 atomic_set(&b->pending_writebacks, 0);


@@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 * Cache entries may not be populated. So we cannot rely on the
 * size of the clean queue.
 */
-unsigned nr_clean;
-
 if (idle) {
 /*
 * We'd like to clean everything.
@@ -1129,18 +1127,16 @@
 return q_size(&mq->dirty) == 0u;
 }
-nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
-return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-percent_to_target(mq, CLEAN_TARGET);
+/*
+ * If we're busy we don't worry about cleaning at all.
+ */
+return true;
 }
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 unsigned nr_free;
-if (!idle)
-return true;
-
 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 percent_to_target(mq, FREE_TARGET);
@@ -1190,9 +1186,9 @@ static void queue_demotion(struct smq_policy *mq)
 if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
 return;
-e = q_peek(&mq->clean, mq->clean.nr_levels, true);
+e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
 if (!e) {
-if (!clean_target_met(mq, false))
+if (!clean_target_met(mq, true))
 queue_writeback(mq);
 return;
 }
@@ -1220,7 +1216,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 * We always claim to be 'idle' to ensure some demotions happen
 * with continuous loads.
 */
-if (!free_target_met(mq, true))
+if (!free_target_met(mq))
 queue_demotion(mq);
 return;
 }
@@ -1421,14 +1417,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 spin_lock_irqsave(&mq->lock, flags);
 r = btracker_issue(mq->bg_work, result);
 if (r == -ENODATA) {
-/* find some writeback work to do */
-if (mq->migrations_allowed && !free_target_met(mq, idle))
-queue_demotion(mq);
-
-else if (!clean_target_met(mq, idle))
+if (!clean_target_met(mq, idle)) {
 queue_writeback(mq);
-
-r = btracker_issue(mq->bg_work, result);
+r = btracker_issue(mq->bg_work, result);
+}
 }
 spin_unlock_irqrestore(&mq->lock, flags);
@@ -1452,6 +1444,7 @@ static void __complete_background_work(struct smq_policy *mq,
 clear_pending(mq, e);
 if (success) {
 e->oblock = work->oblock;
+e->level = NR_CACHE_LEVELS - 1;
 push(mq, e);
 // h, q, a
 } else {


@@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len)
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
 {
+if (!len)
+return;
+
 iot->in_flight -= len;
 if (!iot->in_flight)
 iot->idle_time = jiffies;
@@ -474,7 +477,7 @@ struct cache {
 spinlock_t invalidation_lock;
 struct list_head invalidation_requests;
-struct io_tracker origin_tracker;
+struct io_tracker tracker;
 struct work_struct commit_ws;
 struct batcher committer;
@@ -901,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
-return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-bio_op(bio) != REQ_OP_DISCARD);
+return bio_op(bio) != REQ_OP_DISCARD;
 }
 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -912,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 if (accountable_bio(cache, bio)) {
 pb->len = bio_sectors(bio);
-iot_io_begin(&cache->origin_tracker, pb->len);
+iot_io_begin(&cache->tracker, pb->len);
 }
 }
@@ -921,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 size_t pb_data_size = get_per_bio_data_size(cache);
 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
-iot_io_end(&cache->origin_tracker, pb->len);
+iot_io_end(&cache->tracker, pb->len);
 }
 static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1716,20 +1718,19 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
 enum busy {
 IDLE,
-MODERATE,
 BUSY
 };
 static enum busy spare_migration_bandwidth(struct cache *cache)
 {
-bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+bool idle = iot_idle_for(&cache->tracker, HZ);
 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
 cache->sectors_per_block;
-if (current_volume <= cache->migration_threshold)
-return idle ? IDLE : MODERATE;
+if (idle && current_volume <= cache->migration_threshold)
+return IDLE;
 else
-return idle ? MODERATE : BUSY;
+return BUSY;
 }
 static void inc_hit_counter(struct cache *cache, struct bio *bio)
@@ -2045,8 +2046,6 @@ static void check_migrations(struct work_struct *ws)
 for (;;) {
 b = spare_migration_bandwidth(cache);
-if (b == BUSY)
-break;
 r = policy_get_background_work(cache->policy, b == IDLE, &op);
 if (r == -ENODATA)
@@ -2717,7 +2716,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 batcher_init(&cache->committer, commit_op, cache,
 issue_op, cache, cache->wq);
-iot_init(&cache->origin_tracker);
+iot_init(&cache->tracker);
 init_rwsem(&cache->background_work_lock);
 prevent_background_work(cache);
@@ -2941,7 +2940,7 @@ static void cache_postsuspend(struct dm_target *ti)
 cancel_delayed_work(&cache->waker);
 flush_workqueue(cache->wq);
-WARN_ON(cache->origin_tracker.in_flight);
+WARN_ON(cache->tracker.in_flight);
 /*
 * If it's a flush suspend there won't be any deferred bios, so this

@@ -447,7 +447,7 @@ failed:
 * it has been invoked.
 */
 #define dm_report_EIO(m) \
-({ \
+do { \
 struct mapped_device *md = dm_table_get_md((m)->ti->table); \
 \
 pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
@@ -455,8 +455,7 @@ failed:
 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
 dm_noflush_suspending((m)->ti)); \
--EIO; \
-})
+} while (0)
 /*
 * Map cloned requests (request-based multipath)
@@ -481,7 +480,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 if (!pgpath) {
 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 return DM_MAPIO_DELAY_REQUEUE;
-return dm_report_EIO(m); /* Failed */
+dm_report_EIO(m); /* Failed */
+return DM_MAPIO_KILL;
 } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 if (pg_init_all_paths(m))
@@ -558,7 +558,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 if (!pgpath) {
 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 return DM_MAPIO_REQUEUE;
-return dm_report_EIO(m);
+dm_report_EIO(m);
+return -EIO;
 }
 mpio->pgpath = pgpath;
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 if (atomic_read(&m->nr_valid_paths) == 0 &&
 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 if (error == -EIO)
-error = dm_report_EIO(m);
+dm_report_EIO(m);
 /* complete with the original error */
 r = DM_ENDIO_DONE;
 }
@@ -1524,8 +1525,10 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
 fail_path(mpio->pgpath);
 if (atomic_read(&m->nr_valid_paths) == 0 &&
-!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-return dm_report_EIO(m);
+!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+dm_report_EIO(m);
+return -EIO;
+}
 /* Queue for the daemon to resubmit */
 dm_bio_restore(get_bio_details_from_bio(clone), clone);


@@ -507,6 +507,7 @@ static int map_request(struct dm_rq_target_io *tio)
 case DM_MAPIO_KILL:
 /* The target wants to complete the I/O */
 dm_kill_unmapped_request(rq, -EIO);
+break;
 default:
 DMWARN("unimplemented target map return value: %d", r);
 BUG();


@@ -484,11 +484,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
 if (r < 0)
 return r;
-r = save_sm_roots(pmd);
+r = dm_tm_pre_commit(pmd->tm);
 if (r < 0)
 return r;
-r = dm_tm_pre_commit(pmd->tm);
+r = save_sm_roots(pmd);
 if (r < 0)
 return r;


@@ -8022,18 +8022,15 @@ EXPORT_SYMBOL(md_write_end);
 * may proceed without blocking. It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
- *
- * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
- * is dropped, so return -EAGAIN after notifying userspace.
 */
-int md_allow_write(struct mddev *mddev)
+void md_allow_write(struct mddev *mddev)
 {
 if (!mddev->pers)
-return 0;
+return;
 if (mddev->ro)
-return 0;
+return;
 if (!mddev->pers->sync_request)
-return 0;
+return;
 spin_lock(&mddev->lock);
 if (mddev->in_sync) {
@@ -8046,13 +8043,12 @@ int md_allow_write(struct mddev *mddev)
 spin_unlock(&mddev->lock);
 md_update_sb(mddev, 0);
 sysfs_notify_dirent_safe(mddev->sysfs_state);
+/* wait for the dirty state to be recorded in the metadata */
+wait_event(mddev->sb_wait,
+!test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
+!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 } else
 spin_unlock(&mddev->lock);
-
-if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
-return -EAGAIN;
-else
-return 0;
 }
 EXPORT_SYMBOL_GPL(md_allow_write);


@@ -665,7 +665,7 @@ extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 bool metadata_op);
 extern void md_do_sync(struct md_thread *thread);
 extern void md_new_event(struct mddev *mddev);
-extern int md_allow_write(struct mddev *mddev);
+extern void md_allow_write(struct mddev *mddev);
 extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
 extern int md_check_no_bitmap(struct mddev *mddev);


@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
+int r;
+uint32_t old_count;
 enum allocation_event ev;
 struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
-return sm_ll_dec(&smd->ll, b, &ev);
+r = sm_ll_dec(&smd->ll, b, &ev);
+if (!r && (ev == SM_FREE)) {
+/*
+ * It's only free if it's also free in the last
+ * transaction.
+ */
+r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+if (!r && !old_count)
+smd->nr_allocated_this_transaction--;
+}
+
+return r;
 }
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)


@@ -385,7 +385,7 @@ static int raid0_run(struct mddev *mddev)
 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
 blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
-blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
 blk_queue_io_opt(mddev->queue,
@@ -459,6 +459,95 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 }
 }
+static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+{
+struct r0conf *conf = mddev->private;
+struct strip_zone *zone;
+sector_t start = bio->bi_iter.bi_sector;
+sector_t end;
+unsigned int stripe_size;
+sector_t first_stripe_index, last_stripe_index;
+sector_t start_disk_offset;
+unsigned int start_disk_index;
+sector_t end_disk_offset;
+unsigned int end_disk_index;
+unsigned int disk;
+
+zone = find_zone(conf, &start);
+
+if (bio_end_sector(bio) > zone->zone_end) {
+struct bio *split = bio_split(bio,
+zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
+mddev->bio_set);
+bio_chain(split, bio);
+generic_make_request(bio);
+bio = split;
+end = zone->zone_end;
+} else
+end = bio_end_sector(bio);
+
+if (zone != conf->strip_zone)
+end = end - zone[-1].zone_end;
+
+/* Now start and end is the offset in zone */
+stripe_size = zone->nb_dev * mddev->chunk_sectors;
+
+first_stripe_index = start;
+sector_div(first_stripe_index, stripe_size);
+last_stripe_index = end;
+sector_div(last_stripe_index, stripe_size);
+
+start_disk_index = (int)(start - first_stripe_index * stripe_size) /
+mddev->chunk_sectors;
+start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
+mddev->chunk_sectors) +
+first_stripe_index * mddev->chunk_sectors;
+end_disk_index = (int)(end - last_stripe_index * stripe_size) /
+mddev->chunk_sectors;
+end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
+mddev->chunk_sectors) +
+last_stripe_index * mddev->chunk_sectors;
+
+for (disk = 0; disk < zone->nb_dev; disk++) {
+sector_t dev_start, dev_end;
+struct bio *discard_bio = NULL;
+struct md_rdev *rdev;
+
+if (disk < start_disk_index)
+dev_start = (first_stripe_index + 1) *
+mddev->chunk_sectors;
+else if (disk > start_disk_index)
+dev_start = first_stripe_index * mddev->chunk_sectors;
+else
+dev_start = start_disk_offset;
+
+if (disk < end_disk_index)
+dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
+else if (disk > end_disk_index)
+dev_end = last_stripe_index * mddev->chunk_sectors;
+else
+dev_end = end_disk_offset;
+
+if (dev_end <= dev_start)
+continue;
+
+rdev = conf->devlist[(zone - conf->strip_zone) *
+conf->strip_zone[0].nb_dev + disk];
+if (__blkdev_issue_discard(rdev->bdev,
+dev_start + zone->dev_start + rdev->data_offset,
+dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
+!discard_bio)
+continue;
+bio_chain(discard_bio, bio);
+if (mddev->gendisk)
+trace_block_bio_remap(bdev_get_queue(rdev->bdev),
+discard_bio, disk_devt(mddev->gendisk),
+bio->bi_iter.bi_sector);
+generic_make_request(discard_bio);
+}
+bio_endio(bio);
+}
+
 static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
 struct strip_zone *zone;
@@ -473,6 +562,11 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 return;
 }
+if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
+raid0_handle_discard(mddev, bio);
+return;
+}
+
 bio_sector = bio->bi_iter.bi_sector;
 sector = bio_sector;
 chunk_sects = mddev->chunk_sectors;
@@ -498,19 +592,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 bio->bi_iter.bi_sector = sector + zone->dev_start +
 tmp_dev->data_offset;
-if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-/* Just ignore it */
-bio_endio(bio);
-} else {
-if (mddev->gendisk)
-trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
-bio, disk_devt(mddev->gendisk),
-bio_sector);
-mddev_check_writesame(mddev, bio);
-mddev_check_write_zeroes(mddev, bio);
-generic_make_request(bio);
-}
+if (mddev->gendisk)
+trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+bio, disk_devt(mddev->gendisk),
+bio_sector);
+mddev_check_writesame(mddev, bio);
+mddev_check_write_zeroes(mddev, bio);
+generic_make_request(bio);
 }
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)


@@ -666,8 +666,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 break;
 }
 continue;
-} else
+} else {
+if ((sectors > best_good_sectors) && (best_disk >= 0))
+best_disk = -1;
 best_good_sectors = sectors;
+}
 if (best_disk >= 0)
 /* At least two disks to choose from so failfast is OK */
@@ -1529,17 +1532,16 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 plug = container_of(cb, struct raid1_plug_cb, cb);
 else
 plug = NULL;
-spin_lock_irqsave(&conf->device_lock, flags);
 if (plug) {
 bio_list_add(&plug->pending, mbio);
 plug->pending_cnt++;
 } else {
+spin_lock_irqsave(&conf->device_lock, flags);
 bio_list_add(&conf->pending_bio_list, mbio);
 conf->pending_count++;
-}
-spin_unlock_irqrestore(&conf->device_lock, flags);
-if (!plug)
+spin_unlock_irqrestore(&conf->device_lock, flags);
 md_wakeup_thread(mddev->thread);
+}
 }
 r1_bio_write_done(r1_bio);
@@ -3197,7 +3199,7 @@ static int raid1_reshape(struct mddev *mddev)
 struct r1conf *conf = mddev->private;
 int cnt, raid_disks;
 unsigned long flags;
-int d, d2, err;
+int d, d2;
 /* Cannot change chunk_size, layout, or level */
 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -3209,11 +3211,8 @@ static int raid1_reshape(struct mddev *mddev)
 return -EINVAL;
 }
-if (!mddev_is_clustered(mddev)) {
-err = md_allow_write(mddev);
-if (err)
-return err;
-}
+if (!mddev_is_clustered(mddev))
+md_allow_write(mddev);
 raid_disks = mddev->raid_disks + mddev->delta_disks;


@@ -1282,17 +1282,16 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 plug = container_of(cb, struct raid10_plug_cb, cb);
 else
 plug = NULL;
-spin_lock_irqsave(&conf->device_lock, flags);
 if (plug) {
 bio_list_add(&plug->pending, mbio);
 plug->pending_cnt++;
 } else {
+spin_lock_irqsave(&conf->device_lock, flags);
 bio_list_add(&conf->pending_bio_list, mbio);
 conf->pending_count++;
-}
-spin_unlock_irqrestore(&conf->device_lock, flags);
-if (!plug)
+spin_unlock_irqrestore(&conf->device_lock, flags);
 md_wakeup_thread(mddev->thread);
+}
 }
 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
static void raid10_write_request(struct mddev *mddev, struct bio *bio, static void raid10_write_request(struct mddev *mddev, struct bio *bio,


@@ -24,6 +24,7 @@
 #include "md.h"
 #include "raid5.h"
 #include "bitmap.h"
+#include "raid5-log.h"
 /*
 * metadata/data stored in disk with 4k size unit (a block) regardless
@@ -622,20 +623,30 @@ static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
 __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
 spin_unlock_irqrestore(&log->io_list_lock, flags);
+/*
+ * In case of journal device failures, submit_bio will get error
+ * and calls endio, then active stripes will continue write
+ * process. Therefore, it is not necessary to check Faulty bit
+ * of journal device here.
+ *
+ * We can't check split_bio after current_bio is submitted. If
+ * io->split_bio is null, after current_bio is submitted, current_bio
+ * might already be completed and the io_unit is freed. We submit
+ * split_bio first to avoid the issue.
+ */
+if (io->split_bio) {
+if (io->has_flush)
+io->split_bio->bi_opf |= REQ_PREFLUSH;
+if (io->has_fua)
+io->split_bio->bi_opf |= REQ_FUA;
+submit_bio(io->split_bio);
+}
+
 if (io->has_flush)
 io->current_bio->bi_opf |= REQ_PREFLUSH;
 if (io->has_fua)
 io->current_bio->bi_opf |= REQ_FUA;
 submit_bio(io->current_bio);
-
-if (!io->split_bio)
-return;
-
-if (io->has_flush)
-io->split_bio->bi_opf |= REQ_PREFLUSH;
-if (io->has_fua)
-io->split_bio->bi_opf |= REQ_FUA;
-submit_bio(io->split_bio);
 }
 /* deferred io_unit will be dispatched here */
@@ -670,6 +681,11 @@ static void r5c_disable_writeback_async(struct work_struct *work)
 return;
 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
 mdname(mddev));
+
+/* wait superblock change before suspend */
+wait_event(mddev->sb_wait,
+!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
+
 mddev_suspend(mddev);
 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
 mddev_resume(mddev);
@@ -2621,8 +2637,11 @@ int r5c_try_caching_write(struct r5conf *conf,
 * When run in degraded mode, array is set to write-through mode.
 * This check helps drain pending write safely in the transition to
 * write-through mode.
+ *
+ * When a stripe is syncing, the write is also handled in write
+ * through mode.
 */
-if (s->failed) {
+if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
 r5c_make_stripe_write_out(sh);
 return -EAGAIN;
 }
@@ -2825,6 +2844,9 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
 }
 r5l_append_flush_payload(log, sh->sector);
+/* stripe is flused to raid disks, we can do resync now */
+if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
+set_bit(STRIPE_HANDLE, &sh->state);
 }
 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
@@ -2973,7 +2995,7 @@ ioerr:
 return ret;
 }
-void r5c_update_on_rdev_error(struct mddev *mddev)
+void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
 {
 struct r5conf *conf = mddev->private;
 struct r5l_log *log = conf->log;
@@ -2981,7 +3003,8 @@ void r5c_update_on_rdev_error(struct mddev *mddev)
 if (!log)
 return;
-if (raid5_calc_degraded(conf) > 0 &&
+if ((raid5_calc_degraded(conf) > 0 ||
+test_bit(Journal, &rdev->flags)) &&
 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
 schedule_work(&log->disable_writeback_work);
 }


@@ -28,7 +28,8 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
 extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
 extern void r5c_check_cached_full_stripe(struct r5conf *conf);
 extern struct md_sysfs_entry r5c_journal_mode;
-extern void r5c_update_on_rdev_error(struct mddev *mddev);
+extern void r5c_update_on_rdev_error(struct mddev *mddev,
+struct md_rdev *rdev);
 extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
 extern struct dma_async_tx_descriptor *


@ -103,8 +103,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{ {
int i; int i;
local_irq_disable(); spin_lock_irq(conf->hash_locks);
spin_lock(conf->hash_locks);
for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
spin_lock(&conf->device_lock); spin_lock(&conf->device_lock);
@ -114,9 +113,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{ {
int i; int i;
spin_unlock(&conf->device_lock); spin_unlock(&conf->device_lock);
for (i = NR_STRIPE_HASH_LOCKS; i; i--) for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
spin_unlock(conf->hash_locks + i - 1); spin_unlock(conf->hash_locks + i);
local_irq_enable(); spin_unlock_irq(conf->hash_locks);
} }
/* Find first data disk in a raid6 stripe */ /* Find first data disk in a raid6 stripe */
@ -234,11 +233,15 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
if (test_bit(R5_InJournal, &sh->dev[i].flags)) if (test_bit(R5_InJournal, &sh->dev[i].flags))
injournal++; injournal++;
/* /*
* When quiesce in r5c write back, set STRIPE_HANDLE for stripes with * In the following cases, the stripe cannot be released to cached
* data in journal, so they are not released to cached lists * lists. Therefore, we make the stripe write out and set
* STRIPE_HANDLE:
* 1. when quiesce in r5c write back;
* 2. when resync is requested fot the stripe.
*/ */
if (conf->quiesce && r5c_is_writeback(conf->log) && if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) ||
!test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) { (conf->quiesce && r5c_is_writeback(conf->log) &&
!test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) {
if (test_bit(STRIPE_R5C_CACHING, &sh->state)) if (test_bit(STRIPE_R5C_CACHING, &sh->state))
r5c_make_stripe_write_out(sh); r5c_make_stripe_write_out(sh);
set_bit(STRIPE_HANDLE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state);
@ -714,12 +717,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{ {
local_irq_disable();
if (sh1 > sh2) { if (sh1 > sh2) {
spin_lock(&sh2->stripe_lock); spin_lock_irq(&sh2->stripe_lock);
spin_lock_nested(&sh1->stripe_lock, 1); spin_lock_nested(&sh1->stripe_lock, 1);
} else { } else {
spin_lock(&sh1->stripe_lock); spin_lock_irq(&sh1->stripe_lock);
spin_lock_nested(&sh2->stripe_lock, 1); spin_lock_nested(&sh2->stripe_lock, 1);
} }
} }
@ -727,8 +729,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{ {
spin_unlock(&sh1->stripe_lock); spin_unlock(&sh1->stripe_lock);
spin_unlock(&sh2->stripe_lock); spin_unlock_irq(&sh2->stripe_lock);
local_irq_enable();
} }
/* Only freshly new full stripe normal write stripe can be added to a batch list */ /* Only freshly new full stripe normal write stripe can be added to a batch list */
@ -2312,14 +2313,12 @@ static int resize_stripes(struct r5conf *conf, int newsize)
struct stripe_head *osh, *nsh; struct stripe_head *osh, *nsh;
LIST_HEAD(newstripes); LIST_HEAD(newstripes);
struct disk_info *ndisks; struct disk_info *ndisks;
int err; int err = 0;
struct kmem_cache *sc; struct kmem_cache *sc;
int i; int i;
int hash, cnt; int hash, cnt;
err = md_allow_write(conf->mddev); md_allow_write(conf->mddev);
if (err)
return err;
/* Step 1 */ /* Step 1 */
sc = kmem_cache_create(conf->cache_name[1-conf->active_name], sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
@ -2694,7 +2693,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
bdevname(rdev->bdev, b), bdevname(rdev->bdev, b),
mdname(mddev), mdname(mddev),
conf->raid_disks - mddev->degraded); conf->raid_disks - mddev->degraded);
r5c_update_on_rdev_error(mddev); r5c_update_on_rdev_error(mddev, rdev);
} }
/* /*
@ -3055,6 +3054,11 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
* When LOG_CRITICAL, stripes with injournal == 0 will be sent to * When LOG_CRITICAL, stripes with injournal == 0 will be sent to
* no_space_stripes list. * no_space_stripes list.
* *
* 3. during journal failure
* During journal failure, we try to flush all cached data to raid disks
* based on data in the stripe cache. The array is read-only to upper
* layers, so we skip all pending writes.
*
*/ */
static inline bool delay_towrite(struct r5conf *conf, static inline bool delay_towrite(struct r5conf *conf,
struct r5dev *dev, struct r5dev *dev,
@ -3068,6 +3072,9 @@ static inline bool delay_towrite(struct r5conf *conf,
if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
s->injournal > 0) s->injournal > 0)
return true; return true;
/* case 3 above */
if (s->log_failed && s->injournal)
return true;
return false; return false;
} }
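To make the new case concrete, here is a minimal userspace sketch of the write-delay test for the two conditions visible in this hunk. The struct and field names are hypothetical stand-ins; the real delay_towrite() also checks per-device state that is not shown here.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, flattened inputs to the decision shown above. */
struct towrite_state {
	bool log_critical;   /* R5C_LOG_CRITICAL set on the conf */
	bool log_failed;     /* journal device has failed */
	int  injournal;      /* devices of this stripe with data in the journal */
};

/* Sketch of the visible cases only. */
static bool delay_towrite_sketch(const struct towrite_state *s)
{
	/* stripe has data in the journal while the log is critically low */
	if (s->log_critical && s->injournal > 0)
		return true;
	/* case 3: journal failed; array is read-only, skip pending writes */
	if (s->log_failed && s->injournal)
		return true;
	return false;
}

int main(void)
{
	struct towrite_state s = { .log_failed = true, .injournal = 2 };

	printf("delay: %d\n", delay_towrite_sketch(&s));
	return 0;
}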
@ -4653,8 +4660,13 @@ static void handle_stripe(struct stripe_head *sh)
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
spin_lock(&sh->stripe_lock); spin_lock(&sh->stripe_lock);
/* Cannot process 'sync' concurrently with 'discard' */ /*
if (!test_bit(STRIPE_DISCARD, &sh->state) && * Cannot process 'sync' concurrently with 'discard'.
* Flush data in r5cache before 'sync'.
*/
if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
!test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) &&
!test_bit(STRIPE_DISCARD, &sh->state) &&
test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
set_bit(STRIPE_SYNCING, &sh->state); set_bit(STRIPE_SYNCING, &sh->state);
clear_bit(STRIPE_INSYNC, &sh->state); clear_bit(STRIPE_INSYNC, &sh->state);
@ -4701,10 +4713,15 @@ static void handle_stripe(struct stripe_head *sh)
" to_write=%d failed=%d failed_num=%d,%d\n", " to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed, s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
s.failed_num[0], s.failed_num[1]); s.failed_num[0], s.failed_num[1]);
/* check if the array has lost more than max_degraded devices and, /*
* check if the array has lost more than max_degraded devices and,
* if so, some requests might need to be failed. * if so, some requests might need to be failed.
*
* When the journal device has failed (log_failed), we will only process
* the stripe if there is data that needs to be written to the raid disks
*/ */
if (s.failed > conf->max_degraded || s.log_failed) { if (s.failed > conf->max_degraded ||
(s.log_failed && s.injournal == 0)) {
sh->check_state = 0; sh->check_state = 0;
sh->reconstruct_state = 0; sh->reconstruct_state = 0;
break_stripe_batch_list(sh, 0); break_stripe_batch_list(sh, 0);
@ -5277,8 +5294,10 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
struct stripe_head *sh, *tmp; struct stripe_head *sh, *tmp;
struct list_head *handle_list = NULL; struct list_head *handle_list = NULL;
struct r5worker_group *wg; struct r5worker_group *wg;
bool second_try = !r5c_is_writeback(conf->log); bool second_try = !r5c_is_writeback(conf->log) &&
bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state); !r5l_log_disk_error(conf);
bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) ||
r5l_log_disk_error(conf);
again: again:
wg = NULL; wg = NULL;
@ -6313,7 +6332,6 @@ int
raid5_set_cache_size(struct mddev *mddev, int size) raid5_set_cache_size(struct mddev *mddev, int size)
{ {
struct r5conf *conf = mddev->private; struct r5conf *conf = mddev->private;
int err;
if (size <= 16 || size > 32768) if (size <= 16 || size > 32768)
return -EINVAL; return -EINVAL;
@ -6325,10 +6343,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
; ;
mutex_unlock(&conf->cache_size_mutex); mutex_unlock(&conf->cache_size_mutex);
md_allow_write(mddev);
err = md_allow_write(mddev);
if (err)
return err;
mutex_lock(&conf->cache_size_mutex); mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes) while (size > conf->max_nr_stripes)
@ -7530,7 +7545,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
* neilb: there is no locking about new writes here, * neilb: there is no locking about new writes here,
* so this cannot be safe. * so this cannot be safe.
*/ */
if (atomic_read(&conf->active_stripes)) { if (atomic_read(&conf->active_stripes) ||
atomic_read(&conf->r5c_cached_full_stripes) ||
atomic_read(&conf->r5c_cached_partial_stripes)) {
return -EBUSY; return -EBUSY;
} }
log_exit(conf); log_exit(conf);


@ -7642,8 +7642,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->min_mtu = ETH_ZLEN; dev->min_mtu = ETH_ZLEN;
dev->max_mtu = BNXT_MAX_MTU; dev->max_mtu = BNXT_MAX_MTU;
bnxt_dcb_init(bp);
#ifdef CONFIG_BNXT_SRIOV #ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait); init_waitqueue_head(&bp->sriov_cfg_wait);
#endif #endif
@ -7681,6 +7679,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_port_led_qcaps(bp); bnxt_hwrm_port_led_qcaps(bp);
bnxt_ethtool_init(bp); bnxt_ethtool_init(bp);
bnxt_dcb_init(bp);
bnxt_set_rx_skb_mode(bp, false); bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp); bnxt_set_tpa_flags(bp);


@ -553,8 +553,10 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
return 1; return 1;
if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp)) if (mode & DCB_CAP_DCBX_HOST) {
return 1; if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
return 1;
}
if (mode == bp->dcbx_cap) if (mode == bp->dcbx_cap)
return 0; return 0;


@ -37,7 +37,7 @@
#define T4FW_VERSION_MAJOR 0x01 #define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x10 #define T4FW_VERSION_MINOR 0x10
#define T4FW_VERSION_MICRO 0x21 #define T4FW_VERSION_MICRO 0x2B
#define T4FW_VERSION_BUILD 0x00 #define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01 #define T4FW_MIN_VERSION_MAJOR 0x01
@ -46,7 +46,7 @@
#define T5FW_VERSION_MAJOR 0x01 #define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x10 #define T5FW_VERSION_MINOR 0x10
#define T5FW_VERSION_MICRO 0x21 #define T5FW_VERSION_MICRO 0x2B
#define T5FW_VERSION_BUILD 0x00 #define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00
@ -55,7 +55,7 @@
#define T6FW_VERSION_MAJOR 0x01 #define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x10 #define T6FW_VERSION_MINOR 0x10
#define T6FW_VERSION_MICRO 0x21 #define T6FW_VERSION_MICRO 0x2B
#define T6FW_VERSION_BUILD 0x00 #define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00


@ -1174,11 +1174,17 @@ static int ftmac100_remove(struct platform_device *pdev)
return 0; return 0;
} }
static const struct of_device_id ftmac100_of_ids[] = {
{ .compatible = "andestech,atmac100" },
{ }
};
static struct platform_driver ftmac100_driver = { static struct platform_driver ftmac100_driver = {
.probe = ftmac100_probe, .probe = ftmac100_probe,
.remove = ftmac100_remove, .remove = ftmac100_remove,
.driver = { .driver = {
.name = DRV_NAME, .name = DRV_NAME,
.of_match_table = ftmac100_of_ids
}, },
}; };
@ -1202,3 +1208,4 @@ module_exit(ftmac100_exit);
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTMAC100 driver"); MODULE_DESCRIPTION("FTMAC100 driver");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, ftmac100_of_ids);


@ -13,7 +13,7 @@ config MLX5_CORE
config MLX5_CORE_EN config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support" bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m depends on IPV6=y || IPV6=n || MLX5_CORE=m
imply PTP_1588_CLOCK imply PTP_1588_CLOCK
default n default n


@ -199,10 +199,11 @@ static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp,
entry->counter_valid = false; entry->counter_valid = false;
entry->counter = 0; entry->counter = 0;
entry->index = mlxsw_sp_rif_index(rif);
if (!counters_enabled) if (!counters_enabled)
return 0; return 0;
entry->index = mlxsw_sp_rif_index(rif);
err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif, err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS, MLXSW_SP_RIF_COUNTER_EGRESS,
&cnt); &cnt);


@ -231,6 +231,9 @@ void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
{ {
unsigned int *p_counter_index; unsigned int *p_counter_index;
if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
return;
p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
if (WARN_ON(!p_counter_index)) if (WARN_ON(!p_counter_index))
return; return;


@ -1369,8 +1369,7 @@ do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true); adding, true);
if (err) { if (err) {
if (net_ratelimit()) dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
return; return;
} }
@ -1430,8 +1429,7 @@ do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true); adding, true);
if (err) { if (err) {
if (net_ratelimit()) dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
return; return;
} }


@ -3220,7 +3220,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */ /* MDIO bus init */
ret = sh_mdio_init(mdp, pd); ret = sh_mdio_init(mdp, pd);
if (ret) { if (ret) {
dev_err(&ndev->dev, "failed to initialise MDIO\n"); if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
goto out_release; goto out_release;
} }


@ -1196,6 +1196,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */


@ -35,7 +35,7 @@ static struct bus_type ccwgroup_bus_type;
static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{ {
int i; int i;
char str[8]; char str[16];
for (i = 0; i < gdev->count; i++) { for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i); sprintf(str, "cdev%d", i);
@ -238,7 +238,7 @@ static void ccwgroup_release(struct device *dev)
static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{ {
char str[8]; char str[16];
int i, rc; int i, rc;
for (i = 0; i < gdev->count; i++) { for (i = 0; i < gdev->count; i++) {


@ -11,7 +11,7 @@
#include "qdio.h" #include "qdio.h"
/* that gives us 15 characters in the text event views */ /* that gives us 15 characters in the text event views */
#define QDIO_DBF_LEN 16 #define QDIO_DBF_LEN 32
extern debug_info_t *qdio_dbf_setup; extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_error; extern debug_info_t *qdio_dbf_error;


@ -87,7 +87,7 @@ struct vq_info_block {
} __packed; } __packed;
struct virtio_feature_desc { struct virtio_feature_desc {
__u32 features; __le32 features;
__u8 index; __u8 index;
} __packed; } __packed;


@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *);
/* sysctl_net_x25.c */ /* sysctl_net_x25.c */
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
void x25_register_sysctl(void); int x25_register_sysctl(void);
void x25_unregister_sysctl(void); void x25_unregister_sysctl(void);
#else #else
static inline void x25_register_sysctl(void) {}; static inline int x25_register_sysctl(void) { return 0; };
static inline void x25_unregister_sysctl(void) {}; static inline void x25_unregister_sysctl(void) {};
#endif /* CONFIG_SYSCTL */ #endif /* CONFIG_SYSCTL */


@ -140,7 +140,7 @@ struct bpf_verifier_stack_elem {
struct bpf_verifier_stack_elem *next; struct bpf_verifier_stack_elem *next;
}; };
#define BPF_COMPLEXITY_LIMIT_INSNS 65536 #define BPF_COMPLEXITY_LIMIT_INSNS 98304
#define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_COMPLEXITY_LIMIT_STACK 1024
#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
@ -2640,6 +2640,7 @@ peek_stack:
env->explored_states[t + 1] = STATE_LIST_MARK; env->explored_states[t + 1] = STATE_LIST_MARK;
} else { } else {
/* conditional jump with two edges */ /* conditional jump with two edges */
env->explored_states[t] = STATE_LIST_MARK;
ret = push_insn(t, t + 1, FALLTHROUGH, env); ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1) if (ret == 1)
goto peek_stack; goto peek_stack;
@ -2798,6 +2799,12 @@ static bool states_equal(struct bpf_verifier_env *env,
rcur->type != NOT_INIT)) rcur->type != NOT_INIT))
continue; continue;
/* Don't care about the reg->id in this case. */
if (rold->type == PTR_TO_MAP_VALUE_OR_NULL &&
rcur->type == PTR_TO_MAP_VALUE_OR_NULL &&
rold->map_ptr == rcur->map_ptr)
continue;
if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
compare_ptrs_to_packet(rold, rcur)) compare_ptrs_to_packet(rold, rcur))
continue; continue;
@ -2932,6 +2939,9 @@ static int do_check(struct bpf_verifier_env *env)
goto process_bpf_exit; goto process_bpf_exit;
} }
if (need_resched())
cond_resched();
if (log_level > 1 || (log_level && do_print_state)) { if (log_level > 1 || (log_level && do_print_state)) {
if (log_level > 1) if (log_level > 1)
verbose("%d:", insn_idx); verbose("%d:", insn_idx);


@ -1845,11 +1845,13 @@ static __latent_entropy struct task_struct *copy_process(
*/ */
recalc_sigpending(); recalc_sigpending();
if (signal_pending(current)) { if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR; retval = -ERESTARTNOINTR;
goto bad_fork_cancel_cgroup; goto bad_fork_cancel_cgroup;
} }
if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
retval = -ENOMEM;
goto bad_fork_cancel_cgroup;
}
if (likely(p->pid)) { if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@ -1907,6 +1909,8 @@ static __latent_entropy struct task_struct *copy_process(
return p; return p;
bad_fork_cancel_cgroup: bad_fork_cancel_cgroup:
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
cgroup_cancel_fork(p); cgroup_cancel_fork(p);
bad_fork_free_pid: bad_fork_free_pid:
cgroup_threadgroup_change_end(current); cgroup_threadgroup_change_end(current);


@ -277,7 +277,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
* if reparented. * if reparented.
*/ */
for (;;) { for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
if (pid_ns->nr_hashed == init_pids) if (pid_ns->nr_hashed == init_pids)
break; break;
schedule(); schedule();


@ -835,6 +835,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
return -EPROTONOSUPPORT; return -EPROTONOSUPPORT;
} }
} }
if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
if (defpvid >= VLAN_VID_MASK)
return -EINVAL;
}
#endif #endif
return 0; return 0;


@ -1132,10 +1132,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
lladdr = neigh->ha; lladdr = neigh->ha;
} }
if (new & NUD_CONNECTED)
neigh->confirmed = jiffies;
neigh->updated = jiffies;
/* If entry was valid and address is not changed, /* If entry was valid and address is not changed,
do not change entry state, if new one is STALE. do not change entry state, if new one is STALE.
*/ */
@ -1157,6 +1153,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
} }
} }
/* Update timestamps only once we know we will make a change to the
* neighbour entry. Otherwise we risk moving the locktime window with
* noop updates and ignoring relevant ARP updates.
*/
if (new != old || lladdr != neigh->ha) {
if (new & NUD_CONNECTED)
neigh->confirmed = jiffies;
neigh->updated = jiffies;
}
if (new != old) { if (new != old) {
neigh_del_timer(neigh); neigh_del_timer(neigh);
if (new & NUD_PROBE) if (new & NUD_PROBE)
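A small self-contained sketch of the idea in the comment above: timestamps are only touched when the update actually changes the entry, so no-op updates cannot keep sliding the locktime window. The struct and field names below are simplified stand-ins, not the kernel's neighbour structures.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define NUD_CONNECTED 0x2	/* illustrative flag value */

/* Hypothetical, trimmed-down neighbour entry. */
struct neigh_sketch {
	unsigned char ha[6];	/* hardware address */
	int state;		/* NUD_* state bits */
	time_t confirmed;
	time_t updated;
};

/* Only bump timestamps when the state or the lladdr actually changes. */
static void neigh_update_sketch(struct neigh_sketch *n,
				const unsigned char *lladdr, int new_state)
{
	bool changed = (new_state != n->state) ||
		       (memcmp(lladdr, n->ha, sizeof(n->ha)) != 0);

	if (changed) {
		if (new_state & NUD_CONNECTED)
			n->confirmed = time(NULL);
		n->updated = time(NULL);
		memcpy(n->ha, lladdr, sizeof(n->ha));
		n->state = new_state;
	}
	/* a no-op update leaves confirmed/updated untouched */
}

int main(void)
{
	unsigned char mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
	struct neigh_sketch n = { .state = 0 };

	neigh_update_sketch(&n, mac, NUD_CONNECTED);
	printf("updated at %ld\n", (long)n.updated);
	return 0;
}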


@ -1627,13 +1627,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq, 0, cb->nlh->nlmsg_seq, 0,
flags, flags,
ext_filter_mask); ext_filter_mask);
/* If we ran out of room on the first message,
* we're in trouble
*/
WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
if (err < 0) if (err < 0) {
goto out; if (likely(skb->len))
goto out;
goto out_err;
}
nl_dump_check_consistent(cb, nlmsg_hdr(skb)); nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont: cont:
@ -1641,10 +1641,12 @@ cont:
} }
} }
out: out:
err = skb->len;
out_err:
cb->args[1] = idx; cb->args[1] = idx;
cb->args[0] = h; cb->args[0] = h;
return skb->len; return err;
} }
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
@ -3453,8 +3455,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
err = br_dev->netdev_ops->ndo_bridge_getlink( err = br_dev->netdev_ops->ndo_bridge_getlink(
skb, portid, seq, dev, skb, portid, seq, dev,
filter_mask, NLM_F_MULTI); filter_mask, NLM_F_MULTI);
if (err < 0 && err != -EOPNOTSUPP) if (err < 0 && err != -EOPNOTSUPP) {
break; if (likely(skb->len))
break;
goto out_err;
}
} }
idx++; idx++;
} }
@ -3465,16 +3471,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
seq, dev, seq, dev,
filter_mask, filter_mask,
NLM_F_MULTI); NLM_F_MULTI);
if (err < 0 && err != -EOPNOTSUPP) if (err < 0 && err != -EOPNOTSUPP) {
break; if (likely(skb->len))
break;
goto out_err;
}
} }
idx++; idx++;
} }
} }
err = skb->len;
out_err:
rcu_read_unlock(); rcu_read_unlock();
cb->args[0] = idx; cb->args[0] = idx;
return skb->len; return err;
} }
static inline size_t bridge_nlmsg_size(void) static inline size_t bridge_nlmsg_size(void)
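The hunks above switch rtnl_dump_ifinfo() and rtnl_bridge_getlink() to the same convention for netlink dump callbacks: a fill error is only propagated when nothing has been written into the skb yet; otherwise the partial buffer is returned (as skb->len) and the dump resumes on the next callback. Below is a minimal sketch of that control flow, with hypothetical helpers standing in for the real rtnetlink fill functions.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a per-entry fill helper. */
static int fill_one(int idx, int *skb_len)
{
	if (idx == 3)
		return -EMSGSIZE;	/* pretend the 4th item no longer fits */
	*skb_len += 10;			/* pretend 10 bytes were emitted */
	return 0;
}

/* Dump loop following the "propagate only if the skb is empty" rule. */
static int dump_sketch(int start, int count, int *skb_len)
{
	int err = 0;
	int idx;

	for (idx = start; idx < count; idx++) {
		err = fill_one(idx, skb_len);
		if (err < 0) {
			if (*skb_len)		/* partial message: deliver it */
				goto out;
			goto out_err;		/* nothing written: report error */
		}
	}
out:
	err = *skb_len;
out_err:
	return err;
}

int main(void)
{
	int skb_len = 0;
	int ret = dump_sketch(0, 8, &skb_len);

	printf("first pass returned %d (skb_len=%d)\n", ret, skb_len);
	return 0;
}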


@ -139,10 +139,7 @@
#include <trace/events/sock.h> #include <trace/events/sock.h>
#ifdef CONFIG_INET
#include <net/tcp.h> #include <net/tcp.h>
#endif
#include <net/busy_poll.h> #include <net/busy_poll.h>
static DEFINE_MUTEX(proto_list_mutex); static DEFINE_MUTEX(proto_list_mutex);


@ -653,6 +653,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
unsigned char *arp_ptr; unsigned char *arp_ptr;
struct rtable *rt; struct rtable *rt;
unsigned char *sha; unsigned char *sha;
unsigned char *tha = NULL;
__be32 sip, tip; __be32 sip, tip;
u16 dev_type = dev->type; u16 dev_type = dev->type;
int addr_type; int addr_type;
@ -724,6 +725,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
break; break;
#endif #endif
default: default:
tha = arp_ptr;
arp_ptr += dev->addr_len; arp_ptr += dev->addr_len;
} }
memcpy(&tip, arp_ptr, 4); memcpy(&tip, arp_ptr, 4);
@ -842,8 +844,18 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
It is possible, that this option should be enabled for some It is possible, that this option should be enabled for some
devices (strip is candidate) devices (strip is candidate)
*/ */
is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && is_garp = tip == sip && addr_type == RTN_UNICAST;
addr_type == RTN_UNICAST;
/* Unsolicited ARP _replies_ also require target hwaddr to be
* the same as source.
*/
if (is_garp && arp->ar_op == htons(ARPOP_REPLY))
is_garp =
/* IPv4 over IEEE 1394 doesn't provide a target
* hardware address field in its ARP payload.
*/
tha &&
!memcmp(tha, sha, dev->addr_len);
if (!n && if (!n &&
((arp->ar_op == htons(ARPOP_REPLY) && ((arp->ar_op == htons(ARPOP_REPLY) &&
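A minimal sketch of the gratuitous-ARP test described above: a packet only counts as gratuitous when sender and target IP match and the target routes as unicast, and an unsolicited *reply* must additionally carry a target hardware address equal to the sender's (tha may be absent, e.g. for IPv4 over IEEE 1394, in which case the reply is not treated as gratuitous). Names below are simplified stand-ins, not the kernel helper.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ARPOP_REQUEST 1
#define ARPOP_REPLY   2

/* Hypothetical, pre-parsed view of one ARP packet. */
struct arp_view {
	uint16_t op;			/* ARPOP_* */
	uint32_t sip, tip;		/* sender/target IP */
	const uint8_t *sha;		/* sender hw address */
	const uint8_t *tha;		/* target hw address, NULL if absent */
	size_t addr_len;
	bool tip_is_unicast;		/* addr_type == RTN_UNICAST */
};

static bool is_garp_sketch(const struct arp_view *a)
{
	bool garp = (a->tip == a->sip) && a->tip_is_unicast;

	/* unsolicited replies must also carry tha == sha */
	if (garp && a->op == ARPOP_REPLY)
		garp = a->tha && !memcmp(a->tha, a->sha, a->addr_len);
	return garp;
}

int main(void)
{
	uint8_t mac[6] = { 2, 0, 0, 0, 0, 1 };
	struct arp_view reply = {
		.op = ARPOP_REPLY, .sip = 0x0a000001, .tip = 0x0a000001,
		.sha = mac, .tha = mac, .addr_len = 6,
		.tip_is_unicast = true,
	};

	printf("gratuitous reply: %d\n", is_garp_sketch(&reply));
	return 0;
}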


@ -763,7 +763,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int e = 0, s_e; unsigned int e = 0, s_e;
struct fib_table *tb; struct fib_table *tb;
struct hlist_head *head; struct hlist_head *head;
int dumped = 0; int dumped = 0, err;
if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@ -783,20 +783,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
if (dumped) if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) - memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0])); 2 * sizeof(cb->args[0]));
if (fib_table_dump(tb, skb, cb) < 0) err = fib_table_dump(tb, skb, cb);
goto out; if (err < 0) {
if (likely(skb->len))
goto out;
goto out_err;
}
dumped = 1; dumped = 1;
next: next:
e++; e++;
} }
} }
out: out:
err = skb->len;
out_err:
rcu_read_unlock(); rcu_read_unlock();
cb->args[1] = e; cb->args[1] = e;
cb->args[0] = h; cb->args[0] = h;
return skb->len; return err;
} }
/* Prepare and feed intra-kernel routing request. /* Prepare and feed intra-kernel routing request.


@ -1983,6 +1983,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
/* rcu_read_lock is hold by caller */ /* rcu_read_lock is hold by caller */
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
int err;
if (i < s_i) { if (i < s_i) {
i++; i++;
continue; continue;
@ -1993,17 +1995,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
continue; continue;
} }
if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid, err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, cb->nlh->nlmsg_seq, RTM_NEWROUTE,
RTM_NEWROUTE, tb->tb_id, fa->fa_type,
tb->tb_id, xkey, KEYLENGTH - fa->fa_slen,
fa->fa_type, fa->fa_tos, fa->fa_info, NLM_F_MULTI);
xkey, if (err < 0) {
KEYLENGTH - fa->fa_slen,
fa->fa_tos,
fa->fa_info, NLM_F_MULTI) < 0) {
cb->args[4] = i; cb->args[4] = i;
return -1; return err;
} }
i++; i++;
} }
@ -2025,10 +2024,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
t_key key = cb->args[3]; t_key key = cb->args[3];
while ((l = leaf_walk_rcu(&tp, key)) != NULL) { while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { int err;
err = fn_trie_dump_leaf(l, tb, skb, cb);
if (err < 0) {
cb->args[3] = key; cb->args[3] = key;
cb->args[2] = count; cb->args[2] = count;
return -1; return err;
} }
++count; ++count;


@ -1980,6 +1980,20 @@ int ip_mr_input(struct sk_buff *skb)
struct net *net = dev_net(skb->dev); struct net *net = dev_net(skb->dev);
int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
struct mr_table *mrt; struct mr_table *mrt;
struct net_device *dev;
/* skb->dev passed in is the loX master dev for vrfs.
* As there are no vifs associated with loopback devices,
* get the proper interface that does have a vif associated with it.
*/
dev = skb->dev;
if (netif_is_l3_master(skb->dev)) {
dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
if (!dev) {
kfree_skb(skb);
return -ENODEV;
}
}
/* Packet is looped back after forward, it should not be /* Packet is looped back after forward, it should not be
* forwarded second time, but still can be delivered locally. * forwarded second time, but still can be delivered locally.
@ -2017,7 +2031,7 @@ int ip_mr_input(struct sk_buff *skb)
/* already under rcu_read_lock() */ /* already under rcu_read_lock() */
cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
if (!cache) { if (!cache) {
int vif = ipmr_find_vif(mrt, skb->dev); int vif = ipmr_find_vif(mrt, dev);
if (vif >= 0) if (vif >= 0)
cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
@ -2037,7 +2051,7 @@ int ip_mr_input(struct sk_buff *skb)
} }
read_lock(&mrt_lock); read_lock(&mrt_lock);
vif = ipmr_find_vif(mrt, skb->dev); vif = ipmr_find_vif(mrt, dev);
if (vif >= 0) { if (vif >= 0) {
int err2 = ipmr_cache_unresolved(mrt, vif, skb); int err2 = ipmr_cache_unresolved(mrt, vif, skb);
read_unlock(&mrt_lock); read_unlock(&mrt_lock);
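The comment above explains the device fixup: for VRF-enslaved traffic skb->dev is the L3 master device, which never has a vif, so the code resolves the real ingress device from the control block before doing the vif lookups, and drops the packet if that device no longer exists. A userspace sketch of that selection step follows; the structures and lookup table are hypothetical stand-ins.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical, trimmed-down device and skb representations. */
struct net_device_sketch {
	int ifindex;
	bool l3_master;		/* would be netif_is_l3_master() */
};

struct skb_sketch {
	struct net_device_sketch *dev;	/* device the packet arrived on */
	int iif;			/* real ingress ifindex (IPCB(skb)->iif) */
};

/* Pretend per-net device table. */
static struct net_device_sketch devs[] = {
	{ .ifindex = 1, .l3_master = false },	/* eth0 */
	{ .ifindex = 2, .l3_master = true },	/* vrf0 */
};

static struct net_device_sketch *dev_by_index(int ifindex)
{
	for (size_t i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		if (devs[i].ifindex == ifindex)
			return &devs[i];
	return NULL;
}

/* Pick the device to use for the vif lookup. */
static struct net_device_sketch *input_dev_for_vif(const struct skb_sketch *skb,
						   int *err)
{
	struct net_device_sketch *dev = skb->dev;

	*err = 0;
	if (dev->l3_master) {
		dev = dev_by_index(skb->iif);	/* real ingress interface */
		if (!dev)
			*err = -ENODEV;		/* caller frees the skb */
	}
	return dev;
}

int main(void)
{
	struct skb_sketch skb = { .dev = &devs[1], .iif = 1 };
	int err;
	struct net_device_sketch *dev = input_dev_for_vif(&skb, &err);

	printf("vif lookup dev ifindex: %d (err=%d)\n",
	       dev ? dev->ifindex : -1, err);
	return 0;
}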


@ -3190,7 +3190,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
int delta; int delta;
/* Non-retransmitted hole got filled? That's reordering */ /* Non-retransmitted hole got filled? That's reordering */
if (reord < prior_fackets) if (reord < prior_fackets && reord <= tp->fackets_out)
tcp_update_reordering(sk, tp->fackets_out - reord, 0); tcp_update_reordering(sk, tp->fackets_out - reord, 0);
delta = tcp_is_fack(tp) ? pkts_acked : delta = tcp_is_fack(tp) ? pkts_acked :


@ -1727,7 +1727,7 @@ static void udp_v4_rehash(struct sock *sk)
udp_lib_rehash(sk, new_hash); udp_lib_rehash(sk, new_hash);
} }
int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{ {
int rc; int rc;
@ -1772,7 +1772,7 @@ EXPORT_SYMBOL(udp_encap_enable);
* Note that in the success and error cases, the skb is assumed to * Note that in the success and error cases, the skb is assumed to
* have either been requeued or freed. * have either been requeued or freed.
*/ */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{ {
struct udp_sock *up = udp_sk(sk); struct udp_sock *up = udp_sk(sk);
int is_udplite = IS_UDPLITE(sk); int is_udplite = IS_UDPLITE(sk);


@ -25,7 +25,6 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len); int flags, int *addr_len);
int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags); int flags);
int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
void udp_destroy_sock(struct sock *sk); void udp_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS


@ -63,7 +63,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
const struct net_offload *ops; const struct net_offload *ops;
int proto; int proto;
struct frag_hdr *fptr; struct frag_hdr *fptr;
unsigned int unfrag_ip6hlen;
unsigned int payload_len; unsigned int payload_len;
u8 *prevhdr; u8 *prevhdr;
int offset = 0; int offset = 0;
@ -116,8 +115,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
skb->network_header = (u8 *)ipv6h - skb->head; skb->network_header = (u8 *)ipv6h - skb->head;
if (udpfrag) { if (udpfrag) {
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); int err = ip6_find_1stfragopt(skb, &prevhdr);
fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); if (err < 0)
return ERR_PTR(err);
fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
fptr->frag_off = htons(offset); fptr->frag_off = htons(offset);
if (skb->next) if (skb->next)
fptr->frag_off |= htons(IP6_MF); fptr->frag_off |= htons(IP6_MF);


@ -597,7 +597,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int ptr, offset = 0, err = 0; int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0; u8 *prevhdr, nexthdr = 0;
hlen = ip6_find_1stfragopt(skb, &prevhdr); err = ip6_find_1stfragopt(skb, &prevhdr);
if (err < 0)
goto fail;
hlen = err;
nexthdr = *prevhdr; nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb); mtu = ip6_skb_dst_mtu(skb);


@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{ {
u16 offset = sizeof(struct ipv6hdr); u16 offset = sizeof(struct ipv6hdr);
struct ipv6_opt_hdr *exthdr =
(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
unsigned int packet_len = skb_tail_pointer(skb) - unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb); skb_network_header(skb);
int found_rhdr = 0; int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr; *nexthdr = &ipv6_hdr(skb)->nexthdr;
while (offset + 1 <= packet_len) { while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
switch (**nexthdr) { switch (**nexthdr) {
@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
return offset; return offset;
} }
offset += ipv6_optlen(exthdr); if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
*nexthdr = &exthdr->nexthdr; return -EINVAL;
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset); offset);
offset += ipv6_optlen(exthdr);
*nexthdr = &exthdr->nexthdr;
} }
return offset; return -EINVAL;
} }
EXPORT_SYMBOL(ip6_find_1stfragopt); EXPORT_SYMBOL(ip6_find_1stfragopt);
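The rewritten loop above only reads an extension header after checking that the whole header fits inside the packet, and it fails with -EINVAL instead of returning an offset past the buffer. Below is a self-contained sketch of that bounds-checked walk over a plain byte buffer; the header layout and termination rule are simplified, not the real IPv6 parsing.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified option header: next-header byte + length in 8-byte units. */
struct opt_hdr {
	uint8_t nexthdr;
	uint8_t hdrlen;
};

#define OPTLEN(h) (((h)->hdrlen + 1) * 8)
#define NEXTHDR_NONE 59		/* terminates the chain in this sketch */

/* Walk the chain from 'offset'; return the offset where NEXTHDR_NONE is
 * reached, or -EINVAL if the chain is truncated or runs off the end. */
static int find_terminator(const uint8_t *pkt, size_t pkt_len, size_t offset,
			   uint8_t first_nexthdr)
{
	uint8_t nexthdr = first_nexthdr;

	while (offset <= pkt_len) {
		const struct opt_hdr *h;

		if (nexthdr == NEXTHDR_NONE)
			return (int)offset;

		/* never dereference a header that doesn't fully fit */
		if (offset + sizeof(struct opt_hdr) > pkt_len)
			return -EINVAL;

		h = (const struct opt_hdr *)(pkt + offset);
		offset += OPTLEN(h);
		nexthdr = h->nexthdr;
	}
	return -EINVAL;
}

int main(void)
{
	/* one 8-byte option header chaining to NEXTHDR_NONE, then padding */
	uint8_t pkt[16] = { [0] = NEXTHDR_NONE, [1] = 0 };

	printf("ok chain: %d\n", find_terminator(pkt, sizeof(pkt), 0, 0));
	printf("truncated: %d\n", find_terminator(pkt, 4, 0, 0));
	return 0;
}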


@ -527,7 +527,7 @@ out:
return; return;
} }
int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{ {
int rc; int rc;
@ -570,7 +570,7 @@ void udpv6_encap_enable(void)
} }
EXPORT_SYMBOL(udpv6_encap_enable); EXPORT_SYMBOL(udpv6_encap_enable);
int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{ {
struct udp_sock *up = udp_sk(sk); struct udp_sock *up = udp_sk(sk);
int is_udplite = IS_UDPLITE(sk); int is_udplite = IS_UDPLITE(sk);


@ -26,7 +26,6 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len); int flags, int *addr_len);
int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
void udpv6_destroy_sock(struct sock *sk); void udpv6_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS


@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
u8 frag_hdr_sz = sizeof(struct frag_hdr); u8 frag_hdr_sz = sizeof(struct frag_hdr);
__wsum csum; __wsum csum;
int tnl_hlen; int tnl_hlen;
int err;
mss = skb_shinfo(skb)->gso_size; mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss)) if (unlikely(skb->len <= mss))
@ -90,7 +91,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
/* Find the unfragmentable header and shift it left by frag_hdr_sz /* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header. * bytes to insert fragment header.
*/ */
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); err = ip6_find_1stfragopt(skb, &prevhdr);
if (err < 0)
return ERR_PTR(err);
unfrag_ip6hlen = err;
nexthdr = *prevhdr; nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT; *prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +


@ -677,6 +677,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
newnp = inet6_sk(newsk); newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo)); memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
rcu_read_lock(); rcu_read_lock();
opt = rcu_dereference(np->opt); opt = rcu_dereference(np->opt);


@ -8,6 +8,10 @@ config SMC
The Linux implementation of the SMC-R solution is designed as The Linux implementation of the SMC-R solution is designed as
a separate socket family SMC. a separate socket family SMC.
Warning: SMC will expose all memory for remote reads and writes
once a connection is established. Don't enable this option except
in a tightly controlled lab environment.
Select this option if you want to run SMC socket applications Select this option if you want to run SMC socket applications
config SMC_DIAG config SMC_DIAG


@ -204,7 +204,7 @@ int smc_clc_send_confirm(struct smc_sock *smc)
memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
hton24(cclc.qpn, link->roce_qp->qp_num); hton24(cclc.qpn, link->roce_qp->qp_num);
cclc.rmb_rkey = cclc.rmb_rkey =
htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]);
cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */
cclc.rmbe_alert_token = htonl(conn->alert_token_local); cclc.rmbe_alert_token = htonl(conn->alert_token_local);
cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
@ -256,7 +256,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
hton24(aclc.qpn, link->roce_qp->qp_num); hton24(aclc.qpn, link->roce_qp->qp_num);
aclc.rmb_rkey = aclc.rmb_rkey =
htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]);
aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */
aclc.rmbe_alert_token = htonl(conn->alert_token_local); aclc.rmbe_alert_token = htonl(conn->alert_token_local);
aclc.qp_mtu = link->path_mtu; aclc.qp_mtu = link->path_mtu;


@ -613,19 +613,8 @@ int smc_rmb_create(struct smc_sock *smc)
rmb_desc = NULL; rmb_desc = NULL;
continue; /* if mapping failed, try smaller one */ continue; /* if mapping failed, try smaller one */
} }
rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, rmb_desc->rkey[SMC_SINGLE_LINK] =
IB_ACCESS_REMOTE_WRITE | lgr->lnk[SMC_SINGLE_LINK].roce_pd->unsafe_global_rkey;
IB_ACCESS_LOCAL_WRITE,
&rmb_desc->mr_rx[SMC_SINGLE_LINK]);
if (rc) {
smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
tmp_bufsize, rmb_desc,
DMA_FROM_DEVICE);
kfree(rmb_desc->cpu_addr);
kfree(rmb_desc);
rmb_desc = NULL;
continue;
}
rmb_desc->used = 1; rmb_desc->used = 1;
write_lock_bh(&lgr->rmbs_lock); write_lock_bh(&lgr->rmbs_lock);
list_add(&rmb_desc->list, list_add(&rmb_desc->list,
@ -668,6 +657,7 @@ int smc_rmb_rtoken_handling(struct smc_connection *conn,
for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
(lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
test_bit(i, lgr->rtokens_used_mask)) { test_bit(i, lgr->rtokens_used_mask)) {
conn->rtoken_idx = i; conn->rtoken_idx = i;
return 0; return 0;


@ -93,7 +93,7 @@ struct smc_buf_desc {
u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; u64 dma_addr[SMC_LINKS_PER_LGR_MAX];
/* mapped address of buffer */ /* mapped address of buffer */
void *cpu_addr; /* virtual address of buffer */ void *cpu_addr; /* virtual address of buffer */
struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; u32 rkey[SMC_LINKS_PER_LGR_MAX];
/* for rmb only: /* for rmb only:
* rkey provided to peer * rkey provided to peer
*/ */


@ -37,24 +37,6 @@ u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system
* identifier * identifier
*/ */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
struct ib_mr **mr)
{
int rc;
if (*mr)
return 0; /* already done */
/* obtain unique key -
* next invocation of get_dma_mr returns a different key!
*/
*mr = pd->device->get_dma_mr(pd, access_flags);
rc = PTR_ERR_OR_ZERO(*mr);
if (IS_ERR(*mr))
*mr = NULL;
return rc;
}
static int smc_ib_modify_qp_init(struct smc_link *lnk) static int smc_ib_modify_qp_init(struct smc_link *lnk)
{ {
struct ib_qp_attr qp_attr; struct ib_qp_attr qp_attr;
@ -210,7 +192,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
{ {
int rc; int rc;
lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev,
IB_PD_UNSAFE_GLOBAL_RKEY);
rc = PTR_ERR_OR_ZERO(lnk->roce_pd); rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
if (IS_ERR(lnk->roce_pd)) if (IS_ERR(lnk->roce_pd))
lnk->roce_pd = NULL; lnk->roce_pd = NULL;


@ -61,8 +61,6 @@ void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
int smc_ib_create_protection_domain(struct smc_link *lnk); int smc_ib_create_protection_domain(struct smc_link *lnk);
void smc_ib_destroy_queue_pair(struct smc_link *lnk); void smc_ib_destroy_queue_pair(struct smc_link *lnk);
int smc_ib_create_queue_pair(struct smc_link *lnk); int smc_ib_create_queue_pair(struct smc_link *lnk);
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
struct ib_mr **mr);
int smc_ib_ready_link(struct smc_link *lnk); int smc_ib_ready_link(struct smc_link *lnk);
int smc_ib_modify_qp_rts(struct smc_link *lnk); int smc_ib_modify_qp_rts(struct smc_link *lnk);
int smc_ib_modify_qp_reset(struct smc_link *lnk); int smc_ib_modify_qp_reset(struct smc_link *lnk);


@ -1791,32 +1791,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
static int __init x25_init(void) static int __init x25_init(void)
{ {
int rc = proto_register(&x25_proto, 0); int rc;
if (rc != 0) rc = proto_register(&x25_proto, 0);
if (rc)
goto out; goto out;
rc = sock_register(&x25_family_ops); rc = sock_register(&x25_family_ops);
if (rc != 0) if (rc)
goto out_proto; goto out_proto;
dev_add_pack(&x25_packet_type); dev_add_pack(&x25_packet_type);
rc = register_netdevice_notifier(&x25_dev_notifier); rc = register_netdevice_notifier(&x25_dev_notifier);
if (rc != 0) if (rc)
goto out_sock; goto out_sock;
rc = x25_register_sysctl();
if (rc)
goto out_dev;
rc = x25_proc_init();
if (rc)
goto out_sysctl;
pr_info("Linux Version 0.2\n"); pr_info("Linux Version 0.2\n");
x25_register_sysctl();
rc = x25_proc_init();
if (rc != 0)
goto out_dev;
out: out:
return rc; return rc;
out_sysctl:
x25_unregister_sysctl();
out_dev: out_dev:
unregister_netdevice_notifier(&x25_dev_notifier); unregister_netdevice_notifier(&x25_dev_notifier);
out_sock: out_sock:
dev_remove_pack(&x25_packet_type);
sock_unregister(AF_X25); sock_unregister(AF_X25);
out_proto: out_proto:
proto_unregister(&x25_proto); proto_unregister(&x25_proto);


@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
{ }, { },
}; };
void __init x25_register_sysctl(void) int __init x25_register_sysctl(void)
{ {
x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
if (!x25_table_header)
return -ENOMEM;
return 0;
} }
void x25_unregister_sysctl(void) void x25_unregister_sysctl(void)


@ -8,6 +8,29 @@
# #
# ========================================================================== # ==========================================================================
PHONY := __headers
__headers:
include scripts/Kbuild.include
srcdir := $(srctree)/$(obj)
subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.))
# caller may set destination dir (when installing to asm/)
_dst := $(if $(dst),$(dst),$(obj))
# Recursion
__headers: $(subdirs)
.PHONY: $(subdirs)
$(subdirs):
$(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
# Skip header install/check for include/uapi and arch/$(hdr-arch)/include/uapi.
# We have only sub-directories there.
skip-inst := $(if $(filter %/uapi,$(obj)),1)
ifeq ($(skip-inst),)
# generated header directory # generated header directory
gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj))) gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
@ -15,21 +38,14 @@ gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
kbuild-file := $(srctree)/$(obj)/Kbuild kbuild-file := $(srctree)/$(obj)/Kbuild
-include $(kbuild-file) -include $(kbuild-file)
# called may set destination dir (when installing to asm/)
_dst := $(if $(dst),$(dst),$(obj))
old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild
ifneq ($(wildcard $(old-kbuild-file)),) ifneq ($(wildcard $(old-kbuild-file)),)
include $(old-kbuild-file) include $(old-kbuild-file)
endif endif
include scripts/Kbuild.include
installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst)) installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst))
srcdir := $(srctree)/$(obj)
gendir := $(objtree)/$(gen) gendir := $(objtree)/$(gen)
subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.))
header-files := $(notdir $(wildcard $(srcdir)/*.h)) header-files := $(notdir $(wildcard $(srcdir)/*.h))
header-files += $(notdir $(wildcard $(srcdir)/*.agh)) header-files += $(notdir $(wildcard $(srcdir)/*.agh))
header-files := $(filter-out $(no-export-headers), $(header-files)) header-files := $(filter-out $(no-export-headers), $(header-files))
@ -88,11 +104,9 @@ quiet_cmd_check = CHECK $(printdir) ($(words $(all-files)) files)
$(PERL) $< $(INSTALL_HDR_PATH)/include $(SRCARCH); \ $(PERL) $< $(INSTALL_HDR_PATH)/include $(SRCARCH); \
touch $@ touch $@
PHONY += __headersinst __headerscheck
ifndef HDRCHECK ifndef HDRCHECK
# Rules for installing headers # Rules for installing headers
__headersinst: $(subdirs) $(install-file) __headers: $(install-file)
@: @:
targets += $(install-file) targets += $(install-file)
@ -104,7 +118,7 @@ $(install-file): scripts/headers_install.sh \
$(call if_changed,install) $(call if_changed,install)
else else
__headerscheck: $(subdirs) $(check-file) __headers: $(check-file)
@: @:
targets += $(check-file) targets += $(check-file)
@ -113,11 +127,6 @@ $(check-file): scripts/headers_check.pl $(output-files) FORCE
endif endif
# Recursion
.PHONY: $(subdirs)
$(subdirs):
$(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
targets := $(wildcard $(sort $(targets))) targets := $(wildcard $(sort $(targets)))
cmd_files := $(wildcard \ cmd_files := $(wildcard \
$(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd)) $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
@ -126,6 +135,8 @@ ifneq ($(cmd_files),)
include $(cmd_files) include $(cmd_files)
endif endif
endif # skip-inst
.PHONY: $(PHONY) .PHONY: $(PHONY)
PHONY += FORCE PHONY += FORCE
FORCE: ; FORCE: ;


@ -3,4 +3,20 @@
#include <asm-generic/int-ll64.h> #include <asm-generic/int-ll64.h>
/* copied from linux:include/uapi/linux/types.h */
#define __bitwise
typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
typedef __u32 __bitwise __le32;
typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
#define __aligned_u64 __u64 __attribute__((aligned(8)))
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
#endif /* _UAPI_LINUX_TYPES_H */ #endif /* _UAPI_LINUX_TYPES_H */


@ -5,6 +5,7 @@
* License as published by the Free Software Foundation. * License as published by the Free Software Foundation.
*/ */
#include <stddef.h> #include <stddef.h>
#include <string.h>
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/if_ether.h> #include <linux/if_ether.h>
#include <linux/if_packet.h> #include <linux/if_packet.h>