Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

One conflict in the BPF samples Makefile, some fixes in 'net' whilst
we were converting over to Makefile.target rules in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 14684b9301
David S. Miller <davem@davemloft.net>
2019-11-09 11:04:37 -08:00

260 changed files with 2206 additions and 943 deletions

View file

@@ -436,6 +436,10 @@ by the driver:
   encryption.
 * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
   but did not arrive in the expected order.
+* ``tx_tls_skip_no_sync_data`` - number of TX packets which were part of
+  a TLS stream and arrived out-of-order, but skipped the HW offload routine
+  and went to the regular transmit flow as they were retransmissions of the
+  connection handshake.
 * ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
   a TLS stream dropped, because they arrived out of order and associated
   record could not be found.

View file

@@ -3060,6 +3060,7 @@ M: Daniel Borkmann <daniel@iogearbox.net>
 R: Martin KaFai Lau <kafai@fb.com>
 R: Song Liu <songliubraving@fb.com>
 R: Yonghong Song <yhs@fb.com>
+R: Andrii Nakryiko <andriin@fb.com>
 L: netdev@vger.kernel.org
 L: bpf@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -10533,8 +10534,12 @@ F: mm/memblock.c
 F: Documentation/core-api/boot-time-mm.rst

 MEMORY MANAGEMENT
+M: Andrew Morton <akpm@linux-foundation.org>
 L: linux-mm@kvack.org
 W: http://www.linux-mm.org
+T: quilt https://ozlabs.org/~akpm/mmotm/
+T: quilt https://ozlabs.org/~akpm/mmots/
+T: git git://github.com/hnaz/linux-mm.git
 S: Maintained
 F: include/linux/mm.h
 F: include/linux/gfp.h
@@ -18048,6 +18053,7 @@ F: Documentation/vm/zsmalloc.rst
 ZSWAP COMPRESSED SWAP CACHING
 M: Seth Jennings <sjenning@redhat.com>
 M: Dan Streetman <ddstreet@ieee.org>
+M: Vitaly Wool <vitaly.wool@konsulko.com>
 L: linux-mm@kvack.org
 S: Maintained
 F: mm/zswap.c

View file

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*

View file

@@ -283,23 +283,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
     set_pte(ptep, pte);
 }

-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t pte_a, pte_t pte_b)
-{
-    pteval_t lhs, rhs;
-
-    lhs = pte_val(pte_a);
-    rhs = pte_val(pte_b);
-
-    if (pte_present(pte_a))
-        lhs &= ~PTE_RDONLY;
-
-    if (pte_present(pte_b))
-        rhs &= ~PTE_RDONLY;
-
-    return (lhs == rhs);
-}
-
 /*
  * Huge pte definitions.
  */

View file

@@ -91,6 +91,7 @@
 static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
 {
+    addr &= 0xf0000000; /* align addr to start of segment */
     barrier();  /* make sure thread.kuap is updated before playing with SRs */
     while (addr < end) {
         mtsrin(sr, addr);

View file

@@ -175,4 +175,7 @@ do { \
     ARCH_DLINFO_CACHE_GEOMETRY; \
 } while (0)

+/* Relocate the kernel image to @final_address */
+void relocate(unsigned long final_address);
+
 #endif /* _ASM_POWERPC_ELF_H */

View file

@@ -3249,7 +3249,20 @@ static void setup_secure_guest(unsigned long kbase, unsigned long fdt)
     /* Switch to secure mode. */
     prom_printf("Switching to secure mode.\n");

+    /*
+     * The ultravisor will do an integrity check of the kernel image but we
+     * relocated it so the check will fail. Restore the original image by
+     * relocating it back to the kernel virtual base address.
+     */
+    if (IS_ENABLED(CONFIG_RELOCATABLE))
+        relocate(KERNELBASE);
+
     ret = enter_secure_mode(kbase, fdt);

+    /* Relocate the kernel again. */
+    if (IS_ENABLED(CONFIG_RELOCATABLE))
+        relocate(kbase);
+
     if (ret != U_SUCCESS) {
         prom_printf("Returned %d from switching to secure mode.\n", ret);
         prom_rtas_os_term("Switch to secure mode failed.\n");

View file

@@ -26,7 +26,8 @@ _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
 logo_linux_clut224 btext_prepare_BAT
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
+__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC.
+relocate"

 NM="$1"
 OBJ="$2"

View file

@@ -1141,6 +1141,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
         goto out_addrs;
     }

+    /*
+     * If we have seen a tail call, we need a second pass.
+     * This is because bpf_jit_emit_common_epilogue() is called
+     * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
+     */
+    if (cgctx.seen & SEEN_TAILCALL) {
+        cgctx.idx = 0;
+        if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
+            fp = org_fp;
+            goto out_addrs;
+        }
+    }
+
     /*
      * Pretend to build prologue, given the features we've seen.  This will
      * update ctgtx.idx as it pretends to output instructions, then we can

View file

@@ -42,7 +42,7 @@ void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
 {
     struct pci_dn *pdn = pci_get_pdn(pdev);

-    if (eeh_has_flag(EEH_FORCE_DISABLED))
+    if (!pdn || eeh_has_flag(EEH_FORCE_DISABLED))
         return;

     dev_dbg(&pdev->dev, "EEH: Setting up device\n");

View file

@@ -146,20 +146,25 @@ static int pnv_smp_cpu_disable(void)
     return 0;
 }

+static void pnv_flush_interrupts(void)
+{
+    if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+        if (xive_enabled())
+            xive_flush_interrupt();
+        else
+            icp_opal_flush_interrupt();
+    } else {
+        icp_native_flush_interrupt();
+    }
+}
+
 static void pnv_smp_cpu_kill_self(void)
 {
+    unsigned long srr1, unexpected_mask, wmask;
     unsigned int cpu;
-    unsigned long srr1, wmask;
     u64 lpcr_val;

     /* Standard hot unplug procedure */
-    /*
-     * This hard disables local interurpts, ensuring we have no lazy
-     * irqs pending.
-     */
-    WARN_ON(irqs_disabled());
-    hard_irq_disable();
-    WARN_ON(lazy_irq_pending());

     idle_task_exit();
     current->active_mm = NULL; /* for sanity */
@@ -172,6 +177,27 @@ static void pnv_smp_cpu_kill_self(void)
     if (cpu_has_feature(CPU_FTR_ARCH_207S))
         wmask = SRR1_WAKEMASK_P8;

+    /*
+     * This turns the irq soft-disabled state we're called with, into a
+     * hard-disabled state with pending irq_happened interrupts cleared.
+     *
+     * PACA_IRQ_DEC   - Decrementer should be ignored.
+     * PACA_IRQ_HMI   - Can be ignored, processing is done in real mode.
+     * PACA_IRQ_DBELL, EE, PMI - Unexpected.
+     */
+    hard_irq_disable();
+    if (generic_check_cpu_restart(cpu))
+        goto out;
+
+    unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS);
+    if (local_paca->irq_happened & unexpected_mask) {
+        if (local_paca->irq_happened & PACA_IRQ_EE)
+            pnv_flush_interrupts();
+        DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n",
+                cpu, local_paca->irq_happened);
+    }
+    local_paca->irq_happened = PACA_IRQ_HARD_DIS;
+
     /*
      * We don't want to take decrementer interrupts while we are
      * offline, so clear LPCR:PECE1. We keep PECE2 (and
@@ -197,6 +223,7 @@ static void pnv_smp_cpu_kill_self(void)

         srr1 = pnv_cpu_offline(cpu);

+        WARN_ON_ONCE(!irqs_disabled());
         WARN_ON(lazy_irq_pending());

         /*
@@ -212,13 +239,7 @@ static void pnv_smp_cpu_kill_self(void)
          */
         if (((srr1 & wmask) == SRR1_WAKEEE) ||
             ((srr1 & wmask) == SRR1_WAKEHVI)) {
-            if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-                if (xive_enabled())
-                    xive_flush_interrupt();
-                else
-                    icp_opal_flush_interrupt();
-            } else
-                icp_native_flush_interrupt();
+            pnv_flush_interrupts();
         } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
             unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
             asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -266,7 +287,7 @@ static void pnv_smp_cpu_kill_self(void)
      */
     lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
     pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
-
+out:
     DBG("CPU%d coming online...\n", cpu);
 }

View file

@@ -35,6 +35,7 @@ struct unwind_state {
     struct task_struct *task;
     struct pt_regs *regs;
     unsigned long sp, ip;
+    bool reuse_sp;
     int graph_idx;
     bool reliable;
     bool error;

View file

@@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
 static ssize_t show_idle_time(struct device *dev,
             struct device_attribute *attr, char *buf)
 {
+    unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
     struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
-    unsigned long long now, idle_time, idle_enter, idle_exit;
     unsigned int seq;

     do {
-        now = get_tod_clock();
         seq = read_seqcount_begin(&idle->seqcount);
         idle_time = READ_ONCE(idle->idle_time);
         idle_enter = READ_ONCE(idle->clock_idle_enter);
         idle_exit = READ_ONCE(idle->clock_idle_exit);
     } while (read_seqcount_retry(&idle->seqcount, seq));
-    idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+    in_idle = 0;
+    now = get_tod_clock();
+    if (idle_enter) {
+        if (idle_exit) {
+            in_idle = idle_exit - idle_enter;
+        } else if (now > idle_enter) {
+            in_idle = now - idle_enter;
+        }
+    }
+    idle_time += in_idle;
     return sprintf(buf, "%llu\n", idle_time >> 12);
 }
 DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 u64 arch_cpu_idle_time(int cpu)
 {
     struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
-    unsigned long long now, idle_enter, idle_exit;
+    unsigned long long now, idle_enter, idle_exit, in_idle;
     unsigned int seq;

     do {
-        now = get_tod_clock();
         seq = read_seqcount_begin(&idle->seqcount);
         idle_enter = READ_ONCE(idle->clock_idle_enter);
         idle_exit = READ_ONCE(idle->clock_idle_exit);
     } while (read_seqcount_retry(&idle->seqcount, seq));
+    in_idle = 0;
+    now = get_tod_clock();
+    if (idle_enter) {
+        if (idle_exit) {
+            in_idle = idle_exit - idle_enter;
+        } else if (now > idle_enter) {
+            in_idle = now - idle_enter;
+        }
+    }

-    return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
+    return cputime_to_nsecs(in_idle);
 }

 void arch_cpu_idle_enter(void)
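Why the get_tod_clock() call moved: sampled before read_seqcount_begin(), `now` could predate a concurrent update of `clock_idle_enter`, and the unsigned subtraction `now - idle_enter` then wrapped to a huge bogus idle time. A minimal sketch of the safe ordering (the helper name is illustrative, not part of the patch):

u64 snapshot_idle_clocks(struct s390_idle_data *idle)
{
    unsigned long long enter, exit, now;
    unsigned int seq;

    do {    /* retry until no writer interleaved with us */
        seq = read_seqcount_begin(&idle->seqcount);
        enter = READ_ONCE(idle->clock_idle_enter);
        exit = READ_ONCE(idle->clock_idle_exit);
    } while (read_seqcount_retry(&idle->seqcount, seq));

    now = get_tod_clock();          /* taken after the stable snapshot */
    if (!enter)
        return 0;                   /* CPU has not idled yet */
    if (exit)
        return exit - enter;        /* completed idle period */
    return now > enter ? now - enter : 0;   /* still idle; guard underflow */
}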

View file

@@ -46,10 +46,15 @@ bool unwind_next_frame(struct unwind_state *state)
     regs = state->regs;
     if (unlikely(regs)) {
-        sp = READ_ONCE_NOCHECK(regs->gprs[15]);
-        if (unlikely(outside_of_stack(state, sp))) {
-            if (!update_stack_info(state, sp))
-                goto out_err;
+        if (state->reuse_sp) {
+            sp = state->sp;
+            state->reuse_sp = false;
+        } else {
+            sp = READ_ONCE_NOCHECK(regs->gprs[15]);
+            if (unlikely(outside_of_stack(state, sp))) {
+                if (!update_stack_info(state, sp))
+                    goto out_err;
+            }
         }
         sf = (struct stack_frame *) sp;
         ip = READ_ONCE_NOCHECK(sf->gprs[8]);
@@ -107,9 +112,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 {
     struct stack_info *info = &state->stack_info;
     unsigned long *mask = &state->stack_mask;
+    bool reliable, reuse_sp;
     struct stack_frame *sf;
     unsigned long ip;
-    bool reliable;

     memset(state, 0, sizeof(*state));
     state->task = task;
@@ -134,10 +139,12 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
     if (regs) {
         ip = READ_ONCE_NOCHECK(regs->psw.addr);
         reliable = true;
+        reuse_sp = true;
     } else {
         sf = (struct stack_frame *) sp;
         ip = READ_ONCE_NOCHECK(sf->gprs[8]);
         reliable = false;
+        reuse_sp = false;
     }

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -151,5 +158,6 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
     state->sp = sp;
     state->ip = ip;
     state->reliable = reliable;
+    state->reuse_sp = reuse_sp;
 }
 EXPORT_SYMBOL_GPL(__unwind_start);

View file

@@ -298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
     }

     if (write) {
-        len = *lenp;
-        if (copy_from_user(buf, buffer,
-                   len > sizeof(buf) ? sizeof(buf) : len))
+        len = min(*lenp, sizeof(buf));
+        if (copy_from_user(buf, buffer, len))
             return -EFAULT;
-        buf[sizeof(buf) - 1] = '\0';
+        buf[len - 1] = '\0';
         cmm_skip_blanks(buf, &p);
         nr = simple_strtoul(p, &p, 0);
         cmm_skip_blanks(p, &p);
         seconds = simple_strtoul(p, &p, 0);
         cmm_set_timeout(nr, seconds);
+        *ppos += *lenp;
     } else {
         len = sprintf(buf, "%ld %ld\n",
                   cmm_timeout_pages, cmm_timeout_seconds);
@@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
         len = *lenp;
         if (copy_to_user(buffer, buf, len))
             return -EFAULT;
+        *lenp = len;
+        *ppos += len;
     }
-    *lenp = len;
-    *ppos += len;
     return 0;
 }
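The old write path clamped the length only inside copy_from_user() and then wrote the terminator at buf[sizeof(buf) - 1], leaving the bytes between the copied data and the terminator uninitialized whenever *lenp was shorter than the buffer. A minimal sketch of the bounded-copy idiom the fix adopts (handler and buffer names are illustrative):

#include <linux/minmax.h>
#include <linux/uaccess.h>

/* Illustrative sysctl-style write path: clamp once, then terminate
 * inside the region that was actually copied. */
static int demo_write(const char __user *buffer, size_t *lenp)
{
    char buf[64];
    size_t len = min(*lenp, sizeof(buf));

    if (!len)
        return -EINVAL;         /* nothing to parse */
    if (copy_from_user(buf, buffer, len))
        return -EFAULT;
    buf[len - 1] = '\0';        /* terminator within the copied bytes */
    return 0;
}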

View file

@@ -934,9 +934,14 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
         int i;
         bool has_stats = false;

+        spin_lock_irq(&blkg->q->queue_lock);
+
+        if (!blkg->online)
+            goto skip;
+
         dname = blkg_dev_name(blkg);
         if (!dname)
-            continue;
+            goto skip;

         /*
          * Hooray string manipulation, count is the size written NOT
@@ -946,8 +951,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
          */
         off += scnprintf(buf+off, size-off, "%s ", dname);

-        spin_lock_irq(&blkg->q->queue_lock);
-
         blkg_rwstat_recursive_sum(blkg, NULL,
                 offsetof(struct blkcg_gq, stat_bytes), &rwstat);
         rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
@@ -960,8 +963,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
         wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
         dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];

-        spin_unlock_irq(&blkg->q->queue_lock);
-
         if (rbytes || wbytes || rios || wios) {
             has_stats = true;
             off += scnprintf(buf+off, size-off,
@@ -999,6 +1000,8 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                 seq_commit(sf, -1);
             }
         }
+    skip:
+        spin_unlock_irq(&blkg->q->queue_lock);
     }

     rcu_read_unlock();

View file

@@ -786,7 +786,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm

     if (nc->tentative && connection->agreed_pro_version < 92) {
         rcu_read_unlock();
-        mutex_unlock(&sock->mutex);
         drbd_err(connection, "--dry-run is not supported by peer");
         return -EOPNOTSUPP;
     }

View file

@@ -297,7 +297,10 @@ static int clk_main_probe_frequency(struct regmap *regmap)
         regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
         if (mcfr & AT91_PMC_MAINRDY)
             return 0;
-        usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
+        if (system_state < SYSTEM_RUNNING)
+            udelay(MAINF_LOOP_MIN_WAIT);
+        else
+            usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
     } while (time_before(prep_time, timeout));

     return -ETIMEDOUT;
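usleep_range() may schedule, which is not allowed before the scheduler is up, so early-boot callers have to busy-wait instead. The same system_state guard recurs four times in the sckc.c hunks below. As a sketch, the pattern could be factored into one helper (the name is illustrative; the patch open-codes it at each site):

#include <linux/delay.h>
#include <linux/kernel.h>   /* system_state, SYSTEM_RUNNING */

static void delay_usec_boot_safe(unsigned int usec)
{
    if (system_state < SYSTEM_RUNNING)
        udelay(usec);                   /* busy-wait: safe early in boot */
    else
        usleep_range(usec, usec + 1);   /* may sleep once scheduler runs */
}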

View file

@@ -43,6 +43,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
 };

 static const struct clk_programmable_layout sam9x60_programmable_layout = {
+    .pres_mask = 0xff,
     .pres_shift = 8,
     .css_mask = 0x1f,
     .have_slck_mck = 0,

View file

@@ -76,7 +76,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw)

     writel(tmp | osc->bits->cr_osc32en, sckcr);

-    usleep_range(osc->startup_usec, osc->startup_usec + 1);
+    if (system_state < SYSTEM_RUNNING)
+        udelay(osc->startup_usec);
+    else
+        usleep_range(osc->startup_usec, osc->startup_usec + 1);

     return 0;
 }
@@ -187,7 +190,10 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw)

     writel(readl(sckcr) | osc->bits->cr_rcen, sckcr);

-    usleep_range(osc->startup_usec, osc->startup_usec + 1);
+    if (system_state < SYSTEM_RUNNING)
+        udelay(osc->startup_usec);
+    else
+        usleep_range(osc->startup_usec, osc->startup_usec + 1);

     return 0;
 }
@@ -288,7 +294,10 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)

     writel(tmp, sckcr);

-    usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
+    if (system_state < SYSTEM_RUNNING)
+        udelay(SLOWCK_SW_TIME_USEC);
+    else
+        usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);

     return 0;
 }
@@ -533,7 +542,10 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
         return 0;
     }

-    usleep_range(osc->startup_usec, osc->startup_usec + 1);
+    if (system_state < SYSTEM_RUNNING)
+        udelay(osc->startup_usec);
+    else
+        usleep_range(osc->startup_usec, osc->startup_usec + 1);

     osc->prepared = true;

     return 0;

View file

@@ -266,10 +266,11 @@ static int aspeed_g6_clk_enable(struct clk_hw *hw)

     /* Enable clock */
     if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
-        regmap_write(gate->map, get_clock_reg(gate), clk);
-    } else {
-        /* Use set to clear register */
+        /* Clock is clear to enable, so use set to clear register */
         regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk);
+    } else {
+        /* Clock is set to enable, so use write to set register */
+        regmap_write(gate->map, get_clock_reg(gate), clk);
     }

     if (gate->reset_idx >= 0) {
View file

@@ -638,7 +638,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
                clks[IMX8MM_CLK_A53_DIV],
                clks[IMX8MM_CLK_A53_SRC],
                clks[IMX8MM_ARM_PLL_OUT],
-               clks[IMX8MM_CLK_24M]);
+               clks[IMX8MM_SYS_PLL1_800M]);

     imx_check_clocks(clks, ARRAY_SIZE(clks));

View file

@@ -610,7 +610,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
                clks[IMX8MN_CLK_A53_DIV],
                clks[IMX8MN_CLK_A53_SRC],
                clks[IMX8MN_ARM_PLL_OUT],
-               clks[IMX8MN_CLK_24M]);
+               clks[IMX8MN_SYS_PLL1_800M]);

     imx_check_clocks(clks, ARRAY_SIZE(clks));

View file

@@ -343,6 +343,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
         .offset = HHI_SYS_CPU_CLK_CNTL0,
         .mask = 0x3,
         .shift = 0,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpu_clk_dyn0_sel",
@@ -353,8 +354,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
             { .hw = &g12a_fclk_div3.hw },
         },
         .num_parents = 3,
-        /* This sub-tree is used a parking clock */
-        .flags = CLK_SET_RATE_NO_REPARENT,
+        .flags = CLK_SET_RATE_PARENT,
     },
 };
@@ -410,6 +410,7 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
         .offset = HHI_SYS_CPU_CLK_CNTL0,
         .mask = 0x1,
         .shift = 2,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpu_clk_dyn0",
@@ -466,6 +467,7 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
         .offset = HHI_SYS_CPU_CLK_CNTL0,
         .mask = 0x1,
         .shift = 10,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpu_clk_dyn",
@@ -485,6 +487,7 @@ static struct clk_regmap g12a_cpu_clk = {
         .offset = HHI_SYS_CPU_CLK_CNTL0,
         .mask = 0x1,
         .shift = 11,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpu_clk",
@@ -504,6 +507,7 @@ static struct clk_regmap g12b_cpu_clk = {
         .offset = HHI_SYS_CPU_CLK_CNTL0,
         .mask = 0x1,
         .shift = 11,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpu_clk",
@@ -523,6 +527,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
         .offset = HHI_SYS_CPUB_CLK_CNTL,
         .mask = 0x3,
         .shift = 0,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpub_clk_dyn0_sel",
@@ -533,6 +538,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
             { .hw = &g12a_fclk_div3.hw },
         },
         .num_parents = 3,
+        .flags = CLK_SET_RATE_PARENT,
     },
 };
@@ -567,6 +573,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
         .offset = HHI_SYS_CPUB_CLK_CNTL,
         .mask = 0x1,
         .shift = 2,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpub_clk_dyn0",
@@ -644,6 +651,7 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
         .offset = HHI_SYS_CPUB_CLK_CNTL,
         .mask = 0x1,
         .shift = 10,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpub_clk_dyn",
@@ -663,6 +671,7 @@ static struct clk_regmap g12b_cpub_clk = {
         .offset = HHI_SYS_CPUB_CLK_CNTL,
         .mask = 0x1,
         .shift = 11,
+        .flags = CLK_MUX_ROUND_CLOSEST,
     },
     .hw.init = &(struct clk_init_data){
         .name = "cpub_clk",

View file

@@ -935,6 +935,7 @@ static struct clk_regmap gxbb_sar_adc_clk_div = {
             &gxbb_sar_adc_clk_sel.hw
         },
         .num_parents = 1,
+        .flags = CLK_SET_RATE_PARENT,
     },
 };

View file

@@ -165,12 +165,18 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
     GATE_BUS_CPU,
     GATE_SCLK_CPU,
     CLKOUT_CMU_CPU,
+    CPLL_CON0,
+    DPLL_CON0,
     EPLL_CON0,
     EPLL_CON1,
     EPLL_CON2,
     RPLL_CON0,
     RPLL_CON1,
     RPLL_CON2,
+    IPLL_CON0,
+    SPLL_CON0,
+    VPLL_CON0,
+    MPLL_CON0,
     SRC_TOP0,
     SRC_TOP1,
     SRC_TOP2,
@@ -1172,8 +1178,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
     GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2",
             GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0),

-    GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
-
     /* CDREX */
     GATE(CLK_CLKM_PHY0, "clkm_phy0", "dout_sclk_cdrex",
             GATE_BUS_CDREX0, 0, 0, 0),
@@ -1248,6 +1252,15 @@ static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
     { DIV2_RATIO0, 0, 0x30 },   /* DIV dout_gscl_blk_300 */
 };

+static const struct samsung_gate_clock exynos5x_g3d_gate_clks[] __initconst = {
+    GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_g3d_suspend_regs[] = {
+    { GATE_IP_G3D, 0x3ff, 0x3ff },  /* G3D gates */
+    { SRC_TOP5, 0, BIT(16) },       /* MUX mout_user_aclk_g3d */
+};
+
 static const struct samsung_div_clock exynos5x_mfc_div_clks[] __initconst = {
     DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
 };
@@ -1320,6 +1333,14 @@ static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
     .pd_name = "GSC",
 };

+static const struct exynos5_subcmu_info exynos5x_g3d_subcmu = {
+    .gate_clks = exynos5x_g3d_gate_clks,
+    .nr_gate_clks = ARRAY_SIZE(exynos5x_g3d_gate_clks),
+    .suspend_regs = exynos5x_g3d_suspend_regs,
+    .nr_suspend_regs = ARRAY_SIZE(exynos5x_g3d_suspend_regs),
+    .pd_name = "G3D",
+};
+
 static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
     .div_clks = exynos5x_mfc_div_clks,
     .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
@@ -1351,6 +1372,7 @@ static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
 static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
     &exynos5x_disp_subcmu,
     &exynos5x_gsc_subcmu,
+    &exynos5x_g3d_subcmu,
     &exynos5x_mfc_subcmu,
     &exynos5x_mscl_subcmu,
 };
@@ -1358,6 +1380,7 @@ static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
 static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
     &exynos5x_disp_subcmu,
     &exynos5x_gsc_subcmu,
+    &exynos5x_g3d_subcmu,
     &exynos5x_mfc_subcmu,
     &exynos5x_mscl_subcmu,
     &exynos5800_mau_subcmu,

View file

@@ -13,6 +13,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/slab.h>

 #include <dt-bindings/clock/exynos5433.h>
@@ -5584,6 +5585,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)

     data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
                             info->nr_clk_regs);
+    if (!data->clk_save)
+        return -ENOMEM;
     data->nr_clk_save = info->nr_clk_regs;
     data->clk_suspend = info->suspend_regs;
     data->nr_clk_suspend = info->nr_suspend_regs;
@@ -5592,12 +5595,19 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
     if (data->nr_pclks > 0) {
         data->pclks = devm_kcalloc(dev, sizeof(struct clk *),
                        data->nr_pclks, GFP_KERNEL);
-
+        if (!data->pclks) {
+            kfree(data->clk_save);
+            return -ENOMEM;
+        }
         for (i = 0; i < data->nr_pclks; i++) {
             struct clk *clk = of_clk_get(dev->of_node, i);

-            if (IS_ERR(clk))
+            if (IS_ERR(clk)) {
+                kfree(data->clk_save);
+                while (--i >= 0)
+                    clk_put(data->pclks[i]);
                 return PTR_ERR(clk);
+            }
             data->pclks[i] = clk;
         }
     }
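The IS_ERR() branch uses the standard partial-unwind idiom: on a failure at index i, release only the 0..i-1 resources that were already obtained. A generic sketch of the idiom (acquire()/release() are hypothetical placeholders, not kernel APIs):

int i, err;

for (i = 0; i < n; i++) {
    err = acquire(&res[i]);     /* hypothetical acquisition step */
    if (err) {
        while (--i >= 0)
            release(&res[i]);   /* undo only what succeeded so far */
        return err;
    }
}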

View file

@@ -1224,7 +1224,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev)

     /* Enforce d1 = 0, d2 = 0 for Audio PLL */
     val = readl(reg + SUN9I_A80_PLL_AUDIO_REG);
-    val &= (BIT(16) & BIT(18));
+    val &= ~(BIT(16) | BIT(18));
     writel(val, reg + SUN9I_A80_PLL_AUDIO_REG);

     /* Enforce P = 1 for both CPU cluster PLLs */
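The arithmetic behind this fix: BIT(16) & BIT(18) is 0x10000 & 0x40000 == 0, since two distinct single-bit masks never overlap, so the old statement was effectively `val &= 0` and wiped the whole register. Clearing just the d1 and d2 bits requires the complement of the OR of the two masks: ~(0x10000 | 0x40000) == ~0x50000.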

View file

@@ -1080,8 +1080,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
                          rate_hw, rate_ops,
                          gate_hw, &clk_gate_ops,
                          clkflags |
-                         data->div[i].critical ?
-                         CLK_IS_CRITICAL : 0);
+                         (data->div[i].critical ?
+                         CLK_IS_CRITICAL : 0));

         WARN_ON(IS_ERR(clk_data->clks[i]));
     }
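The underlying precedence trap: `|` binds tighter than `?:`, so the unparenthesized expression evaluated as `(clkflags | critical) ? CLK_IS_CRITICAL : 0`, throwing the accumulated clkflags away. A two-line demonstration with illustrative values:

unsigned long flags = 0x30, critical = 0;

unsigned long wrong = flags | critical ? 0x800 : 0;   /* (0x30 | 0) is truthy -> 0x800 */
unsigned long right = flags | (critical ? 0x800 : 0); /* 0x30: flags preserved */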

View file

@@ -174,7 +174,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
     struct clk_init_data init = { NULL };
     const char **parent_names = NULL;
     struct clk *clk;
-    int ret;

     clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
     if (!clk_hw) {
@@ -207,11 +206,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
     clk = ti_clk_register(NULL, &clk_hw->hw, node->name);

     if (!IS_ERR(clk)) {
-        ret = ti_clk_add_alias(NULL, clk, node->name);
-        if (ret) {
-            clk_unregister(clk);
-            goto cleanup;
-        }
         of_clk_add_provider(node, of_clk_src_simple_get, clk);
         kfree(parent_names);
         return;

View file

@@ -100,11 +100,12 @@ static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
      * can be from a timer that requires pm_runtime access, which
      * will eventually bring us here with timekeeping_suspended,
      * during both suspend entry and resume paths. This happens
-     * at least on am43xx platform.
+     * at least on am43xx platform. Account for flakeyness
+     * with udelay() by multiplying the timeout value by 2.
      */
     if (unlikely(_early_timeout || timekeeping_suspended)) {
         if (time->cycles++ < timeout) {
-            udelay(1);
+            udelay(1 * 2);
             return false;
         }
     } else {

View file

@@ -847,11 +847,9 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
     value |= HWP_MAX_PERF(min_perf);
     value |= HWP_MIN_PERF(min_perf);

-    /* Set EPP/EPB to min */
+    /* Set EPP to min */
     if (boot_cpu_has(X86_FEATURE_HWP_EPP))
         value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
-    else
-        intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);

     wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }

View file

@@ -362,9 +362,8 @@ static void mrfld_irq_handler(struct irq_desc *desc)
     chained_irq_exit(irqchip, desc);
 }

-static int mrfld_irq_init_hw(struct gpio_chip *chip)
+static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
 {
-    struct mrfld_gpio *priv = gpiochip_get_data(chip);
     void __iomem *reg;
     unsigned int base;
@@ -376,8 +375,6 @@ static int mrfld_irq_init_hw(struct gpio_chip *chip)
         reg = gpio_reg(&priv->chip, base, GFER);
         writel(0, reg);
     }
-
-    return 0;
 }

 static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -400,7 +397,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 {
     const struct mrfld_gpio_pinrange *range;
     const char *pinctrl_dev_name;
-    struct gpio_irq_chip *girq;
     struct mrfld_gpio *priv;
     u32 gpio_base, irq_base;
     void __iomem *base;
@@ -448,21 +444,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
     raw_spin_lock_init(&priv->lock);

-    girq = &priv->chip.irq;
-    girq->chip = &mrfld_irqchip;
-    girq->init_hw = mrfld_irq_init_hw;
-    girq->parent_handler = mrfld_irq_handler;
-    girq->num_parents = 1;
-    girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
-                     sizeof(*girq->parents),
-                     GFP_KERNEL);
-    if (!girq->parents)
-        return -ENOMEM;
-    girq->parents[0] = pdev->irq;
-    girq->first = irq_base;
-    girq->default_type = IRQ_TYPE_NONE;
-    girq->handler = handle_bad_irq;
-
     pci_set_drvdata(pdev, priv);
     retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
     if (retval) {
@@ -484,6 +465,18 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
         }
     }

+    retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
+                      handle_bad_irq, IRQ_TYPE_NONE);
+    if (retval) {
+        dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
+        return retval;
+    }
+
+    mrfld_irq_init_hw(priv);
+
+    gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
+                     mrfld_irq_handler);
+
     return 0;
 }

View file

@@ -604,8 +604,11 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
             continue;
         }

-        for (i = 0; i < num_entities; i++)
+        for (i = 0; i < num_entities; i++) {
+            mutex_lock(&ctx->adev->lock_reset);
             drm_sched_entity_fini(&ctx->entities[0][i].entity);
+            mutex_unlock(&ctx->adev->lock_reset);
+        }
     }
 }

View file

@@ -2885,6 +2885,13 @@ fence_driver_init:
         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
     }

+    /*
+     * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
+     * Otherwise the mgpu fan boost feature will be skipped due to the
+     * gpu instance is counted less.
+     */
+    amdgpu_register_gpu_instance(adev);
+
     /* enable clockgating, etc. after ib tests, etc. since some blocks require
      * explicit gating rather than handling it automatically.
      */

View file

@@ -1016,6 +1016,7 @@ static const struct pci_device_id pciidlist[] = {
     {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
     {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
     {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},

     /* Renoir */
     {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},

View file

@@ -289,6 +289,7 @@ struct amdgpu_gfx {
     uint32_t            mec2_feature_version;
     bool                mec_fw_write_wait;
    bool                me_fw_write_wait;
+    bool                cp_fw_write_wait;
     struct amdgpu_ring  gfx_ring[AMDGPU_MAX_GFX_RINGS];
     unsigned            num_gfx_rings;
     struct amdgpu_ring  compute_ring[AMDGPU_MAX_COMPUTE_RINGS];

View file

@@ -190,7 +190,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
         pm_runtime_put_autosuspend(dev->dev);
     }

-    amdgpu_register_gpu_instance(adev);
 out:
     if (r) {
         /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */

View file

@@ -564,6 +564,32 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
     kfree(adev->gfx.rlc.register_list_format);
 }

+static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
+{
+    adev->gfx.cp_fw_write_wait = false;
+
+    switch (adev->asic_type) {
+    case CHIP_NAVI10:
+    case CHIP_NAVI12:
+    case CHIP_NAVI14:
+        if ((adev->gfx.me_fw_version >= 0x00000046) &&
+            (adev->gfx.me_feature_version >= 27) &&
+            (adev->gfx.pfp_fw_version >= 0x00000068) &&
+            (adev->gfx.pfp_feature_version >= 27) &&
+            (adev->gfx.mec_fw_version >= 0x0000005b) &&
+            (adev->gfx.mec_feature_version >= 27))
+            adev->gfx.cp_fw_write_wait = true;
+        break;
+    default:
+        break;
+    }
+
+    if (adev->gfx.cp_fw_write_wait == false)
+        DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+                  GRBM requires 1-cycle delay in cp firmware\n");
+}
+
 static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
 {
     const struct rlc_firmware_header_v2_1 *rlc_hdr;
@@ -832,6 +858,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
         }
     }

+    gfx_v10_0_check_fw_write_wait(adev);
 out:
     if (err) {
         dev_err(adev->dev,
@@ -4765,6 +4792,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
     gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
 }

+static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+                           uint32_t reg0, uint32_t reg1,
+                           uint32_t ref, uint32_t mask)
+{
+    int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+    struct amdgpu_device *adev = ring->adev;
+    bool fw_version_ok = false;
+
+    fw_version_ok = adev->gfx.cp_fw_write_wait;
+
+    if (fw_version_ok)
+        gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+                       ref, mask, 0x20);
+    else
+        amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+                               ref, mask);
+}
+
 static void
 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                       uint32_t me, uint32_t pipe,
@@ -5155,6 +5200,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
     .emit_tmz = gfx_v10_0_ring_emit_tmz,
     .emit_wreg = gfx_v10_0_ring_emit_wreg,
     .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+    .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };

 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5188,6 +5234,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
     .pad_ib = amdgpu_ring_generic_pad_ib,
     .emit_wreg = gfx_v10_0_ring_emit_wreg,
     .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+    .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };

 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5218,6 +5265,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
     .emit_rreg = gfx_v10_0_ring_emit_rreg,
     .emit_wreg = gfx_v10_0_ring_emit_wreg,
     .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+    .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };

 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)

View file

@@ -973,6 +973,13 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
     adev->gfx.me_fw_write_wait = false;
     adev->gfx.mec_fw_write_wait = false;

+    if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+        (adev->gfx.mec_feature_version < 46) ||
+        (adev->gfx.pfp_fw_version < 0x000000b7) ||
+        (adev->gfx.pfp_feature_version < 46))
+        DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+                  GRBM requires 1-cycle delay in cp firmware\n");
+
     switch (adev->asic_type) {
     case CHIP_VEGA10:
         if ((adev->gfx.me_fw_version >= 0x0000009c) &&
@@ -1039,6 +1046,12 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
             !adev->gfx.rlc.is_rlc_v2_1))
             adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

+        if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+            adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+                AMD_PG_SUPPORT_CP |
+                AMD_PG_SUPPORT_RLC_SMU_HS;
+        break;
+    case CHIP_RENOIR:
         if (adev->pm.pp_feature & PP_GFXOFF_MASK)
             adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
                 AMD_PG_SUPPORT_CP |

View file

@@ -344,11 +344,9 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
     amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
                   upper_32_bits(pd_addr));

-    amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
-
-    /* wait for the invalidate to complete */
-    amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
-                  1 << vmid, 1 << vmid);
+    amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+                        hub->vm_inv_eng0_ack + eng,
+                        req, 1 << vmid);

     return pd_addr;
 }

View file

@@ -219,6 +219,15 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
                 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

     tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
+    if (adev->gmc.translate_further) {
+        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
+        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+                    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+    } else {
+        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
+        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+    }
     WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
                 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

View file

@@ -1173,6 +1173,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
               SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
 }

+static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+                           uint32_t reg0, uint32_t reg1,
+                           uint32_t ref, uint32_t mask)
+{
+    amdgpu_ring_emit_wreg(ring, reg0, ref);
+    /* wait for a cycle to reset vm_inv_eng*_ack */
+    amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
+    amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
 static int sdma_v5_0_early_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1588,7 +1598,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
         6 + /* sdma_v5_0_ring_emit_pipeline_sync */
         /* sdma_v5_0_ring_emit_vm_flush */
         SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
         10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
     .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
     .emit_ib = sdma_v5_0_ring_emit_ib,
@@ -1602,6 +1612,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
     .pad_ib = sdma_v5_0_ring_pad_ib,
     .emit_wreg = sdma_v5_0_ring_emit_wreg,
     .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
+    .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
     .init_cond_exec = sdma_v5_0_ring_init_cond_exec,
     .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
     .preempt_ib = sdma_v5_0_ring_preempt_ib,

View file

@@ -1186,11 +1186,6 @@ static int soc15_common_early_init(void *handle)
             AMD_PG_SUPPORT_VCN |
             AMD_PG_SUPPORT_VCN_DPG;
         adev->external_rev_id = adev->rev_id + 0x91;
-
-        if (adev->pm.pp_feature & PP_GFXOFF_MASK)
-            adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
-                AMD_PG_SUPPORT_CP |
-                AMD_PG_SUPPORT_RLC_SMU_HS;
         break;
     default:
         /* FIXME: not supported yet */

View file

@@ -2767,15 +2767,6 @@ void core_link_enable_stream(
                     CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
                     COLOR_DEPTH_UNDEFINED);

-        /* This second call is needed to reconfigure the DIG
-         * as a workaround for the incorrect value being applied
-         * from transmitter control.
-         */
-        if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
-            stream->link->link_enc->funcs->setup(
-                stream->link->link_enc,
-                pipe_ctx->stream->signal);
-
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
         if (pipe_ctx->stream->timing.flags.DSC) {
             if (dc_is_dp_signal(pipe_ctx->stream->signal) ||

View file

@@ -1107,6 +1107,11 @@ struct stream_encoder *dcn20_stream_encoder_create(
     if (!enc1)
         return NULL;

+    if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+        if (eng_id >= ENGINE_ID_DIGD)
+            eng_id++;
+    }
+
     dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
                     &stream_enc_regs[eng_id],
                     &se_shift, &se_mask);

View file

@@ -205,7 +205,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
     WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,  WORKLOAD_PPLIB_POWER_SAVING_BIT),
     WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,        WORKLOAD_PPLIB_VIDEO_BIT),
     WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,           WORKLOAD_PPLIB_VR_BIT),
-    WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,      WORKLOAD_PPLIB_CUSTOM_BIT),
+    WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,      WORKLOAD_PPLIB_COMPUTE_BIT),
     WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,       WORKLOAD_PPLIB_CUSTOM_BIT),
 };

View file

@@ -219,7 +219,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE
     WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,  WORKLOAD_PPLIB_POWER_SAVING_BIT),
     WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,        WORKLOAD_PPLIB_VIDEO_BIT),
     WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,           WORKLOAD_PPLIB_VR_BIT),
-    WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,      WORKLOAD_PPLIB_CUSTOM_BIT),
+    WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,      WORKLOAD_PPLIB_COMPUTE_BIT),
     WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,       WORKLOAD_PPLIB_CUSTOM_BIT),
 };

View file

@@ -1581,8 +1581,11 @@ static void commit_tail(struct drm_atomic_state *old_state)
 {
     struct drm_device *dev = old_state->dev;
     const struct drm_mode_config_helper_funcs *funcs;
+    struct drm_crtc_state *new_crtc_state;
+    struct drm_crtc *crtc;
     ktime_t start;
     s64 commit_time_ms;
+    unsigned int i, new_self_refresh_mask = 0;

     funcs = dev->mode_config.helper_private;
@@ -1602,6 +1605,15 @@ static void commit_tail(struct drm_atomic_state *old_state)

     drm_atomic_helper_wait_for_dependencies(old_state);

+    /*
+     * We cannot safely access new_crtc_state after
+     * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
+     * self-refresh active beforehand:
+     */
+    for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
+        if (new_crtc_state->self_refresh_active)
+            new_self_refresh_mask |= BIT(i);
+
     if (funcs && funcs->atomic_commit_tail)
         funcs->atomic_commit_tail(old_state);
     else
@@ -1610,7 +1622,8 @@ static void commit_tail(struct drm_atomic_state *old_state)
     commit_time_ms = ktime_ms_delta(ktime_get(), start);
     if (commit_time_ms > 0)
         drm_self_refresh_helper_update_avg_times(old_state,
                          (unsigned long)commit_time_ms,
                          new_self_refresh_mask);

     drm_atomic_helper_commit_cleanup_done(old_state);

View file

@@ -133,29 +133,33 @@ out_drop_locks:
  * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
  * @state: the state which has just been applied to hardware
  * @commit_time_ms: the amount of time in ms that this commit took to complete
+ * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in
+ *    new state
  *
  * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
  * update the average entry/exit self refresh times on self refresh transitions.
  * These averages will be used when calculating how long to delay before
  * entering self refresh mode after activity.
  */
-void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
-                          unsigned int commit_time_ms)
+void
+drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
+                     unsigned int commit_time_ms,
+                     unsigned int new_self_refresh_mask)
 {
     struct drm_crtc *crtc;
-    struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+    struct drm_crtc_state *old_crtc_state;
     int i;

-    for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
-                      new_crtc_state, i) {
+    for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+        bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
         struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
         struct ewma_psr_time *time;

         if (old_crtc_state->self_refresh_active ==
-            new_crtc_state->self_refresh_active)
+            new_self_refresh_active)
             continue;

-        if (new_crtc_state->self_refresh_active)
+        if (new_self_refresh_active)
             time = &sr_data->entry_avg_ms;
         else
             time = &sr_data->exit_avg_ms;

View file

@@ -864,6 +864,13 @@ load_detect:
 out:
     intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);

+    /*
+     * Make sure the refs for power wells enabled during detect are
+     * dropped to avoid a new detect cycle triggered by HPD polling.
+     */
+    intel_display_power_flush_work(dev_priv);
+
     return status;
 }

View file

@@ -1256,6 +1256,9 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                 u32 unused)
 {
     struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+    struct drm_i915_private *i915 =
+            to_i915(intel_dig_port->base.base.dev);
+    enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
     u32 ret;

     ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1268,7 +1271,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
           DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
           DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

-    if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+    if (intel_phy_is_tc(i915, phy) &&
+        intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
         ret |= DP_AUX_CH_CTL_TBT_IO;

     return ret;
@@ -5436,6 +5440,12 @@ out:
     if (status != connector_status_connected && !intel_dp->is_mst)
         intel_dp_unset_edid(intel_dp);

+    /*
+     * Make sure the refs for power wells enabled during detect are
+     * dropped to avoid a new detect cycle triggered by HPD polling.
+     */
+    intel_display_power_flush_work(dev_priv);
+
     return status;
 }

View file

@@ -2565,6 +2565,12 @@ out:
 	if (status != connector_status_connected)
 		cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
 
+	/*
+	 * Make sure the refs for power wells enabled during detect are
+	 * dropped to avoid a new detect cycle triggered by HPD polling.
+	 */
+	intel_display_power_flush_work(dev_priv);
+
 	return status;
 }

View file

@@ -1958,6 +1958,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
 		case 0x682C:
 			si_pi->cac_weights = cac_weights_cape_verde_pro;
 			si_pi->dte_data = dte_data_sun_xt;
+			update_dte_from_pl2 = true;
 			break;
 		case 0x6825:
 		case 0x6827:

View file

@@ -447,8 +447,12 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 	if (ret) {
 		dev_err(&client->dev, "failed to reset device.\n");
 		i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+		goto out_unlock;
 	}
 
+	/* At least some SIS devices need this after reset */
+	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
 out_unlock:
 	mutex_unlock(&ihid->reset_lock);
 	return ret;

View file

@@ -202,6 +202,21 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac,
 	}
 }
 
+/*
+ * Convert a signed 32-bit integer to an unsigned n-bit integer. Undoes
+ * the normally-helpful work of 'hid_snto32' for fields that use signed
+ * ranges for questionable reasons.
+ */
+static inline __u32 wacom_s32tou(s32 value, __u8 n)
+{
+	switch (n) {
+	case 8:  return ((__u8)value);
+	case 16: return ((__u16)value);
+	case 32: return ((__u32)value);
+	}
+	return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
+}
+
 extern const struct hid_device_id wacom_ids[];
 
 void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
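The helper above undoes the sign extension that hid_snto32() applies, so the raw field bits can be OR-ed into the serial number. A standalone rendition of the same conversion with stdint types (illustrative sketch, not the in-tree code):

    #include <stdint.h>
    #include <stdio.h>

    /* User-space version of the wacom_s32tou() logic above. */
    static uint32_t s32tou(int32_t value, uint8_t n)
    {
        switch (n) {
        case 8:  return (uint8_t)value;
        case 16: return (uint16_t)value;
        case 32: return (uint32_t)value;
        }
        /* Non-byte widths: mask off the sign-extended high bits. */
        return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
    }

    int main(void)
    {
        /* A 16-bit field reported as -1 is really the raw value 0xffff. */
        printf("0x%x\n", s32tou(-1, 16));   /* prints 0xffff */
        printf("0x%x\n", s32tou(-1, 24));   /* prints 0xffffff */
        return 0;
    }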

View file

@@ -2303,7 +2303,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
 	case HID_DG_TOOLSERIALNUMBER:
 		if (value) {
 			wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
-			wacom_wac->serial[0] |= (__u32)value;
+			wacom_wac->serial[0] |= wacom_s32tou(value, field->report_size);
 		}
 		return;
 	case HID_DG_TWIST:
@@ -2319,15 +2319,17 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
 		return;
 	case WACOM_HID_WD_SERIALHI:
 		if (value) {
+			__u32 raw_value = wacom_s32tou(value, field->report_size);
+
 			wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
-			wacom_wac->serial[0] |= ((__u64)value) << 32;
+			wacom_wac->serial[0] |= ((__u64)raw_value) << 32;
 			/*
 			 * Non-USI EMR devices may contain additional tool type
 			 * information here. See WACOM_HID_WD_TOOLTYPE case for
 			 * more details.
 			 */
 			if (value >> 20 == 1) {
-				wacom_wac->id[0] |= value & 0xFFFFF;
+				wacom_wac->id[0] |= raw_value & 0xFFFFF;
 			}
 		}
 		return;
@@ -2339,7 +2341,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
 		 * bitwise OR so the complete value can be built
 		 * up over time :(
 		 */
-		wacom_wac->id[0] |= value;
+		wacom_wac->id[0] |= wacom_s32tou(value, field->report_size);
 		return;
 	case WACOM_HID_WD_OFFSETLEFT:
 		if (features->offset_left && value != features->offset_left)

View file

@@ -170,7 +170,7 @@ static inline int ina3221_wait_for_data(struct ina3221_data *ina)
 
 	/* Polling the CVRF bit to make sure read data is ready */
 	return regmap_field_read_poll_timeout(ina->fields[F_CVRF],
-					      cvrf, cvrf, wait, 100000);
+					      cvrf, cvrf, wait, wait * 2);
 }
 
 static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,

View file

@@ -82,6 +82,10 @@
 #define FANCTL1_FMR_REG		0x00	/* Bank 3; 1 reg per channel */
 #define FANCTL1_OUT_REG		0x10	/* Bank 3; 1 reg per channel */
 
+#define VOLT_MONITOR_MODE	0x0
+#define THERMAL_DIODE_MODE	0x1
+#define THERMISTOR_MODE		0x3
+
 #define ENABLE_TSI	BIT(1)
 
 static const unsigned short normal_i2c[] = {
@@ -935,11 +939,16 @@ static int nct7904_probe(struct i2c_client *client,
 	for (i = 0; i < 4; i++) {
 		val = (ret >> (i * 2)) & 0x03;
 		bit = (1 << i);
-		if (val == 0) {
+		if (val == VOLT_MONITOR_MODE) {
 			data->tcpu_mask &= ~bit;
+		} else if (val == THERMAL_DIODE_MODE && i < 2) {
+			data->temp_mode |= bit;
+			data->vsen_mask &= ~(0x06 << (i * 2));
+		} else if (val == THERMISTOR_MODE) {
+			data->vsen_mask &= ~(0x02 << (i * 2));
 		} else {
-			if (val == 0x1 || val == 0x2)
-				data->temp_mode |= bit;
+			/* Reserved */
+			data->tcpu_mask &= ~bit;
 			data->vsen_mask &= ~(0x06 << (i * 2));
 		}
 	}
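The probe loop above decodes one 2-bit mode field per channel out of a packed register. A toy decoder for the same packing (mode names mirror the new defines; the register value here is made up):

    #include <stdio.h>

    #define VOLT_MONITOR_MODE   0x0
    #define THERMAL_DIODE_MODE  0x1
    #define THERMISTOR_MODE     0x3

    int main(void)
    {
        /* Pretend register readout: channels 0..3, 2 bits each. */
        unsigned int ret = 0x1 | (0x3 << 2) | (0x0 << 4) | (0x2 << 6);
        int i;

        for (i = 0; i < 4; i++) {
            unsigned int val = (ret >> (i * 2)) & 0x03;

            if (val == VOLT_MONITOR_MODE)
                printf("channel %d: voltage monitor\n", i);
            else if (val == THERMAL_DIODE_MODE && i < 2)
                printf("channel %d: thermal diode\n", i);
            else if (val == THERMISTOR_MODE)
                printf("channel %d: thermistor\n", i);
            else
                printf("channel %d: reserved\n", i);
        }
        return 0;
    }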

View file

@@ -2128,8 +2128,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 	ignore_updelay = !rcu_dereference(bond->curr_active_slave);
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		slave->new_link = BOND_LINK_NOCHANGE;
-		slave->link_new_state = slave->link;
+		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
 
 		link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2163,7 +2162,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 			}
 
 			if (slave->delay <= 0) {
-				slave->new_link = BOND_LINK_DOWN;
+				bond_propose_link_state(slave, BOND_LINK_DOWN);
 				commit++;
 				continue;
 			}
@@ -2200,7 +2199,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 			slave->delay = 0;
 
 			if (slave->delay <= 0) {
-				slave->new_link = BOND_LINK_UP;
+				bond_propose_link_state(slave, BOND_LINK_UP);
 				commit++;
 				ignore_updelay = false;
 				continue;
@@ -2238,7 +2237,7 @@ static void bond_miimon_commit(struct bonding *bond)
 	struct slave *slave, *primary;
 
 	bond_for_each_slave(bond, slave, iter) {
-		switch (slave->new_link) {
+		switch (slave->link_new_state) {
 		case BOND_LINK_NOCHANGE:
 			/* For 802.3ad mode, check current slave speed and
 			 * duplex again in case its port was disabled after
@@ -2310,8 +2309,8 @@ static void bond_miimon_commit(struct bonding *bond)
 
 		default:
 			slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
-				  slave->new_link);
-			slave->new_link = BOND_LINK_NOCHANGE;
+				  slave->link_new_state);
+			bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
 			continue;
 		}
@@ -2719,13 +2718,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		unsigned long trans_start = dev_trans_start(slave->dev);
 
-		slave->new_link = BOND_LINK_NOCHANGE;
+		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
 
 		if (slave->link != BOND_LINK_UP) {
 			if (bond_time_in_interval(bond, trans_start, 1) &&
 			    bond_time_in_interval(bond, slave->last_rx, 1)) {
 
-				slave->new_link = BOND_LINK_UP;
+				bond_propose_link_state(slave, BOND_LINK_UP);
 				slave_state_changed = 1;
 
 				/* primary_slave has no meaning in round-robin
@@ -2750,7 +2749,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
 			if (!bond_time_in_interval(bond, trans_start, 2) ||
 			    !bond_time_in_interval(bond, slave->last_rx, 2)) {
 
-				slave->new_link = BOND_LINK_DOWN;
+				bond_propose_link_state(slave, BOND_LINK_DOWN);
 				slave_state_changed = 1;
 
 				if (slave->link_failure_count < UINT_MAX)
@@ -2781,8 +2780,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
 		goto re_arm;
 
 	bond_for_each_slave(bond, slave, iter) {
-		if (slave->new_link != BOND_LINK_NOCHANGE)
-			slave->link = slave->new_link;
+		if (slave->link_new_state != BOND_LINK_NOCHANGE)
+			slave->link = slave->link_new_state;
 	}
 
 	if (slave_state_changed) {
@@ -2805,9 +2804,9 @@ re_arm:
 }
 
 /* Called to inspect slaves for active-backup mode ARP monitor link state
- * changes.  Sets new_link in slaves to specify what action should take
- * place for the slave.  Returns 0 if no changes are found, >0 if changes
- * to link states must be committed.
+ * changes. Sets proposed link state in slaves to specify what action
+ * should take place for the slave. Returns 0 if no changes are found, >0
+ * if changes to link states must be committed.
  *
  * Called with rcu_read_lock held.
  */
@@ -2819,12 +2818,12 @@ static int bond_ab_arp_inspect(struct bonding *bond)
 	int commit = 0;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		slave->new_link = BOND_LINK_NOCHANGE;
+		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
 		last_rx = slave_last_rx(bond, slave);
 
 		if (slave->link != BOND_LINK_UP) {
 			if (bond_time_in_interval(bond, last_rx, 1)) {
-				slave->new_link = BOND_LINK_UP;
+				bond_propose_link_state(slave, BOND_LINK_UP);
 				commit++;
 			}
 			continue;
@@ -2852,7 +2851,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
 		if (!bond_is_active_slave(slave) &&
 		    !rcu_access_pointer(bond->current_arp_slave) &&
 		    !bond_time_in_interval(bond, last_rx, 3)) {
-			slave->new_link = BOND_LINK_DOWN;
+			bond_propose_link_state(slave, BOND_LINK_DOWN);
 			commit++;
 		}
@@ -2865,7 +2864,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
 		if (bond_is_active_slave(slave) &&
 		    (!bond_time_in_interval(bond, trans_start, 2) ||
 		     !bond_time_in_interval(bond, last_rx, 2))) {
-			slave->new_link = BOND_LINK_DOWN;
+			bond_propose_link_state(slave, BOND_LINK_DOWN);
 			commit++;
 		}
 	}
@@ -2885,7 +2884,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
 	struct slave *slave;
 
 	bond_for_each_slave(bond, slave, iter) {
-		switch (slave->new_link) {
+		switch (slave->link_new_state) {
 		case BOND_LINK_NOCHANGE:
 			continue;
@@ -2935,8 +2934,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
 			continue;
 
 		default:
-			slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n",
-				  slave->new_link);
+			slave_err(bond->dev, slave->dev,
+				  "impossible: link_new_state %d on slave\n",
+				  slave->link_new_state);
 			continue;
 		}
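The whole series above replaces direct writes to slave->new_link with a propose/commit pair, so a state proposed during inspection is only applied once the commit phase runs. A stripped-down model of that two-phase pattern (names are illustrative, not the bonding API):

    #include <stdio.h>

    enum link { LINK_NOCHANGE, LINK_UP, LINK_DOWN };

    struct slave {
        enum link link;            /* committed state */
        enum link link_new_state;  /* proposed state */
    };

    /* inspect phase: only propose, never touch the committed state */
    static void propose_link_state(struct slave *s, enum link state)
    {
        s->link_new_state = state;
    }

    /* commit phase: apply whatever inspection proposed */
    static void commit_link_state(struct slave *s)
    {
        if (s->link_new_state != LINK_NOCHANGE)
            s->link = s->link_new_state;
    }

    int main(void)
    {
        struct slave s = { .link = LINK_DOWN, .link_new_state = LINK_NOCHANGE };

        propose_link_state(&s, LINK_UP);  /* inspect (under RCU in the driver) */
        commit_link_state(&s);            /* commit (under RTNL in the driver) */
        printf("link is %s\n", s.link == LINK_UP ? "up" : "down");
        return 0;
    }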

View file

@@ -52,6 +52,7 @@
 #define CONTROL_EX_PDR		BIT(8)
 
 /* control register */
+#define CONTROL_SWR		BIT(15)
 #define CONTROL_TEST		BIT(7)
 #define CONTROL_CCE		BIT(6)
 #define CONTROL_DISABLE_AR	BIT(5)
@@ -97,6 +98,9 @@
 #define BTR_TSEG2_SHIFT		12
 #define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)
 
+/* interrupt register */
+#define INT_STS_PENDING		0x8000
+
 /* brp extension register */
 #define BRP_EXT_BRPE_MASK	0x0f
 #define BRP_EXT_BRPE_SHIFT	0
@@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
 					   IF_MCONT_RCV_EOB);
 }
 
+static int c_can_software_reset(struct net_device *dev)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+	int retry = 0;
+
+	if (priv->type != BOSCH_D_CAN)
+		return 0;
+
+	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
+	while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
+		msleep(20);
+		if (retry++ > 100) {
+			netdev_err(dev, "CCTRL: software reset failed\n");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Configure C_CAN chip:
  * - enable/disable auto-retransmission
@@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
 static int c_can_chip_config(struct net_device *dev)
 {
 	struct c_can_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = c_can_software_reset(dev);
+	if (err)
+		return err;
 
 	/* enable automatic retransmission */
 	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
@@ -886,6 +915,9 @@ static int c_can_handle_state_change(struct net_device *dev,
 	struct can_berr_counter bec;
 
 	switch (error_type) {
+	case C_CAN_NO_ERROR:
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		break;
 	case C_CAN_ERROR_WARNING:
 		/* error warning state */
 		priv->can.can_stats.error_warning++;
@@ -916,6 +948,13 @@ static int c_can_handle_state_change(struct net_device *dev,
 		ERR_CNT_RP_SHIFT;
 
 	switch (error_type) {
+	case C_CAN_NO_ERROR:
+		/* error warning state */
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		break;
 	case C_CAN_ERROR_WARNING:
 		/* error warning state */
 		cf->can_id |= CAN_ERR_CRTL;
@@ -1029,10 +1068,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
 	u16 curr, last = priv->last_status;
 	int work_done = 0;
 
-	priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
-	/* Ack status on C_CAN. D_CAN is self clearing */
-	if (priv->type != BOSCH_D_CAN)
-		priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+	/* Only read the status register if a status interrupt was pending */
+	if (atomic_xchg(&priv->sie_pending, 0)) {
+		priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
+		/* Ack status on C_CAN. D_CAN is self clearing */
+		if (priv->type != BOSCH_D_CAN)
+			priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+	} else {
+		/* no change detected ... */
+		curr = last;
+	}
 
 	/* handle state changes */
 	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
@@ -1054,11 +1099,17 @@ static int c_can_poll(struct napi_struct *napi, int quota)
 	/* handle bus recovery events */
 	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
 		netdev_dbg(dev, "left bus off state\n");
-		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
 	}
+
 	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
 		netdev_dbg(dev, "left error passive state\n");
-		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
+	}
+
+	if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) {
+		netdev_dbg(dev, "left error warning state\n");
+		work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR);
 	}
 
 	/* handle lec errors on the bus */
@@ -1083,10 +1134,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct c_can_priv *priv = netdev_priv(dev);
+	int reg_int;
 
-	if (!priv->read_reg(priv, C_CAN_INT_REG))
+	reg_int = priv->read_reg(priv, C_CAN_INT_REG);
+	if (!reg_int)
 		return IRQ_NONE;
 
+	/* save for later use */
+	if (reg_int & INT_STS_PENDING)
+		atomic_set(&priv->sie_pending, 1);
+
 	/* disable all interrupts and schedule the NAPI */
 	c_can_irq_control(priv, false);
 	napi_schedule(&priv->napi);
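c_can_software_reset() above polls a self-clearing reset bit with a bounded retry count rather than spinning forever. The shape of that loop, reduced to a user-space sketch against a fake register (timings and names are illustrative):

    #include <stdio.h>

    #define CONTROL_SWR (1 << 15)

    static unsigned int fake_ctrl_reg = CONTROL_SWR;

    static unsigned int read_reg(void)
    {
        unsigned int v = fake_ctrl_reg;

        fake_ctrl_reg &= ~CONTROL_SWR;  /* pretend the HW clears the bit */
        return v;
    }

    int main(void)
    {
        int retry = 0;

        while (read_reg() & CONTROL_SWR) {
            /* in the driver this is an msleep(20) between polls */
            if (retry++ > 100) {
                fprintf(stderr, "software reset failed\n");
                return 1;
            }
        }
        printf("reset done after %d retries\n", retry);
        return 0;
    }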

View file

@@ -198,6 +198,7 @@ struct c_can_priv {
 	struct net_device *dev;
 	struct device *device;
 	atomic_t tx_active;
+	atomic_t sie_pending;
 	unsigned long tx_dir;
 	int last_status;
 	u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);

View file

@@ -848,6 +848,7 @@ void of_can_transceiver(struct net_device *dev)
 		return;
 
 	ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+	of_node_put(dn);
 	if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
 		netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
 }

View file

@@ -677,6 +677,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
 	struct can_frame *cf;
 	bool rx_errors = false, tx_errors = false;
 	u32 timestamp;
+	int err;
 
 	timestamp = priv->read(&regs->timer) << 16;
@@ -725,7 +726,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
 	if (tx_errors)
 		dev->stats.tx_errors++;
 
-	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+	if (err)
+		dev->stats.rx_fifo_errors++;
 }
 
 static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
@@ -738,6 +741,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
 	int flt;
 	struct can_berr_counter bec;
 	u32 timestamp;
+	int err;
 
 	timestamp = priv->read(&regs->timer) << 16;
@@ -769,7 +773,9 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
 	if (unlikely(new_state == CAN_STATE_BUS_OFF))
 		can_bus_off(dev);
 
-	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+	if (err)
+		dev->stats.rx_fifo_errors++;
 }
 
 static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -1188,6 +1194,7 @@ static int flexcan_chip_start(struct net_device *dev)
 		reg_mecr = priv->read(&regs->mecr);
 		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
 		priv->write(reg_mecr, &regs->mecr);
+		reg_mecr |= FLEXCAN_MECR_ECCDIS;
 		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
 			      FLEXCAN_MECR_FANCEI_MSK);
 		priv->write(reg_mecr, &regs->mecr);

View file

@@ -107,37 +107,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
 	return cb_b->timestamp - cb_a->timestamp;
 }
 
-static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+/**
+ * can_rx_offload_offload_one() - Read one CAN frame from HW
+ * @offload: pointer to rx_offload context
+ * @n: number of mailbox to read
+ *
+ * The task of this function is to read a CAN frame from mailbox @n
+ * from the device and return the mailbox's content as a struct
+ * sk_buff.
+ *
+ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
+ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
+ * allocated, the mailbox contents is discarded by reading it into an
+ * overflow buffer. This way the mailbox is marked as free by the
+ * driver.
+ *
+ * Return: A pointer to skb containing the CAN frame on success.
+ *
+ *         NULL if the mailbox @n is empty.
+ *
+ *         ERR_PTR() in case of an error
+ */
+static struct sk_buff *
+can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb = NULL, *skb_error = NULL;
 	struct can_rx_offload_cb *cb;
 	struct can_frame *cf;
 	int ret;
 
-	/* If queue is full or skb not available, read to discard mailbox */
-	if (likely(skb_queue_len(&offload->skb_queue) <=
-		   offload->skb_queue_len_max))
+	if (likely(skb_queue_len(&offload->skb_queue) <
+		   offload->skb_queue_len_max)) {
 		skb = alloc_can_skb(offload->dev, &cf);
+		if (unlikely(!skb))
+			skb_error = ERR_PTR(-ENOMEM);	/* skb alloc failed */
+	} else {
+		skb_error = ERR_PTR(-ENOBUFS);		/* skb_queue is full */
+	}
 
-	if (!skb) {
+	/* If queue is full or skb not available, drop by reading into
+	 * overflow buffer.
+	 */
+	if (unlikely(skb_error)) {
 		struct can_frame cf_overflow;
 		u32 timestamp;
 
 		ret = offload->mailbox_read(offload, &cf_overflow,
 					    &timestamp, n);
-		if (ret)
-			offload->dev->stats.rx_dropped++;
 
-		return NULL;
+		/* Mailbox was empty. */
+		if (unlikely(!ret))
+			return NULL;
+
+		/* Mailbox has been read and we're dropping it or
+		 * there was a problem reading the mailbox.
+		 *
+		 * Increment error counters in any case.
+		 */
+		offload->dev->stats.rx_dropped++;
+		offload->dev->stats.rx_fifo_errors++;
+
+		/* There was a problem reading the mailbox, propagate
+		 * error value.
+		 */
+		if (unlikely(ret < 0))
+			return ERR_PTR(ret);
+
+		return skb_error;
 	}
 
 	cb = can_rx_offload_get_cb(skb);
 	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
-	if (!ret) {
+
+	/* Mailbox was empty. */
+	if (unlikely(!ret)) {
 		kfree_skb(skb);
 		return NULL;
 	}
 
+	/* There was a problem reading the mailbox, propagate error value. */
+	if (unlikely(ret < 0)) {
+		kfree_skb(skb);
+
+		offload->dev->stats.rx_dropped++;
+		offload->dev->stats.rx_fifo_errors++;
+
+		return ERR_PTR(ret);
+	}
+
+	/* Mailbox was read. */
 	return skb;
 }
@@ -157,8 +215,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
 			continue;
 
 		skb = can_rx_offload_offload_one(offload, i);
-		if (!skb)
-			break;
+		if (IS_ERR_OR_NULL(skb))
+			continue;
 
 		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
 	}
@@ -188,7 +246,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
 	struct sk_buff *skb;
 	int received = 0;
 
-	while ((skb = can_rx_offload_offload_one(offload, 0))) {
+	while (1) {
+		skb = can_rx_offload_offload_one(offload, 0);
+		if (IS_ERR(skb))
+			continue;
+		if (!skb)
+			break;
+
 		skb_queue_tail(&offload->skb_queue, skb);
 		received++;
 	}
@@ -207,8 +271,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
 	unsigned long flags;
 
 	if (skb_queue_len(&offload->skb_queue) >
-	    offload->skb_queue_len_max)
-		return -ENOMEM;
+	    offload->skb_queue_len_max) {
+		kfree_skb(skb);
+		return -ENOBUFS;
+	}
 
 	cb = can_rx_offload_get_cb(skb);
 	cb->timestamp = timestamp;
@@ -250,8 +316,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
 			      struct sk_buff *skb)
 {
 	if (skb_queue_len(&offload->skb_queue) >
-	    offload->skb_queue_len_max)
-		return -ENOMEM;
+	    offload->skb_queue_len_max) {
+		kfree_skb(skb);
+		return -ENOBUFS;
+	}
 
 	skb_queue_tail(&offload->skb_queue, skb);
 	can_rx_offload_schedule(offload);
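With this change can_rx_offload_offload_one() has three distinct outcomes - a valid skb, NULL for an empty mailbox, or ERR_PTR() on failure - and both callers are updated to keep draining on errors. A tiny model of consuming such a tri-state return (the ERR_PTR machinery is re-implemented here purely for illustration):

    #include <stdio.h>

    /* Minimal user-space stand-ins for the kernel's ERR_PTR() helpers. */
    #define MAX_ERRNO         4095
    #define ERR_PTR(err)      ((void *)(long)(err))
    #define IS_ERR(p)         ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define IS_ERR_OR_NULL(p) (!(p) || IS_ERR(p))

    static int mailbox = 2;  /* pretend two reads are pending */

    static void *read_one(void)
    {
        if (!mailbox)
            return NULL;            /* mailbox empty */
        if (mailbox-- == 2)
            return ERR_PTR(-12);    /* fake -ENOMEM: this frame is dropped */
        return "frame";             /* a valid "skb" */
    }

    int main(void)
    {
        for (;;) {
            void *skb = read_one();

            if (IS_ERR(skb))
                continue;           /* drop it, but keep draining the FIFO */
            if (!skb)
                break;              /* nothing left to read */
            printf("queued %s\n", (char *)skb);
        }
        return 0;
    }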

View file

@@ -717,6 +717,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
 	if (priv->after_suspend) {
 		mcp251x_hw_reset(spi);
 		mcp251x_setup(net, spi);
+		priv->force_quit = 0;
 		if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
 			mcp251x_set_normal_mode(spi);
 		} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
@@ -728,7 +729,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
 			mcp251x_hw_sleep(spi);
 		}
 		priv->after_suspend = 0;
-		priv->force_quit = 0;
 	}
 
 	if (priv->restart_tx) {
if (priv->restart_tx) { if (priv->restart_tx) {

View file

@@ -73,6 +73,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
  */
 #define HECC_MAX_RX_MBOX	(HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
 #define HECC_RX_FIRST_MBOX	(HECC_MAX_MAILBOXES - 1)
+#define HECC_RX_LAST_MBOX	(HECC_MAX_TX_MBOX)
 
 /* TI HECC module registers */
 #define HECC_CANME		0x0	/* Mailbox enable */
@@ -82,7 +83,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
 #define HECC_CANTA		0x10	/* Transmission acknowledge */
 #define HECC_CANAA		0x14	/* Abort acknowledge */
 #define HECC_CANRMP		0x18	/* Receive message pending */
-#define HECC_CANRML		0x1C	/* Remote message lost */
+#define HECC_CANRML		0x1C	/* Receive message lost */
 #define HECC_CANRFP		0x20	/* Remote frame pending */
 #define HECC_CANGAM		0x24	/* SECC only:Global acceptance mask */
 #define HECC_CANMC		0x28	/* Master control */
@@ -149,6 +150,8 @@ MODULE_VERSION(HECC_MODULE_VERSION);
 #define HECC_BUS_ERROR		(HECC_CANES_FE | HECC_CANES_BE |\
 				HECC_CANES_CRCE | HECC_CANES_SE |\
 				HECC_CANES_ACKE)
+#define HECC_CANES_FLAGS	(HECC_BUS_ERROR | HECC_CANES_BO |\
+				HECC_CANES_EP | HECC_CANES_EW)
 
 #define HECC_CANMCF_RTR		BIT(4)	/* Remote transmit request */
@@ -382,8 +385,18 @@ static void ti_hecc_start(struct net_device *ndev)
 		hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
 	}
 
-	/* Prevent message over-write & Enable interrupts */
-	hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
+	/* Enable tx interrupts */
+	hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1);
+
+	/* Prevent message over-write to create a rx fifo, but not for
+	 * the lowest priority mailbox, since that allows detecting
+	 * overflows instead of the hardware silently dropping the
+	 * messages.
+	 */
+	mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+	hecc_write(priv, HECC_CANOPC, mbx_mask);
+
+	/* Enable interrupts */
 	if (priv->use_hecc1int) {
 		hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
 		hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
@@ -400,6 +413,9 @@ static void ti_hecc_stop(struct net_device *ndev)
 {
 	struct ti_hecc_priv *priv = netdev_priv(ndev);
 
+	/* Disable the CPK; stop sending, erroring and acking */
+	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
+
 	/* Disable interrupts and disable mailboxes */
 	hecc_write(priv, HECC_CANGIM, 0);
 	hecc_write(priv, HECC_CANMIM, 0);
@@ -508,8 +524,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
 	hecc_set_bit(priv, HECC_CANME, mbx_mask);
 	spin_unlock_irqrestore(&priv->mbx_lock, flags);
 
-	hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
-	hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
 	hecc_write(priv, HECC_CANTRS, mbx_mask);
 
 	return NETDEV_TX_OK;
@@ -526,8 +540,10 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
 					 u32 *timestamp, unsigned int mbxno)
 {
 	struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
-	u32 data;
+	u32 data, mbx_mask;
+	int ret = 1;
 
+	mbx_mask = BIT(mbxno);
 	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
 	if (data & HECC_CANMID_IDE)
 		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -548,7 +564,25 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
 
 	*timestamp = hecc_read_stamp(priv, mbxno);
 
-	return 1;
+	/* Check for FIFO overrun.
+	 *
+	 * All but the last RX mailbox have activated overwrite
+	 * protection. So skip check for overrun, if we're not
+	 * handling the last RX mailbox.
+	 *
+	 * As the overwrite protection for the last RX mailbox is
+	 * disabled, the CAN core might update while we're reading
+	 * it. This means the skb might be inconsistent.
+	 *
+	 * Return an error to let rx-offload discard this CAN frame.
+	 */
+	if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
+		     hecc_read(priv, HECC_CANRML) & mbx_mask))
+		ret = -ENOBUFS;
+
+	hecc_write(priv, HECC_CANRMP, mbx_mask);
+
+	return ret;
 }
 
 static int ti_hecc_error(struct net_device *ndev, int int_status,
@@ -558,90 +592,71 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
 	struct can_frame *cf;
 	struct sk_buff *skb;
 	u32 timestamp;
+	int err;
 
-	/* propagate the error condition to the can stack */
-	skb = alloc_can_err_skb(ndev, &cf);
-	if (!skb) {
-		if (printk_ratelimit())
-			netdev_err(priv->ndev,
-				   "%s: alloc_can_err_skb() failed\n",
-				   __func__);
-		return -ENOMEM;
-	}
-
-	if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
-		if ((int_status & HECC_CANGIF_BOIF) == 0) {
-			priv->can.state = CAN_STATE_ERROR_WARNING;
-			++priv->can.can_stats.error_warning;
-			cf->can_id |= CAN_ERR_CRTL;
-			if (hecc_read(priv, HECC_CANTEC) > 96)
-				cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
-			if (hecc_read(priv, HECC_CANREC) > 96)
-				cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
-		}
-		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
-		netdev_dbg(priv->ndev, "Error Warning interrupt\n");
-		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
-	}
-
-	if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
-		if ((int_status & HECC_CANGIF_BOIF) == 0) {
-			priv->can.state = CAN_STATE_ERROR_PASSIVE;
-			++priv->can.can_stats.error_passive;
-			cf->can_id |= CAN_ERR_CRTL;
-			if (hecc_read(priv, HECC_CANTEC) > 127)
-				cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
-			if (hecc_read(priv, HECC_CANREC) > 127)
-				cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
-		}
-		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
-		netdev_dbg(priv->ndev, "Error passive interrupt\n");
-		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
-	}
-
-	/* Need to check busoff condition in error status register too to
-	 * ensure warning interrupts don't hog the system
-	 */
-	if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
-		priv->can.state = CAN_STATE_BUS_OFF;
-		cf->can_id |= CAN_ERR_BUSOFF;
-		hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
-		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
-		/* Disable all interrupts in bus-off to avoid int hog */
-		hecc_write(priv, HECC_CANGIM, 0);
-		++priv->can.can_stats.bus_off;
-		can_bus_off(ndev);
-	}
-
 	if (err_status & HECC_BUS_ERROR) {
+		/* propagate the error condition to the can stack */
+		skb = alloc_can_err_skb(ndev, &cf);
+		if (!skb) {
+			if (net_ratelimit())
+				netdev_err(priv->ndev,
+					   "%s: alloc_can_err_skb() failed\n",
+					   __func__);
+			return -ENOMEM;
+		}
+
 		++priv->can.can_stats.bus_error;
 		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-		if (err_status & HECC_CANES_FE) {
-			hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
+		if (err_status & HECC_CANES_FE)
 			cf->data[2] |= CAN_ERR_PROT_FORM;
-		}
-		if (err_status & HECC_CANES_BE) {
-			hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
+		if (err_status & HECC_CANES_BE)
 			cf->data[2] |= CAN_ERR_PROT_BIT;
-		}
-		if (err_status & HECC_CANES_SE) {
-			hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
+		if (err_status & HECC_CANES_SE)
 			cf->data[2] |= CAN_ERR_PROT_STUFF;
-		}
-		if (err_status & HECC_CANES_CRCE) {
-			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
+		if (err_status & HECC_CANES_CRCE)
 			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
-		}
-		if (err_status & HECC_CANES_ACKE) {
-			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
+		if (err_status & HECC_CANES_ACKE)
 			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
-		}
+
+		timestamp = hecc_read(priv, HECC_CANLNT);
+		err = can_rx_offload_queue_sorted(&priv->offload, skb,
+						  timestamp);
+		if (err)
+			ndev->stats.rx_fifo_errors++;
 	}
 
-	timestamp = hecc_read(priv, HECC_CANLNT);
-	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+	hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS);
 
 	return 0;
 }
+
+static void ti_hecc_change_state(struct net_device *ndev,
+				 enum can_state rx_state,
+				 enum can_state tx_state)
+{
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	u32 timestamp;
+	int err;
+
+	skb = alloc_can_err_skb(priv->ndev, &cf);
+	if (unlikely(!skb)) {
+		priv->can.state = max(tx_state, rx_state);
+		return;
+	}
+
+	can_change_state(priv->ndev, cf, tx_state, rx_state);
+
+	if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
+		cf->data[6] = hecc_read(priv, HECC_CANTEC);
+		cf->data[7] = hecc_read(priv, HECC_CANREC);
+	}
+
+	timestamp = hecc_read(priv, HECC_CANLNT);
+	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+	if (err)
+		ndev->stats.rx_fifo_errors++;
+}
 
 static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
@@ -651,6 +666,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
 	struct net_device_stats *stats = &ndev->stats;
 	u32 mbxno, mbx_mask, int_status, err_status, stamp;
 	unsigned long flags, rx_pending;
+	u32 handled = 0;
 
 	int_status = hecc_read(priv,
 			       priv->use_hecc1int ?
@@ -660,17 +676,66 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
 		return IRQ_NONE;
 
 	err_status = hecc_read(priv, HECC_CANES);
-	if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
-			  HECC_CANES_EP | HECC_CANES_EW))
+	if (unlikely(err_status & HECC_CANES_FLAGS))
 		ti_hecc_error(ndev, int_status, err_status);
 
+	if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) {
+		enum can_state rx_state, tx_state;
+		u32 rec = hecc_read(priv, HECC_CANREC);
+		u32 tec = hecc_read(priv, HECC_CANTEC);
+
+		if (int_status & HECC_CANGIF_WLIF) {
+			handled |= HECC_CANGIF_WLIF;
+			rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0;
+			tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0;
+			netdev_dbg(priv->ndev, "Error Warning interrupt\n");
+			ti_hecc_change_state(ndev, rx_state, tx_state);
+		}
+
+		if (int_status & HECC_CANGIF_EPIF) {
+			handled |= HECC_CANGIF_EPIF;
+			rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0;
+			tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0;
+			netdev_dbg(priv->ndev, "Error passive interrupt\n");
+			ti_hecc_change_state(ndev, rx_state, tx_state);
+		}
+
+		if (int_status & HECC_CANGIF_BOIF) {
+			handled |= HECC_CANGIF_BOIF;
+			rx_state = CAN_STATE_BUS_OFF;
+			tx_state = CAN_STATE_BUS_OFF;
+			netdev_dbg(priv->ndev, "Bus off interrupt\n");
+
+			/* Disable all interrupts */
+			hecc_write(priv, HECC_CANGIM, 0);
+			can_bus_off(ndev);
+			ti_hecc_change_state(ndev, rx_state, tx_state);
+		}
+	} else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
+		enum can_state new_state, tx_state, rx_state;
+		u32 rec = hecc_read(priv, HECC_CANREC);
+		u32 tec = hecc_read(priv, HECC_CANTEC);
+
+		if (rec >= 128 || tec >= 128)
+			new_state = CAN_STATE_ERROR_PASSIVE;
+		else if (rec >= 96 || tec >= 96)
+			new_state = CAN_STATE_ERROR_WARNING;
+		else
+			new_state = CAN_STATE_ERROR_ACTIVE;
+
+		if (new_state < priv->can.state) {
+			rx_state = rec >= tec ? new_state : 0;
+			tx_state = rec <= tec ? new_state : 0;
+			ti_hecc_change_state(ndev, rx_state, tx_state);
+		}
+	}
+
 	if (int_status & HECC_CANGIF_GMIF) {
 		while (priv->tx_tail - priv->tx_head > 0) {
 			mbxno = get_tx_tail_mb(priv);
 			mbx_mask = BIT(mbxno);
 			if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
 				break;
-			hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
 			hecc_write(priv, HECC_CANTA, mbx_mask);
 			spin_lock_irqsave(&priv->mbx_lock, flags);
 			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
@@ -695,16 +760,15 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
 		while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
 			can_rx_offload_irq_offload_timestamp(&priv->offload,
 							     rx_pending);
-			hecc_write(priv, HECC_CANRMP, rx_pending);
 		}
 	}
 
 	/* clear all interrupt conditions - read back to avoid spurious ints */
 	if (priv->use_hecc1int) {
-		hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
+		hecc_write(priv, HECC_CANGIF1, handled);
 		int_status = hecc_read(priv, HECC_CANGIF1);
 	} else {
-		hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
+		hecc_write(priv, HECC_CANGIF0, handled);
 		int_status = hecc_read(priv, HECC_CANGIF0);
 	}
@@ -877,7 +941,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
 
 	priv->offload.mailbox_read = ti_hecc_mailbox_read;
 	priv->offload.mb_first = HECC_RX_FIRST_MBOX;
-	priv->offload.mb_last = HECC_MAX_TX_MBOX;
+	priv->offload.mb_last = HECC_RX_LAST_MBOX;
 	err = can_rx_offload_add_timestamp(ndev, &priv->offload);
 	if (err) {
 		dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
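The rx path above turns the mailboxes into a FIFO by enabling overwrite protection on all but the lowest-priority mailbox, so an overrun surfaces as a "receive message lost" bit there instead of a silent drop. The detection check, reduced to a sketch (register values are made up):

    #include <stdio.h>

    #define BIT(n)            (1U << (n))
    #define HECC_RX_LAST_MBOX 1

    int main(void)
    {
        /* Pretend CANRML readout: bit set = message in mailbox was lost. */
        unsigned int canrml = BIT(HECC_RX_LAST_MBOX);
        unsigned int mbxno;

        for (mbxno = 0; mbxno < 4; mbxno++) {
            /* Only the unprotected last RX mailbox can overrun. */
            if (mbxno == HECC_RX_LAST_MBOX && (canrml & BIT(mbxno)))
                printf("mailbox %u: overrun, frame discarded\n", mbxno);
            else
                printf("mailbox %u: ok\n", mbxno);
        }
        return 0;
    }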

View file

@@ -623,6 +623,7 @@ static int gs_can_open(struct net_device *netdev)
 					   rc);
 
 				usb_unanchor_urb(urb);
+				usb_free_urb(urb);
 				break;
 			}

View file

@@ -876,9 +876,8 @@ static void mcba_usb_disconnect(struct usb_interface *intf)
 	netdev_info(priv->netdev, "device disconnected\n");
 
 	unregister_candev(priv->netdev);
-	free_candev(priv->netdev);
-
 	mcba_urb_unlink(priv);
+	free_candev(priv->netdev);
 }
 
 static struct usb_driver mcba_usb_driver = {

View file

@@ -100,7 +100,7 @@ struct pcan_usb_msg_context {
 	u8 *end;
 	u8 rec_cnt;
 	u8 rec_idx;
-	u8 rec_data_idx;
+	u8 rec_ts_idx;
 	struct net_device *netdev;
 	struct pcan_usb *pdev;
 };
@@ -436,8 +436,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 		}
 		if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
 			/* no error (back to active state) */
-			mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
-			return 0;
+			new_state = CAN_STATE_ERROR_ACTIVE;
+			break;
 		}
 		break;
@@ -460,9 +460,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 		}
 
 		if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
-			/* no error (back to active state) */
-			mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
-			return 0;
+			/* no error (back to warning state) */
+			new_state = CAN_STATE_ERROR_WARNING;
+			break;
 		}
 		break;
@@ -501,6 +501,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 		mc->pdev->dev.can.can_stats.error_warning++;
 		break;
 
+	case CAN_STATE_ERROR_ACTIVE:
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+		break;
+
 	default:
 		/* CAN_STATE_MAX (trick to handle other errors) */
 		cf->can_id |= CAN_ERR_CRTL;
@@ -547,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
 	mc->ptr += PCAN_USB_CMD_ARGS;
 
 	if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
-		int err = pcan_usb_decode_ts(mc, !mc->rec_idx);
+		int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx);
 
 		if (err)
 			return err;
+
+		/* Next packet in the buffer will have a timestamp on a single
+		 * byte
+		 */
+		mc->rec_ts_idx++;
 	}
 
 	switch (f) {
@@ -632,10 +642,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 
 	cf->can_dlc = get_can_dlc(rec_len);
 
-	/* first data packet timestamp is a word */
-	if (pcan_usb_decode_ts(mc, !mc->rec_data_idx))
+	/* Only first packet timestamp is a word */
+	if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
 		goto decode_failed;
 
+	/* Next packet in the buffer will have a timestamp on a single byte */
+	mc->rec_ts_idx++;
+
 	/* read data */
 	memset(cf->data, 0x0, sizeof(cf->data));
 	if (status_len & PCAN_USB_STATUSLEN_RTR) {
@@ -688,7 +701,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
 			/* handle normal can frames here */
 			} else {
 				err = pcan_usb_decode_data(&mc, sl);
-				mc.rec_data_idx++;
 			}
 		}

View file

@@ -750,7 +750,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 	dev = netdev_priv(netdev);
 
 	/* allocate a buffer large enough to send commands */
-	dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
+	dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
 	if (!dev->cmd_buf) {
 		err = -ENOMEM;
 		goto lbl_free_candev;

View file

@@ -996,9 +996,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
 		netdev_info(priv->netdev, "device disconnected\n");
 
 		unregister_netdev(priv->netdev);
-		free_candev(priv->netdev);
-
 		unlink_all_urbs(priv);
+		free_candev(priv->netdev);
 	}
 }

View file

@@ -1599,7 +1599,6 @@ static const struct xcan_devtype_data xcan_zynq_data = {
 
 static const struct xcan_devtype_data xcan_axi_data = {
 	.cantype = XAXI_CAN,
-	.flags = XCAN_FLAG_TXFEMP,
 	.bittiming_const = &xcan_bittiming_const,
 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,

View file

@@ -1236,10 +1236,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
 	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
 	priv->wol_ports_mask = 0;
+	/* Disable interrupts */
+	bcm_sf2_intr_disable(priv);
 	dsa_unregister_switch(priv->dev->ds);
 	bcm_sf2_cfp_exit(priv->dev->ds);
-	/* Disable all ports and interrupts */
-	bcm_sf2_sw_suspend(priv->dev->ds);
 	bcm_sf2_mdio_unregister(priv);
 	if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
 		reset_control_assert(priv->rcdev);

View file

@@ -1996,8 +1996,6 @@ static void reset_umac(struct bcmgenet_priv *priv)
 
 	/* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
 	bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
-	udelay(2);
-	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
 }
 
 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2614,8 +2612,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
 	spin_unlock_irq(&priv->lock);
 
 	if (status & UMAC_IRQ_PHY_DET_R &&
-	    priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
 		phy_init_hw(priv->dev->phydev);
+		genphy_config_aneg(priv->dev->phydev);
+	}
 
 	/* Link UP/DOWN event */
 	if (status & UMAC_IRQ_LINK_EVENT)
@@ -2879,12 +2879,6 @@ static int bcmgenet_open(struct net_device *dev)
 	if (priv->internal_phy)
 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
-	ret = bcmgenet_mii_connect(dev);
-	if (ret) {
-		netdev_err(dev, "failed to connect to PHY\n");
-		goto err_clk_disable;
-	}
-
 	/* take MAC out of reset */
 	bcmgenet_umac_reset(priv);
@@ -2894,12 +2888,6 @@ static int bcmgenet_open(struct net_device *dev)
 
 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
 	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 
-	ret = bcmgenet_mii_config(dev, true);
-	if (ret) {
-		netdev_err(dev, "unsupported PHY\n");
-		goto err_disconnect_phy;
-	}
-
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
 	if (priv->internal_phy) {
@@ -2915,7 +2903,7 @@ static int bcmgenet_open(struct net_device *dev)
 	ret = bcmgenet_init_dma(priv);
 	if (ret) {
 		netdev_err(dev, "failed to initialize DMA\n");
-		goto err_disconnect_phy;
+		goto err_clk_disable;
 	}
 
 	/* Always enable ring 16 - descriptor ring */
@@ -2938,19 +2926,25 @@ static int bcmgenet_open(struct net_device *dev)
 		goto err_irq0;
 	}
 
+	ret = bcmgenet_mii_probe(dev);
+	if (ret) {
+		netdev_err(dev, "failed to connect to PHY\n");
+		goto err_irq1;
+	}
+
 	bcmgenet_netif_start(dev);
 
 	netif_tx_start_all_queues(dev);
 
 	return 0;
 
+err_irq1:
+	free_irq(priv->irq1, priv);
 err_irq0:
 	free_irq(priv->irq0, priv);
 err_fini_dma:
 	bcmgenet_dma_teardown(priv);
 	bcmgenet_fini_dma(priv);
-err_disconnect_phy:
-	phy_disconnect(dev->phydev);
 err_clk_disable:
 	if (priv->internal_phy)
 		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3635,8 +3629,6 @@ static int bcmgenet_resume(struct device *d)
 	if (priv->internal_phy)
 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
-	phy_init_hw(dev->phydev);
-
 	bcmgenet_umac_reset(priv);
 
 	init_umac(priv);
@@ -3645,7 +3637,10 @@ static int bcmgenet_resume(struct device *d)
 	if (priv->wolopts)
 		clk_disable_unprepare(priv->clk_wol);
 
+	phy_init_hw(dev->phydev);
+
 	/* Speed settings must be restored */
+	genphy_config_aneg(dev->phydev);
 	bcmgenet_mii_config(priv->dev, false);
 
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);

View file

@@ -720,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_connect(struct net_device *dev);
 int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);

View file

@ -173,46 +173,6 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_fixed_phy_link_update); bcmgenet_fixed_phy_link_update);
} }
int bcmgenet_mii_connect(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device_node *dn = priv->pdev->dev.of_node;
struct phy_device *phydev;
u32 phy_flags = 0;
int ret;
/* Communicate the integrated PHY revision */
if (priv->internal_phy)
phy_flags = priv->gphy_rev;
/* Initialize link state variables that bcmgenet_mii_setup() uses */
priv->old_link = -1;
priv->old_speed = -1;
priv->old_duplex = -1;
priv->old_pause = -1;
if (dn) {
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
phy_flags, priv->phy_interface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
}
} else {
phydev = dev->phydev;
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
priv->phy_interface);
if (ret) {
pr_err("could not attach to PHY\n");
return -ENODEV;
}
}
return 0;
}
int bcmgenet_mii_config(struct net_device *dev, bool init) int bcmgenet_mii_config(struct net_device *dev, bool init)
{ {
struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_priv *priv = netdev_priv(dev);
@ -221,8 +181,38 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
const char *phy_name = NULL; const char *phy_name = NULL;
u32 id_mode_dis = 0; u32 id_mode_dis = 0;
u32 port_ctrl; u32 port_ctrl;
int bmcr = -1;
int ret;
u32 reg; u32 reg;
/* MAC clocking workaround during reset of umac state machines */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET) {
/* An MII PHY must be isolated to prevent TXC contention */
if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
ret = phy_read(phydev, MII_BMCR);
if (ret >= 0) {
bmcr = ret;
ret = phy_write(phydev, MII_BMCR,
bmcr | BMCR_ISOLATE);
}
if (ret) {
netdev_err(dev, "failed to isolate PHY\n");
return ret;
}
}
/* Switch MAC clocking to RGMII generated clock */
bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
/* Ensure 5 clks with Rx disabled
* followed by 5 clks with Reset asserted
*/
udelay(4);
reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
/* Ensure 5 more clocks before Rx is enabled */
udelay(2);
}
priv->ext_phy = !priv->internal_phy && priv->ext_phy = !priv->internal_phy &&
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA); (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
@ -254,6 +244,9 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
phy_set_max_speed(phydev, SPEED_100); phy_set_max_speed(phydev, SPEED_100);
bcmgenet_sys_writel(priv, bcmgenet_sys_writel(priv,
PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
/* Restore the MII PHY after isolation */
if (bmcr >= 0)
phy_write(phydev, MII_BMCR, bmcr);
break; break;
case PHY_INTERFACE_MODE_REVMII: case PHY_INTERFACE_MODE_REVMII:
@ -306,21 +299,71 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
} }
if (init) { if (init)
linkmode_copy(phydev->advertising, phydev->supported);
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
* that prevents the signaling of link UP interrupts when
* the link operates at 10Mbps, so fallback to polling for
* those versions of GENET.
*/
if (priv->internal_phy && !GENET_IS_V5(priv))
phydev->irq = PHY_IGNORE_INTERRUPT;
dev_info(kdev, "configuring instance for %s\n", phy_name); dev_info(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device_node *dn = priv->pdev->dev.of_node;
struct phy_device *phydev;
u32 phy_flags = 0;
int ret;
/* Communicate the integrated PHY revision */
if (priv->internal_phy)
phy_flags = priv->gphy_rev;
/* Initialize link state variables that bcmgenet_mii_setup() uses */
priv->old_link = -1;
priv->old_speed = -1;
priv->old_duplex = -1;
priv->old_pause = -1;
if (dn) {
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
phy_flags, priv->phy_interface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
}
} else {
phydev = dev->phydev;
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
priv->phy_interface);
if (ret) {
pr_err("could not attach to PHY\n");
return -ENODEV;
}
} }
/* Configure port multiplexer based on what the probed PHY device since
* reading the 'max-speed' property determines the maximum supported
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
ret = bcmgenet_mii_config(dev, true);
if (ret) {
phy_disconnect(dev->phydev);
return ret;
}
linkmode_copy(phydev->advertising, phydev->supported);
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
* that prevents the signaling of link UP interrupts when
* the link operates at 10Mbps, so fallback to polling for
* those versions of GENET.
*/
if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
}
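The isolate/restore sequence above follows the usual phylib pattern for keeping an MII PHY from driving TXC while the MAC is in reset. A minimal sketch of that pattern, assuming an attached struct phy_device *phydev and only the MII_BMCR/BMCR_ISOLATE definitions already used in the hunk:

/* Save BMCR, isolate the PHY for the duration of the MAC reset,
 * then write the saved value back (which clears BMCR_ISOLATE).
 */
int bmcr = phy_read(phydev, MII_BMCR);

if (bmcr >= 0)
	phy_write(phydev, MII_BMCR, bmcr | BMCR_ISOLATE);

/* ... MAC soft reset runs here ... */

if (bmcr >= 0)
	phy_write(phydev, MII_BMCR, bmcr);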


@@ -1499,7 +1499,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;
netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
mac = of_get_mac_address(pdev->dev.of_node);
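The change reserves room for a VLAN tag within the 16383-byte hardware frame limit when advertising max_mtu. A standalone check of the arithmetic; VLAN_HLEN is the 4-byte 802.1Q tag, and the headroom value below is an assumption for illustration (see the driver for the real define):

#include <stdio.h>

#define VLAN_HLEN 4				/* 802.1Q tag */
#define OCTEON_MGMT_RX_HEADROOM (14 + 4)	/* assumed: Ethernet header + FCS */

int main(void)
{
	printf("old max_mtu: %d\n", 16383 - OCTEON_MGMT_RX_HEADROOM);
	printf("new max_mtu: %d\n", 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN);
	return 0;
}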


@@ -3645,6 +3645,8 @@ fec_drv_remove(struct platform_device *pdev)
regulator_disable(fep->reg_phy);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
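The two added calls balance the clock enables taken on the probe path, which were previously leaked on removal. A hedged sketch of the pairing, using the field names from the hunk and omitting error handling:

/* probe */
clk_prepare_enable(fep->clk_ipg);
clk_prepare_enable(fep->clk_ahb);

/* remove: undo in reverse order, after runtime PM is finished */
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);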


@@ -199,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
ring->q = q;
ring->flags = flags;
spin_lock_init(&ring->lock);
ring->coal_param = q->handle->coal_param;
assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);


@@ -274,9 +274,6 @@ struct hnae_ring {
/* statistic */
struct ring_stats stats;
/* ring lock for poll one */
spinlock_t lock;
dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */


@@ -943,15 +943,6 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
#else
#define NETIF_TX_LOCK(ring)
#define NETIF_TX_UNLOCK(ring)
#endif
/* reclaim all desc in one budget
* return error or number of desc left
*/
@@ -965,21 +956,16 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
int head;
int bytes, pkts;
NETIF_TX_LOCK(ring);
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
rmb(); /* make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean) {
if (is_ring_empty(ring) || head == ring->next_to_clean)
NETIF_TX_UNLOCK(ring);
return 0; /* no data to poll */
}
if (!is_valid_clean_head(ring, head)) {
netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
ring->stats.io_err_cnt++;
NETIF_TX_UNLOCK(ring);
return -EIO;
}
@@ -994,8 +980,6 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
ring->stats.tx_pkts += pkts;
ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_completed_queue(dev_queue, pkts, bytes);
@@ -1055,16 +1039,12 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
int head;
int bytes, pkts;
NETIF_TX_LOCK(ring);
head = ring->next_to_use; /* ntu: software-set ring position */
bytes = 0;
pkts = 0;
while (head != ring->next_to_clean)
hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_reset_queue(dev_queue);
}
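These hns hunks delete a per-ring spinlock that was only compiled in for netpoll; the reclaim walk runs from a single cleaning context, so advancing next_to_clean toward the hardware head needs no lock. A standalone toy model of the walk that remains (plain C, not driver code):

#include <stdio.h>

struct ring { int head, next_to_clean, desc_num; };

/* Single consumer: advance next_to_clean until it meets the head. */
static int reclaim(struct ring *r)
{
	int pkts = 0;

	while (r->next_to_clean != r->head) {
		r->next_to_clean = (r->next_to_clean + 1) % r->desc_num;
		pkts++;
	}
	return pkts;
}

int main(void)
{
	struct ring r = { .head = 5, .next_to_clean = 2, .desc_num = 8 };

	printf("reclaimed %d descriptors\n", reclaim(&r));
	return 0;
}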


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNAE3_H
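This and the following one-line patches switch header files to C-style SPDX comments: the kernel's license-rules documentation requires /* */ comments for SPDX tags in .h files, while // is the form used in .c files. The same fix repeats for the hns3 and hclge headers below:

/* SPDX-License-Identifier: GPL-2.0+ */		/* required form in .h files */
// SPDX-License-Identifier: GPL-2.0+		/* form used in .c files */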


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNS3_ENET_H


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_CMD_H


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_DCB_H__


@@ -3679,12 +3679,28 @@ static int hclge_set_rst_done(struct hclge_dev *hdev)
{
struct hclge_pf_rst_done_cmd *req;
struct hclge_desc desc;
int ret;
req = (struct hclge_pf_rst_done_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
return hclge_cmd_send(&hdev->hw, &desc, 1);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* To be compatible with the old firmware, which does not support
* command HCLGE_OPC_PF_RST_DONE, just print a warning and
* return success
*/
if (ret == -EOPNOTSUPP) {
dev_warn(&hdev->pdev->dev,
"current firmware does not support command(0x%x)!\n",
HCLGE_OPC_PF_RST_DONE);
return 0;
} else if (ret) {
dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
ret);
}
return ret;
}
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
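Read as a whole, hclge_set_rst_done now treats -EOPNOTSUPP as "firmware too old to know this opcode" and reports success, while any other error is logged and propagated. The resulting control flow, reassembled from the flattened hunk above:

ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret == -EOPNOTSUPP) {
	/* old firmware: an unknown opcode is not a real failure */
	dev_warn(&hdev->pdev->dev,
		 "current firmware does not support command(0x%x)!\n",
		 HCLGE_OPC_PF_RST_DONE);
	return 0;
} else if (ret) {
	dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", ret);
}
return ret;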


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MAIN_H


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MDIO_H


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_TM_H


@@ -20,6 +20,8 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
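The new minor-version define lets the X722 path gate extended link-info handling on the firmware API level, complementing the MAC-type check in the i40e_aq_get_link_info hunk below. A hedged sketch of such a gate; the api version field names are assumptions about the adminq info struct:

bool has_x722_link_info =
	hw->aq.api_maj_ver > 1 ||
	(hw->aq.api_maj_ver == 1 &&
	 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722);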


@@ -1896,7 +1896,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
hw->mac.type != I40E_MAC_X722) {
__le32 tmp;
memcpy(&tmp, resp->link_type, sizeof(tmp));


@@ -689,8 +689,6 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
i40e_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return !!budget && work_done;
@@ -769,12 +767,8 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
out_xmit:
if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
if (tx_ring->next_to_clean == tx_ring->next_to_use)
xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
else
xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
}
xmit_done = i40e_xmit_zc(tx_ring, budget);
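This hunk (and the matching ixgbe one below) stops toggling the AF_XDP TX need_wakeup flag from the driver and leaves it set whenever the feature is on, so userspace reliably kicks the kernel when it has frames queued. The userspace half of the protocol, sketched with the libbpf xsk helpers; tx and xsk stand for an already initialized TX ring and socket:

/* Only enter the kernel when the driver requested a wakeup. */
if (xsk_ring_prod__needs_wakeup(&tx))
	sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);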


@@ -314,7 +314,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
q_vector->ring_mask |= BIT(r_idx);
wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
q_vector->rx.current_itr);
q_vector->rx.current_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
}
@@ -340,7 +340,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
q_vector->num_ringpairs++;
wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
q_vector->tx.target_itr);
q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
}
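The >> 1 appears because the VFINT_ITRN registers count the throttling interval in 2-microsecond units while the driver tracks microseconds; writing the raw value doubled the effective interval. Standalone arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned int itr_usecs = 50;	/* driver-side interval in usecs */

	printf("%u usecs -> register value %u (2 usec units)\n",
	       itr_usecs, itr_usecs >> 1);
	return 0;
}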


@@ -1204,7 +1204,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
struct ice_aqc_query_txsched_res_resp *buf;
enum ice_status status = 0;
__le16 max_sibl;
u8 i;
u16 i;
if (hw->layer_info)
return status;
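Widening the counter matters because it is compared against a 16-bit sibling count: an 8-bit variable wraps at 256 and can never reach a larger bound, turning the loop into an infinite one. A standalone demonstration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned short bound = 300;	/* like the __le16 max_sibl above */
	unsigned char i = 0;		/* the old u8 counter */
	unsigned int guard;

	for (guard = 0; i < bound && guard < 1000; guard++)
		i++;			/* silently wraps 255 -> 0 */

	printf("u8 counter %s the bound (%u iterations tried)\n",
	       guard < 1000 ? "reached" : "never reaches", guard);
	return 0;
}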


@@ -5677,8 +5677,8 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
* should have been handled by the upper layers.
*/
if (tx_ring->launchtime_enable) {
ts = ns_to_timespec64(first->skb->tstamp);
ts = ktime_to_timespec64(first->skb->tstamp);
first->skb->tstamp = 0;
first->skb->tstamp = ktime_set(0, 0);
context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
} else {
context_desc->seqnum_seed = 0;
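igb and igc (next hunk) move to the ktime accessors: skb->tstamp is a ktime_t, so ktime_to_timespec64() and ktime_set() avoid assuming its underlying representation, while the descriptor still encodes the nanosecond remainder in 32 ns units. A standalone re-creation of the conversion, with local stand-ins for the kernel types since this is illustrative only:

#include <stdio.h>

typedef long long ktime_t;		/* kernel: nanoseconds in an s64 */
struct timespec64 { long long tv_sec; long tv_nsec; };

int main(void)
{
	ktime_t tstamp = 1573300000123456789LL;	/* example launch time */
	struct timespec64 ts = {
		.tv_sec  = tstamp / 1000000000LL,
		.tv_nsec = tstamp % 1000000000LL,
	};

	printf("launch %lld.%09ld s, seqnum_seed %ld\n",
	       ts.tv_sec, ts.tv_nsec, ts.tv_nsec / 32);
	return 0;
}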


@@ -862,8 +862,8 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
* should have been handled by the upper layers.
*/
if (tx_ring->launchtime_enable) {
ts = ns_to_timespec64(first->skb->tstamp);
ts = ktime_to_timespec64(first->skb->tstamp);
first->skb->tstamp = 0;
first->skb->tstamp = ktime_set(0, 0);
context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
} else {
context_desc->launch_time = 0;


@@ -622,8 +622,6 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return !!budget && work_done;
@@ -691,12 +689,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
if (xsk_frames)
xsk_umem_complete_tx(umem, xsk_frames);
if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
if (tx_ring->next_to_clean == tx_ring->next_to_use)
xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
else
xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
}
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}


@@ -514,8 +514,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
* spare CQE to enable resizing the CQ.
* empty CQ and a full CQ.
*/
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;


@@ -1079,7 +1079,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
MLX5_CAP_GEN(dev, max_flow_counter_15_0);
fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
fdb_max);
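The format-string fix matters because fdb_max already holds 1 << log_max_ft_size; labeling it "2^%d" would present an exponent that had already been applied. Standalone check:

#include <stdio.h>

int main(void)
{
	int log_max_ft_size = 10;
	int fdb_max = 1 << log_max_ft_size;

	printf("misleading: max flow table size(2^%d)\n", fdb_max);
	printf("correct:    max flow table size(%d)\n", fdb_max);
	return 0;
}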

Some files were not shown because too many files have changed in this diff.