diff --git a/config/kernel/linux-mvebu64-default.config b/config/kernel/linux-mvebu64-default.config index eb78bdc62..50b9f81e7 100644 --- a/config/kernel/linux-mvebu64-default.config +++ b/config/kernel/linux-mvebu64-default.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 4.4.52 Kernel Configuration +# Linux/arm64 4.4.71 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y @@ -50,7 +50,9 @@ CONFIG_FHANDLE=y CONFIG_USELIB=y CONFIG_AUDIT=y CONFIG_HAVE_ARCH_AUDITSYSCALL=y -# CONFIG_AUDITSYSCALL is not set +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y # # IRQ subsystem @@ -115,11 +117,12 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_CGROUPS=y # CONFIG_CGROUP_DEBUG is not set -# CONFIG_CGROUP_FREEZER is not set -# CONFIG_CGROUP_PIDS is not set -# CONFIG_CGROUP_DEVICE is not set -# CONFIG_CPUSETS is not set -# CONFIG_CGROUP_CPUACCT is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_CPUACCT=y CONFIG_PAGE_COUNTER=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y @@ -129,19 +132,19 @@ CONFIG_CGROUP_HUGETLB=y # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_CFS_BANDWIDTH is not set -# CONFIG_RT_GROUP_SCHED is not set +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y # CONFIG_BLK_CGROUP is not set # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_USER_NS is not set +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y CONFIG_PID_NS=y -# CONFIG_NET_NS is not set +CONFIG_NET_NS=y CONFIG_SCHED_AUTOGROUP=y # CONFIG_SYSFS_DEPRECATED is not set -# CONFIG_RELAY is not set +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y @@ -156,12 +159,12 @@ CONFIG_ANON_INODES=y CONFIG_HAVE_UID16=y CONFIG_SYSCTL_EXCEPTION_TRACE=y CONFIG_BPF=y -# CONFIG_EXPERT is not set +CONFIG_EXPERT=y CONFIG_UID16=y CONFIG_MULTIUSER=y -# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SGETMASK_SYSCALL=y CONFIG_SYSFS_SYSCALL=y -# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y CONFIG_PRINTK=y @@ -173,14 +176,14 @@ CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y -# CONFIG_BPF_SYSCALL is not set +CONFIG_BPF_SYSCALL=y CONFIG_SHMEM=y CONFIG_AIO=y CONFIG_ADVISE_SYSCALLS=y -# CONFIG_USERFAULTFD is not set +CONFIG_USERFAULTFD=y CONFIG_PCI_QUIRKS=y CONFIG_MEMBARRIER=y -# CONFIG_EMBEDDED is not set +CONFIG_EMBEDDED=y CONFIG_HAVE_PERF_EVENTS=y CONFIG_PERF_USE_VMALLOC=y @@ -194,6 +197,7 @@ CONFIG_SLUB_DEBUG=y # CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y +# CONFIG_SLOB is not set CONFIG_SLUB_CPU_PARTIAL=y # CONFIG_SYSTEM_DATA_VERIFICATION is not set CONFIG_PROFILING=y @@ -252,10 +256,10 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_COMPRESS is not set CONFIG_MODULES_TREE_LOOKUP=y CONFIG_BLOCK=y -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_BLK_DEV_BSGLIB is not set -# CONFIG_BLK_DEV_INTEGRITY is not set -# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_CMDLINE_PARSER=y # # Partition Types @@ -269,8 +273,9 @@ CONFIG_BLOCK_COMPAT=y # IO Schedulers # CONFIG_IOSCHED_NOOP=y -# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" @@ -428,9 +433,11 @@ 
CONFIG_CMA=y # CONFIG_CMA_DEBUG is not set # CONFIG_CMA_DEBUGFS is not set CONFIG_CMA_AREAS=7 -# CONFIG_ZPOOL is not set -# CONFIG_ZBUD is not set -# CONFIG_ZSMALLOC is not set +CONFIG_ZPOOL=y +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_PGTABLE_MAPPING=y +# CONFIG_ZSMALLOC_STAT is not set CONFIG_GENERIC_EARLY_IOREMAP=y # CONFIG_IDLE_PAGE_TRACKING is not set # CONFIG_SECCOMP is not set @@ -472,6 +479,7 @@ CONFIG_SYSVIPC_COMPAT=y # CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set CONFIG_PM_SLEEP=y CONFIG_PM_SLEEP_SMP=y # CONFIG_PM_AUTOSLEEP is not set @@ -508,17 +516,17 @@ CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_COMMON=y CONFIG_CPU_FREQ_STAT=y -# CONFIG_CPU_FREQ_STAT_DETAILS is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y -# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y # # CPU frequency scaling drivers @@ -528,102 +536,622 @@ CONFIG_CPUFREQ_DT=y # CONFIG_ARM_KIRKWOOD_CPUFREQ is not set CONFIG_ARM_ARMADA3700_CPUFREQ=y CONFIG_ARM_ARMADA_8K_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y # # Networking options # CONFIG_PACKET=y -# CONFIG_PACKET_DIAG is not set +CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -# CONFIG_UNIX_DIAG is not set +CONFIG_UNIX_DIAG=m CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set -# CONFIG_NET_KEY is not set +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y -# CONFIG_IP_MULTICAST is not set -# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y -# CONFIG_IP_PNP_RARP is not set -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE_DEMUX is not set -# CONFIG_NET_IP_TUNNEL is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_NET_IPVTI is not set -# CONFIG_NET_UDP_TUNNEL is not set -# CONFIG_NET_FOU is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set -CONFIG_INET_XFRM_MODE_TRANSPORT=y -CONFIG_INET_XFRM_MODE_TUNNEL=y -CONFIG_INET_XFRM_MODE_BEET=y -# CONFIG_INET_LRO is not set -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -# CONFIG_INET_UDP_DIAG is not set -# CONFIG_TCP_CONG_ADVANCED is not set -CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -# CONFIG_IPV6 is not set +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y 
+CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_LRO=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_DEFAULT_RENO=y +CONFIG_DEFAULT_TCP_CONG="reno" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y # CONFIG_NETLABEL is not set -# CONFIG_NETWORK_SECMARK is not set +CONFIG_NETWORK_SECMARK=y CONFIG_NET_PTP_CLASSIFY=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -# CONFIG_NETFILTER is not set -# CONFIG_IP_DCCP is not set -# CONFIG_IP_SCTP is not set -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -CONFIG_STP=y -CONFIG_BRIDGE=y -CONFIG_BRIDGE_IGMP_SNOOPING=y -# CONFIG_BRIDGE_VLAN_FILTERING is not set +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=m +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=m +CONFIG_NF_NAT_PROTO_UDPLITE=m +CONFIG_NF_NAT_PROTO_SCTP=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=m +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m +CONFIG_NFT_EXTHDR=m 
+CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m 
+CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NF_TABLES_ARP=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +# CONFIG_IP_NF_SECURITY is not set +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +# CONFIG_IP6_NF_SECURITY is not set +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m + +# +# DECnet: Netfilter Configuration +# +CONFIG_DECNET_NF_GRABULATOR=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m 
+CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +CONFIG_IP_DCCP_CCID3=y +# CONFIG_IP_DCCP_CCID3_DEBUG is not set +CONFIG_IP_DCCP_TFRC_LIB=y + +# +# DCCP Kernel Hacking +# +# CONFIG_IP_DCCP_DEBUG is not set +CONFIG_IP_SCTP=m +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_RDS=m +CONFIG_RDS_TCP=m +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m + CONFIG_HAVE_NET_DSA=y CONFIG_NET_DSA=y CONFIG_NET_DSA_TAG_EDSA=y -CONFIG_VLAN_8021Q=y -# CONFIG_VLAN_8021Q_GVRP is not set -# CONFIG_VLAN_8021Q_MVRP is not set -# CONFIG_DECNET is not set -CONFIG_LLC=y -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -# CONFIG_IEEE802154 is not set -# CONFIG_NET_SCHED is not set -# CONFIG_DCB is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_DECNET=m +CONFIG_DECNET_ROUTER=y +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_IPX=m +CONFIG_IPX_INTERN=y +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_NHC=m +CONFIG_6LOWPAN_NHC_DEST=m +CONFIG_6LOWPAN_NHC_FRAGMENT=m +CONFIG_6LOWPAN_NHC_HOP=m +CONFIG_6LOWPAN_NHC_IPV6=m +CONFIG_6LOWPAN_NHC_MOBILITY=m +CONFIG_6LOWPAN_NHC_ROUTING=m +CONFIG_6LOWPAN_NHC_UDP=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_CANID=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m 
+CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y CONFIG_DNS_RESOLVER=y -# CONFIG_BATMAN_ADV is not set -CONFIG_OPENVSWITCH=y -# CONFIG_VSOCKETS is not set -# CONFIG_NETLINK_MMAP is not set -# CONFIG_NETLINK_DIAG is not set +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +# CONFIG_BATMAN_ADV_DEBUG is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=y -# CONFIG_MPLS_ROUTING is not set -# CONFIG_HSR is not set +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_HSR=m CONFIG_NET_SWITCHDEV=y -# CONFIG_NET_L3_MASTER_DEV is not set +CONFIG_NET_L3_MASTER_DEV=y CONFIG_RPS=y CONFIG_RFS_ACCEL=y CONFIG_XPS=y CONFIG_HWBM=y -# CONFIG_CGROUP_NET_PRIO is not set -# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y CONFIG_NET_RX_BUSY_POLL=y CONFIG_BQL=y CONFIG_BPF_JIT=y @@ -632,23 +1160,238 @@ CONFIG_NET_FLOW_LIMIT=y # # Network testing # -# CONFIG_NET_PKTGEN is not set +CONFIG_NET_PKTGEN=m # CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -# CONFIG_WIRELESS is not set -# CONFIG_WIMAX is not set -# CONFIG_RFKILL is not set -# CONFIG_RFKILL_REGULATOR is not set -CONFIG_NET_9P=y -CONFIG_NET_9P_VIRTIO=y +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +CONFIG_CAN_LEDS=y +CONFIG_CAN_GRCAN=m +CONFIG_CAN_XILINXCAN=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_SJA1000_ISA=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_M_CAN=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_ISA=m +CONFIG_CAN_CC770_PLATFORM=m + +# +# CAN SPI interfaces +# +CONFIG_CAN_MCP251X=m + +# +# CAN USB interfaces +# +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +CONFIG_CAN_GS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_SOFTING=m +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_IRDA=m + +# +# IrDA protocols +# +CONFIG_IRLAN=m +CONFIG_IRNET=m +CONFIG_IRCOMM=m +CONFIG_IRDA_ULTRA=y + +# +# IrDA options +# +CONFIG_IRDA_CACHE_LAST_LSAP=y +CONFIG_IRDA_FAST_RR=y +# CONFIG_IRDA_DEBUG is not set + +# +# Infrared-port device drivers +# + +# +# SIR device drivers +# +CONFIG_IRTTY_SIR=m + +# +# Dongle support +# +CONFIG_DONGLE=y +CONFIG_ESI_DONGLE=m +CONFIG_ACTISYS_DONGLE=m +CONFIG_TEKRAM_DONGLE=m +CONFIG_TOIM3232_DONGLE=m +CONFIG_LITELINK_DONGLE=m +CONFIG_MA600_DONGLE=m +CONFIG_GIRBIL_DONGLE=m +CONFIG_MCP2120_DONGLE=m +CONFIG_OLD_BELKIN_DONGLE=m +CONFIG_ACT200L_DONGLE=m +CONFIG_KINGSUN_DONGLE=m +CONFIG_KSDAZZLE_DONGLE=m +CONFIG_KS959_DONGLE=m + +# +# FIR device drivers +# +CONFIG_USB_IRDA=m +CONFIG_SIGMATEL_FIR=m +CONFIG_VLSI_FIR=m +CONFIG_MCS_FIR=m +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_6LOWPAN=m +# CONFIG_BT_SELFTEST is not set +# CONFIG_BT_DEBUGFS is not set 
+ +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_QCA=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_HCIUART_3WIRE=y +CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_BCM=y +CONFIG_BT_HCIUART_QCA=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_AF_RXRPC=m +# CONFIG_AF_RXRPC_DEBUG is not set +CONFIG_RXKAD=m +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_REG_RELAX_NO_IR=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +CONFIG_CFG80211_WEXT_EXPORT=y +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_MINSTREL_VHT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +CONFIG_MAC80211_MESH=y +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_WIMAX=m +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_REGULATOR=m +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m # CONFIG_NET_9P_DEBUG is not set # CONFIG_CAIF is not set -# CONFIG_CEPH_LIB is not set -# CONFIG_NFC is not set -# CONFIG_LWTUNNEL is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_NFC=m +CONFIG_NFC_DIGITAL=m +CONFIG_NFC_NCI=m +CONFIG_NFC_NCI_SPI=m +CONFIG_NFC_NCI_UART=m +CONFIG_NFC_HCI=m +CONFIG_NFC_SHDLC=y + +# +# Near Field Communication (NFC) devices +# +CONFIG_NFC_PN533=m +CONFIG_NFC_TRF7970A=m +CONFIG_NFC_SIM=m +CONFIG_NFC_PORT100=m +CONFIG_NFC_FDP=m +CONFIG_NFC_FDP_I2C=m +CONFIG_NFC_PN544=m +CONFIG_NFC_PN544_I2C=m +CONFIG_NFC_MICROREAD=m +CONFIG_NFC_MICROREAD_I2C=m +CONFIG_NFC_MRVL=m +CONFIG_NFC_MRVL_USB=m +CONFIG_NFC_MRVL_UART=m +CONFIG_NFC_MRVL_I2C=m +CONFIG_NFC_MRVL_SPI=m +CONFIG_NFC_ST21NFCA=m +CONFIG_NFC_ST21NFCA_I2C=m +CONFIG_NFC_ST_NCI=m +CONFIG_NFC_ST_NCI_I2C=m +CONFIG_NFC_ST_NCI_SPI=m +CONFIG_NFC_NXP_NCI=m +CONFIG_NFC_NXP_NCI_I2C=m +CONFIG_NFC_S3FWRN5=m +CONFIG_NFC_S3FWRN5_I2C=m +CONFIG_LWTUNNEL=y CONFIG_HAVE_BPF_JIT=y # @@ -670,7 +1413,9 @@ CONFIG_FW_LOADER=y CONFIG_FIRMWARE_IN_KERNEL=y CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_WANT_DEV_COREDUMP=y CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y # CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DEVRES is not set # CONFIG_SYS_HYPERVISOR is not set @@ -812,9 +1557,18 @@ CONFIG_OF_MTD=y CONFIG_OF_RESERVED_MEM=y # CONFIG_OF_OVERLAY is not set # CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# 
+CONFIG_PNPACPI=y CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set # CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_LZ4_COMPRESS is not set # CONFIG_BLK_CPQ_CISS_DA is not set # CONFIG_BLK_DEV_DAC960 is not set # CONFIG_BLK_DEV_UMEM is not set @@ -825,6 +1579,7 @@ CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 # CONFIG_BLK_DEV_DRBD is not set # CONFIG_BLK_DEV_NBD is not set # CONFIG_BLK_DEV_SKD is not set +CONFIG_BLK_DEV_OSD=m # CONFIG_BLK_DEV_SX8 is not set CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=1 @@ -875,7 +1630,7 @@ CONFIG_EEPROM_AT24=y # CONFIG_EEPROM_AT25 is not set # CONFIG_EEPROM_LEGACY is not set # CONFIG_EEPROM_MAX6875 is not set -# CONFIG_EEPROM_93CX6 is not set +CONFIG_EEPROM_93CX6=m # CONFIG_EEPROM_93XX46 is not set # CONFIG_CB710_CORE is not set @@ -883,7 +1638,6 @@ CONFIG_EEPROM_AT24=y # Texas Instruments shared transport line discipline # # CONFIG_TI_ST is not set -# CONFIG_SENSORS_LIS3_SPI is not set # CONFIG_SENSORS_LIS3_I2C is not set # @@ -947,20 +1701,74 @@ CONFIG_BLK_DEV_SD=y # # SCSI Transports # -# CONFIG_SCSI_SPI_ATTRS is not set +CONFIG_SCSI_SPI_ATTRS=m # CONFIG_SCSI_FC_ATTRS is not set -# CONFIG_SCSI_ISCSI_ATTRS is not set -# CONFIG_SCSI_SAS_ATTRS is not set -# CONFIG_SCSI_SAS_LIBSAS is not set +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y # CONFIG_SCSI_SRP_ATTRS is not set -# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_MVSAS=m +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=m +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +CONFIG_SCSI_VIRTIO=m # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set # CONFIG_SCSI_DH is not set -# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +# CONFIG_SCSI_OSD_DEBUG is not set CONFIG_HAVE_PATA_PLATFORM=y CONFIG_ATA=y # CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y +# CONFIG_ATA_VERBOSE_ERROR is not set +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set CONFIG_SATA_PMP=y # @@ -1050,6 +1858,7 @@ CONFIG_PATA_OF_PLATFORM=y # # Generic fallback / 
legacy drivers # +# CONFIG_PATA_ACPI is not set # CONFIG_ATA_GENERIC is not set # CONFIG_PATA_LEGACY is not set CONFIG_MD=y @@ -1097,10 +1906,13 @@ CONFIG_BONDING=y # CONFIG_DUMMY is not set # CONFIG_EQUALIZER is not set # CONFIG_NET_FC is not set +CONFIG_IFB=m # CONFIG_NET_TEAM is not set CONFIG_MACVLAN=y CONFIG_MACVTAP=y +CONFIG_IPVLAN=m # CONFIG_VXLAN is not set +CONFIG_GENEVE=m # CONFIG_NETCONSOLE is not set # CONFIG_NETPOLL is not set # CONFIG_NET_POLL_CONTROLLER is not set @@ -1109,7 +1921,19 @@ CONFIG_TUN=y # CONFIG_VETH is not set CONFIG_VIRTIO_NET=y # CONFIG_NLMON is not set +CONFIG_NET_VRF=m # CONFIG_ARCNET is not set +CONFIG_ATM_DRIVERS=y +# CONFIG_ATM_DUMMY is not set +# CONFIG_ATM_TCP is not set +# CONFIG_ATM_LANAI is not set +# CONFIG_ATM_ENI is not set +# CONFIG_ATM_NICSTAR is not set +# CONFIG_ATM_IDT77252 is not set +# CONFIG_ATM_IA is not set +# CONFIG_ATM_FORE200E is not set +# CONFIG_ATM_HE is not set +# CONFIG_ATM_SOLOS is not set # # CAIF transport drivers @@ -1205,6 +2029,7 @@ CONFIG_IGB=y CONFIG_IGBVF=y CONFIG_IXGB=y CONFIG_IXGBE=y +# CONFIG_IXGBE_DCB is not set CONFIG_IXGBEVF=y # CONFIG_I40E is not set # CONFIG_I40EVF is not set @@ -1304,6 +2129,7 @@ CONFIG_NET_VENDOR_WIZNET=y # CONFIG_WIZNET_W5300 is not set # CONFIG_FDDI is not set # CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set CONFIG_PHYLIB=y # @@ -1320,8 +2146,9 @@ CONFIG_MARVELL_PHY=y # CONFIG_VITESSE_PHY is not set # CONFIG_TERANETICS_PHY is not set # CONFIG_SMSC_PHY is not set +CONFIG_BCM_NET_PHYLIB=m # CONFIG_BROADCOM_PHY is not set -# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM7XXX_PHY=m # CONFIG_BCM87XX_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set @@ -1331,66 +2158,302 @@ CONFIG_MARVELL_PHY=y # CONFIG_MICREL_PHY is not set # CONFIG_DP83848_PHY is not set # CONFIG_DP83867_PHY is not set -# CONFIG_MICROCHIP_PHY is not set +CONFIG_MICROCHIP_PHY=m CONFIG_FIXED_PHY=y -# CONFIG_MDIO_BITBANG is not set -# CONFIG_MDIO_OCTEON is not set -# CONFIG_MDIO_BUS_MUX_GPIO is not set -# CONFIG_MDIO_BUS_MUX_MMIOREG is not set -# CONFIG_MDIO_BCM_UNIMAC is not set -# CONFIG_MICREL_KS8995MA is not set -# CONFIG_PPP is not set -# CONFIG_SLIP is not set -CONFIG_USB_NET_DRIVERS=y -# CONFIG_USB_CATC is not set -# CONFIG_USB_KAWETH is not set -# CONFIG_USB_PEGASUS is not set -# CONFIG_USB_RTL8150 is not set +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_GPIO=m +CONFIG_MDIO_OCTEON=m +CONFIG_MDIO_BUS_MUX=m +CONFIG_MDIO_BUS_MUX_GPIO=m +CONFIG_MDIO_BUS_MUX_MMIOREG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MICREL_KS8995MA=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y +CONFIG_USB_NET_DRIVERS=m +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m # CONFIG_USB_RTL8152 is not set -# CONFIG_USB_LAN78XX is not set -CONFIG_USB_USBNET=y -CONFIG_USB_NET_AX8817X=y -CONFIG_USB_NET_AX88179_178A=y -CONFIG_USB_NET_CDCETHER=y -# CONFIG_USB_NET_CDC_EEM is not set -CONFIG_USB_NET_CDC_NCM=y -# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -# CONFIG_USB_NET_CDC_MBIM is not set -# CONFIG_USB_NET_DM9601 is not set -# CONFIG_USB_NET_SR9700 is not set -# CONFIG_USB_NET_SR9800 is not set -# CONFIG_USB_NET_SMSC75XX is not set -# CONFIG_USB_NET_SMSC95XX is not set -# CONFIG_USB_NET_GL620A is not set -CONFIG_USB_NET_NET1080=y 
-# CONFIG_USB_NET_PLUSB is not set -# CONFIG_USB_NET_MCS7830 is not set -# CONFIG_USB_NET_RNDIS_HOST is not set -CONFIG_USB_NET_CDC_SUBSET=y -# CONFIG_USB_ALI_M5632 is not set -# CONFIG_USB_AN2720 is not set +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +CONFIG_USB_NET_SR9800=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y CONFIG_USB_BELKIN=y CONFIG_USB_ARMLINUX=y -# CONFIG_USB_EPSON2888 is not set -# CONFIG_USB_KC2190 is not set -CONFIG_USB_NET_ZAURUS=y -# CONFIG_USB_NET_CX82310_ETH is not set -# CONFIG_USB_NET_KALMIA is not set -# CONFIG_USB_NET_QMI_WWAN is not set -# CONFIG_USB_NET_INT51X1 is not set -# CONFIG_USB_IPHETH is not set -# CONFIG_USB_SIERRA_NET is not set -# CONFIG_USB_VL600 is not set -# CONFIG_USB_NET_CH9200 is not set -# CONFIG_WLAN is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_CDC_PHONET=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_WLAN=y +CONFIG_LIBERTAS_THINFIRM=m +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_AT76C50X_USB=m +# CONFIG_PRISM54 is not set +CONFIG_USB_ZD1201=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_ADM8211=m +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y +CONFIG_MAC80211_HWSIM=m +CONFIG_MWL8K=m +CONFIG_ATH_COMMON=m +CONFIG_ATH_CARDS=m +# CONFIG_ATH_DEBUG is not set +CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS=y +# CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING is not set +CONFIG_ATH5K=m +# CONFIG_ATH5K_DEBUG is not set +CONFIG_ATH5K_PCI=y +# CONFIG_ATH5K_TEST_CHANNELS is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +# CONFIG_ATH9K_DEBUGFS is not set +CONFIG_ATH9K_DFS_CERTIFIED=y +CONFIG_ATH9K_DYNACK=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +CONFIG_ATH9K_CHANNEL_CONTEXT=y +CONFIG_ATH9K_PCOEM=y +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +CONFIG_CARL9170=m +CONFIG_CARL9170_LEDS=y +CONFIG_CARL9170_WPC=y +CONFIG_CARL9170_HWRNG=y +CONFIG_ATH6KL=m +CONFIG_ATH6KL_SDIO=m +CONFIG_ATH6KL_USB=m +# CONFIG_ATH6KL_DEBUG is not set +CONFIG_ATH6KL_REGDOMAIN=y +CONFIG_AR5523=m +CONFIG_WIL6210=m +CONFIG_WIL6210_ISR_COR=y +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +CONFIG_ATH10K_DFS_CERTIFIED=y +CONFIG_WCN36XX=m +# CONFIG_WCN36XX_DEBUGFS is not set +CONFIG_B43=m +CONFIG_B43_BCMA=y +CONFIG_B43_SSB=y +CONFIG_B43_BUSES_BCMA_AND_SSB=y +# CONFIG_B43_BUSES_BCMA is not set +# CONFIG_B43_BUSES_SSB is not set +CONFIG_B43_PCI_AUTOSELECT=y +CONFIG_B43_PCICORE_AUTOSELECT=y +CONFIG_B43_SDIO=y +CONFIG_B43_BCMA_PIO=y +CONFIG_B43_PIO=y +CONFIG_B43_PHY_G=y +CONFIG_B43_PHY_N=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_PHY_HT=y +CONFIG_B43_LEDS=y +CONFIG_B43_HWRNG=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43LEGACY=m +CONFIG_B43LEGACY_PCI_AUTOSELECT=y 
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y +CONFIG_B43LEGACY_LEDS=y +CONFIG_B43LEGACY_HWRNG=y +# CONFIG_B43LEGACY_DEBUG is not set +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +# CONFIG_B43LEGACY_PIO_MODE is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +CONFIG_BRCM_TRACING=y +# CONFIG_BRCMDBG is not set +# CONFIG_HOSTAP is not set +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +# CONFIG_IPW2100_DEBUG is not set +CONFIG_IPW2200=m +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y +# CONFIG_IPW2200_DEBUG is not set +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y +CONFIG_IWLWIFI_BCAST_FILTERING=y +CONFIG_IWLWIFI_UAPSD=y # -# Enable WiMAX (Networking options) to see the WiMAX drivers +# Debugging Options # +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLEGACY=m +CONFIG_IWL4965=m +CONFIG_IWL3945=m + +# +# iwl3945 / iwl4965 Debugging Options +# +# CONFIG_IWLEGACY_DEBUG is not set +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBERTAS_SDIO=m +CONFIG_LIBERTAS_SPI=m +# CONFIG_LIBERTAS_DEBUG is not set +CONFIG_LIBERTAS_MESH=y +# CONFIG_HERMES is not set +# CONFIG_P54_COMMON is not set +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +CONFIG_RT2500USB=m +CONFIG_RT73USB=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WL_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +CONFIG_WL_TI=y +CONFIG_WL1251=m +CONFIG_WL1251_SPI=m +CONFIG_WL1251_SDIO=m +CONFIG_WL12XX=m +CONFIG_WL18XX=m +CONFIG_WLCORE=m +CONFIG_WLCORE_SPI=m +CONFIG_WLCORE_SDIO=m +CONFIG_WILINK_PLATFORM_DATA=y +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_CW1200=m +CONFIG_CW1200_WLAN_SDIO=m +CONFIG_CW1200_WLAN_SPI=m +CONFIG_RSI_91X=m +# CONFIG_RSI_DEBUGFS is not set +CONFIG_RSI_SDIO=m +CONFIG_RSI_USB=m + +# +# WiMAX Wireless Broadband devices +# +CONFIG_WIMAX_I2400M=m +CONFIG_WIMAX_I2400M_USB=m +CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8 # CONFIG_WAN is not set +# CONFIG_IEEE802154_DRIVERS is not set # CONFIG_VMXNET3 is not set +CONFIG_FUJITSU_ES=m # CONFIG_ISDN is not set -# CONFIG_NVM is not set +CONFIG_NVM=y +# CONFIG_NVM_DEBUG is not set +CONFIG_NVM_GENNVM=m +CONFIG_NVM_RRPC=m # # Input device support @@ -1471,7 
+2534,7 @@ CONFIG_MOUSE_PS2_FOCALTECH=y # Hardware I/O ports # CONFIG_SERIO=y -# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_SERPORT=m CONFIG_SERIO_AMBAKMI=y # CONFIG_SERIO_PCIPS2 is not set CONFIG_SERIO_LIBPS2=y @@ -1510,6 +2573,7 @@ CONFIG_DEVKMEM=y CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DMA=y CONFIG_SERIAL_8250_PCI=y @@ -1519,6 +2583,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4 CONFIG_SERIAL_8250_FSL=y CONFIG_SERIAL_8250_DW=y # CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_FINTEK is not set # CONFIG_SERIAL_8250_INGENIC is not set # CONFIG_SERIAL_8250_MID is not set @@ -1549,6 +2614,7 @@ CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y # CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set CONFIG_SERIAL_MVEBU_UART=y CONFIG_SERIAL_MVEBU_CONSOLE=y +CONFIG_TTY_PRINTK=m CONFIG_HVC_DRIVER=y # CONFIG_HVC_DCC is not set CONFIG_VIRTIO_CONSOLE=y @@ -1564,6 +2630,7 @@ CONFIG_HW_RANDOM_OMAP=y # PCMCIA character devices # # CONFIG_RAW_DRIVER is not set +# CONFIG_HPET is not set # CONFIG_TCG_TPM is not set CONFIG_DEVPORT=y # CONFIG_XILLYBUS is not set @@ -1572,12 +2639,25 @@ CONFIG_DEVPORT=y # I2C support # CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y -# CONFIG_I2C_MUX is not set +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +CONFIG_I2C_MUX_REG=m CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCA=m # # I2C Hardware Bus support @@ -1586,55 +2666,62 @@ CONFIG_I2C_ALGOBIT=y # # PC SMBus host controller drivers # -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m # # I2C system bus drivers (mostly embedded / system-on-chip) # -# CONFIG_I2C_CADENCE is not set -# CONFIG_I2C_CBUS_GPIO is not set -# CONFIG_I2C_DESIGNWARE_PLATFORM is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_GPIO is not set +CONFIG_I2C_CADENCE=m +CONFIG_I2C_CBUS_GPIO=m +CONFIG_I2C_DESIGNWARE_CORE=m +CONFIG_I2C_DESIGNWARE_PLATFORM=m +CONFIG_I2C_DESIGNWARE_PCI=m +CONFIG_I2C_EMEV2=m +CONFIG_I2C_GPIO=m CONFIG_I2C_MV64XXX=y -# CONFIG_I2C_NOMADIK is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PCA_PLATFORM is not set -CONFIG_I2C_PXA=y +CONFIG_I2C_NOMADIK=m +CONFIG_I2C_OCORES=m +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_PXA is not set # CONFIG_I2C_PXA_PCI is not set -# CONFIG_I2C_PXA_SLAVE is not set -# CONFIG_I2C_RK3X is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_XILINX is not set +CONFIG_I2C_PXA_SLAVE=y +CONFIG_I2C_RK3X=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_XILINX=m # # External I2C/SMBus adapter drivers # -# 
CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_TINY_USB is not set +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT_LIGHT=m +CONFIG_I2C_ROBOTFUZZ_OSIF=m +CONFIG_I2C_TAOS_EVM=m +CONFIG_I2C_TINY_USB=m # # Other I2C/SMBus bus drivers # # CONFIG_I2C_STUB is not set -# CONFIG_I2C_SLAVE is not set +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set @@ -1693,10 +2780,7 @@ CONFIG_PPS=y # PTP clock support # CONFIG_PTP_1588_CLOCK=y - -# -# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. -# +CONFIG_DP83640_PHY=m CONFIG_PINCTRL=y # @@ -1707,6 +2791,10 @@ CONFIG_PINCONF=y # CONFIG_DEBUG_PINCTRL is not set # CONFIG_PINCTRL_AMD is not set # CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_BAYTRAIL is not set +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_BROXTON is not set +# CONFIG_PINCTRL_SUNRISEPOINT is not set CONFIG_PINCTRL_MVEBU=y CONFIG_PINCTRL_ARMADA_3700=y CONFIG_PINCTRL_ARMADA_AP806=y @@ -1715,6 +2803,7 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y CONFIG_GPIO_DEVRES=y CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y CONFIG_GPIOLIB_IRQCHIP=y # CONFIG_DEBUG_GPIO is not set # CONFIG_GPIO_SYSFS is not set @@ -1725,6 +2814,7 @@ CONFIG_GPIO_GENERIC=y # # CONFIG_GPIO_74XX_MMIO is not set # CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m # CONFIG_GPIO_DWAPB is not set CONFIG_GPIO_GENERIC_PLATFORM=y # CONFIG_GPIO_GRGPIO is not set @@ -1833,13 +2923,34 @@ CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -# CONFIG_SSB is not set +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_BLOCKIO=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +CONFIG_SSB_SDIOHOST_POSSIBLE=y +CONFIG_SSB_SDIOHOST=y +# CONFIG_SSB_HOST_SOC is not set +# CONFIG_SSB_SILENT is not set +# CONFIG_SSB_DEBUG is not set +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +# CONFIG_SSB_DRIVER_GPIO is not set CONFIG_BCMA_POSSIBLE=y # # Broadcom specific AMBA # -# CONFIG_BCMA is not set +CONFIG_BCMA=m +CONFIG_BCMA_BLOCKIO=y +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +# CONFIG_BCMA_DRIVER_GMAC_CMN is not set +# CONFIG_BCMA_DRIVER_GPIO is not set +# CONFIG_BCMA_DEBUG is not set # # Multifunction device drivers @@ -1867,7 +2978,7 @@ CONFIG_MFD_CORE=y # CONFIG_HTC_PASIC3 is not set # CONFIG_HTC_I2CPLD is not set # CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set +CONFIG_LPC_SCH=m # CONFIG_INTEL_SOC_PMIC is not set # CONFIG_MFD_JANZ_CMODIO is not set # CONFIG_MFD_KEMPLD is not set @@ -2166,6 +3277,7 @@ CONFIG_USB_DEFAULT_PERSIST=y # CONFIG_USB_DYNAMIC_MINORS is not set # CONFIG_USB_OTG is not set # CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set # CONFIG_USB_ULPI_BUS is not set # CONFIG_USB_MON is not set # CONFIG_USB_WUSB_CBAF is not set @@ -2195,14 +3307,16 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y # CONFIG_USB_UHCI_HCD is not set # CONFIG_USB_SL811_HCD is not set # CONFIG_USB_R8A66597_HCD is not set +CONFIG_USB_HCD_BCMA=m +# CONFIG_USB_HCD_SSB is not set # CONFIG_USB_HCD_TEST_MODE is not set # # USB Device Class drivers # -# CONFIG_USB_ACM is not set +CONFIG_USB_ACM=m # CONFIG_USB_PRINTER is not set -# CONFIG_USB_WDM is not set +CONFIG_USB_WDM=m # CONFIG_USB_TMC is not set # @@ -2279,6 +3393,7 @@ CONFIG_USB_ISP1760_DUAL_ROLE=y # CONFIG_USB_HSIC_USB3503 
is not set # CONFIG_USB_LINK_LAYER_TEST is not set # CONFIG_USB_CHAOSKEY is not set +# CONFIG_USB_ATM is not set # # USB Physical Layer drivers @@ -2328,6 +3443,7 @@ CONFIG_USB_MASS_STORAGE=m # CONFIG_USB_G_SERIAL is not set # CONFIG_USB_G_PRINTER is not set # CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_NOKIA is not set # CONFIG_USB_G_ACM_MS is not set # CONFIG_USB_G_MULTI is not set # CONFIG_USB_G_HID is not set @@ -2352,6 +3468,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SDHCI=y # CONFIG_MMC_SDHCI_PCI is not set +CONFIG_MMC_SDHCI_ACPI=m CONFIG_MMC_SDHCI_PLTFM=y # CONFIG_MMC_SDHCI_OF_ARASAN is not set # CONFIG_MMC_SDHCI_OF_AT91 is not set @@ -2532,6 +3649,7 @@ CONFIG_DMADEVICES=y # CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=y CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y CONFIG_DMA_OF=y # CONFIG_AMBA_PL08X is not set # CONFIG_FSL_EDMA is not set @@ -2581,8 +3699,19 @@ CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y # Microsoft Hyper-V guest support # CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set # CONFIG_COMEDI is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +# CONFIG_R8712U is not set +CONFIG_R8188EU=m +CONFIG_88EU_AP_MODE=y +CONFIG_R8723AU=m +CONFIG_8723AU_AP_MODE=y +CONFIG_8723AU_BT_COEXIST=y # CONFIG_RTS5208 is not set +CONFIG_VT6655=m +CONFIG_VT6656=m # CONFIG_FB_SM750 is not set # CONFIG_FB_XGI is not set @@ -2609,6 +3738,7 @@ CONFIG_MTD_SPINAND_ONDIEECC=y # CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set # CONFIG_FB_TFT is not set # CONFIG_FSL_MC_BUS is not set +# CONFIG_WILC1000_DRIVER is not set # CONFIG_MOST is not set # CONFIG_CHROME_PLATFORMS is not set CONFIG_CLKDEV_LOOKUP=y @@ -2621,6 +3751,7 @@ CONFIG_COMMON_CLK=y CONFIG_COMMON_CLK_VERSATILE=y CONFIG_CLK_SP810=y CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_COMMON_CLK_SCPI is not set # CONFIG_COMMON_CLK_SI5351 is not set # CONFIG_COMMON_CLK_SI514 is not set # CONFIG_COMMON_CLK_SI570 is not set @@ -2646,6 +3777,7 @@ CONFIG_ARMADA_37XX_PM_CLK=y # Clock Source drivers # CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_ACPI=y CONFIG_CLKSRC_PROBE=y CONFIG_CLKSRC_MMIO=y CONFIG_ARM_ARCH_TIMER=y @@ -2656,7 +3788,12 @@ CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y # CONFIG_SH_TIMER_MTU2 is not set # CONFIG_SH_TIMER_TMU is not set # CONFIG_EM_TIMER_STI is not set -# CONFIG_MAILBOX is not set +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +# CONFIG_MAILBOX_TEST is not set CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y @@ -2754,6 +3891,7 @@ CONFIG_MVEBU_PHONE=y # Firmware Drivers # CONFIG_ARM_PSCI_FW=y +CONFIG_ARM_SCPI_PROTOCOL=m # CONFIG_FIRMWARE_MEMMAP is not set CONFIG_DMIID=y # CONFIG_DMI_SYSFS is not set @@ -2766,6 +3904,26 @@ CONFIG_EFI_ESRT=y CONFIG_EFI_PARAMS_FROM_FDT=y CONFIG_EFI_RUNTIME_WRAPPERS=y CONFIG_EFI_ARMSTUB=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +# CONFIG_ACPI_CUSTOM_DSDT is not set +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +CONFIG_ACPI_CONTAINER=y +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# CONFIG_PMIC_OPREGION is not set # # File systems @@ -2798,7 +3956,7 @@ CONFIG_BTRFS_FS=y # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_FS_DAX is not set -# CONFIG_FS_POSIX_ACL is not set 
+CONFIG_FS_POSIX_ACL=y CONFIG_EXPORTFS=y CONFIG_FILE_LOCKING=y CONFIG_FSNOTIFY=y @@ -2894,6 +4052,9 @@ CONFIG_UBIFS_FS_ZLIB=y # CONFIG_PSTORE is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set +CONFIG_EXOFS_FS=m +# CONFIG_EXOFS_DEBUG is not set +CONFIG_ORE=m CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V2=y @@ -2912,6 +4073,7 @@ CONFIG_NFSD_V3=y CONFIG_GRACE_PERIOD=y CONFIG_LOCKD=y CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y @@ -2921,7 +4083,7 @@ CONFIG_SUNRPC_GSS=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set -CONFIG_9P_FS=y +CONFIG_9P_FS=m # CONFIG_9P_FS_POSIX_ACL is not set # CONFIG_9P_FS_SECURITY is not set CONFIG_NLS=y @@ -3005,11 +4167,7 @@ CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # # Compile-time checks and compiler options # -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_GDB_SCRIPTS is not set +# CONFIG_DEBUG_INFO is not set CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=2048 @@ -3206,42 +4364,42 @@ CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_USER is not set CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -# CONFIG_CRYPTO_GF128MUL is not set +CONFIG_CRYPTO_GF128MUL=m CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_NULL2=y # CONFIG_CRYPTO_PCRYPT is not set CONFIG_CRYPTO_WORKQUEUE=y CONFIG_CRYPTO_CRYPTD=y # CONFIG_CRYPTO_MCRYPTD is not set -# CONFIG_CRYPTO_AUTHENC is not set +CONFIG_CRYPTO_AUTHENC=m # CONFIG_CRYPTO_TEST is not set CONFIG_CRYPTO_ABLK_HELPER=y # # Authenticated Encryption with Associated Data # -# CONFIG_CRYPTO_CCM is not set -# CONFIG_CRYPTO_GCM is not set +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m # CONFIG_CRYPTO_CHACHA20POLY1305 is not set -# CONFIG_CRYPTO_SEQIV is not set +CONFIG_CRYPTO_SEQIV=m CONFIG_CRYPTO_ECHAINIV=m # # Block modes # CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_CTR is not set +CONFIG_CRYPTO_CTR=m # CONFIG_CRYPTO_CTS is not set -# CONFIG_CRYPTO_ECB is not set +CONFIG_CRYPTO_ECB=m # CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_PCBC is not set +CONFIG_CRYPTO_PCBC=m # CONFIG_CRYPTO_XTS is not set # CONFIG_CRYPTO_KEYWRAP is not set # # Hash modes # -# CONFIG_CRYPTO_CMAC is not set +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_HMAC=m # CONFIG_CRYPTO_XCBC is not set # CONFIG_CRYPTO_VMAC is not set @@ -3251,12 +4409,12 @@ CONFIG_CRYPTO_HMAC=m # CONFIG_CRYPTO_CRC32C=y # CONFIG_CRYPTO_CRC32 is not set -# CONFIG_CRYPTO_CRCT10DIF is not set -# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m # CONFIG_CRYPTO_POLY1305 is not set # CONFIG_CRYPTO_MD4 is not set -# CONFIG_CRYPTO_MD5 is not set -# CONFIG_CRYPTO_MICHAEL_MIC is not set +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m # CONFIG_CRYPTO_RMD128 is not set # CONFIG_CRYPTO_RMD160 is not set # CONFIG_CRYPTO_RMD256 is not set @@ -3272,13 +4430,13 @@ CONFIG_CRYPTO_SHA256=m # CONFIG_CRYPTO_AES=y # CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_ARC4 is not set +CONFIG_CRYPTO_ARC4=m # CONFIG_CRYPTO_BLOWFISH is not set # CONFIG_CRYPTO_CAMELLIA is not set # CONFIG_CRYPTO_CAST5 is not set # CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_DES is not set -# CONFIG_CRYPTO_FCRYPT is not set +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m # CONFIG_CRYPTO_KHAZAD is not set # CONFIG_CRYPTO_SALSA20 is not set # CONFIG_CRYPTO_CHACHA20 is not set @@ -3347,15 +4505,15 @@ CONFIG_GENERIC_IO=y CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y CONFIG_CRC_CCITT=y CONFIG_CRC16=y -# 
CONFIG_CRC_T10DIF is not set -# CONFIG_CRC_ITU_T is not set +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m CONFIG_CRC32=y # CONFIG_CRC32_SELFTEST is not set CONFIG_CRC32_SLICEBY8=y # CONFIG_CRC32_SLICEBY4 is not set # CONFIG_CRC32_SARWATE is not set # CONFIG_CRC32_BIT is not set -# CONFIG_CRC7 is not set +CONFIG_CRC7=m CONFIG_LIBCRC32C=y # CONFIG_CRC8 is not set CONFIG_AUDIT_GENERIC=y @@ -3383,6 +4541,10 @@ CONFIG_DECOMPRESS_XZ=y CONFIG_DECOMPRESS_LZO=y CONFIG_DECOMPRESS_LZ4=y CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT_MAP=y @@ -3393,7 +4555,7 @@ CONFIG_GLOB=y # CONFIG_GLOB_SELFTEST is not set CONFIG_NLATTR=y CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -# CONFIG_CORDIC is not set +CONFIG_CORDIC=m # CONFIG_DDR is not set CONFIG_LIBFDT=y CONFIG_OID_REGISTRY=y diff --git a/config/sources/mvebu64.conf b/config/sources/mvebu64.conf index 1872a0962..b08f15e55 100644 --- a/config/sources/mvebu64.conf +++ b/config/sources/mvebu64.conf @@ -7,7 +7,7 @@ BOOTSCRIPT="boot-espressobin.cmd:boot.cmd" BOOTSOURCE=$MAINLINE_UBOOT_SOURCE BOOTDIR=$MAINLINE_UBOOT_DIR -BOOTBRANCH=tag:v2017.05-rc1 +BOOTBRANCH=$MAINLINE_UBOOT_BRANCH BOOTENV_FILE='clearfog-default.txt' diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.52-53.patch b/patch/kernel/mvebu64-default/03-patch-4.4.52-53.patch new file mode 100644 index 000000000..e4bbcaf95 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.52-53.patch @@ -0,0 +1,8420 @@ +diff --git a/Documentation/Makefile b/Documentation/Makefile +index bc0548201755..fc759598c4c9 100644 +--- a/Documentation/Makefile ++++ b/Documentation/Makefile +@@ -1,4 +1,4 @@ + subdir-y := accounting auxdisplay blackfin connector \ +- filesystems filesystems ia64 laptops mic misc-devices \ ++ filesystems filesystems ia64 laptops misc-devices \ + networking pcmcia prctl ptp spi timers vDSO video4linux \ + watchdog +diff --git a/Documentation/mic/Makefile b/Documentation/mic/Makefile +deleted file mode 100644 +index a191d453badf..000000000000 +--- a/Documentation/mic/Makefile ++++ /dev/null +@@ -1 +0,0 @@ +-subdir-y := mpssd +diff --git a/Documentation/mic/mpssd/.gitignore b/Documentation/mic/mpssd/.gitignore +deleted file mode 100644 +index 8b7c72f07c92..000000000000 +--- a/Documentation/mic/mpssd/.gitignore ++++ /dev/null +@@ -1 +0,0 @@ +-mpssd +diff --git a/Documentation/mic/mpssd/Makefile b/Documentation/mic/mpssd/Makefile +deleted file mode 100644 +index 06871b0c08a6..000000000000 +--- a/Documentation/mic/mpssd/Makefile ++++ /dev/null +@@ -1,21 +0,0 @@ +-ifndef CROSS_COMPILE +-# List of programs to build +-hostprogs-$(CONFIG_X86_64) := mpssd +- +-mpssd-objs := mpssd.o sysfs.o +- +-# Tell kbuild to always build the programs +-always := $(hostprogs-y) +- +-HOSTCFLAGS += -I$(objtree)/usr/include -I$(srctree)/tools/include +- +-ifdef DEBUG +-HOSTCFLAGS += -DDEBUG=$(DEBUG) +-endif +- +-HOSTLOADLIBES_mpssd := -lpthread +- +-install: +- install mpssd /usr/sbin/mpssd +- install micctrl /usr/sbin/micctrl +-endif +diff --git a/Documentation/mic/mpssd/micctrl b/Documentation/mic/mpssd/micctrl +deleted file mode 100755 +index 8f2629b41c5f..000000000000 +--- a/Documentation/mic/mpssd/micctrl ++++ /dev/null +@@ -1,173 +0,0 @@ +-#!/bin/bash +-# Intel MIC Platform Software Stack (MPSS) +-# +-# Copyright(c) 2013 Intel Corporation. 
+-# +-# This program is free software; you can redistribute it and/or modify +-# it under the terms of the GNU General Public License, version 2, as +-# published by the Free Software Foundation. +-# +-# This program is distributed in the hope that it will be useful, but +-# WITHOUT ANY WARRANTY; without even the implied warranty of +-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-# General Public License for more details. +-# +-# The full GNU General Public License is included in this distribution in +-# the file called "COPYING". +-# +-# Intel MIC User Space Tools. +-# +-# micctrl - Controls MIC boot/start/stop. +-# +-# chkconfig: 2345 95 05 +-# description: start MPSS stack processing. +-# +-### BEGIN INIT INFO +-# Provides: micctrl +-### END INIT INFO +- +-# Source function library. +-. /etc/init.d/functions +- +-sysfs="/sys/class/mic" +- +-_status() +-{ +- f=$sysfs/$1 +- echo -e $1 state: "`cat $f/state`" shutdown_status: "`cat $f/shutdown_status`" +-} +- +-status() +-{ +- if [ "`echo $1 | head -c3`" == "mic" ]; then +- _status $1 +- return $? +- fi +- for f in $sysfs/* +- do +- _status `basename $f` +- RETVAL=$? +- [ $RETVAL -ne 0 ] && return $RETVAL +- done +- return 0 +-} +- +-_reset() +-{ +- f=$sysfs/$1 +- echo reset > $f/state +-} +- +-reset() +-{ +- if [ "`echo $1 | head -c3`" == "mic" ]; then +- _reset $1 +- return $? +- fi +- for f in $sysfs/* +- do +- _reset `basename $f` +- RETVAL=$? +- [ $RETVAL -ne 0 ] && return $RETVAL +- done +- return 0 +-} +- +-_boot() +-{ +- f=$sysfs/$1 +- echo "linux" > $f/bootmode +- echo "mic/uos.img" > $f/firmware +- echo "mic/$1.image" > $f/ramdisk +- echo "boot" > $f/state +-} +- +-boot() +-{ +- if [ "`echo $1 | head -c3`" == "mic" ]; then +- _boot $1 +- return $? +- fi +- for f in $sysfs/* +- do +- _boot `basename $f` +- RETVAL=$? +- [ $RETVAL -ne 0 ] && return $RETVAL +- done +- return 0 +-} +- +-_shutdown() +-{ +- f=$sysfs/$1 +- echo shutdown > $f/state +-} +- +-shutdown() +-{ +- if [ "`echo $1 | head -c3`" == "mic" ]; then +- _shutdown $1 +- return $? +- fi +- for f in $sysfs/* +- do +- _shutdown `basename $f` +- RETVAL=$? +- [ $RETVAL -ne 0 ] && return $RETVAL +- done +- return 0 +-} +- +-_wait() +-{ +- f=$sysfs/$1 +- while [ "`cat $f/state`" != "offline" -a "`cat $f/state`" != "online" ] +- do +- sleep 1 +- echo -e "Waiting for $1 to go offline" +- done +-} +- +-wait() +-{ +- if [ "`echo $1 | head -c3`" == "mic" ]; then +- _wait $1 +- return $? +- fi +- # Wait for the cards to go offline +- for f in $sysfs/* +- do +- _wait `basename $f` +- RETVAL=$? +- [ $RETVAL -ne 0 ] && return $RETVAL +- done +- return 0 +-} +- +-if [ ! -d "$sysfs" ]; then +- echo -e $"Module unloaded " +- exit 3 +-fi +- +-case $1 in +- -s) +- status $2 +- ;; +- -r) +- reset $2 +- ;; +- -b) +- boot $2 +- ;; +- -S) +- shutdown $2 +- ;; +- -w) +- wait $2 +- ;; +- *) +- echo $"Usage: $0 {-s (status) |-r (reset) |-b (boot) |-S (shutdown) |-w (wait)}" +- exit 2 +-esac +- +-exit $? +diff --git a/Documentation/mic/mpssd/mpss b/Documentation/mic/mpssd/mpss +deleted file mode 100755 +index 09ea90931649..000000000000 +--- a/Documentation/mic/mpssd/mpss ++++ /dev/null +@@ -1,200 +0,0 @@ +-#!/bin/bash +-# Intel MIC Platform Software Stack (MPSS) +-# +-# Copyright(c) 2013 Intel Corporation. +-# +-# This program is free software; you can redistribute it and/or modify +-# it under the terms of the GNU General Public License, version 2, as +-# published by the Free Software Foundation. 
+-# +-# This program is distributed in the hope that it will be useful, but +-# WITHOUT ANY WARRANTY; without even the implied warranty of +-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-# General Public License for more details. +-# +-# The full GNU General Public License is included in this distribution in +-# the file called "COPYING". +-# +-# Intel MIC User Space Tools. +-# +-# mpss Start mpssd. +-# +-# chkconfig: 2345 95 05 +-# description: start MPSS stack processing. +-# +-### BEGIN INIT INFO +-# Provides: mpss +-# Required-Start: +-# Required-Stop: +-# Short-Description: MPSS stack control +-# Description: MPSS stack control +-### END INIT INFO +- +-# Source function library. +-. /etc/init.d/functions +- +-exec=/usr/sbin/mpssd +-sysfs="/sys/class/mic" +-mic_modules="mic_host mic_x100_dma scif" +- +-start() +-{ +- [ -x $exec ] || exit 5 +- +- if [ "`ps -e | awk '{print $4}' | grep mpssd | head -1`" = "mpssd" ]; then +- echo -e $"MPSSD already running! " +- success +- echo +- return 0 +- fi +- +- echo -e $"Starting MPSS Stack" +- echo -e $"Loading MIC drivers:" $mic_modules +- +- modprobe -a $mic_modules +- RETVAL=$? +- if [ $RETVAL -ne 0 ]; then +- failure +- echo +- return $RETVAL +- fi +- +- # Start the daemon +- echo -n $"Starting MPSSD " +- $exec +- RETVAL=$? +- if [ $RETVAL -ne 0 ]; then +- failure +- echo +- return $RETVAL +- fi +- success +- echo +- +- sleep 5 +- +- # Boot the cards +- micctrl -b +- +- # Wait till ping works +- for f in $sysfs/* +- do +- count=100 +- ipaddr=`cat $f/cmdline` +- ipaddr=${ipaddr#*address,} +- ipaddr=`echo $ipaddr | cut -d, -f1 | cut -d\; -f1` +- while [ $count -ge 0 ] +- do +- echo -e "Pinging "`basename $f`" " +- ping -c 1 $ipaddr &> /dev/null +- RETVAL=$? +- if [ $RETVAL -eq 0 ]; then +- success +- break +- fi +- sleep 1 +- count=`expr $count - 1` +- done +- [ $RETVAL -ne 0 ] && failure || success +- echo +- done +- return $RETVAL +-} +- +-stop() +-{ +- echo -e $"Shutting down MPSS Stack: " +- +- # Bail out if module is unloaded +- if [ ! -d "$sysfs" ]; then +- echo -n $"Module unloaded " +- success +- echo +- return 0 +- fi +- +- # Shut down the cards. +- micctrl -S +- +- # Wait for the cards to go offline +- for f in $sysfs/* +- do +- while [ "`cat $f/state`" != "ready" ] +- do +- sleep 1 +- echo -e "Waiting for "`basename $f`" to become ready" +- done +- done +- +- # Display the status of the cards +- micctrl -s +- +- # Kill MPSSD now +- echo -n $"Killing MPSSD" +- killall -9 mpssd 2>/dev/null +- RETVAL=$? +- [ $RETVAL -ne 0 ] && failure || success +- echo +- return $RETVAL +-} +- +-restart() +-{ +- stop +- sleep 5 +- start +-} +- +-status() +-{ +- micctrl -s +- if [ "`ps -e | awk '{print $4}' | grep mpssd | head -n 1`" = "mpssd" ]; then +- echo "mpssd is running" +- else +- echo "mpssd is stopped" +- fi +- return 0 +-} +- +-unload() +-{ +- if [ ! -d "$sysfs" ]; then +- echo -n $"No MIC_HOST Module: " +- success +- echo +- return +- fi +- +- stop +- +- sleep 5 +- echo -n $"Removing MIC drivers:" $mic_modules +- modprobe -r $mic_modules +- RETVAL=$? +- [ $RETVAL -ne 0 ] && failure || success +- echo +- return $RETVAL +-} +- +-case $1 in +- start) +- start +- ;; +- stop) +- stop +- ;; +- restart) +- restart +- ;; +- status) +- status +- ;; +- unload) +- unload +- ;; +- *) +- echo $"Usage: $0 {start|stop|restart|status|unload}" +- exit 2 +-esac +- +-exit $? 
+diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c +deleted file mode 100644 +index c99a75968c01..000000000000 +--- a/Documentation/mic/mpssd/mpssd.c ++++ /dev/null +@@ -1,1826 +0,0 @@ +-/* +- * Intel MIC Platform Software Stack (MPSS) +- * +- * Copyright(c) 2013 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License, version 2, as +- * published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Intel MIC User Space Tools. +- */ +- +-#define _GNU_SOURCE +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include "mpssd.h" +-#include +-#include +-#include +- +-static void *init_mic(void *arg); +- +-static FILE *logfp; +-static struct mic_info mic_list; +- +-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +- +-#define min_t(type, x, y) ({ \ +- type __min1 = (x); \ +- type __min2 = (y); \ +- __min1 < __min2 ? __min1 : __min2; }) +- +-/* align addr on a size boundary - adjust address up/down if needed */ +-#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) +-#define _ALIGN_UP(addr, size) _ALIGN_DOWN(addr + size - 1, size) +- +-/* align addr on a size boundary - adjust address up if needed */ +-#define _ALIGN(addr, size) _ALIGN_UP(addr, size) +- +-/* to align the pointer to the (next) page boundary */ +-#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE) +- +-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +- +-#define GSO_ENABLED 1 +-#define MAX_GSO_SIZE (64 * 1024) +-#define ETH_H_LEN 14 +-#define MAX_NET_PKT_SIZE (_ALIGN_UP(MAX_GSO_SIZE + ETH_H_LEN, 64)) +-#define MIC_DEVICE_PAGE_END 0x1000 +- +-#ifndef VIRTIO_NET_HDR_F_DATA_VALID +-#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */ +-#endif +- +-static struct { +- struct mic_device_desc dd; +- struct mic_vqconfig vqconfig[2]; +- __u32 host_features, guest_acknowledgements; +- struct virtio_console_config cons_config; +-} virtcons_dev_page = { +- .dd = { +- .type = VIRTIO_ID_CONSOLE, +- .num_vq = ARRAY_SIZE(virtcons_dev_page.vqconfig), +- .feature_len = sizeof(virtcons_dev_page.host_features), +- .config_len = sizeof(virtcons_dev_page.cons_config), +- }, +- .vqconfig[0] = { +- .num = htole16(MIC_VRING_ENTRIES), +- }, +- .vqconfig[1] = { +- .num = htole16(MIC_VRING_ENTRIES), +- }, +-}; +- +-static struct { +- struct mic_device_desc dd; +- struct mic_vqconfig vqconfig[2]; +- __u32 host_features, guest_acknowledgements; +- struct virtio_net_config net_config; +-} virtnet_dev_page = { +- .dd = { +- .type = VIRTIO_ID_NET, +- .num_vq = ARRAY_SIZE(virtnet_dev_page.vqconfig), +- .feature_len = sizeof(virtnet_dev_page.host_features), +- .config_len = sizeof(virtnet_dev_page.net_config), +- }, +- .vqconfig[0] = { +- .num = htole16(MIC_VRING_ENTRIES), +- }, +- .vqconfig[1] = { +- .num = htole16(MIC_VRING_ENTRIES), +- }, +-#if GSO_ENABLED +- .host_features = htole32( +- 1 << VIRTIO_NET_F_CSUM | +- 1 << VIRTIO_NET_F_GSO | +- 1 << VIRTIO_NET_F_GUEST_TSO4 | +- 1 << VIRTIO_NET_F_GUEST_TSO6 | +- 1 << 
VIRTIO_NET_F_GUEST_ECN), +-#else +- .host_features = 0, +-#endif +-}; +- +-static const char *mic_config_dir = "/etc/mpss"; +-static const char *virtblk_backend = "VIRTBLK_BACKEND"; +-static struct { +- struct mic_device_desc dd; +- struct mic_vqconfig vqconfig[1]; +- __u32 host_features, guest_acknowledgements; +- struct virtio_blk_config blk_config; +-} virtblk_dev_page = { +- .dd = { +- .type = VIRTIO_ID_BLOCK, +- .num_vq = ARRAY_SIZE(virtblk_dev_page.vqconfig), +- .feature_len = sizeof(virtblk_dev_page.host_features), +- .config_len = sizeof(virtblk_dev_page.blk_config), +- }, +- .vqconfig[0] = { +- .num = htole16(MIC_VRING_ENTRIES), +- }, +- .host_features = +- htole32(1<name, strerror(errno)); +- return ret; +- } +- } +- if (pid < 0) { +- mpsslog("%s fork failed errno %s\n", +- mic->name, strerror(errno)); +- return ret; +- } +- +- ret = waitpid(pid, NULL, 0); +- if (ret < 0) { +- mpsslog("%s waitpid failed errno %s\n", +- mic->name, strerror(errno)); +- return ret; +- } +- +- snprintf(ipaddr, IFNAMSIZ, "172.31.%d.254/24", mic->id + 1); +- +- pid = fork(); +- if (pid == 0) { +- ifargv[0] = "ip"; +- ifargv[1] = "addr"; +- ifargv[2] = "add"; +- ifargv[3] = ipaddr; +- ifargv[4] = "dev"; +- ifargv[5] = dev; +- ifargv[6] = NULL; +- mpsslog("Configuring %s ipaddr %s\n", dev, ipaddr); +- ret = execvp("ip", ifargv); +- if (ret < 0) { +- mpsslog("%s execvp failed errno %s\n", +- mic->name, strerror(errno)); +- return ret; +- } +- } +- if (pid < 0) { +- mpsslog("%s fork failed errno %s\n", +- mic->name, strerror(errno)); +- return ret; +- } +- +- ret = waitpid(pid, NULL, 0); +- if (ret < 0) { +- mpsslog("%s waitpid failed errno %s\n", +- mic->name, strerror(errno)); +- return ret; +- } +- mpsslog("MIC name %s %s %d DONE!\n", +- mic->name, __func__, __LINE__); +- return 0; +-} +- +-static int tun_alloc(struct mic_info *mic, char *dev) +-{ +- struct ifreq ifr; +- int fd, err; +-#if GSO_ENABLED +- unsigned offload; +-#endif +- fd = open("/dev/net/tun", O_RDWR); +- if (fd < 0) { +- mpsslog("Could not open /dev/net/tun %s\n", strerror(errno)); +- goto done; +- } +- +- memset(&ifr, 0, sizeof(ifr)); +- +- ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR; +- if (*dev) +- strncpy(ifr.ifr_name, dev, IFNAMSIZ); +- +- err = ioctl(fd, TUNSETIFF, (void *)&ifr); +- if (err < 0) { +- mpsslog("%s %s %d TUNSETIFF failed %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- close(fd); +- return err; +- } +-#if GSO_ENABLED +- offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN; +- +- err = ioctl(fd, TUNSETOFFLOAD, offload); +- if (err < 0) { +- mpsslog("%s %s %d TUNSETOFFLOAD failed %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- close(fd); +- return err; +- } +-#endif +- strcpy(dev, ifr.ifr_name); +- mpsslog("Created TAP %s\n", dev); +-done: +- return fd; +-} +- +-#define NET_FD_VIRTIO_NET 0 +-#define NET_FD_TUN 1 +-#define MAX_NET_FD 2 +- +-static void set_dp(struct mic_info *mic, int type, void *dp) +-{ +- switch (type) { +- case VIRTIO_ID_CONSOLE: +- mic->mic_console.console_dp = dp; +- return; +- case VIRTIO_ID_NET: +- mic->mic_net.net_dp = dp; +- return; +- case VIRTIO_ID_BLOCK: +- mic->mic_virtblk.block_dp = dp; +- return; +- } +- mpsslog("%s %s %d not found\n", mic->name, __func__, type); +- assert(0); +-} +- +-static void *get_dp(struct mic_info *mic, int type) +-{ +- switch (type) { +- case VIRTIO_ID_CONSOLE: +- return mic->mic_console.console_dp; +- case VIRTIO_ID_NET: +- return mic->mic_net.net_dp; +- case VIRTIO_ID_BLOCK: +- return mic->mic_virtblk.block_dp; +- } +- 
mpsslog("%s %s %d not found\n", mic->name, __func__, type); +- assert(0); +- return NULL; +-} +- +-static struct mic_device_desc *get_device_desc(struct mic_info *mic, int type) +-{ +- struct mic_device_desc *d; +- int i; +- void *dp = get_dp(mic, type); +- +- for (i = sizeof(struct mic_bootparam); i < PAGE_SIZE; +- i += mic_total_desc_size(d)) { +- d = dp + i; +- +- /* End of list */ +- if (d->type == 0) +- break; +- +- if (d->type == -1) +- continue; +- +- mpsslog("%s %s d-> type %d d %p\n", +- mic->name, __func__, d->type, d); +- +- if (d->type == (__u8)type) +- return d; +- } +- mpsslog("%s %s %d not found\n", mic->name, __func__, type); +- return NULL; +-} +- +-/* See comments in vhost.c for explanation of next_desc() */ +-static unsigned next_desc(struct vring_desc *desc) +-{ +- unsigned int next; +- +- if (!(le16toh(desc->flags) & VRING_DESC_F_NEXT)) +- return -1U; +- next = le16toh(desc->next); +- return next; +-} +- +-/* Sum up all the IOVEC length */ +-static ssize_t +-sum_iovec_len(struct mic_copy_desc *copy) +-{ +- ssize_t sum = 0; +- int i; +- +- for (i = 0; i < copy->iovcnt; i++) +- sum += copy->iov[i].iov_len; +- return sum; +-} +- +-static inline void verify_out_len(struct mic_info *mic, +- struct mic_copy_desc *copy) +-{ +- if (copy->out_len != sum_iovec_len(copy)) { +- mpsslog("%s %s %d BUG copy->out_len 0x%x len 0x%zx\n", +- mic->name, __func__, __LINE__, +- copy->out_len, sum_iovec_len(copy)); +- assert(copy->out_len == sum_iovec_len(copy)); +- } +-} +- +-/* Display an iovec */ +-static void +-disp_iovec(struct mic_info *mic, struct mic_copy_desc *copy, +- const char *s, int line) +-{ +- int i; +- +- for (i = 0; i < copy->iovcnt; i++) +- mpsslog("%s %s %d copy->iov[%d] addr %p len 0x%zx\n", +- mic->name, s, line, i, +- copy->iov[i].iov_base, copy->iov[i].iov_len); +-} +- +-static inline __u16 read_avail_idx(struct mic_vring *vr) +-{ +- return ACCESS_ONCE(vr->info->avail_idx); +-} +- +-static inline void txrx_prepare(int type, bool tx, struct mic_vring *vr, +- struct mic_copy_desc *copy, ssize_t len) +-{ +- copy->vr_idx = tx ? 0 : 1; +- copy->update_used = true; +- if (type == VIRTIO_ID_NET) +- copy->iov[1].iov_len = len - sizeof(struct virtio_net_hdr); +- else +- copy->iov[0].iov_len = len; +-} +- +-/* Central API which triggers the copies */ +-static int +-mic_virtio_copy(struct mic_info *mic, int fd, +- struct mic_vring *vr, struct mic_copy_desc *copy) +-{ +- int ret; +- +- ret = ioctl(fd, MIC_VIRTIO_COPY_DESC, copy); +- if (ret) { +- mpsslog("%s %s %d errno %s ret %d\n", +- mic->name, __func__, __LINE__, +- strerror(errno), ret); +- } +- return ret; +-} +- +-static inline unsigned _vring_size(unsigned int num, unsigned long align) +-{ +- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) +- + align - 1) & ~(align - 1)) +- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; +-} +- +-/* +- * This initialization routine requires at least one +- * vring i.e. vr0. vr1 is optional. 
+- */ +-static void * +-init_vr(struct mic_info *mic, int fd, int type, +- struct mic_vring *vr0, struct mic_vring *vr1, int num_vq) +-{ +- int vr_size; +- char *va; +- +- vr_size = PAGE_ALIGN(_vring_size(MIC_VRING_ENTRIES, +- MIC_VIRTIO_RING_ALIGN) + +- sizeof(struct _mic_vring_info)); +- va = mmap(NULL, MIC_DEVICE_PAGE_END + vr_size * num_vq, +- PROT_READ, MAP_SHARED, fd, 0); +- if (MAP_FAILED == va) { +- mpsslog("%s %s %d mmap failed errno %s\n", +- mic->name, __func__, __LINE__, +- strerror(errno)); +- goto done; +- } +- set_dp(mic, type, va); +- vr0->va = (struct mic_vring *)&va[MIC_DEVICE_PAGE_END]; +- vr0->info = vr0->va + +- _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN); +- vring_init(&vr0->vr, +- MIC_VRING_ENTRIES, vr0->va, MIC_VIRTIO_RING_ALIGN); +- mpsslog("%s %s vr0 %p vr0->info %p vr_size 0x%x vring 0x%x ", +- __func__, mic->name, vr0->va, vr0->info, vr_size, +- _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); +- mpsslog("magic 0x%x expected 0x%x\n", +- le32toh(vr0->info->magic), MIC_MAGIC + type); +- assert(le32toh(vr0->info->magic) == MIC_MAGIC + type); +- if (vr1) { +- vr1->va = (struct mic_vring *) +- &va[MIC_DEVICE_PAGE_END + vr_size]; +- vr1->info = vr1->va + _vring_size(MIC_VRING_ENTRIES, +- MIC_VIRTIO_RING_ALIGN); +- vring_init(&vr1->vr, +- MIC_VRING_ENTRIES, vr1->va, MIC_VIRTIO_RING_ALIGN); +- mpsslog("%s %s vr1 %p vr1->info %p vr_size 0x%x vring 0x%x ", +- __func__, mic->name, vr1->va, vr1->info, vr_size, +- _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); +- mpsslog("magic 0x%x expected 0x%x\n", +- le32toh(vr1->info->magic), MIC_MAGIC + type + 1); +- assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1); +- } +-done: +- return va; +-} +- +-static int +-wait_for_card_driver(struct mic_info *mic, int fd, int type) +-{ +- struct pollfd pollfd; +- int err; +- struct mic_device_desc *desc = get_device_desc(mic, type); +- __u8 prev_status; +- +- if (!desc) +- return -ENODEV; +- prev_status = desc->status; +- pollfd.fd = fd; +- mpsslog("%s %s Waiting .... desc-> type %d status 0x%x\n", +- mic->name, __func__, type, desc->status); +- +- while (1) { +- pollfd.events = POLLIN; +- pollfd.revents = 0; +- err = poll(&pollfd, 1, -1); +- if (err < 0) { +- mpsslog("%s %s poll failed %s\n", +- mic->name, __func__, strerror(errno)); +- continue; +- } +- +- if (pollfd.revents) { +- if (desc->status != prev_status) { +- mpsslog("%s %s Waiting... 
desc-> type %d " +- "status 0x%x\n", +- mic->name, __func__, type, +- desc->status); +- prev_status = desc->status; +- } +- if (desc->status & VIRTIO_CONFIG_S_DRIVER_OK) { +- mpsslog("%s %s poll.revents %d\n", +- mic->name, __func__, pollfd.revents); +- mpsslog("%s %s desc-> type %d status 0x%x\n", +- mic->name, __func__, type, +- desc->status); +- break; +- } +- } +- } +- return 0; +-} +- +-/* Spin till we have some descriptors */ +-static void +-spin_for_descriptors(struct mic_info *mic, struct mic_vring *vr) +-{ +- __u16 avail_idx = read_avail_idx(vr); +- +- while (avail_idx == le16toh(ACCESS_ONCE(vr->vr.avail->idx))) { +-#ifdef DEBUG +- mpsslog("%s %s waiting for desc avail %d info_avail %d\n", +- mic->name, __func__, +- le16toh(vr->vr.avail->idx), vr->info->avail_idx); +-#endif +- sched_yield(); +- } +-} +- +-static void * +-virtio_net(void *arg) +-{ +- static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)]; +- static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __attribute__ ((aligned(64))); +- struct iovec vnet_iov[2][2] = { +- { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) }, +- { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } }, +- { { .iov_base = vnet_hdr[1], .iov_len = sizeof(vnet_hdr[1]) }, +- { .iov_base = vnet_buf[1], .iov_len = sizeof(vnet_buf[1]) } }, +- }; +- struct iovec *iov0 = vnet_iov[0], *iov1 = vnet_iov[1]; +- struct mic_info *mic = (struct mic_info *)arg; +- char if_name[IFNAMSIZ]; +- struct pollfd net_poll[MAX_NET_FD]; +- struct mic_vring tx_vr, rx_vr; +- struct mic_copy_desc copy; +- struct mic_device_desc *desc; +- int err; +- +- snprintf(if_name, IFNAMSIZ, "mic%d", mic->id); +- mic->mic_net.tap_fd = tun_alloc(mic, if_name); +- if (mic->mic_net.tap_fd < 0) +- goto done; +- +- if (tap_configure(mic, if_name)) +- goto done; +- mpsslog("MIC name %s id %d\n", mic->name, mic->id); +- +- net_poll[NET_FD_VIRTIO_NET].fd = mic->mic_net.virtio_net_fd; +- net_poll[NET_FD_VIRTIO_NET].events = POLLIN; +- net_poll[NET_FD_TUN].fd = mic->mic_net.tap_fd; +- net_poll[NET_FD_TUN].events = POLLIN; +- +- if (MAP_FAILED == init_vr(mic, mic->mic_net.virtio_net_fd, +- VIRTIO_ID_NET, &tx_vr, &rx_vr, +- virtnet_dev_page.dd.num_vq)) { +- mpsslog("%s init_vr failed %s\n", +- mic->name, strerror(errno)); +- goto done; +- } +- +- copy.iovcnt = 2; +- desc = get_device_desc(mic, VIRTIO_ID_NET); +- +- while (1) { +- ssize_t len; +- +- net_poll[NET_FD_VIRTIO_NET].revents = 0; +- net_poll[NET_FD_TUN].revents = 0; +- +- /* Start polling for data from tap and virtio net */ +- err = poll(net_poll, 2, -1); +- if (err < 0) { +- mpsslog("%s poll failed %s\n", +- __func__, strerror(errno)); +- continue; +- } +- if (!(desc->status & VIRTIO_CONFIG_S_DRIVER_OK)) { +- err = wait_for_card_driver(mic, +- mic->mic_net.virtio_net_fd, +- VIRTIO_ID_NET); +- if (err) { +- mpsslog("%s %s %d Exiting...\n", +- mic->name, __func__, __LINE__); +- break; +- } +- } +- /* +- * Check if there is data to be read from TUN and write to +- * virtio net fd if there is. 
+- */ +- if (net_poll[NET_FD_TUN].revents & POLLIN) { +- copy.iov = iov0; +- len = readv(net_poll[NET_FD_TUN].fd, +- copy.iov, copy.iovcnt); +- if (len > 0) { +- struct virtio_net_hdr *hdr +- = (struct virtio_net_hdr *)vnet_hdr[0]; +- +- /* Disable checksums on the card since we are on +- a reliable PCIe link */ +- hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID; +-#ifdef DEBUG +- mpsslog("%s %s %d hdr->flags 0x%x ", mic->name, +- __func__, __LINE__, hdr->flags); +- mpsslog("copy.out_len %d hdr->gso_type 0x%x\n", +- copy.out_len, hdr->gso_type); +-#endif +-#ifdef DEBUG +- disp_iovec(mic, copy, __func__, __LINE__); +- mpsslog("%s %s %d read from tap 0x%lx\n", +- mic->name, __func__, __LINE__, +- len); +-#endif +- spin_for_descriptors(mic, &tx_vr); +- txrx_prepare(VIRTIO_ID_NET, 1, &tx_vr, ©, +- len); +- +- err = mic_virtio_copy(mic, +- mic->mic_net.virtio_net_fd, &tx_vr, +- ©); +- if (err < 0) { +- mpsslog("%s %s %d mic_virtio_copy %s\n", +- mic->name, __func__, __LINE__, +- strerror(errno)); +- } +- if (!err) +- verify_out_len(mic, ©); +-#ifdef DEBUG +- disp_iovec(mic, copy, __func__, __LINE__); +- mpsslog("%s %s %d wrote to net 0x%lx\n", +- mic->name, __func__, __LINE__, +- sum_iovec_len(©)); +-#endif +- /* Reinitialize IOV for next run */ +- iov0[1].iov_len = MAX_NET_PKT_SIZE; +- } else if (len < 0) { +- disp_iovec(mic, ©, __func__, __LINE__); +- mpsslog("%s %s %d read failed %s ", mic->name, +- __func__, __LINE__, strerror(errno)); +- mpsslog("cnt %d sum %zd\n", +- copy.iovcnt, sum_iovec_len(©)); +- } +- } +- +- /* +- * Check if there is data to be read from virtio net and +- * write to TUN if there is. +- */ +- if (net_poll[NET_FD_VIRTIO_NET].revents & POLLIN) { +- while (rx_vr.info->avail_idx != +- le16toh(rx_vr.vr.avail->idx)) { +- copy.iov = iov1; +- txrx_prepare(VIRTIO_ID_NET, 0, &rx_vr, ©, +- MAX_NET_PKT_SIZE +- + sizeof(struct virtio_net_hdr)); +- +- err = mic_virtio_copy(mic, +- mic->mic_net.virtio_net_fd, &rx_vr, +- ©); +- if (!err) { +-#ifdef DEBUG +- struct virtio_net_hdr *hdr +- = (struct virtio_net_hdr *) +- vnet_hdr[1]; +- +- mpsslog("%s %s %d hdr->flags 0x%x, ", +- mic->name, __func__, __LINE__, +- hdr->flags); +- mpsslog("out_len %d gso_type 0x%x\n", +- copy.out_len, +- hdr->gso_type); +-#endif +- /* Set the correct output iov_len */ +- iov1[1].iov_len = copy.out_len - +- sizeof(struct virtio_net_hdr); +- verify_out_len(mic, ©); +-#ifdef DEBUG +- disp_iovec(mic, copy, __func__, +- __LINE__); +- mpsslog("%s %s %d ", +- mic->name, __func__, __LINE__); +- mpsslog("read from net 0x%lx\n", +- sum_iovec_len(copy)); +-#endif +- len = writev(net_poll[NET_FD_TUN].fd, +- copy.iov, copy.iovcnt); +- if (len != sum_iovec_len(©)) { +- mpsslog("Tun write failed %s ", +- strerror(errno)); +- mpsslog("len 0x%zx ", len); +- mpsslog("read_len 0x%zx\n", +- sum_iovec_len(©)); +- } else { +-#ifdef DEBUG +- disp_iovec(mic, ©, __func__, +- __LINE__); +- mpsslog("%s %s %d ", +- mic->name, __func__, +- __LINE__); +- mpsslog("wrote to tap 0x%lx\n", +- len); +-#endif +- } +- } else { +- mpsslog("%s %s %d mic_virtio_copy %s\n", +- mic->name, __func__, __LINE__, +- strerror(errno)); +- break; +- } +- } +- } +- if (net_poll[NET_FD_VIRTIO_NET].revents & POLLERR) +- mpsslog("%s: %s: POLLERR\n", __func__, mic->name); +- } +-done: +- pthread_exit(NULL); +-} +- +-/* virtio_console */ +-#define VIRTIO_CONSOLE_FD 0 +-#define MONITOR_FD (VIRTIO_CONSOLE_FD + 1) +-#define MAX_CONSOLE_FD (MONITOR_FD + 1) /* must be the last one + 1 */ +-#define MAX_BUFFER_SIZE PAGE_SIZE +- +-static void * +-virtio_console(void *arg) 
+-{ +- static __u8 vcons_buf[2][PAGE_SIZE]; +- struct iovec vcons_iov[2] = { +- { .iov_base = vcons_buf[0], .iov_len = sizeof(vcons_buf[0]) }, +- { .iov_base = vcons_buf[1], .iov_len = sizeof(vcons_buf[1]) }, +- }; +- struct iovec *iov0 = &vcons_iov[0], *iov1 = &vcons_iov[1]; +- struct mic_info *mic = (struct mic_info *)arg; +- int err; +- struct pollfd console_poll[MAX_CONSOLE_FD]; +- int pty_fd; +- char *pts_name; +- ssize_t len; +- struct mic_vring tx_vr, rx_vr; +- struct mic_copy_desc copy; +- struct mic_device_desc *desc; +- +- pty_fd = posix_openpt(O_RDWR); +- if (pty_fd < 0) { +- mpsslog("can't open a pseudoterminal master device: %s\n", +- strerror(errno)); +- goto _return; +- } +- pts_name = ptsname(pty_fd); +- if (pts_name == NULL) { +- mpsslog("can't get pts name\n"); +- goto _close_pty; +- } +- printf("%s console message goes to %s\n", mic->name, pts_name); +- mpsslog("%s console message goes to %s\n", mic->name, pts_name); +- err = grantpt(pty_fd); +- if (err < 0) { +- mpsslog("can't grant access: %s %s\n", +- pts_name, strerror(errno)); +- goto _close_pty; +- } +- err = unlockpt(pty_fd); +- if (err < 0) { +- mpsslog("can't unlock a pseudoterminal: %s %s\n", +- pts_name, strerror(errno)); +- goto _close_pty; +- } +- console_poll[MONITOR_FD].fd = pty_fd; +- console_poll[MONITOR_FD].events = POLLIN; +- +- console_poll[VIRTIO_CONSOLE_FD].fd = mic->mic_console.virtio_console_fd; +- console_poll[VIRTIO_CONSOLE_FD].events = POLLIN; +- +- if (MAP_FAILED == init_vr(mic, mic->mic_console.virtio_console_fd, +- VIRTIO_ID_CONSOLE, &tx_vr, &rx_vr, +- virtcons_dev_page.dd.num_vq)) { +- mpsslog("%s init_vr failed %s\n", +- mic->name, strerror(errno)); +- goto _close_pty; +- } +- +- copy.iovcnt = 1; +- desc = get_device_desc(mic, VIRTIO_ID_CONSOLE); +- +- for (;;) { +- console_poll[MONITOR_FD].revents = 0; +- console_poll[VIRTIO_CONSOLE_FD].revents = 0; +- err = poll(console_poll, MAX_CONSOLE_FD, -1); +- if (err < 0) { +- mpsslog("%s %d: poll failed: %s\n", __func__, __LINE__, +- strerror(errno)); +- continue; +- } +- if (!(desc->status & VIRTIO_CONFIG_S_DRIVER_OK)) { +- err = wait_for_card_driver(mic, +- mic->mic_console.virtio_console_fd, +- VIRTIO_ID_CONSOLE); +- if (err) { +- mpsslog("%s %s %d Exiting...\n", +- mic->name, __func__, __LINE__); +- break; +- } +- } +- +- if (console_poll[MONITOR_FD].revents & POLLIN) { +- copy.iov = iov0; +- len = readv(pty_fd, copy.iov, copy.iovcnt); +- if (len > 0) { +-#ifdef DEBUG +- disp_iovec(mic, copy, __func__, __LINE__); +- mpsslog("%s %s %d read from tap 0x%lx\n", +- mic->name, __func__, __LINE__, +- len); +-#endif +- spin_for_descriptors(mic, &tx_vr); +- txrx_prepare(VIRTIO_ID_CONSOLE, 1, &tx_vr, +- ©, len); +- +- err = mic_virtio_copy(mic, +- mic->mic_console.virtio_console_fd, +- &tx_vr, ©); +- if (err < 0) { +- mpsslog("%s %s %d mic_virtio_copy %s\n", +- mic->name, __func__, __LINE__, +- strerror(errno)); +- } +- if (!err) +- verify_out_len(mic, ©); +-#ifdef DEBUG +- disp_iovec(mic, copy, __func__, __LINE__); +- mpsslog("%s %s %d wrote to net 0x%lx\n", +- mic->name, __func__, __LINE__, +- sum_iovec_len(copy)); +-#endif +- /* Reinitialize IOV for next run */ +- iov0->iov_len = PAGE_SIZE; +- } else if (len < 0) { +- disp_iovec(mic, ©, __func__, __LINE__); +- mpsslog("%s %s %d read failed %s ", +- mic->name, __func__, __LINE__, +- strerror(errno)); +- mpsslog("cnt %d sum %zd\n", +- copy.iovcnt, sum_iovec_len(©)); +- } +- } +- +- if (console_poll[VIRTIO_CONSOLE_FD].revents & POLLIN) { +- while (rx_vr.info->avail_idx != +- 
le16toh(rx_vr.vr.avail->idx)) { +- copy.iov = iov1; +- txrx_prepare(VIRTIO_ID_CONSOLE, 0, &rx_vr, +- ©, PAGE_SIZE); +- +- err = mic_virtio_copy(mic, +- mic->mic_console.virtio_console_fd, +- &rx_vr, ©); +- if (!err) { +- /* Set the correct output iov_len */ +- iov1->iov_len = copy.out_len; +- verify_out_len(mic, ©); +-#ifdef DEBUG +- disp_iovec(mic, copy, __func__, +- __LINE__); +- mpsslog("%s %s %d ", +- mic->name, __func__, __LINE__); +- mpsslog("read from net 0x%lx\n", +- sum_iovec_len(copy)); +-#endif +- len = writev(pty_fd, +- copy.iov, copy.iovcnt); +- if (len != sum_iovec_len(©)) { +- mpsslog("Tun write failed %s ", +- strerror(errno)); +- mpsslog("len 0x%zx ", len); +- mpsslog("read_len 0x%zx\n", +- sum_iovec_len(©)); +- } else { +-#ifdef DEBUG +- disp_iovec(mic, copy, __func__, +- __LINE__); +- mpsslog("%s %s %d ", +- mic->name, __func__, +- __LINE__); +- mpsslog("wrote to tap 0x%lx\n", +- len); +-#endif +- } +- } else { +- mpsslog("%s %s %d mic_virtio_copy %s\n", +- mic->name, __func__, __LINE__, +- strerror(errno)); +- break; +- } +- } +- } +- if (console_poll[NET_FD_VIRTIO_NET].revents & POLLERR) +- mpsslog("%s: %s: POLLERR\n", __func__, mic->name); +- } +-_close_pty: +- close(pty_fd); +-_return: +- pthread_exit(NULL); +-} +- +-static void +-add_virtio_device(struct mic_info *mic, struct mic_device_desc *dd) +-{ +- char path[PATH_MAX]; +- int fd, err; +- +- snprintf(path, PATH_MAX, "/dev/mic%d", mic->id); +- fd = open(path, O_RDWR); +- if (fd < 0) { +- mpsslog("Could not open %s %s\n", path, strerror(errno)); +- return; +- } +- +- err = ioctl(fd, MIC_VIRTIO_ADD_DEVICE, dd); +- if (err < 0) { +- mpsslog("Could not add %d %s\n", dd->type, strerror(errno)); +- close(fd); +- return; +- } +- switch (dd->type) { +- case VIRTIO_ID_NET: +- mic->mic_net.virtio_net_fd = fd; +- mpsslog("Added VIRTIO_ID_NET for %s\n", mic->name); +- break; +- case VIRTIO_ID_CONSOLE: +- mic->mic_console.virtio_console_fd = fd; +- mpsslog("Added VIRTIO_ID_CONSOLE for %s\n", mic->name); +- break; +- case VIRTIO_ID_BLOCK: +- mic->mic_virtblk.virtio_block_fd = fd; +- mpsslog("Added VIRTIO_ID_BLOCK for %s\n", mic->name); +- break; +- } +-} +- +-static bool +-set_backend_file(struct mic_info *mic) +-{ +- FILE *config; +- char buff[PATH_MAX], *line, *evv, *p; +- +- snprintf(buff, PATH_MAX, "%s/mpssd%03d.conf", mic_config_dir, mic->id); +- config = fopen(buff, "r"); +- if (config == NULL) +- return false; +- do { /* look for "virtblk_backend=XXXX" */ +- line = fgets(buff, PATH_MAX, config); +- if (line == NULL) +- break; +- if (*line == '#') +- continue; +- p = strchr(line, '\n'); +- if (p) +- *p = '\0'; +- } while (strncmp(line, virtblk_backend, strlen(virtblk_backend)) != 0); +- fclose(config); +- if (line == NULL) +- return false; +- evv = strchr(line, '='); +- if (evv == NULL) +- return false; +- mic->mic_virtblk.backend_file = malloc(strlen(evv) + 1); +- if (mic->mic_virtblk.backend_file == NULL) { +- mpsslog("%s %d can't allocate memory\n", mic->name, mic->id); +- return false; +- } +- strcpy(mic->mic_virtblk.backend_file, evv + 1); +- return true; +-} +- +-#define SECTOR_SIZE 512 +-static bool +-set_backend_size(struct mic_info *mic) +-{ +- mic->mic_virtblk.backend_size = lseek(mic->mic_virtblk.backend, 0, +- SEEK_END); +- if (mic->mic_virtblk.backend_size < 0) { +- mpsslog("%s: can't seek: %s\n", +- mic->name, mic->mic_virtblk.backend_file); +- return false; +- } +- virtblk_dev_page.blk_config.capacity = +- mic->mic_virtblk.backend_size / SECTOR_SIZE; +- if ((mic->mic_virtblk.backend_size % SECTOR_SIZE) != 
0) +- virtblk_dev_page.blk_config.capacity++; +- +- virtblk_dev_page.blk_config.capacity = +- htole64(virtblk_dev_page.blk_config.capacity); +- +- return true; +-} +- +-static bool +-open_backend(struct mic_info *mic) +-{ +- if (!set_backend_file(mic)) +- goto _error_exit; +- mic->mic_virtblk.backend = open(mic->mic_virtblk.backend_file, O_RDWR); +- if (mic->mic_virtblk.backend < 0) { +- mpsslog("%s: can't open: %s\n", mic->name, +- mic->mic_virtblk.backend_file); +- goto _error_free; +- } +- if (!set_backend_size(mic)) +- goto _error_close; +- mic->mic_virtblk.backend_addr = mmap(NULL, +- mic->mic_virtblk.backend_size, +- PROT_READ|PROT_WRITE, MAP_SHARED, +- mic->mic_virtblk.backend, 0L); +- if (mic->mic_virtblk.backend_addr == MAP_FAILED) { +- mpsslog("%s: can't map: %s %s\n", +- mic->name, mic->mic_virtblk.backend_file, +- strerror(errno)); +- goto _error_close; +- } +- return true; +- +- _error_close: +- close(mic->mic_virtblk.backend); +- _error_free: +- free(mic->mic_virtblk.backend_file); +- _error_exit: +- return false; +-} +- +-static void +-close_backend(struct mic_info *mic) +-{ +- munmap(mic->mic_virtblk.backend_addr, mic->mic_virtblk.backend_size); +- close(mic->mic_virtblk.backend); +- free(mic->mic_virtblk.backend_file); +-} +- +-static bool +-start_virtblk(struct mic_info *mic, struct mic_vring *vring) +-{ +- if (((unsigned long)&virtblk_dev_page.blk_config % 8) != 0) { +- mpsslog("%s: blk_config is not 8 byte aligned.\n", +- mic->name); +- return false; +- } +- add_virtio_device(mic, &virtblk_dev_page.dd); +- if (MAP_FAILED == init_vr(mic, mic->mic_virtblk.virtio_block_fd, +- VIRTIO_ID_BLOCK, vring, NULL, +- virtblk_dev_page.dd.num_vq)) { +- mpsslog("%s init_vr failed %s\n", +- mic->name, strerror(errno)); +- return false; +- } +- return true; +-} +- +-static void +-stop_virtblk(struct mic_info *mic) +-{ +- int vr_size, ret; +- +- vr_size = PAGE_ALIGN(_vring_size(MIC_VRING_ENTRIES, +- MIC_VIRTIO_RING_ALIGN) + +- sizeof(struct _mic_vring_info)); +- ret = munmap(mic->mic_virtblk.block_dp, +- MIC_DEVICE_PAGE_END + vr_size * virtblk_dev_page.dd.num_vq); +- if (ret < 0) +- mpsslog("%s munmap errno %d\n", mic->name, errno); +- close(mic->mic_virtblk.virtio_block_fd); +-} +- +-static __u8 +-header_error_check(struct vring_desc *desc) +-{ +- if (le32toh(desc->len) != sizeof(struct virtio_blk_outhdr)) { +- mpsslog("%s() %d: length is not sizeof(virtio_blk_outhd)\n", +- __func__, __LINE__); +- return -EIO; +- } +- if (!(le16toh(desc->flags) & VRING_DESC_F_NEXT)) { +- mpsslog("%s() %d: alone\n", +- __func__, __LINE__); +- return -EIO; +- } +- if (le16toh(desc->flags) & VRING_DESC_F_WRITE) { +- mpsslog("%s() %d: not read\n", +- __func__, __LINE__); +- return -EIO; +- } +- return 0; +-} +- +-static int +-read_header(int fd, struct virtio_blk_outhdr *hdr, __u32 desc_idx) +-{ +- struct iovec iovec; +- struct mic_copy_desc copy; +- +- iovec.iov_len = sizeof(*hdr); +- iovec.iov_base = hdr; +- copy.iov = &iovec; +- copy.iovcnt = 1; +- copy.vr_idx = 0; /* only one vring on virtio_block */ +- copy.update_used = false; /* do not update used index */ +- return ioctl(fd, MIC_VIRTIO_COPY_DESC, ©); +-} +- +-static int +-transfer_blocks(int fd, struct iovec *iovec, __u32 iovcnt) +-{ +- struct mic_copy_desc copy; +- +- copy.iov = iovec; +- copy.iovcnt = iovcnt; +- copy.vr_idx = 0; /* only one vring on virtio_block */ +- copy.update_used = false; /* do not update used index */ +- return ioctl(fd, MIC_VIRTIO_COPY_DESC, ©); +-} +- +-static __u8 +-status_error_check(struct vring_desc *desc) +-{ +- if 
(le32toh(desc->len) != sizeof(__u8)) { +- mpsslog("%s() %d: length is not sizeof(status)\n", +- __func__, __LINE__); +- return -EIO; +- } +- return 0; +-} +- +-static int +-write_status(int fd, __u8 *status) +-{ +- struct iovec iovec; +- struct mic_copy_desc copy; +- +- iovec.iov_base = status; +- iovec.iov_len = sizeof(*status); +- copy.iov = &iovec; +- copy.iovcnt = 1; +- copy.vr_idx = 0; /* only one vring on virtio_block */ +- copy.update_used = true; /* Update used index */ +- return ioctl(fd, MIC_VIRTIO_COPY_DESC, ©); +-} +- +-#ifndef VIRTIO_BLK_T_GET_ID +-#define VIRTIO_BLK_T_GET_ID 8 +-#endif +- +-static void * +-virtio_block(void *arg) +-{ +- struct mic_info *mic = (struct mic_info *)arg; +- int ret; +- struct pollfd block_poll; +- struct mic_vring vring; +- __u16 avail_idx; +- __u32 desc_idx; +- struct vring_desc *desc; +- struct iovec *iovec, *piov; +- __u8 status; +- __u32 buffer_desc_idx; +- struct virtio_blk_outhdr hdr; +- void *fos; +- +- for (;;) { /* forever */ +- if (!open_backend(mic)) { /* No virtblk */ +- for (mic->mic_virtblk.signaled = 0; +- !mic->mic_virtblk.signaled;) +- sleep(1); +- continue; +- } +- +- /* backend file is specified. */ +- if (!start_virtblk(mic, &vring)) +- goto _close_backend; +- iovec = malloc(sizeof(*iovec) * +- le32toh(virtblk_dev_page.blk_config.seg_max)); +- if (!iovec) { +- mpsslog("%s: can't alloc iovec: %s\n", +- mic->name, strerror(ENOMEM)); +- goto _stop_virtblk; +- } +- +- block_poll.fd = mic->mic_virtblk.virtio_block_fd; +- block_poll.events = POLLIN; +- for (mic->mic_virtblk.signaled = 0; +- !mic->mic_virtblk.signaled;) { +- block_poll.revents = 0; +- /* timeout in 1 sec to see signaled */ +- ret = poll(&block_poll, 1, 1000); +- if (ret < 0) { +- mpsslog("%s %d: poll failed: %s\n", +- __func__, __LINE__, +- strerror(errno)); +- continue; +- } +- +- if (!(block_poll.revents & POLLIN)) { +-#ifdef DEBUG +- mpsslog("%s %d: block_poll.revents=0x%x\n", +- __func__, __LINE__, block_poll.revents); +-#endif +- continue; +- } +- +- /* POLLIN */ +- while (vring.info->avail_idx != +- le16toh(vring.vr.avail->idx)) { +- /* read header element */ +- avail_idx = +- vring.info->avail_idx & +- (vring.vr.num - 1); +- desc_idx = le16toh( +- vring.vr.avail->ring[avail_idx]); +- desc = &vring.vr.desc[desc_idx]; +-#ifdef DEBUG +- mpsslog("%s() %d: avail_idx=%d ", +- __func__, __LINE__, +- vring.info->avail_idx); +- mpsslog("vring.vr.num=%d desc=%p\n", +- vring.vr.num, desc); +-#endif +- status = header_error_check(desc); +- ret = read_header( +- mic->mic_virtblk.virtio_block_fd, +- &hdr, desc_idx); +- if (ret < 0) { +- mpsslog("%s() %d %s: ret=%d %s\n", +- __func__, __LINE__, +- mic->name, ret, +- strerror(errno)); +- break; +- } +- /* buffer element */ +- piov = iovec; +- status = 0; +- fos = mic->mic_virtblk.backend_addr + +- (hdr.sector * SECTOR_SIZE); +- buffer_desc_idx = next_desc(desc); +- desc_idx = buffer_desc_idx; +- for (desc = &vring.vr.desc[buffer_desc_idx]; +- desc->flags & VRING_DESC_F_NEXT; +- desc_idx = next_desc(desc), +- desc = &vring.vr.desc[desc_idx]) { +- piov->iov_len = desc->len; +- piov->iov_base = fos; +- piov++; +- fos += desc->len; +- } +- /* Returning NULLs for VIRTIO_BLK_T_GET_ID. */ +- if (hdr.type & ~(VIRTIO_BLK_T_OUT | +- VIRTIO_BLK_T_GET_ID)) { +- /* +- VIRTIO_BLK_T_IN - does not do +- anything. Probably for documenting. +- VIRTIO_BLK_T_SCSI_CMD - for +- virtio_scsi. +- VIRTIO_BLK_T_FLUSH - turned off in +- config space. +- VIRTIO_BLK_T_BARRIER - defined but not +- used in anywhere. 
+- */ +- mpsslog("%s() %d: type %x ", +- __func__, __LINE__, +- hdr.type); +- mpsslog("is not supported\n"); +- status = -ENOTSUP; +- +- } else { +- ret = transfer_blocks( +- mic->mic_virtblk.virtio_block_fd, +- iovec, +- piov - iovec); +- if (ret < 0 && +- status != 0) +- status = ret; +- } +- /* write status and update used pointer */ +- if (status != 0) +- status = status_error_check(desc); +- ret = write_status( +- mic->mic_virtblk.virtio_block_fd, +- &status); +-#ifdef DEBUG +- mpsslog("%s() %d: write status=%d on desc=%p\n", +- __func__, __LINE__, +- status, desc); +-#endif +- } +- } +- free(iovec); +-_stop_virtblk: +- stop_virtblk(mic); +-_close_backend: +- close_backend(mic); +- } /* forever */ +- +- pthread_exit(NULL); +-} +- +-static void +-reset(struct mic_info *mic) +-{ +-#define RESET_TIMEOUT 120 +- int i = RESET_TIMEOUT; +- setsysfs(mic->name, "state", "reset"); +- while (i) { +- char *state; +- state = readsysfs(mic->name, "state"); +- if (!state) +- goto retry; +- mpsslog("%s: %s %d state %s\n", +- mic->name, __func__, __LINE__, state); +- +- if (!strcmp(state, "ready")) { +- free(state); +- break; +- } +- free(state); +-retry: +- sleep(1); +- i--; +- } +-} +- +-static int +-get_mic_shutdown_status(struct mic_info *mic, char *shutdown_status) +-{ +- if (!strcmp(shutdown_status, "nop")) +- return MIC_NOP; +- if (!strcmp(shutdown_status, "crashed")) +- return MIC_CRASHED; +- if (!strcmp(shutdown_status, "halted")) +- return MIC_HALTED; +- if (!strcmp(shutdown_status, "poweroff")) +- return MIC_POWER_OFF; +- if (!strcmp(shutdown_status, "restart")) +- return MIC_RESTART; +- mpsslog("%s: BUG invalid status %s\n", mic->name, shutdown_status); +- /* Invalid state */ +- assert(0); +-}; +- +-static int get_mic_state(struct mic_info *mic) +-{ +- char *state = NULL; +- enum mic_states mic_state; +- +- while (!state) { +- state = readsysfs(mic->name, "state"); +- sleep(1); +- } +- mpsslog("%s: %s %d state %s\n", +- mic->name, __func__, __LINE__, state); +- +- if (!strcmp(state, "ready")) { +- mic_state = MIC_READY; +- } else if (!strcmp(state, "booting")) { +- mic_state = MIC_BOOTING; +- } else if (!strcmp(state, "online")) { +- mic_state = MIC_ONLINE; +- } else if (!strcmp(state, "shutting_down")) { +- mic_state = MIC_SHUTTING_DOWN; +- } else if (!strcmp(state, "reset_failed")) { +- mic_state = MIC_RESET_FAILED; +- } else if (!strcmp(state, "resetting")) { +- mic_state = MIC_RESETTING; +- } else { +- mpsslog("%s: BUG invalid state %s\n", mic->name, state); +- assert(0); +- } +- +- free(state); +- return mic_state; +-}; +- +-static void mic_handle_shutdown(struct mic_info *mic) +-{ +-#define SHUTDOWN_TIMEOUT 60 +- int i = SHUTDOWN_TIMEOUT; +- char *shutdown_status; +- while (i) { +- shutdown_status = readsysfs(mic->name, "shutdown_status"); +- if (!shutdown_status) { +- sleep(1); +- continue; +- } +- mpsslog("%s: %s %d shutdown_status %s\n", +- mic->name, __func__, __LINE__, shutdown_status); +- switch (get_mic_shutdown_status(mic, shutdown_status)) { +- case MIC_RESTART: +- mic->restart = 1; +- case MIC_HALTED: +- case MIC_POWER_OFF: +- case MIC_CRASHED: +- free(shutdown_status); +- goto reset; +- default: +- break; +- } +- free(shutdown_status); +- sleep(1); +- i--; +- } +-reset: +- if (!i) +- mpsslog("%s: %s %d timing out waiting for shutdown_status %s\n", +- mic->name, __func__, __LINE__, shutdown_status); +- reset(mic); +-} +- +-static int open_state_fd(struct mic_info *mic) +-{ +- char pathname[PATH_MAX]; +- int fd; +- +- snprintf(pathname, PATH_MAX - 1, "%s/%s/%s", +- 
MICSYSFSDIR, mic->name, "state"); +- +- fd = open(pathname, O_RDONLY); +- if (fd < 0) +- mpsslog("%s: opening file %s failed %s\n", +- mic->name, pathname, strerror(errno)); +- return fd; +-} +- +-static int block_till_state_change(int fd, struct mic_info *mic) +-{ +- struct pollfd ufds[1]; +- char value[PAGE_SIZE]; +- int ret; +- +- ufds[0].fd = fd; +- ufds[0].events = POLLERR | POLLPRI; +- ret = poll(ufds, 1, -1); +- if (ret < 0) { +- mpsslog("%s: %s %d poll failed %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- return ret; +- } +- +- ret = lseek(fd, 0, SEEK_SET); +- if (ret < 0) { +- mpsslog("%s: %s %d Failed to seek to 0: %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- return ret; +- } +- +- ret = read(fd, value, sizeof(value)); +- if (ret < 0) { +- mpsslog("%s: %s %d Failed to read sysfs entry: %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- return ret; +- } +- +- return 0; +-} +- +-static void * +-mic_config(void *arg) +-{ +- struct mic_info *mic = (struct mic_info *)arg; +- int fd, ret, stat = 0; +- +- fd = open_state_fd(mic); +- if (fd < 0) { +- mpsslog("%s: %s %d open state fd failed %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- goto exit; +- } +- +- do { +- ret = block_till_state_change(fd, mic); +- if (ret < 0) { +- mpsslog("%s: %s %d block_till_state_change error %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- goto close_exit; +- } +- +- switch (get_mic_state(mic)) { +- case MIC_SHUTTING_DOWN: +- mic_handle_shutdown(mic); +- break; +- case MIC_READY: +- case MIC_RESET_FAILED: +- ret = kill(mic->pid, SIGTERM); +- mpsslog("%s: %s %d kill pid %d ret %d\n", +- mic->name, __func__, __LINE__, +- mic->pid, ret); +- if (!ret) { +- ret = waitpid(mic->pid, &stat, +- WIFSIGNALED(stat)); +- mpsslog("%s: %s %d waitpid ret %d pid %d\n", +- mic->name, __func__, __LINE__, +- ret, mic->pid); +- } +- if (mic->boot_on_resume) { +- setsysfs(mic->name, "state", "boot"); +- mic->boot_on_resume = 0; +- } +- goto close_exit; +- default: +- break; +- } +- } while (1); +- +-close_exit: +- close(fd); +-exit: +- init_mic(mic); +- pthread_exit(NULL); +-} +- +-static void +-set_cmdline(struct mic_info *mic) +-{ +- char buffer[PATH_MAX]; +- int len; +- +- len = snprintf(buffer, PATH_MAX, +- "clocksource=tsc highres=off nohz=off "); +- len += snprintf(buffer + len, PATH_MAX - len, +- "cpufreq_on;corec6_off;pc3_off;pc6_off "); +- len += snprintf(buffer + len, PATH_MAX - len, +- "ifcfg=static;address,172.31.%d.1;netmask,255.255.255.0", +- mic->id + 1); +- +- setsysfs(mic->name, "cmdline", buffer); +- mpsslog("%s: Command line: \"%s\"\n", mic->name, buffer); +- snprintf(buffer, PATH_MAX, "172.31.%d.1", mic->id + 1); +- mpsslog("%s: IPADDR: \"%s\"\n", mic->name, buffer); +-} +- +-static void +-set_log_buf_info(struct mic_info *mic) +-{ +- int fd; +- off_t len; +- char system_map[] = "/lib/firmware/mic/System.map"; +- char *map, *temp, log_buf[17] = {'\0'}; +- +- fd = open(system_map, O_RDONLY); +- if (fd < 0) { +- mpsslog("%s: Opening System.map failed: %d\n", +- mic->name, errno); +- return; +- } +- len = lseek(fd, 0, SEEK_END); +- if (len < 0) { +- mpsslog("%s: Reading System.map size failed: %d\n", +- mic->name, errno); +- close(fd); +- return; +- } +- map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0); +- if (map == MAP_FAILED) { +- mpsslog("%s: mmap of System.map failed: %d\n", +- mic->name, errno); +- close(fd); +- return; +- } +- temp = strstr(map, "__log_buf"); +- if (!temp) { +- mpsslog("%s: __log_buf not found: %d\n", mic->name, 
errno); +- munmap(map, len); +- close(fd); +- return; +- } +- strncpy(log_buf, temp - 19, 16); +- setsysfs(mic->name, "log_buf_addr", log_buf); +- mpsslog("%s: log_buf_addr: %s\n", mic->name, log_buf); +- temp = strstr(map, "log_buf_len"); +- if (!temp) { +- mpsslog("%s: log_buf_len not found: %d\n", mic->name, errno); +- munmap(map, len); +- close(fd); +- return; +- } +- strncpy(log_buf, temp - 19, 16); +- setsysfs(mic->name, "log_buf_len", log_buf); +- mpsslog("%s: log_buf_len: %s\n", mic->name, log_buf); +- munmap(map, len); +- close(fd); +-} +- +-static void +-change_virtblk_backend(int x, siginfo_t *siginfo, void *p) +-{ +- struct mic_info *mic; +- +- for (mic = mic_list.next; mic != NULL; mic = mic->next) +- mic->mic_virtblk.signaled = 1/* true */; +-} +- +-static void +-set_mic_boot_params(struct mic_info *mic) +-{ +- set_log_buf_info(mic); +- set_cmdline(mic); +-} +- +-static void * +-init_mic(void *arg) +-{ +- struct mic_info *mic = (struct mic_info *)arg; +- struct sigaction ignore = { +- .sa_flags = 0, +- .sa_handler = SIG_IGN +- }; +- struct sigaction act = { +- .sa_flags = SA_SIGINFO, +- .sa_sigaction = change_virtblk_backend, +- }; +- char buffer[PATH_MAX]; +- int err, fd; +- +- /* +- * Currently, one virtio block device is supported for each MIC card +- * at a time. Any user (or test) can send a SIGUSR1 to the MIC daemon. +- * The signal informs the virtio block backend about a change in the +- * configuration file which specifies the virtio backend file name on +- * the host. Virtio block backend then re-reads the configuration file +- * and switches to the new block device. This signalling mechanism may +- * not be required once multiple virtio block devices are supported by +- * the MIC daemon. +- */ +- sigaction(SIGUSR1, &ignore, NULL); +-retry: +- fd = open_state_fd(mic); +- if (fd < 0) { +- mpsslog("%s: %s %d open state fd failed %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- sleep(2); +- goto retry; +- } +- +- if (mic->restart) { +- snprintf(buffer, PATH_MAX, "boot"); +- setsysfs(mic->name, "state", buffer); +- mpsslog("%s restarting mic %d\n", +- mic->name, mic->restart); +- mic->restart = 0; +- } +- +- while (1) { +- while (block_till_state_change(fd, mic)) { +- mpsslog("%s: %s %d block_till_state_change error %s\n", +- mic->name, __func__, __LINE__, strerror(errno)); +- sleep(2); +- continue; +- } +- +- if (get_mic_state(mic) == MIC_BOOTING) +- break; +- } +- +- mic->pid = fork(); +- switch (mic->pid) { +- case 0: +- add_virtio_device(mic, &virtcons_dev_page.dd); +- add_virtio_device(mic, &virtnet_dev_page.dd); +- err = pthread_create(&mic->mic_console.console_thread, NULL, +- virtio_console, mic); +- if (err) +- mpsslog("%s virtcons pthread_create failed %s\n", +- mic->name, strerror(err)); +- err = pthread_create(&mic->mic_net.net_thread, NULL, +- virtio_net, mic); +- if (err) +- mpsslog("%s virtnet pthread_create failed %s\n", +- mic->name, strerror(err)); +- err = pthread_create(&mic->mic_virtblk.block_thread, NULL, +- virtio_block, mic); +- if (err) +- mpsslog("%s virtblk pthread_create failed %s\n", +- mic->name, strerror(err)); +- sigemptyset(&act.sa_mask); +- err = sigaction(SIGUSR1, &act, NULL); +- if (err) +- mpsslog("%s sigaction SIGUSR1 failed %s\n", +- mic->name, strerror(errno)); +- while (1) +- sleep(60); +- case -1: +- mpsslog("fork failed MIC name %s id %d errno %d\n", +- mic->name, mic->id, errno); +- break; +- default: +- err = pthread_create(&mic->config_thread, NULL, +- mic_config, mic); +- if (err) +- mpsslog("%s mic_config 
pthread_create failed %s\n", +- mic->name, strerror(err)); +- } +- +- return NULL; +-} +- +-static void +-start_daemon(void) +-{ +- struct mic_info *mic; +- int err; +- +- for (mic = mic_list.next; mic; mic = mic->next) { +- set_mic_boot_params(mic); +- err = pthread_create(&mic->init_thread, NULL, init_mic, mic); +- if (err) +- mpsslog("%s init_mic pthread_create failed %s\n", +- mic->name, strerror(err)); +- } +- +- while (1) +- sleep(60); +-} +- +-static int +-init_mic_list(void) +-{ +- struct mic_info *mic = &mic_list; +- struct dirent *file; +- DIR *dp; +- int cnt = 0; +- +- dp = opendir(MICSYSFSDIR); +- if (!dp) +- return 0; +- +- while ((file = readdir(dp)) != NULL) { +- if (!strncmp(file->d_name, "mic", 3)) { +- mic->next = calloc(1, sizeof(struct mic_info)); +- if (mic->next) { +- mic = mic->next; +- mic->id = atoi(&file->d_name[3]); +- mic->name = malloc(strlen(file->d_name) + 16); +- if (mic->name) +- strcpy(mic->name, file->d_name); +- mpsslog("MIC name %s id %d\n", mic->name, +- mic->id); +- cnt++; +- } +- } +- } +- +- closedir(dp); +- return cnt; +-} +- +-void +-mpsslog(char *format, ...) +-{ +- va_list args; +- char buffer[4096]; +- char ts[52], *ts1; +- time_t t; +- +- if (logfp == NULL) +- return; +- +- va_start(args, format); +- vsprintf(buffer, format, args); +- va_end(args); +- +- time(&t); +- ts1 = ctime_r(&t, ts); +- ts1[strlen(ts1) - 1] = '\0'; +- fprintf(logfp, "%s: %s", ts1, buffer); +- +- fflush(logfp); +-} +- +-int +-main(int argc, char *argv[]) +-{ +- int cnt; +- pid_t pid; +- +- myname = argv[0]; +- +- logfp = fopen(LOGFILE_NAME, "a+"); +- if (!logfp) { +- fprintf(stderr, "cannot open logfile '%s'\n", LOGFILE_NAME); +- exit(1); +- } +- pid = fork(); +- switch (pid) { +- case 0: +- break; +- case -1: +- exit(2); +- default: +- exit(0); +- } +- +- mpsslog("MIC Daemon start\n"); +- +- cnt = init_mic_list(); +- if (cnt == 0) { +- mpsslog("MIC module not loaded\n"); +- exit(3); +- } +- mpsslog("MIC found %d devices\n", cnt); +- +- start_daemon(); +- +- exit(0); +-} +diff --git a/Documentation/mic/mpssd/mpssd.h b/Documentation/mic/mpssd/mpssd.h +deleted file mode 100644 +index 8bd64944aacc..000000000000 +--- a/Documentation/mic/mpssd/mpssd.h ++++ /dev/null +@@ -1,103 +0,0 @@ +-/* +- * Intel MIC Platform Software Stack (MPSS) +- * +- * Copyright(c) 2013 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License, version 2, as +- * published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Intel MIC User Space Tools. 
+- */ +-#ifndef _MPSSD_H_ +-#define _MPSSD_H_ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#define MICSYSFSDIR "/sys/class/mic" +-#define LOGFILE_NAME "/var/log/mpssd" +-#define PAGE_SIZE 4096 +- +-struct mic_console_info { +- pthread_t console_thread; +- int virtio_console_fd; +- void *console_dp; +-}; +- +-struct mic_net_info { +- pthread_t net_thread; +- int virtio_net_fd; +- int tap_fd; +- void *net_dp; +-}; +- +-struct mic_virtblk_info { +- pthread_t block_thread; +- int virtio_block_fd; +- void *block_dp; +- volatile sig_atomic_t signaled; +- char *backend_file; +- int backend; +- void *backend_addr; +- long backend_size; +-}; +- +-struct mic_info { +- int id; +- char *name; +- pthread_t config_thread; +- pthread_t init_thread; +- pid_t pid; +- struct mic_console_info mic_console; +- struct mic_net_info mic_net; +- struct mic_virtblk_info mic_virtblk; +- int restart; +- int boot_on_resume; +- struct mic_info *next; +-}; +- +-__attribute__((format(printf, 1, 2))) +-void mpsslog(char *format, ...); +-char *readsysfs(char *dir, char *entry); +-int setsysfs(char *dir, char *entry, char *value); +-#endif +diff --git a/Documentation/mic/mpssd/sysfs.c b/Documentation/mic/mpssd/sysfs.c +deleted file mode 100644 +index 8dd326936083..000000000000 +--- a/Documentation/mic/mpssd/sysfs.c ++++ /dev/null +@@ -1,102 +0,0 @@ +-/* +- * Intel MIC Platform Software Stack (MPSS) +- * +- * Copyright(c) 2013 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License, version 2, as +- * published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Intel MIC User Space Tools. 
+- */ +- +-#include "mpssd.h" +- +-#define PAGE_SIZE 4096 +- +-char * +-readsysfs(char *dir, char *entry) +-{ +- char filename[PATH_MAX]; +- char value[PAGE_SIZE]; +- char *string = NULL; +- int fd; +- int len; +- +- if (dir == NULL) +- snprintf(filename, PATH_MAX, "%s/%s", MICSYSFSDIR, entry); +- else +- snprintf(filename, PATH_MAX, +- "%s/%s/%s", MICSYSFSDIR, dir, entry); +- +- fd = open(filename, O_RDONLY); +- if (fd < 0) { +- mpsslog("Failed to open sysfs entry '%s': %s\n", +- filename, strerror(errno)); +- return NULL; +- } +- +- len = read(fd, value, sizeof(value)); +- if (len < 0) { +- mpsslog("Failed to read sysfs entry '%s': %s\n", +- filename, strerror(errno)); +- goto readsys_ret; +- } +- if (len == 0) +- goto readsys_ret; +- +- value[len - 1] = '\0'; +- +- string = malloc(strlen(value) + 1); +- if (string) +- strcpy(string, value); +- +-readsys_ret: +- close(fd); +- return string; +-} +- +-int +-setsysfs(char *dir, char *entry, char *value) +-{ +- char filename[PATH_MAX]; +- char *oldvalue; +- int fd, ret = 0; +- +- if (dir == NULL) +- snprintf(filename, PATH_MAX, "%s/%s", MICSYSFSDIR, entry); +- else +- snprintf(filename, PATH_MAX, "%s/%s/%s", +- MICSYSFSDIR, dir, entry); +- +- oldvalue = readsysfs(dir, entry); +- +- fd = open(filename, O_RDWR); +- if (fd < 0) { +- ret = errno; +- mpsslog("Failed to open sysfs entry '%s': %s\n", +- filename, strerror(errno)); +- goto done; +- } +- +- if (!oldvalue || strcmp(value, oldvalue)) { +- if (write(fd, value, strlen(value)) < 0) { +- ret = errno; +- mpsslog("Failed to write new sysfs entry '%s': %s\n", +- filename, strerror(errno)); +- } +- } +- close(fd); +-done: +- if (oldvalue) +- free(oldvalue); +- return ret; +-} +diff --git a/Makefile b/Makefile +index 671e183bd507..10aec937e9e4 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 52 ++SUBLEVEL = 53 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts +index e74df327cdd3..20618a897c99 100644 +--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts +@@ -122,6 +122,8 @@ + uart1: serial@f8020000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1_default>; ++ atmel,use-dma-rx; ++ atmel,use-dma-tx; + status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts +index da84e65b56ef..e27024cdf48b 100644 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts +@@ -110,6 +110,8 @@ + }; + + usart3: serial@fc00c000 { ++ atmel,use-dma-rx; ++ atmel,use-dma-tx; + status = "okay"; + }; + +diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h +index 405aa1883307..23d5cad56ddc 100644 +--- a/arch/arm/include/asm/kvm_mmu.h ++++ b/arch/arm/include/asm/kvm_mmu.h +@@ -204,18 +204,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, + * and iterate over the range. 
+ */ + +- bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; +- + VM_BUG_ON(size & ~PAGE_MASK); + +- if (!need_flush && !icache_is_pipt()) +- goto vipt_cache; +- + while (size) { + void *va = kmap_atomic_pfn(pfn); + +- if (need_flush) +- kvm_flush_dcache_to_poc(va, PAGE_SIZE); ++ kvm_flush_dcache_to_poc(va, PAGE_SIZE); + + if (icache_is_pipt()) + __cpuc_coherent_user_range((unsigned long)va, +@@ -227,7 +221,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, + kunmap_atomic(va); + } + +-vipt_cache: + if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) { + /* any kind of VIPT cache */ + __flush_icache_all(); +diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h +index 61505676d085..819b21a9851c 100644 +--- a/arch/arm64/include/asm/kvm_mmu.h ++++ b/arch/arm64/include/asm/kvm_mmu.h +@@ -236,8 +236,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, + { + void *va = page_address(pfn_to_page(pfn)); + +- if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) +- kvm_flush_dcache_to_poc(va, size); ++ kvm_flush_dcache_to_poc(va, size); + + if (!icache_is_aliasing()) { /* PIPT */ + flush_icache_range((unsigned long)va, +diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c +index 52caa75bfe4e..e2f50d690624 100644 +--- a/arch/mips/bcm47xx/buttons.c ++++ b/arch/mips/bcm47xx/buttons.c +@@ -17,6 +17,12 @@ + .active_low = 1, \ + } + ++#define BCM47XX_GPIO_KEY_H(_gpio, _code) \ ++ { \ ++ .code = _code, \ ++ .gpio = _gpio, \ ++ } ++ + /* Asus */ + + static const struct gpio_keys_button +@@ -79,8 +85,8 @@ bcm47xx_buttons_asus_wl500gpv2[] __initconst = { + + static const struct gpio_keys_button + bcm47xx_buttons_asus_wl500w[] __initconst = { +- BCM47XX_GPIO_KEY(6, KEY_RESTART), +- BCM47XX_GPIO_KEY(7, KEY_WPS_BUTTON), ++ BCM47XX_GPIO_KEY_H(6, KEY_RESTART), ++ BCM47XX_GPIO_KEY_H(7, KEY_WPS_BUTTON), + }; + + static const struct gpio_keys_button +diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S +index 64e08df51d65..8b7004132491 100644 +--- a/arch/mips/cavium-octeon/octeon-memcpy.S ++++ b/arch/mips/cavium-octeon/octeon-memcpy.S +@@ -208,18 +208,18 @@ EXC( STORE t2, UNIT(6)(dst), s_exc_p10u) + ADD src, src, 16*NBYTES + EXC( STORE t3, UNIT(7)(dst), s_exc_p9u) + ADD dst, dst, 16*NBYTES +-EXC( LOAD t0, UNIT(-8)(src), l_exc_copy) +-EXC( LOAD t1, UNIT(-7)(src), l_exc_copy) +-EXC( LOAD t2, UNIT(-6)(src), l_exc_copy) +-EXC( LOAD t3, UNIT(-5)(src), l_exc_copy) ++EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16) ++EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16) ++EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16) ++EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16) + EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u) + EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u) + EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) + EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) +-EXC( LOAD t0, UNIT(-4)(src), l_exc_copy) +-EXC( LOAD t1, UNIT(-3)(src), l_exc_copy) +-EXC( LOAD t2, UNIT(-2)(src), l_exc_copy) +-EXC( LOAD t3, UNIT(-1)(src), l_exc_copy) ++EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16) ++EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16) ++EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16) ++EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16) + EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u) + EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u) + EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u) +@@ -383,6 +383,10 @@ done: + nop + END(memcpy) + ++l_exc_copy_rewind16: ++ /* Rewind src and dst by 16*NBYTES for 
l_exc_copy */ ++ SUB src, src, 16*NBYTES ++ SUB dst, dst, 16*NBYTES + l_exc_copy: + /* + * Copy bytes from src until faulting load address (or until a +diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h +index 3ceacde5eb6e..17f89f9670b2 100644 +--- a/arch/mips/include/asm/checksum.h ++++ b/arch/mips/include/asm/checksum.h +@@ -186,7 +186,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, + " daddu %0, %4 \n" + " dsll32 $1, %0, 0 \n" + " daddu %0, $1 \n" ++ " sltu $1, %0, $1 \n" + " dsra32 %0, %0, 0 \n" ++ " addu %0, $1 \n" + #endif + " .set pop" + : "=r" (sum) +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c +index 44a6f25e902e..fc537d1b649d 100644 +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -191,11 +191,9 @@ struct mips_frame_info { + #define J_TARGET(pc,target) \ + (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) + +-static inline int is_ra_save_ins(union mips_instruction *ip) ++static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) + { + #ifdef CONFIG_CPU_MICROMIPS +- union mips_instruction mmi; +- + /* + * swsp ra,offset + * swm16 reglist,offset(sp) +@@ -205,29 +203,71 @@ static inline int is_ra_save_ins(union mips_instruction *ip) + * + * microMIPS is way more fun... + */ +- if (mm_insn_16bit(ip->halfword[0])) { +- mmi.word = (ip->halfword[0] << 16); +- return (mmi.mm16_r5_format.opcode == mm_swsp16_op && +- mmi.mm16_r5_format.rt == 31) || +- (mmi.mm16_m_format.opcode == mm_pool16c_op && +- mmi.mm16_m_format.func == mm_swm16_op); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ switch (ip->mm16_r5_format.opcode) { ++ case mm_swsp16_op: ++ if (ip->mm16_r5_format.rt != 31) ++ return 0; ++ ++ *poff = ip->mm16_r5_format.simmediate; ++ *poff = (*poff << 2) / sizeof(ulong); ++ return 1; ++ ++ case mm_pool16c_op: ++ switch (ip->mm16_m_format.func) { ++ case mm_swm16_op: ++ *poff = ip->mm16_m_format.imm; ++ *poff += 1 + ip->mm16_m_format.rlist; ++ *poff = (*poff << 2) / sizeof(ulong); ++ return 1; ++ ++ default: ++ return 0; ++ } ++ ++ default: ++ return 0; ++ } + } +- else { +- mmi.halfword[0] = ip->halfword[1]; +- mmi.halfword[1] = ip->halfword[0]; +- return (mmi.mm_m_format.opcode == mm_pool32b_op && +- mmi.mm_m_format.rd > 9 && +- mmi.mm_m_format.base == 29 && +- mmi.mm_m_format.func == mm_swm32_func) || +- (mmi.i_format.opcode == mm_sw32_op && +- mmi.i_format.rs == 29 && +- mmi.i_format.rt == 31); ++ ++ switch (ip->i_format.opcode) { ++ case mm_sw32_op: ++ if (ip->i_format.rs != 29) ++ return 0; ++ if (ip->i_format.rt != 31) ++ return 0; ++ ++ *poff = ip->i_format.simmediate / sizeof(ulong); ++ return 1; ++ ++ case mm_pool32b_op: ++ switch (ip->mm_m_format.func) { ++ case mm_swm32_func: ++ if (ip->mm_m_format.rd < 0x10) ++ return 0; ++ if (ip->mm_m_format.base != 29) ++ return 0; ++ ++ *poff = ip->mm_m_format.simmediate; ++ *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32); ++ *poff /= sizeof(ulong); ++ return 1; ++ default: ++ return 0; ++ } ++ ++ default: ++ return 0; + } + #else + /* sw / sd $ra, offset($sp) */ +- return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && +- ip->i_format.rs == 29 && +- ip->i_format.rt == 31; ++ if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && ++ ip->i_format.rs == 29 && ip->i_format.rt == 31) { ++ *poff = ip->i_format.simmediate / sizeof(ulong); ++ return 1; ++ } ++ ++ return 0; + #endif + } + +@@ -242,13 +282,16 @@ static inline int is_jump_ins(union mips_instruction *ip) + * + * microMIPS is kind of more fun... 
+ */ +- union mips_instruction mmi; +- +- mmi.word = (ip->halfword[0] << 16); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ if ((ip->mm16_r5_format.opcode == mm_pool16c_op && ++ (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op)) ++ return 1; ++ return 0; ++ } + +- if ((mmi.mm16_r5_format.opcode == mm_pool16c_op && +- (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) || +- ip->j_format.opcode == mm_jal32_op) ++ if (ip->j_format.opcode == mm_j32_op) ++ return 1; ++ if (ip->j_format.opcode == mm_jal32_op) + return 1; + if (ip->r_format.opcode != mm_pool32a_op || + ip->r_format.func != mm_pool32axf_op) +@@ -276,15 +319,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip) + * + * microMIPS is not more fun... + */ +- if (mm_insn_16bit(ip->halfword[0])) { +- union mips_instruction mmi; +- +- mmi.word = (ip->halfword[0] << 16); +- return (mmi.mm16_r3_format.opcode == mm_pool16d_op && +- mmi.mm16_r3_format.simmediate && mm_addiusp_func) || +- (mmi.mm16_r5_format.opcode == mm_pool16d_op && +- mmi.mm16_r5_format.rt == 29); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ return (ip->mm16_r3_format.opcode == mm_pool16d_op && ++ ip->mm16_r3_format.simmediate && mm_addiusp_func) || ++ (ip->mm16_r5_format.opcode == mm_pool16d_op && ++ ip->mm16_r5_format.rt == 29); + } ++ + return ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; + #else +@@ -299,30 +340,36 @@ static inline int is_sp_move_ins(union mips_instruction *ip) + + static int get_frame_info(struct mips_frame_info *info) + { +-#ifdef CONFIG_CPU_MICROMIPS +- union mips_instruction *ip = (void *) (((char *) info->func) - 1); +-#else +- union mips_instruction *ip = info->func; +-#endif +- unsigned max_insns = info->func_size / sizeof(union mips_instruction); +- unsigned i; ++ bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); ++ union mips_instruction insn, *ip, *ip_end; ++ const unsigned int max_insns = 128; ++ unsigned int i; + + info->pc_offset = -1; + info->frame_size = 0; + ++ ip = (void *)msk_isa16_mode((ulong)info->func); + if (!ip) + goto err; + +- if (max_insns == 0) +- max_insns = 128U; /* unknown function size */ +- max_insns = min(128U, max_insns); ++ ip_end = (void *)ip + info->func_size; + +- for (i = 0; i < max_insns; i++, ip++) { ++ for (i = 0; i < max_insns && ip < ip_end; i++, ip++) { ++ if (is_mmips && mm_insn_16bit(ip->halfword[0])) { ++ insn.halfword[0] = 0; ++ insn.halfword[1] = ip->halfword[0]; ++ } else if (is_mmips) { ++ insn.halfword[0] = ip->halfword[1]; ++ insn.halfword[1] = ip->halfword[0]; ++ } else { ++ insn.word = ip->word; ++ } + +- if (is_jump_ins(ip)) ++ if (is_jump_ins(&insn)) + break; ++ + if (!info->frame_size) { +- if (is_sp_move_ins(ip)) ++ if (is_sp_move_ins(&insn)) + { + #ifdef CONFIG_CPU_MICROMIPS + if (mm_insn_16bit(ip->halfword[0])) +@@ -345,11 +392,9 @@ static int get_frame_info(struct mips_frame_info *info) + } + continue; + } +- if (info->pc_offset == -1 && is_ra_save_ins(ip)) { +- info->pc_offset = +- ip->i_format.simmediate / sizeof(long); ++ if (info->pc_offset == -1 && ++ is_ra_save_ins(&insn, &info->pc_offset)) + break; +- } + } + if (info->frame_size && info->pc_offset >= 0) /* nested */ + return 0; +diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c +index 80554e8f6037..3e390a4e3897 100644 +--- a/arch/mips/lantiq/xway/sysctrl.c ++++ b/arch/mips/lantiq/xway/sysctrl.c +@@ -545,7 +545,7 @@ void __init ltq_soc_init(void) + clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI); + 
clkdev_add_pmu("1a800000.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI); + clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL); +- clkdev_add_pmu("1e108000.eth", NULL, 1, 0, PMU_SWITCH | PMU_PPE_DP); ++ clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP); + clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); + clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); + } else if (of_machine_is_compatible("lantiq,ar10")) { +@@ -553,7 +553,7 @@ void __init ltq_soc_init(void) + ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz()); + clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); + clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1); +- clkdev_add_pmu("1e108000.eth", NULL, 1, 0, PMU_SWITCH | ++ clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | + PMU_PPE_DP | PMU_PPE_TC); + clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); + clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY); +@@ -575,11 +575,11 @@ void __init ltq_soc_init(void) + clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS); + + clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); +- clkdev_add_pmu("1e108000.eth", NULL, 1, 0, ++ clkdev_add_pmu("1e108000.eth", NULL, 0, 0, + PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | + PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | + PMU_PPE_QSB | PMU_PPE_TOP); +- clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY); ++ clkdev_add_pmu("1f203000.rcu", "gphy", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); + clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); + clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); +diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c +index dc7c5a5214a9..efaf364fe581 100644 +--- a/arch/mips/mm/sc-ip22.c ++++ b/arch/mips/mm/sc-ip22.c +@@ -31,26 +31,40 @@ static inline void indy_sc_wipe(unsigned long first, unsigned long last) + unsigned long tmp; + + __asm__ __volatile__( +- ".set\tpush\t\t\t# indy_sc_wipe\n\t" +- ".set\tnoreorder\n\t" +- ".set\tmips3\n\t" +- ".set\tnoat\n\t" +- "mfc0\t%2, $12\n\t" +- "li\t$1, 0x80\t\t\t# Go 64 bit\n\t" +- "mtc0\t$1, $12\n\t" +- +- "dli\t$1, 0x9000000080000000\n\t" +- "or\t%0, $1\t\t\t# first line to flush\n\t" +- "or\t%1, $1\t\t\t# last line to flush\n\t" +- ".set\tat\n\t" +- +- "1:\tsw\t$0, 0(%0)\n\t" +- "bne\t%0, %1, 1b\n\t" +- " daddu\t%0, 32\n\t" +- +- "mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t" +- "nop; nop; nop; nop;\n\t" +- ".set\tpop" ++ " .set push # indy_sc_wipe \n" ++ " .set noreorder \n" ++ " .set mips3 \n" ++ " .set noat \n" ++ " mfc0 %2, $12 \n" ++ " li $1, 0x80 # Go 64 bit \n" ++ " mtc0 $1, $12 \n" ++ " \n" ++ " # \n" ++ " # Open code a dli $1, 0x9000000080000000 \n" ++ " # \n" ++ " # Required because binutils 2.25 will happily accept \n" ++ " # 64 bit instructions in .set mips3 mode but puke on \n" ++ " # 64 bit constants when generating 32 bit ELF \n" ++ " # \n" ++ " lui $1,0x9000 \n" ++ " dsll $1,$1,0x10 \n" ++ " ori $1,$1,0x8000 \n" ++ " dsll $1,$1,0x10 \n" ++ " \n" ++ " or %0, $1 # first line to flush \n" ++ " or %1, $1 # last line to flush \n" ++ " .set at \n" ++ " \n" ++ "1: sw $0, 0(%0) \n" ++ " bne %0, %1, 1b \n" ++ " daddu %0, 32 \n" ++ " \n" ++ " mtc0 %2, $12 # Back to 32 bit \n" ++ " nop # pipeline hazard \n" ++ " nop \n" ++ " nop \n" ++ " nop \n" ++ " .set pop \n" + : "=r" (first), "=r" (last), "=&r" (tmp) + : "0" (first), "1" (last)); + } +diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c +index 05e804cdecaa..fdf48785d3e9 100644 +--- a/arch/powerpc/kernel/hw_breakpoint.c ++++ 
b/arch/powerpc/kernel/hw_breakpoint.c +@@ -227,8 +227,10 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) + rcu_read_lock(); + + bp = __this_cpu_read(bp_per_reg); +- if (!bp) ++ if (!bp) { ++ rc = NOTIFY_DONE; + goto out; ++ } + info = counter_arch_bp(bp); + + /* +diff --git a/crypto/testmgr.h b/crypto/testmgr.h +index da0a8fd765f4..0e02c60a57b6 100644 +--- a/crypto/testmgr.h ++++ b/crypto/testmgr.h +@@ -21778,7 +21778,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = { + "\x09\x75\x9a\x9b\x3c\x9b\x27\x39", + .klen = 32, + .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d" +- "\x43\xf6\x1e\x50", ++ "\x43\xf6\x1e\x50\0\0\0\0", + .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" + "\x13\x02\x01\x0c\x83\x4c\x96\x35" + "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" +diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c +index 59d8d0d14824..327f9e374b44 100644 +--- a/drivers/bcma/main.c ++++ b/drivers/bcma/main.c +@@ -640,8 +640,11 @@ static int bcma_device_probe(struct device *dev) + drv); + int err = 0; + ++ get_device(dev); + if (adrv->probe) + err = adrv->probe(core); ++ if (err) ++ put_device(dev); + + return err; + } +@@ -654,6 +657,7 @@ static int bcma_device_remove(struct device *dev) + + if (adrv->remove) + adrv->remove(core); ++ put_device(dev); + + return 0; + } +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index ab0b2dd3f629..cec36d5c24f5 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -1108,9 +1108,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; + ++ /* I/O need to be drained during transfer transition */ ++ blk_mq_freeze_queue(lo->lo_queue); ++ + err = loop_release_xfer(lo); + if (err) +- return err; ++ goto exit; + + if (info->lo_encrypt_type) { + unsigned int type = info->lo_encrypt_type; +@@ -1125,12 +1128,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + + err = loop_init_xfer(lo, xfer, info); + if (err) +- return err; ++ goto exit; + + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) +- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) +- return -EFBIG; ++ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { ++ err = -EFBIG; ++ goto exit; ++ } + + loop_config_discard(lo); + +@@ -1148,13 +1153,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + (info->lo_flags & LO_FLAGS_AUTOCLEAR)) + lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; + +- if ((info->lo_flags & LO_FLAGS_PARTSCAN) && +- !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { +- lo->lo_flags |= LO_FLAGS_PARTSCAN; +- lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; +- loop_reread_partitions(lo, lo->lo_device); +- } +- + lo->lo_encrypt_key_size = info->lo_encrypt_key_size; + lo->lo_init[0] = info->lo_init[0]; + lo->lo_init[1] = info->lo_init[1]; +@@ -1167,7 +1165,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + /* update dio if lo_offset or transfer is changed */ + __loop_update_dio(lo, lo->use_dio); + +- return 0; ++ exit: ++ blk_mq_unfreeze_queue(lo->lo_queue); ++ ++ if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && ++ !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { ++ lo->lo_flags |= LO_FLAGS_PARTSCAN; ++ lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; ++ loop_reread_partitions(lo, lo->lo_device); ++ } ++ ++ return err; + } + + static int +diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c +index dd184b50e5b4..284627806b88 100644 +--- 
a/drivers/dma/ipu/ipu_irq.c ++++ b/drivers/dma/ipu/ipu_irq.c +@@ -272,7 +272,7 @@ static void ipu_irq_handler(struct irq_desc *desc) + u32 status; + int i, line; + +- for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) { ++ for (i = 0; i < IPU_IRQ_NR_BANKS; i++) { + struct ipu_irq_bank *bank = irq_bank + i; + + raw_spin_lock(&bank_lock); +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index 63194a9a7189..89fd0113aa5c 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -422,7 +422,7 @@ int hv_synic_alloc(void) + goto err; + } + +- for_each_online_cpu(cpu) { ++ for_each_present_cpu(cpu) { + hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); + if (hv_context.event_dpc[cpu] == NULL) { + pr_err("Unable to allocate event dpc\n"); +@@ -461,6 +461,8 @@ int hv_synic_alloc(void) + pr_err("Unable to allocate post msg page\n"); + goto err; + } ++ ++ INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); + } + + return 0; +@@ -485,7 +487,7 @@ void hv_synic_free(void) + int cpu; + + kfree(hv_context.hv_numa_map); +- for_each_online_cpu(cpu) ++ for_each_present_cpu(cpu) + hv_synic_free_cpu(cpu); + } + +@@ -555,8 +557,6 @@ void hv_synic_init(void *arg) + rdmsrl(HV_X64_MSR_VP_INDEX, vp_index); + hv_context.vp_index[cpu] = (u32)vp_index; + +- INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); +- + /* + * Register the per-cpu clockevent source. + */ +diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c +index c37a71e13de0..1fb02dcbc500 100644 +--- a/drivers/hv/hv_fcopy.c ++++ b/drivers/hv/hv_fcopy.c +@@ -61,6 +61,7 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data); + static const char fcopy_devname[] = "vmbus/hv_fcopy"; + static u8 *recv_buffer; + static struct hvutil_transport *hvt; ++static struct completion release_event; + /* + * This state maintains the version number registered by the daemon. + */ +@@ -312,12 +313,14 @@ static void fcopy_on_reset(void) + + if (cancel_delayed_work_sync(&fcopy_timeout_work)) + fcopy_respond_to_host(HV_E_FAIL); ++ complete(&release_event); + } + + int hv_fcopy_init(struct hv_util_service *srv) + { + recv_buffer = srv->recv_buffer; + ++ init_completion(&release_event); + /* + * When this driver loads, the user level daemon that + * processes the host requests may not yet be running. +@@ -339,4 +342,5 @@ void hv_fcopy_deinit(void) + fcopy_transaction.state = HVUTIL_DEVICE_DYING; + cancel_delayed_work_sync(&fcopy_timeout_work); + hvutil_transport_destroy(hvt); ++ wait_for_completion(&release_event); + } +diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c +index 2a3420c4ca59..ce4d3a935491 100644 +--- a/drivers/hv/hv_kvp.c ++++ b/drivers/hv/hv_kvp.c +@@ -86,6 +86,7 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); + static const char kvp_devname[] = "vmbus/hv_kvp"; + static u8 *recv_buffer; + static struct hvutil_transport *hvt; ++static struct completion release_event; + /* + * Register the kernel component with the user-level daemon. + * As part of this registration, pass the LIC version number. +@@ -682,6 +683,7 @@ static void kvp_on_reset(void) + if (cancel_delayed_work_sync(&kvp_timeout_work)) + kvp_respond_to_host(NULL, HV_E_FAIL); + kvp_transaction.state = HVUTIL_DEVICE_INIT; ++ complete(&release_event); + } + + int +@@ -689,6 +691,7 @@ hv_kvp_init(struct hv_util_service *srv) + { + recv_buffer = srv->recv_buffer; + ++ init_completion(&release_event); + /* + * When this driver loads, the user level daemon that + * processes the host requests may not yet be running. 
+@@ -711,4 +714,5 @@ void hv_kvp_deinit(void) + cancel_delayed_work_sync(&kvp_timeout_work); + cancel_work_sync(&kvp_sendkey_work); + hvutil_transport_destroy(hvt); ++ wait_for_completion(&release_event); + } +diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c +index 81882d4848bd..faad79ae318a 100644 +--- a/drivers/hv/hv_snapshot.c ++++ b/drivers/hv/hv_snapshot.c +@@ -66,6 +66,7 @@ static int dm_reg_value; + static const char vss_devname[] = "vmbus/hv_vss"; + static __u8 *recv_buffer; + static struct hvutil_transport *hvt; ++static struct completion release_event; + + static void vss_send_op(struct work_struct *dummy); + static void vss_timeout_func(struct work_struct *dummy); +@@ -326,11 +327,13 @@ static void vss_on_reset(void) + if (cancel_delayed_work_sync(&vss_timeout_work)) + vss_respond_to_host(HV_E_FAIL); + vss_transaction.state = HVUTIL_DEVICE_INIT; ++ complete(&release_event); + } + + int + hv_vss_init(struct hv_util_service *srv) + { ++ init_completion(&release_event); + if (vmbus_proto_version < VERSION_WIN8_1) { + pr_warn("Integration service 'Backup (volume snapshot)'" + " not supported on this host version.\n"); +@@ -360,4 +363,5 @@ void hv_vss_deinit(void) + cancel_delayed_work_sync(&vss_timeout_work); + cancel_work_sync(&vss_send_op_work); + hvutil_transport_destroy(hvt); ++ wait_for_completion(&release_event); + } +diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c +index a0d7deeac62f..3f90985d545e 100644 +--- a/drivers/iio/pressure/mpl115.c ++++ b/drivers/iio/pressure/mpl115.c +@@ -136,6 +136,7 @@ static const struct iio_chan_spec mpl115_channels[] = { + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), ++ .info_mask_shared_by_type = + BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), + }, + }; +diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c +index 01b2e0b18878..0f5b8767ec2e 100644 +--- a/drivers/iio/pressure/mpl3115.c ++++ b/drivers/iio/pressure/mpl3115.c +@@ -182,7 +182,7 @@ static const struct iio_chan_spec mpl3115_channels[] = { + { + .type = IIO_PRESSURE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- BIT(IIO_CHAN_INFO_SCALE), ++ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 0, + .scan_type = { + .sign = 'u', +@@ -195,7 +195,7 @@ static const struct iio_chan_spec mpl3115_channels[] = { + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- BIT(IIO_CHAN_INFO_SCALE), ++ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 1, + .scan_type = { + .sign = 's', +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index c9dcad6a53bf..3f5741a3e728 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -3349,6 +3349,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv, + struct iw_cm_conn_param iw_param; + int ret; + ++ if (!conn_param) ++ return -EINVAL; ++ + ret = cma_modify_qp_rtr(id_priv, conn_param); + if (ret) + return ret; +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 9413b0726237..f0fc6f7b5d98 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -3238,13 +3238,14 @@ static int __init init_dmars(void) + iommu_identity_mapping |= IDENTMAP_GFX; + #endif + ++ check_tylersburg_isoch(); ++ + if (iommu_identity_mapping) { + ret = si_domain_init(hw_pass_through); + if (ret) + goto free_iommu; + } + +- check_tylersburg_isoch(); + + /* + * If we copied translations from a previous kernel in the kdump 
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 515f83e7d9ab..b59615ddf6ba 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -251,7 +251,7 @@ struct cache {
+ /*
+ * Fields for converting from sectors to blocks.
+ */
+- uint32_t sectors_per_block;
++ sector_t sectors_per_block;
+ int sectors_per_block_shift;
+
+ spinlock_t lock;
+@@ -3547,11 +3547,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+
+ residency = policy_residency(cache->policy);
+
+- DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
++ DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
+ (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
+ (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ (unsigned long long)nr_blocks_metadata,
+- cache->sectors_per_block,
++ (unsigned long long)cache->sectors_per_block,
+ (unsigned long long) from_cblock(residency),
+ (unsigned long long) from_cblock(cache->cache_size),
+ (unsigned) atomic_read(&cache->stats.read_hit),
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index 8289804ccd99..d5ea9f28ae70 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -175,6 +175,7 @@ static void dm_stat_free(struct rcu_head *head)
+ int cpu;
+ struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
+
++ kfree(s->histogram_boundaries);
+ kfree(s->program_id);
+ kfree(s->aux_data);
+ for_each_possible_cpu(cpu) {
+diff --git a/drivers/md/linear.c b/drivers/md/linear.c
+index b7fe7e9fc777..6ba3227e29b2 100644
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -52,18 +52,26 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
+ return conf->disks + lo;
+ }
+
++/*
++ * In linear_congested() conf->raid_disks is used as a copy of
++ * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks
++ * and conf->disks[] are created in linear_conf(), they are always
++ * consitent with each other, but mddev->raid_disks does not.
++ */
+ static int linear_congested(struct mddev *mddev, int bits)
+ {
+ struct linear_conf *conf;
+ int i, ret = 0;
+
+- conf = mddev->private;
++ rcu_read_lock();
++ conf = rcu_dereference(mddev->private);
+
+- for (i = 0; i < mddev->raid_disks && !ret ; i++) {
++ for (i = 0; i < conf->raid_disks && !ret ; i++) {
+ struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+ ret |= bdi_congested(&q->backing_dev_info, bits);
+ }
+
++ rcu_read_unlock();
+ return ret;
+ }
+
+@@ -143,6 +151,19 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
+ conf->disks[i-1].end_sector +
+ conf->disks[i].rdev->sectors;
+
++ /*
++ * conf->raid_disks is copy of mddev->raid_disks. The reason to
++ * keep a copy of mddev->raid_disks in struct linear_conf is,
++ * mddev->raid_disks may not be consistent with pointers number of
++ * conf->disks[] when it is updated in linear_add() and used to
++ * iterate old conf->disks[] earray in linear_congested().
++ * Here conf->raid_disks is always consitent with number of
++ * pointers in conf->disks[] array, and mddev->private is updated
++ * with rcu_assign_pointer() in linear_addr(), such race can be
++ * avoided.
++ */ ++ conf->raid_disks = raid_disks; ++ + return conf; + + out: +@@ -195,15 +216,23 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) + if (!newconf) + return -ENOMEM; + ++ /* newconf->raid_disks already keeps a copy of * the increased ++ * value of mddev->raid_disks, WARN_ONCE() is just used to make ++ * sure of this. It is possible that oldconf is still referenced ++ * in linear_congested(), therefore kfree_rcu() is used to free ++ * oldconf until no one uses it anymore. ++ */ + mddev_suspend(mddev); +- oldconf = mddev->private; ++ oldconf = rcu_dereference(mddev->private); + mddev->raid_disks++; +- mddev->private = newconf; ++ WARN_ONCE(mddev->raid_disks != newconf->raid_disks, ++ "copied raid_disks doesn't match mddev->raid_disks"); ++ rcu_assign_pointer(mddev->private, newconf); + md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); + set_capacity(mddev->gendisk, mddev->array_sectors); + mddev_resume(mddev); + revalidate_disk(mddev->gendisk); +- kfree(oldconf); ++ kfree_rcu(oldconf, rcu); + return 0; + } + +diff --git a/drivers/md/linear.h b/drivers/md/linear.h +index b685ddd7d7f7..8d392e6098b3 100644 +--- a/drivers/md/linear.h ++++ b/drivers/md/linear.h +@@ -10,6 +10,7 @@ struct linear_conf + { + struct rcu_head rcu; + sector_t array_sectors; ++ int raid_disks; /* a copy of mddev->raid_disks */ + struct dev_info disks[0]; + }; + #endif +diff --git a/drivers/media/pci/dm1105/Kconfig b/drivers/media/pci/dm1105/Kconfig +index 173daf0c0847..14fa7e40f2a6 100644 +--- a/drivers/media/pci/dm1105/Kconfig ++++ b/drivers/media/pci/dm1105/Kconfig +@@ -1,6 +1,6 @@ + config DVB_DM1105 + tristate "SDMC DM1105 based PCI cards" +- depends on DVB_CORE && PCI && I2C ++ depends on DVB_CORE && PCI && I2C && I2C_ALGOBIT + select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT + select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT + select DVB_STV0288 if MEDIA_SUBDRV_AUTOSELECT +diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c +index ba780c45f645..572bc043b62d 100644 +--- a/drivers/media/platform/am437x/am437x-vpfe.c ++++ b/drivers/media/platform/am437x/am437x-vpfe.c +@@ -1576,7 +1576,7 @@ static int vpfe_s_fmt(struct file *file, void *priv, + return -EBUSY; + } + +- ret = vpfe_try_fmt(file, priv, &format); ++ ret = __vpfe_get_format(vpfe, &format, &bpp); + if (ret) + return ret; + +diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c +index cfb868a48b5f..ff6feff21e94 100644 +--- a/drivers/media/usb/uvc/uvc_queue.c ++++ b/drivers/media/usb/uvc/uvc_queue.c +@@ -416,7 +416,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, + nextbuf = NULL; + spin_unlock_irqrestore(&queue->irqlock, flags); + +- buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; ++ buf->state = buf->error ? 
UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; + vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused); + vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE); + +diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c +index a731720f1d13..449b2a47f9a8 100644 +--- a/drivers/net/can/usb/usb_8dev.c ++++ b/drivers/net/can/usb/usb_8dev.c +@@ -954,8 +954,8 @@ static int usb_8dev_probe(struct usb_interface *intf, + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; + +- priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), +- GFP_KERNEL); ++ priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), ++ GFP_KERNEL); + if (!priv->cmd_msg_buffer) + goto cleanup_candev; + +@@ -969,7 +969,7 @@ static int usb_8dev_probe(struct usb_interface *intf, + if (err) { + netdev_err(netdev, + "couldn't register CAN device: %d\n", err); +- goto cleanup_cmd_msg_buffer; ++ goto cleanup_candev; + } + + err = usb_8dev_cmd_version(priv, &version); +@@ -990,9 +990,6 @@ static int usb_8dev_probe(struct usb_interface *intf, + cleanup_unregister_candev: + unregister_netdev(priv->netdev); + +-cleanup_cmd_msg_buffer: +- kfree(priv->cmd_msg_buffer); +- + cleanup_candev: + free_candev(netdev); + +diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c +index dc44cfef7517..16e052d02c94 100644 +--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c ++++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c +@@ -502,8 +502,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + break; + return -EOPNOTSUPP; + default: +- WARN_ON(1); +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + mutex_lock(&ah->lock); +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +index 694ca2e680e5..74670e08e6da 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +@@ -73,13 +73,13 @@ + #define AR9300_OTP_BASE \ + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) + #define AR9300_OTP_STATUS \ +- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) ++ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18) + #define AR9300_OTP_STATUS_TYPE 0x7 + #define AR9300_OTP_STATUS_VALID 0x4 + #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 + #define AR9300_OTP_STATUS_SM_BUSY 0x1 + #define AR9300_OTP_READ_DATA \ +- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c) ++ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 
0x3101c : 0x15f1c) + + enum targetPowerHTRates { + HT_TARGET_RATE_0_8_16, +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h +index b42f4a963ef4..a660e40f2df1 100644 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h +@@ -959,6 +959,7 @@ struct ath_softc { + struct survey_info *cur_survey; + struct survey_info survey[ATH9K_NUM_CHANNELS]; + ++ spinlock_t intr_lock; + struct tasklet_struct intr_tq; + struct tasklet_struct bcon_tasklet; + struct ath_hw *sc_ah; +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c +index bc70ce62bc03..0f5672f5c9ba 100644 +--- a/drivers/net/wireless/ath/ath9k/init.c ++++ b/drivers/net/wireless/ath/ath9k/init.c +@@ -619,6 +619,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, + common->bt_ant_diversity = 1; + + spin_lock_init(&common->cc_lock); ++ spin_lock_init(&sc->intr_lock); + spin_lock_init(&sc->sc_serial_rw); + spin_lock_init(&sc->sc_pm_lock); + spin_lock_init(&sc->chan_lock); +diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c +index bba85d1a6cd1..d937c39b3a0b 100644 +--- a/drivers/net/wireless/ath/ath9k/mac.c ++++ b/drivers/net/wireless/ath/ath9k/mac.c +@@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah) + } + EXPORT_SYMBOL(ath9k_hw_disable_interrupts); + +-void ath9k_hw_enable_interrupts(struct ath_hw *ah) ++static void __ath9k_hw_enable_interrupts(struct ath_hw *ah) + { + struct ath_common *common = ath9k_hw_common(ah); + u32 sync_default = AR_INTR_SYNC_DEFAULT; + u32 async_mask; + +- if (!(ah->imask & ATH9K_INT_GLOBAL)) +- return; +- +- if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { +- ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", +- atomic_read(&ah->intr_ref_cnt)); +- return; +- } +- + if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) || + AR_SREV_9561(ah)) + sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; +@@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah) + ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", + REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); + } ++ ++void ath9k_hw_resume_interrupts(struct ath_hw *ah) ++{ ++ struct ath_common *common = ath9k_hw_common(ah); ++ ++ if (!(ah->imask & ATH9K_INT_GLOBAL)) ++ return; ++ ++ if (atomic_read(&ah->intr_ref_cnt) != 0) { ++ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", ++ atomic_read(&ah->intr_ref_cnt)); ++ return; ++ } ++ ++ __ath9k_hw_enable_interrupts(ah); ++} ++EXPORT_SYMBOL(ath9k_hw_resume_interrupts); ++ ++void ath9k_hw_enable_interrupts(struct ath_hw *ah) ++{ ++ struct ath_common *common = ath9k_hw_common(ah); ++ ++ if (!(ah->imask & ATH9K_INT_GLOBAL)) ++ return; ++ ++ if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { ++ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", ++ atomic_read(&ah->intr_ref_cnt)); ++ return; ++ } ++ ++ __ath9k_hw_enable_interrupts(ah); ++} + EXPORT_SYMBOL(ath9k_hw_enable_interrupts); + + void ath9k_hw_set_interrupts(struct ath_hw *ah) +diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h +index 7fbf7f965f61..1b63d26f30ce 100644 +--- a/drivers/net/wireless/ath/ath9k/mac.h ++++ b/drivers/net/wireless/ath/ath9k/mac.h +@@ -748,6 +748,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah); + void ath9k_hw_enable_interrupts(struct ath_hw *ah); + void ath9k_hw_disable_interrupts(struct ath_hw *ah); + void ath9k_hw_kill_interrupts(struct ath_hw *ah); ++void 
ath9k_hw_resume_interrupts(struct ath_hw *ah); + + void ar9002_hw_attach_mac_ops(struct ath_hw *ah); + +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index 8c5d2cf9c979..b114e57a823f 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -373,21 +373,20 @@ void ath9k_tasklet(unsigned long data) + struct ath_common *common = ath9k_hw_common(ah); + enum ath_reset_type type; + unsigned long flags; +- u32 status = sc->intrstatus; ++ u32 status; + u32 rxmask; + ++ spin_lock_irqsave(&sc->intr_lock, flags); ++ status = sc->intrstatus; ++ sc->intrstatus = 0; ++ spin_unlock_irqrestore(&sc->intr_lock, flags); ++ + ath9k_ps_wakeup(sc); + spin_lock(&sc->sc_pcu_lock); + + if (status & ATH9K_INT_FATAL) { + type = RESET_TYPE_FATAL_INT; + ath9k_queue_reset(sc, type); +- +- /* +- * Increment the ref. counter here so that +- * interrupts are enabled in the reset routine. +- */ +- atomic_inc(&ah->intr_ref_cnt); + ath_dbg(common, RESET, "FATAL: Skipping interrupts\n"); + goto out; + } +@@ -403,11 +402,6 @@ void ath9k_tasklet(unsigned long data) + type = RESET_TYPE_BB_WATCHDOG; + ath9k_queue_reset(sc, type); + +- /* +- * Increment the ref. counter here so that +- * interrupts are enabled in the reset routine. +- */ +- atomic_inc(&ah->intr_ref_cnt); + ath_dbg(common, RESET, + "BB_WATCHDOG: Skipping interrupts\n"); + goto out; +@@ -420,7 +414,6 @@ void ath9k_tasklet(unsigned long data) + if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) { + type = RESET_TYPE_TX_GTT; + ath9k_queue_reset(sc, type); +- atomic_inc(&ah->intr_ref_cnt); + ath_dbg(common, RESET, + "GTT: Skipping interrupts\n"); + goto out; +@@ -477,7 +470,7 @@ void ath9k_tasklet(unsigned long data) + ath9k_btcoex_handle_interrupt(sc, status); + + /* re-enable hardware interrupt */ +- ath9k_hw_enable_interrupts(ah); ++ ath9k_hw_resume_interrupts(ah); + out: + spin_unlock(&sc->sc_pcu_lock); + ath9k_ps_restore(sc); +@@ -541,7 +534,9 @@ irqreturn_t ath_isr(int irq, void *dev) + return IRQ_NONE; + + /* Cache the status */ +- sc->intrstatus = status; ++ spin_lock(&sc->intr_lock); ++ sc->intrstatus |= status; ++ spin_unlock(&sc->intr_lock); + + if (status & SCHED_INTR) + sched = true; +@@ -587,7 +582,7 @@ chip_reset: + + if (sched) { + /* turn off every interrupt */ +- ath9k_hw_disable_interrupts(ah); ++ ath9k_hw_kill_interrupts(ah); + tasklet_schedule(&sc->intr_tq); + } + +diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h +index 5da6703942d9..672f81ea02d0 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/pci.h ++++ b/drivers/net/wireless/realtek/rtlwifi/pci.h +@@ -275,10 +275,10 @@ struct mp_adapter { + }; + + struct rtl_pci_priv { ++ struct bt_coexist_info bt_coexist; ++ struct rtl_led_ctl ledctl; + struct rtl_pci dev; + struct mp_adapter ndis_adapter; +- struct rtl_led_ctl ledctl; +- struct bt_coexist_info bt_coexist; + }; + + #define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv)) +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +index 5f14308e8eb3..b1601441991d 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +@@ -1003,7 +1003,7 @@ static void _rtl92ee_hw_configure(struct ieee80211_hw *hw) + rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x100a); + + /* Note Data sheet don't define */ +- rtl_write_word(rtlpriv, 0x4C7, 0x80); ++ 
rtl_write_byte(rtlpriv, 0x4C7, 0x80); + + rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20); + +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +index bbb789f8990b..c2103e7a8132 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +@@ -1127,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) + } + if (0 == tmp) { + read_addr = REG_DBI_RDATA + addr % 4; +- ret = rtl_read_word(rtlpriv, read_addr); ++ ret = rtl_read_byte(rtlpriv, read_addr); + } + return ret; + } +diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h +index 685273ca9561..441c4412130c 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/usb.h ++++ b/drivers/net/wireless/realtek/rtlwifi/usb.h +@@ -150,8 +150,9 @@ struct rtl_usb { + }; + + struct rtl_usb_priv { +- struct rtl_usb dev; ++ struct bt_coexist_info bt_coexist; + struct rtl_led_ctl ledctl; ++ struct rtl_usb dev; + }; + + #define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv)) +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 732ac71b82cd..88dbbeb8569b 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -4273,12 +4273,13 @@ static void regulator_summary_show_subtree(struct seq_file *s, + seq_puts(s, "\n"); + + list_for_each_entry(consumer, &rdev->consumer_list, list) { +- if (consumer->dev->class == ®ulator_class) ++ if (consumer->dev && consumer->dev->class == ®ulator_class) + continue; + + seq_printf(s, "%*s%-*s ", + (level + 1) * 3 + 1, "", +- 30 - (level + 1) * 3, dev_name(consumer->dev)); ++ 30 - (level + 1) * 3, ++ consumer->dev ? dev_name(consumer->dev) : "deviceless"); + + switch (rdev->desc->type) { + case REGULATOR_VOLTAGE: +diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c +index c169a2cd4727..e29cc9fca0bf 100644 +--- a/drivers/rtc/rtc-sun6i.c ++++ b/drivers/rtc/rtc-sun6i.c +@@ -37,9 +37,11 @@ + + /* Control register */ + #define SUN6I_LOSC_CTRL 0x0000 ++#define SUN6I_LOSC_CTRL_KEY (0x16aa << 16) + #define SUN6I_LOSC_CTRL_ALM_DHMS_ACC BIT(9) + #define SUN6I_LOSC_CTRL_RTC_HMS_ACC BIT(8) + #define SUN6I_LOSC_CTRL_RTC_YMD_ACC BIT(7) ++#define SUN6I_LOSC_CTRL_EXT_OSC BIT(0) + #define SUN6I_LOSC_CTRL_ACC_MASK GENMASK(9, 7) + + /* RTC */ +@@ -114,13 +116,17 @@ struct sun6i_rtc_dev { + void __iomem *base; + int irq; + unsigned long alarm; ++ ++ spinlock_t lock; + }; + + static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id) + { + struct sun6i_rtc_dev *chip = (struct sun6i_rtc_dev *) id; ++ irqreturn_t ret = IRQ_NONE; + u32 val; + ++ spin_lock(&chip->lock); + val = readl(chip->base + SUN6I_ALRM_IRQ_STA); + + if (val & SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND) { +@@ -129,10 +135,11 @@ static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id) + + rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF); + +- return IRQ_HANDLED; ++ ret = IRQ_HANDLED; + } ++ spin_unlock(&chip->lock); + +- return IRQ_NONE; ++ return ret; + } + + static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) +@@ -140,6 +147,7 @@ static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) + u32 alrm_val = 0; + u32 alrm_irq_val = 0; + u32 alrm_wake_val = 0; ++ unsigned long flags; + + if (to) { + alrm_val = SUN6I_ALRM_EN_CNT_EN; +@@ -150,9 +158,11 @@ static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) + chip->base + SUN6I_ALRM_IRQ_STA); + } + ++ spin_lock_irqsave(&chip->lock, flags); + writel(alrm_val, 
chip->base + SUN6I_ALRM_EN); + writel(alrm_irq_val, chip->base + SUN6I_ALRM_IRQ_EN); + writel(alrm_wake_val, chip->base + SUN6I_ALARM_CONFIG); ++ spin_unlock_irqrestore(&chip->lock, flags); + } + + static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) +@@ -191,11 +201,15 @@ static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) + static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm) + { + struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); ++ unsigned long flags; + u32 alrm_st; + u32 alrm_en; + ++ spin_lock_irqsave(&chip->lock, flags); + alrm_en = readl(chip->base + SUN6I_ALRM_IRQ_EN); + alrm_st = readl(chip->base + SUN6I_ALRM_IRQ_STA); ++ spin_unlock_irqrestore(&chip->lock, flags); ++ + wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN); + wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN); + rtc_time_to_tm(chip->alarm, &wkalrm->time); +@@ -356,6 +370,7 @@ static int sun6i_rtc_probe(struct platform_device *pdev) + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; ++ spin_lock_init(&chip->lock); + + platform_set_drvdata(pdev, chip); + chip->dev = &pdev->dev; +@@ -404,6 +419,10 @@ static int sun6i_rtc_probe(struct platform_device *pdev) + /* disable alarm wakeup */ + writel(0, chip->base + SUN6I_ALARM_CONFIG); + ++ /* switch to the external, more precise, oscillator */ ++ writel(SUN6I_LOSC_CTRL_KEY | SUN6I_LOSC_CTRL_EXT_OSC, ++ chip->base + SUN6I_LOSC_CTRL); ++ + chip->rtc = rtc_device_register("rtc-sun6i", &pdev->dev, + &sun6i_rtc_ops, THIS_MODULE); + if (IS_ERR(chip->rtc)) { +diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c +index bc0203f3d243..e415e1c58eb5 100644 +--- a/drivers/scsi/aacraid/src.c ++++ b/drivers/scsi/aacraid/src.c +@@ -413,16 +413,23 @@ static int aac_src_check_health(struct aac_dev *dev) + u32 status = src_readl(dev, MUnit.OMR); + + /* ++ * Check to see if the board panic'd. ++ */ ++ if (unlikely(status & KERNEL_PANIC)) ++ goto err_blink; ++ ++ /* + * Check to see if the board failed any self tests. + */ + if (unlikely(status & SELF_TEST_FAILED)) +- return -1; ++ goto err_out; + + /* +- * Check to see if the board panic'd. ++ * Check to see if the board failed any self tests. + */ +- if (unlikely(status & KERNEL_PANIC)) +- return (status >> 16) & 0xFF; ++ if (unlikely(status & MONITOR_PANIC)) ++ goto err_out; ++ + /* + * Wait for the adapter to be up and running. 
+ */ +@@ -432,6 +439,12 @@ static int aac_src_check_health(struct aac_dev *dev) + * Everything is OK + */ + return 0; ++ ++err_out: ++ return -1; ++ ++err_blink: ++ return (status > 16) & 0xFF; + } + + /** +diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h +index 33ec4fa39ccb..f224cdb2fce4 100644 +--- a/drivers/scsi/lpfc/lpfc_hw4.h ++++ b/drivers/scsi/lpfc/lpfc_hw4.h +@@ -1182,6 +1182,7 @@ struct lpfc_mbx_wq_create { + #define lpfc_mbx_wq_create_page_size_SHIFT 0 + #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF + #define lpfc_mbx_wq_create_page_size_WORD word1 ++#define LPFC_WQ_PAGE_SIZE_4096 0x1 + #define lpfc_mbx_wq_create_wqe_size_SHIFT 8 + #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F + #define lpfc_mbx_wq_create_wqe_size_WORD word1 +@@ -1253,6 +1254,7 @@ struct rq_context { + #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ + #define lpfc_rq_context_page_size_MASK 0x000000FF + #define lpfc_rq_context_page_size_WORD word0 ++#define LPFC_RQ_PAGE_SIZE_4096 0x1 + uint32_t reserved1; + uint32_t word2; + #define lpfc_rq_context_cq_id_SHIFT 16 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index 92dfd6a5178c..f5aeda8f014f 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -13475,7 +13475,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, + LPFC_WQ_WQE_SIZE_128); + bf_set(lpfc_mbx_wq_create_page_size, + &wq_create->u.request_1, +- (PAGE_SIZE/SLI4_PAGE_SIZE)); ++ LPFC_WQ_PAGE_SIZE_4096); + page = wq_create->u.request_1.page; + break; + } +@@ -13501,8 +13501,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, + LPFC_WQ_WQE_SIZE_128); + break; + } +- bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, +- (PAGE_SIZE/SLI4_PAGE_SIZE)); ++ bf_set(lpfc_mbx_wq_create_page_size, ++ &wq_create->u.request_1, ++ LPFC_WQ_PAGE_SIZE_4096); + page = wq_create->u.request_1.page; + break; + default: +@@ -13688,7 +13689,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, + &rq_create->u.request.context, +- (PAGE_SIZE/SLI4_PAGE_SIZE)); ++ LPFC_RQ_PAGE_SIZE_4096); + } else { + switch (hrq->entry_count) { + default: +diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c +index e7649ed3f667..4d655b568269 100644 +--- a/drivers/scsi/scsi_dh.c ++++ b/drivers/scsi/scsi_dh.c +@@ -289,20 +289,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) + } + EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); + +-static struct scsi_device *get_sdev_from_queue(struct request_queue *q) +-{ +- struct scsi_device *sdev; +- unsigned long flags; +- +- spin_lock_irqsave(q->queue_lock, flags); +- sdev = q->queuedata; +- if (!sdev || !get_device(&sdev->sdev_gendev)) +- sdev = NULL; +- spin_unlock_irqrestore(q->queue_lock, flags); +- +- return sdev; +-} +- + /* + * scsi_dh_activate - activate the path associated with the scsi_device + * corresponding to the given request queue. 
+@@ -321,7 +307,7 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) + struct scsi_device *sdev; + int err = SCSI_DH_NOSYS; + +- sdev = get_sdev_from_queue(q); ++ sdev = scsi_device_from_queue(q); + if (!sdev) { + if (fn) + fn(data, err); +@@ -368,7 +354,7 @@ int scsi_dh_set_params(struct request_queue *q, const char *params) + struct scsi_device *sdev; + int err = -SCSI_DH_NOSYS; + +- sdev = get_sdev_from_queue(q); ++ sdev = scsi_device_from_queue(q); + if (!sdev) + return err; + +@@ -391,7 +377,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name) + struct scsi_device_handler *scsi_dh; + int err = 0; + +- sdev = get_sdev_from_queue(q); ++ sdev = scsi_device_from_queue(q); + if (!sdev) + return -ENODEV; + +@@ -429,7 +415,7 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) + struct scsi_device *sdev; + const char *handler_name = NULL; + +- sdev = get_sdev_from_queue(q); ++ sdev = scsi_device_from_queue(q); + if (!sdev) + return NULL; + +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index 8558e3886960..887045ae5d10 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -2215,6 +2215,29 @@ void scsi_mq_destroy_tags(struct Scsi_Host *shost) + blk_mq_free_tag_set(&shost->tag_set); + } + ++/** ++ * scsi_device_from_queue - return sdev associated with a request_queue ++ * @q: The request queue to return the sdev from ++ * ++ * Return the sdev associated with a request queue or NULL if the ++ * request_queue does not reference a SCSI device. ++ */ ++struct scsi_device *scsi_device_from_queue(struct request_queue *q) ++{ ++ struct scsi_device *sdev = NULL; ++ ++ if (q->mq_ops) { ++ if (q->mq_ops == &scsi_mq_ops) ++ sdev = q->queuedata; ++ } else if (q->request_fn == scsi_request_fn) ++ sdev = q->queuedata; ++ if (!sdev || !get_device(&sdev->sdev_gendev)) ++ sdev = NULL; ++ ++ return sdev; ++} ++EXPORT_SYMBOL_GPL(scsi_device_from_queue); ++ + /* + * Function: scsi_block_requests() + * +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 6ee50742f6a5..78430ef28ea4 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp, + **/ + static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) + { +- struct scsi_disk *sdkp = scsi_disk(disk); +- struct scsi_device *sdp = sdkp->device; ++ struct scsi_disk *sdkp = scsi_disk_get(disk); ++ struct scsi_device *sdp; + struct scsi_sense_hdr *sshdr = NULL; + int retval; + ++ if (!sdkp) ++ return 0; ++ ++ sdp = sdkp->device; + SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); + + /* +@@ -1459,6 +1463,7 @@ out: + kfree(sshdr); + retval = sdp->changed ? 
DISK_EVENT_MEDIA_CHANGE : 0; + sdp->changed = 0; ++ scsi_disk_put(sdkp); + return retval; + } + +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index 0f636cc4c809..cd5c1c060481 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ b/drivers/scsi/storvsc_drv.c +@@ -135,6 +135,8 @@ struct hv_fc_wwn_packet { + #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 + #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 + ++#define SP_UNTAGGED ((unsigned char) ~0) ++#define SRB_SIMPLE_TAG_REQUEST 0x20 + + /* + * Platform neutral description of a scsi request - +@@ -354,6 +356,7 @@ enum storvsc_request_type { + #define SRB_STATUS_SUCCESS 0x01 + #define SRB_STATUS_ABORTED 0x02 + #define SRB_STATUS_ERROR 0x04 ++#define SRB_STATUS_DATA_OVERRUN 0x12 + + #define SRB_STATUS(status) \ + (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) +@@ -864,6 +867,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, + switch (SRB_STATUS(vm_srb->srb_status)) { + case SRB_STATUS_ERROR: + /* ++ * Let upper layer deal with error when ++ * sense message is present. ++ */ ++ ++ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) ++ break; ++ /* + * If there is an error; offline the device since all + * error recovery strategies would have already been + * deployed on the host side. However, if the command +@@ -927,6 +937,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) + struct hv_host_device *host_dev = shost_priv(scmnd->device->host); + struct scsi_sense_hdr sense_hdr; + struct vmscsi_request *vm_srb; ++ u32 data_transfer_length; + struct Scsi_Host *host; + struct storvsc_device *stor_dev; + struct hv_device *dev = host_dev->dev; +@@ -937,6 +948,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) + host = stor_dev->host; + + vm_srb = &cmd_request->vstor_packet.vm_srb; ++ data_transfer_length = vm_srb->data_transfer_length; + + scmnd->result = vm_srb->scsi_status; + +@@ -947,13 +959,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) + &sense_hdr); + } + +- if (vm_srb->srb_status != SRB_STATUS_SUCCESS) ++ if (vm_srb->srb_status != SRB_STATUS_SUCCESS) { + storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, + sense_hdr.ascq); ++ /* ++ * The Windows driver set data_transfer_length on ++ * SRB_STATUS_DATA_OVERRUN. On other errors, this value ++ * is untouched. In these cases we set it to 0. 
++ */
++ if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
++ data_transfer_length = 0;
++ }
+
+ scsi_set_resid(scmnd,
+- cmd_request->payload->range.len -
+- vm_srb->data_transfer_length);
++ cmd_request->payload->range.len - data_transfer_length);
+
+ scmnd->scsi_done(scmnd);
+
+@@ -1409,6 +1428,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ vm_srb->win8_extension.srb_flags |=
+ SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
+
++ if (scmnd->device->tagged_supported) {
++ vm_srb->win8_extension.srb_flags |=
++ (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
++ vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
++ vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
++ }
++
+ /* Build the SRB */
+ switch (scmnd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
+index 110b8c0b6cd7..0f2fe34e14c2 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
+@@ -1405,6 +1405,9 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
+ ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
+ }
+
++ if (!ptr)
++ return _FAIL;
++
+ memcpy(ptr, pattrib->dst, ETH_ALEN);
+ memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
+
+diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
+index 4ff530155187..04ac23cc47a8 100644
+--- a/drivers/staging/rtl8712/rtl871x_recv.c
++++ b/drivers/staging/rtl8712/rtl871x_recv.c
+@@ -641,11 +641,16 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
+ /* append rx status for mp test packets */
+ ptr = recvframe_pull(precvframe, (rmv_len -
+ sizeof(struct ethhdr) + 2) - 24);
++ if (!ptr)
++ return _FAIL;
+ memcpy(ptr, get_rxmem(precvframe), 24);
+ ptr += 24;
+- } else
++ } else {
+ ptr = recvframe_pull(precvframe, (rmv_len -
+ sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
++ if (!ptr)
++ return _FAIL;
++ }
+
+ memcpy(ptr, pattrib->dst, ETH_ALEN);
+ memcpy(ptr + ETH_ALEN, pattrib->src, ETH_ALEN);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index bd810c109277..6ed80b05d674 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3436,7 +3436,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+
+ if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+ (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+- (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
++ (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
+ cmd->conn->sess->sess_ops->InitiatorName))) {
+ continue;
+ }
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 5fb9dd7f08bb..028854cda97b 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -75,9 +75,21 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
+ unsigned char *initiatorname)
+ {
+ struct se_node_acl *acl;
+-
++ /*
++ * Obtain se_node_acl->acl_kref using fabric driver provided
++ * initiatorname[] during node acl endpoint lookup driven by
++ * new se_session login.
++ *
++ * The reference is held until se_session shutdown -> release
++ * occurs via fabric driver invoked transport_deregister_session()
++ * or transport_free_session() code.
++ */
+ mutex_lock(&tpg->acl_node_mutex);
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
++ if (acl) {
++ if (!kref_get_unless_zero(&acl->acl_kref))
++ acl = NULL;
++ }
+ mutex_unlock(&tpg->acl_node_mutex);
+
+ return acl;
+@@ -232,6 +244,25 @@ static void target_add_node_acl(struct se_node_acl *acl)
+ acl->initiatorname);
+ }
+
++bool target_tpg_has_node_acl(struct se_portal_group *tpg,
++ const char *initiatorname)
++{
++ struct se_node_acl *acl;
++ bool found = false;
++
++ mutex_lock(&tpg->acl_node_mutex);
++ list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
++ if (!strcmp(acl->initiatorname, initiatorname)) {
++ found = true;
++ break;
++ }
++ }
++ mutex_unlock(&tpg->acl_node_mutex);
++
++ return found;
++}
++EXPORT_SYMBOL(target_tpg_has_node_acl);
++
+ struct se_node_acl *core_tpg_check_initiator_node_acl(
+ struct se_portal_group *tpg,
+ unsigned char *initiatorname)
+@@ -248,6 +279,15 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
+ acl = target_alloc_node_acl(tpg, initiatorname);
+ if (!acl)
+ return NULL;
++ /*
++ * When allocating a dynamically generated node_acl, go ahead
++ * and take the extra kref now before returning to the fabric
++ * driver caller.
++ *
++ * Note this reference will be released at session shutdown
++ * time within transport_free_session() code.
++ */
++ kref_get(&acl->acl_kref);
+ acl->dynamic_node_acl = 1;
+
+ /*
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index aa517c4fadb9..befe22744802 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -341,7 +341,6 @@ void __transport_register_session(
+ &buf[0], PR_REG_ISID_LEN);
+ se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+ }
+- kref_get(&se_nacl->acl_kref);
+
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ /*
+@@ -424,14 +423,27 @@ static void target_complete_nacl(struct kref *kref)
+ {
+ struct se_node_acl *nacl = container_of(kref,
+ struct se_node_acl, acl_kref);
++ struct se_portal_group *se_tpg = nacl->se_tpg;
+
+- complete(&nacl->acl_free_comp);
++ if (!nacl->dynamic_stop) {
++ complete(&nacl->acl_free_comp);
++ return;
++ }
++
++ mutex_lock(&se_tpg->acl_node_mutex);
++ list_del(&nacl->acl_list);
++ mutex_unlock(&se_tpg->acl_node_mutex);
++
++ core_tpg_wait_for_nacl_pr_ref(nacl);
++ core_free_device_list_for_node(nacl, se_tpg);
++ kfree(nacl);
+ }
+
+ void target_put_nacl(struct se_node_acl *nacl)
+ {
+ kref_put(&nacl->acl_kref, target_complete_nacl);
+ }
++EXPORT_SYMBOL(target_put_nacl);
+
+ void transport_deregister_session_configfs(struct se_session *se_sess)
+ {
+@@ -464,6 +476,42 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
+
+ void transport_free_session(struct se_session *se_sess)
+ {
++ struct se_node_acl *se_nacl = se_sess->se_node_acl;
++
++ /*
++ * Drop the se_node_acl->nacl_kref obtained from within
++ * core_tpg_get_initiator_node_acl().
++ */
++ if (se_nacl) {
++ struct se_portal_group *se_tpg = se_nacl->se_tpg;
++ const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
++ unsigned long flags;
++
++ se_sess->se_node_acl = NULL;
++
++ /*
++ * Also determine if we need to drop the extra ->cmd_kref if
++ * it had been previously dynamically generated, and
++ * the endpoint is not caching dynamic ACLs.
++ */ ++ mutex_lock(&se_tpg->acl_node_mutex); ++ if (se_nacl->dynamic_node_acl && ++ !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { ++ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); ++ if (list_empty(&se_nacl->acl_sess_list)) ++ se_nacl->dynamic_stop = true; ++ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); ++ ++ if (se_nacl->dynamic_stop) ++ list_del(&se_nacl->acl_list); ++ } ++ mutex_unlock(&se_tpg->acl_node_mutex); ++ ++ if (se_nacl->dynamic_stop) ++ target_put_nacl(se_nacl); ++ ++ target_put_nacl(se_nacl); ++ } + if (se_sess->sess_cmd_map) { + percpu_ida_destroy(&se_sess->sess_tag_pool); + kvfree(se_sess->sess_cmd_map); +@@ -475,16 +523,12 @@ EXPORT_SYMBOL(transport_free_session); + void transport_deregister_session(struct se_session *se_sess) + { + struct se_portal_group *se_tpg = se_sess->se_tpg; +- const struct target_core_fabric_ops *se_tfo; +- struct se_node_acl *se_nacl; + unsigned long flags; +- bool comp_nacl = true, drop_nacl = false; + + if (!se_tpg) { + transport_free_session(se_sess); + return; + } +- se_tfo = se_tpg->se_tpg_tfo; + + spin_lock_irqsave(&se_tpg->session_lock, flags); + list_del(&se_sess->sess_list); +@@ -492,37 +536,16 @@ void transport_deregister_session(struct se_session *se_sess) + se_sess->fabric_sess_ptr = NULL; + spin_unlock_irqrestore(&se_tpg->session_lock, flags); + +- /* +- * Determine if we need to do extra work for this initiator node's +- * struct se_node_acl if it had been previously dynamically generated. +- */ +- se_nacl = se_sess->se_node_acl; +- +- mutex_lock(&se_tpg->acl_node_mutex); +- if (se_nacl && se_nacl->dynamic_node_acl) { +- if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { +- list_del(&se_nacl->acl_list); +- se_tpg->num_node_acls--; +- drop_nacl = true; +- } +- } +- mutex_unlock(&se_tpg->acl_node_mutex); +- +- if (drop_nacl) { +- core_tpg_wait_for_nacl_pr_ref(se_nacl); +- core_free_device_list_for_node(se_nacl, se_tpg); +- kfree(se_nacl); +- comp_nacl = false; +- } + pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", + se_tpg->se_tpg_tfo->get_fabric_name()); + /* + * If last kref is dropping now for an explicit NodeACL, awake sleeping + * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group +- * removal context. ++ * removal context from within transport_free_session() code. ++ * ++ * For dynamic ACL, target_put_nacl() uses target_complete_nacl() ++ * to release all remaining generate_node_acl=1 created ACL resources. + */ +- if (se_nacl && comp_nacl) +- target_put_nacl(se_nacl); + + transport_free_session(se_sess); + } +diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c +index aab5221d6c2e..aac0ce8aeb0b 100644 +--- a/drivers/usb/gadget/udc/fsl_udc_core.c ++++ b/drivers/usb/gadget/udc/fsl_udc_core.c +@@ -1249,6 +1249,12 @@ static const struct usb_gadget_ops fsl_gadget_ops = { + .udc_stop = fsl_udc_stop, + }; + ++/* ++ * Empty complete function used by this driver to fill in the req->complete ++ * field when creating a request since the complete field is mandatory. 
++ */ ++static void fsl_noop_complete(struct usb_ep *ep, struct usb_request *req) { } ++ + /* Set protocol stall on ep0, protocol stall will automatically be cleared + on new transaction */ + static void ep0stall(struct fsl_udc *udc) +@@ -1283,7 +1289,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction) + req->req.length = 0; + req->req.status = -EINPROGRESS; + req->req.actual = 0; +- req->req.complete = NULL; ++ req->req.complete = fsl_noop_complete; + req->dtd_count = 0; + + ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); +@@ -1366,7 +1372,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value, + req->req.length = 2; + req->req.status = -EINPROGRESS; + req->req.actual = 0; +- req->req.complete = NULL; ++ req->req.complete = fsl_noop_complete; + req->dtd_count = 0; + + ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); +diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c +index b03d3b867fca..9a9c82a4d35d 100644 +--- a/drivers/usb/musb/da8xx.c ++++ b/drivers/usb/musb/da8xx.c +@@ -458,15 +458,11 @@ static int da8xx_musb_exit(struct musb *musb) + } + + static const struct musb_platform_ops da8xx_ops = { +- .quirks = MUSB_DMA_CPPI | MUSB_INDEXED_EP, ++ .quirks = MUSB_INDEXED_EP, + .init = da8xx_musb_init, + .exit = da8xx_musb_exit, + + .fifo_mode = 2, +-#ifdef CONFIG_USB_TI_CPPI_DMA +- .dma_init = cppi_dma_controller_create, +- .dma_exit = cppi_dma_controller_destroy, +-#endif + .enable = da8xx_musb_enable, + .disable = da8xx_musb_disable, + +diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c +index 049a884a756f..59d74d1b47a8 100644 +--- a/drivers/w1/masters/ds2490.c ++++ b/drivers/w1/masters/ds2490.c +@@ -153,6 +153,9 @@ struct ds_device + */ + u16 spu_bit; + ++ u8 st_buf[ST_SIZE]; ++ u8 byte_buf; ++ + struct w1_bus_master master; + }; + +@@ -174,7 +177,6 @@ struct ds_status + u8 data_in_buffer_status; + u8 reserved1; + u8 reserved2; +- + }; + + static struct usb_device_id ds_id_table [] = { +@@ -244,28 +246,6 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index) + return err; + } + +-static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st, +- unsigned char *buf, int size) +-{ +- int count, err; +- +- memset(st, 0, sizeof(*st)); +- +- count = 0; +- err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev, +- dev->ep[EP_STATUS]), buf, size, &count, 1000); +- if (err < 0) { +- pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n", +- dev->ep[EP_STATUS], err); +- return err; +- } +- +- if (count >= sizeof(*st)) +- memcpy(st, buf, sizeof(*st)); +- +- return count; +-} +- + static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off) + { + pr_info("%45s: %8x\n", str, buf[off]); +@@ -324,6 +304,35 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count) + } + } + ++static int ds_recv_status(struct ds_device *dev, struct ds_status *st, ++ bool dump) ++{ ++ int count, err; ++ ++ if (st) ++ memset(st, 0, sizeof(*st)); ++ ++ count = 0; ++ err = usb_interrupt_msg(dev->udev, ++ usb_rcvintpipe(dev->udev, ++ dev->ep[EP_STATUS]), ++ dev->st_buf, sizeof(dev->st_buf), ++ &count, 1000); ++ if (err < 0) { ++ pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n", ++ dev->ep[EP_STATUS], err); ++ return err; ++ } ++ ++ if (dump) ++ ds_dump_status(dev, dev->st_buf, count); ++ ++ if (st && count >= sizeof(*st)) ++ memcpy(st, dev->st_buf, sizeof(*st)); ++ ++ return count; ++} ++ + static void 
ds_reset_device(struct ds_device *dev)
+ {
+ ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
+@@ -344,7 +353,6 @@ static void ds_reset_device(struct ds_device *dev)
+ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
+ {
+ int count, err;
+- struct ds_status st;
+ 
+ /* Careful on size. If size is less than what is available in
+ * the input buffer, the device fails the bulk transfer and
+@@ -359,14 +367,9 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
+ err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
+ buf, size, &count, 1000);
+ if (err < 0) {
+- u8 buf[ST_SIZE];
+- int count;
+-
+ pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
+ usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
+-
+- count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
+- ds_dump_status(dev, buf, count);
++ ds_recv_status(dev, NULL, true);
+ return err;
+ }
+ 
+@@ -404,7 +407,6 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
+ {
+ struct ds_status st;
+ int count = 0, err = 0;
+- u8 buf[ST_SIZE];
+ 
+ do {
+ err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
+@@ -413,7 +415,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
+ err = ds_send_control(dev, CTL_RESUME_EXE, 0);
+ if (err)
+ break;
+- err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
++ err = ds_recv_status(dev, &st, false);
+ if (err)
+ break;
+ 
+@@ -456,18 +458,17 @@ int ds_detect(struct ds_device *dev, struct ds_status *st)
+ 
+ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
+ {
+- u8 buf[ST_SIZE];
+ int err, count = 0;
+ 
+ do {
+ st->status = 0;
+- err = ds_recv_status_nodump(dev, st, buf, sizeof(buf));
++ err = ds_recv_status(dev, st, false);
+ #if 0
+ if (err >= 0) {
+ int i;
+ printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
+ for (i=0; i<err; ++i)
+- printk("%02x ", buf[i]);
++ printk("%02x ", dev->st_buf[i]);
+ printk("\n");
+ }
+ #endif
+@@ -485,7 +486,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
+ * can do something with it).
+ */
+ if (err > 16 || count >= 100 || err < 0)
+- ds_dump_status(dev, buf, err);
++ ds_dump_status(dev, dev->st_buf, err);
+ 
+ /* Extended data isn't an error. Well, a short is, but the dump
+ * would have already told the user that and we can't do anything
+@@ -608,7 +609,6 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
+ {
+ int err;
+ struct ds_status st;
+- u8 rbyte;
+ 
+ err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte);
+ if (err)
+@@ -621,11 +621,11 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
+ if (err)
+ return err;
+ 
+- err = ds_recv_data(dev, &rbyte, sizeof(rbyte));
++ err = ds_recv_data(dev, &dev->byte_buf, 1);
+ if (err < 0)
+ return err;
+ 
+- return !(byte == rbyte);
++ return !(byte == dev->byte_buf);
+ }
+ 
+ static int ds_read_byte(struct ds_device *dev, u8 *byte)
+@@ -712,7 +712,6 @@ static void ds9490r_search(void *data, struct w1_master *master,
+ int err;
+ u16 value, index;
+ struct ds_status st;
+- u8 st_buf[ST_SIZE];
+ int search_limit;
+ int found = 0;
+ int i;
+@@ -724,7 +723,12 @@ static void ds9490r_search(void *data, struct w1_master *master,
+ /* FIFO 128 bytes, bulk packet size 64, read a multiple of the
+ * packet size.
+ */ +- u64 buf[2*64/8]; ++ const size_t bufsize = 2 * 64; ++ u64 *buf; ++ ++ buf = kmalloc(bufsize, GFP_KERNEL); ++ if (!buf) ++ return; + + mutex_lock(&master->bus_mutex); + +@@ -745,10 +749,9 @@ static void ds9490r_search(void *data, struct w1_master *master, + do { + schedule_timeout(jtime); + +- if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) < +- sizeof(st)) { ++ err = ds_recv_status(dev, &st, false); ++ if (err < 0 || err < sizeof(st)) + break; +- } + + if (st.data_in_buffer_status) { + /* Bulk in can receive partial ids, but when it does +@@ -758,7 +761,7 @@ static void ds9490r_search(void *data, struct w1_master *master, + * bulk without first checking if status says there + * is data to read. + */ +- err = ds_recv_data(dev, (u8 *)buf, sizeof(buf)); ++ err = ds_recv_data(dev, (u8 *)buf, bufsize); + if (err < 0) + break; + for (i = 0; i < err/8; ++i) { +@@ -794,9 +797,14 @@ static void ds9490r_search(void *data, struct w1_master *master, + } + search_out: + mutex_unlock(&master->bus_mutex); ++ kfree(buf); + } + + #if 0 ++/* ++ * FIXME: if this disabled code is ever used in the future all ds_send_data() ++ * calls must be changed to use a DMAable buffer. ++ */ + static int ds_match_access(struct ds_device *dev, u64 init) + { + int err; +@@ -845,13 +853,12 @@ static int ds_set_path(struct ds_device *dev, u64 init) + + static u8 ds9490r_touch_bit(void *data, u8 bit) + { +- u8 ret; + struct ds_device *dev = data; + +- if (ds_touch_bit(dev, bit, &ret)) ++ if (ds_touch_bit(dev, bit, &dev->byte_buf)) + return 0; + +- return ret; ++ return dev->byte_buf; + } + + #if 0 +@@ -866,13 +873,12 @@ static u8 ds9490r_read_bit(void *data) + { + struct ds_device *dev = data; + int err; +- u8 bit = 0; + +- err = ds_touch_bit(dev, 1, &bit); ++ err = ds_touch_bit(dev, 1, &dev->byte_buf); + if (err) + return 0; + +- return bit & 1; ++ return dev->byte_buf & 1; + } + #endif + +@@ -887,32 +893,52 @@ static u8 ds9490r_read_byte(void *data) + { + struct ds_device *dev = data; + int err; +- u8 byte = 0; + +- err = ds_read_byte(dev, &byte); ++ err = ds_read_byte(dev, &dev->byte_buf); + if (err) + return 0; + +- return byte; ++ return dev->byte_buf; + } + + static void ds9490r_write_block(void *data, const u8 *buf, int len) + { + struct ds_device *dev = data; ++ u8 *tbuf; ++ ++ if (len <= 0) ++ return; ++ ++ tbuf = kmalloc(len, GFP_KERNEL); ++ if (!tbuf) ++ return; + +- ds_write_block(dev, (u8 *)buf, len); ++ memcpy(tbuf, buf, len); ++ ds_write_block(dev, tbuf, len); ++ ++ kfree(tbuf); + } + + static u8 ds9490r_read_block(void *data, u8 *buf, int len) + { + struct ds_device *dev = data; + int err; ++ u8 *tbuf; + +- err = ds_read_block(dev, buf, len); +- if (err < 0) ++ if (len <= 0) ++ return 0; ++ ++ tbuf = kmalloc(len, GFP_KERNEL); ++ if (!tbuf) + return 0; + +- return len; ++ err = ds_read_block(dev, tbuf, len); ++ if (err >= 0) ++ memcpy(buf, tbuf, len); ++ ++ kfree(tbuf); ++ ++ return err >= 0 ? 
len : 0; + } + + static u8 ds9490r_reset(void *data) +diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c +index c9a7ff67d395..39886edfa222 100644 +--- a/drivers/w1/w1.c ++++ b/drivers/w1/w1.c +@@ -763,6 +763,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) + dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__, + sl->name); + w1_family_put(sl->family); ++ atomic_dec(&sl->master->refcnt); + kfree(sl); + return err; + } +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 9da42ace762a..8a456f9b8a44 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -5362,7 +5362,8 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, + ext4_lblk_t stop, *iterator, ex_start, ex_end; + + /* Let path point to the last extent */ +- path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); ++ path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, ++ EXT4_EX_NOCACHE); + if (IS_ERR(path)) + return PTR_ERR(path); + +@@ -5371,15 +5372,15 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, + if (!extent) + goto out; + +- stop = le32_to_cpu(extent->ee_block) + +- ext4_ext_get_actual_len(extent); ++ stop = le32_to_cpu(extent->ee_block); + + /* + * In case of left shift, Don't start shifting extents until we make + * sure the hole is big enough to accommodate the shift. + */ + if (SHIFT == SHIFT_LEFT) { +- path = ext4_find_extent(inode, start - 1, &path, 0); ++ path = ext4_find_extent(inode, start - 1, &path, ++ EXT4_EX_NOCACHE); + if (IS_ERR(path)) + return PTR_ERR(path); + depth = path->p_depth; +@@ -5411,9 +5412,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, + else + iterator = &stop; + +- /* Its safe to start updating extents */ +- while (start < stop) { +- path = ext4_find_extent(inode, *iterator, &path, 0); ++ /* ++ * Its safe to start updating extents. Start and stop are unsigned, so ++ * in case of right shift if extent with 0 block is reached, iterator ++ * becomes NULL to indicate the end of the loop. ++ */ ++ while (iterator && start <= stop) { ++ path = ext4_find_extent(inode, *iterator, &path, ++ EXT4_EX_NOCACHE); + if (IS_ERR(path)) + return PTR_ERR(path); + depth = path->p_depth; +@@ -5440,8 +5446,11 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, + ext4_ext_get_actual_len(extent); + } else { + extent = EXT_FIRST_EXTENT(path[depth].p_hdr); +- *iterator = le32_to_cpu(extent->ee_block) > 0 ? 
+- le32_to_cpu(extent->ee_block) - 1 : 0; ++ if (le32_to_cpu(extent->ee_block) > 0) ++ *iterator = le32_to_cpu(extent->ee_block) - 1; ++ else ++ /* Beginning is reached, end of the loop */ ++ iterator = NULL; + /* Update path extent in case we need to stop */ + while (le32_to_cpu(extent->ee_block) < start) + extent++; +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index 8968a93e2150..d4be4e23bc21 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -933,8 +933,15 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, + struct page *page) + { + int i_size_changed = 0; ++ int ret; + +- copied = ext4_write_inline_data_end(inode, pos, len, copied, page); ++ ret = ext4_write_inline_data_end(inode, pos, len, copied, page); ++ if (ret < 0) { ++ unlock_page(page); ++ put_page(page); ++ return ret; ++ } ++ copied = ret; + + /* + * No need to use i_size_read() here, the i_size +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 10690e5ba2eb..e0f862146793 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -1165,8 +1165,11 @@ static int ext4_write_end(struct file *file, + if (ext4_has_inline_data(inode)) { + ret = ext4_write_inline_data_end(inode, pos, len, + copied, page); +- if (ret < 0) ++ if (ret < 0) { ++ unlock_page(page); ++ put_page(page); + goto errout; ++ } + copied = ret; + } else + copied = block_write_end(file, mapping, pos, +@@ -1220,7 +1223,9 @@ errout: + * set the buffer to be dirty, since in data=journalled mode we need + * to call ext4_handle_dirty_metadata() instead. + */ +-static void zero_new_buffers(struct page *page, unsigned from, unsigned to) ++static void ext4_journalled_zero_new_buffers(handle_t *handle, ++ struct page *page, ++ unsigned from, unsigned to) + { + unsigned int block_start = 0, block_end; + struct buffer_head *head, *bh; +@@ -1237,7 +1242,7 @@ static void zero_new_buffers(struct page *page, unsigned from, unsigned to) + size = min(to, block_end) - start; + + zero_user(page, start, size); +- set_buffer_uptodate(bh); ++ write_end_fn(handle, bh); + } + clear_buffer_new(bh); + } +@@ -1266,18 +1271,25 @@ static int ext4_journalled_write_end(struct file *file, + + BUG_ON(!ext4_handle_valid(handle)); + +- if (ext4_has_inline_data(inode)) +- copied = ext4_write_inline_data_end(inode, pos, len, +- copied, page); +- else { +- if (copied < len) { +- if (!PageUptodate(page)) +- copied = 0; +- zero_new_buffers(page, from+copied, to); ++ if (ext4_has_inline_data(inode)) { ++ ret = ext4_write_inline_data_end(inode, pos, len, ++ copied, page); ++ if (ret < 0) { ++ unlock_page(page); ++ put_page(page); ++ goto errout; + } +- ++ copied = ret; ++ } else if (unlikely(copied < len) && !PageUptodate(page)) { ++ copied = 0; ++ ext4_journalled_zero_new_buffers(handle, page, from, to); ++ } else { ++ if (unlikely(copied < len)) ++ ext4_journalled_zero_new_buffers(handle, page, ++ from + copied, to); + ret = ext4_walk_page_buffers(handle, page_buffers(page), from, +- to, &partial, write_end_fn); ++ from + copied, &partial, ++ write_end_fn); + if (!partial) + SetPageUptodate(page); + } +@@ -1303,6 +1315,7 @@ static int ext4_journalled_write_end(struct file *file, + */ + ext4_orphan_add(handle, inode); + ++errout: + ret2 = ext4_journal_stop(handle); + if (!ret) + ret = ret2; +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index b7a3957a9dca..84cd77663e1f 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -3120,6 +3120,13 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, + if (ar->pright && start + size - 1 >= ar->lright) + 
size -= start + size - ar->lright; + ++ /* ++ * Trim allocation request for filesystems with artificially small ++ * groups. ++ */ ++ if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) ++ size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); ++ + end = start + size; + + /* check we don't cross already preallocated blocks */ +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index b405a7b74ce0..6fe8e30eeb99 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -793,6 +793,7 @@ static void ext4_put_super(struct super_block *sb) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_super_block *es = sbi->s_es; ++ int aborted = 0; + int i, err; + + ext4_unregister_li_request(sb); +@@ -802,9 +803,10 @@ static void ext4_put_super(struct super_block *sb) + destroy_workqueue(sbi->rsv_conversion_wq); + + if (sbi->s_journal) { ++ aborted = is_journal_aborted(sbi->s_journal); + err = jbd2_journal_destroy(sbi->s_journal); + sbi->s_journal = NULL; +- if (err < 0) ++ if ((err < 0) && !aborted) + ext4_abort(sb, "Couldn't clean up the journal"); + } + +@@ -816,7 +818,7 @@ static void ext4_put_super(struct super_block *sb) + ext4_ext_release(sb); + ext4_xattr_put_super(sb); + +- if (!(sb->s_flags & MS_RDONLY)) { ++ if (!(sb->s_flags & MS_RDONLY) && !aborted) { + ext4_clear_feature_journal_needs_recovery(sb); + es->s_state = cpu_to_le16(sbi->s_mount_state); + } +@@ -3746,7 +3748,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + * root first: it may be modified in the journal! + */ + if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { +- if (ext4_load_journal(sb, es, journal_devnum)) ++ err = ext4_load_journal(sb, es, journal_devnum); ++ if (err) + goto failed_mount3a; + } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && + ext4_has_feature_journal_needs_recovery(sb)) { +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 8821c380a71a..11538a8be9f0 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -100,6 +100,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) + iput(req->misc.release.inode); + fuse_put_request(ff->fc, req); + } else if (sync) { ++ __set_bit(FR_FORCE, &req->flags); + __clear_bit(FR_BACKGROUND, &req->flags); + fuse_request_send(ff->fc, req); + iput(req->misc.release.inode); +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index 32e74710b1aa..9cd8c92b953d 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -651,9 +651,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, + struct kmem_cache *cachep; + int ret, tries = 0; + ++ rcu_read_lock(); + gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms); + if (gl && !lockref_get_not_dead(&gl->gl_lockref)) + gl = NULL; ++ rcu_read_unlock(); + + *glp = gl; + if (gl) +@@ -721,15 +723,18 @@ again: + + if (ret == -EEXIST) { + ret = 0; ++ rcu_read_lock(); + tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms); + if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) { + if (++tries < 100) { ++ rcu_read_unlock(); + cond_resched(); + goto again; + } + tmp = NULL; + ret = -ENOMEM; + } ++ rcu_read_unlock(); + } else { + WARN_ON_ONCE(ret); + } +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index fa1b8e0dcacf..a2e724053919 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -1876,7 +1876,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) + + __blist_del_buffer(list, jh); + jh->b_jlist = BJ_None; +- if (test_clear_buffer_jbddirty(bh)) ++ if (transaction && is_journal_aborted(transaction->t_journal)) ++ 
clear_buffer_jbddirty(bh); ++ else if (test_clear_buffer_jbddirty(bh)) + mark_buffer_dirty(bh); /* Expose it to the VM */ + } + +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 9a524e763c3e..4e3679b25b9b 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -2452,6 +2452,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + ret = PTR_ERR(state); + if (IS_ERR(state)) + goto out; ++ ctx->state = state; + if (server->caps & NFS_CAP_POSIX_LOCK) + set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); + +@@ -2474,7 +2475,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + if (ret != 0) + goto out; + +- ctx->state = state; + if (d_inode(dentry) == state->inode) { + nfs_inode_attach_open_context(ctx); + if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) +@@ -4711,7 +4711,7 @@ out: + */ + static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) + { +- struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; ++ struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, }; + struct nfs_getaclargs args = { + .fh = NFS_FH(inode), + .acl_pages = pages, +@@ -4725,13 +4725,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu + .rpc_argp = &args, + .rpc_resp = &res, + }; +- unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); ++ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; + int ret = -ENOMEM, i; + +- /* As long as we're doing a round trip to the server anyway, +- * let's be prepared for a page of acl data. */ +- if (npages == 0) +- npages = 1; + if (npages > ARRAY_SIZE(pages)) + return -ERANGE; + +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c +index 4e4441216804..1cb50bb898b0 100644 +--- a/fs/nfs/nfs4xdr.c ++++ b/fs/nfs/nfs4xdr.c +@@ -2487,7 +2487,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); +- replen = hdr.replen + op_decode_hdr_maxsz + 1; ++ replen = hdr.replen + op_decode_hdr_maxsz; + encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); + + xdr_inline_pages(&req->rq_rcv_buf, replen << 2, +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index 994d66fbb446..91e0c5429b4d 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -369,7 +369,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, + __be32 err; + int host_err; + bool get_write_count; +- int size_change = 0; ++ bool size_change = (iap->ia_valid & ATTR_SIZE); + + if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE)) + accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE; +@@ -382,11 +382,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, + /* Get inode */ + err = fh_verify(rqstp, fhp, ftype, accmode); + if (err) +- goto out; ++ return err; + if (get_write_count) { + host_err = fh_want_write(fhp); + if (host_err) +- return nfserrno(host_err); ++ goto out; + } + + dentry = fhp->fh_dentry; +@@ -397,20 +397,28 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, + iap->ia_valid &= ~ATTR_MODE; + + if (!iap->ia_valid) +- goto out; ++ return 0; + + nfsd_sanitize_attrs(inode, iap); + ++ if (check_guard && guardtime != inode->i_ctime.tv_sec) ++ return nfserr_notsync; ++ + /* + * The size case is special, it changes the file in addition to the +- * attributes. ++ * attributes, and file systems don't expect it to be mixed with ++ * "random" attribute changes. 
We thus split out the size change ++ * into a separate call to ->setattr, and do the rest as a separate ++ * setattr call. + */ +- if (iap->ia_valid & ATTR_SIZE) { ++ if (size_change) { + err = nfsd_get_write_access(rqstp, fhp, iap); + if (err) +- goto out; +- size_change = 1; ++ return err; ++ } + ++ fh_lock(fhp); ++ if (size_change) { + /* + * RFC5661, Section 18.30.4: + * Changing the size of a file with SETATTR indirectly +@@ -418,29 +426,36 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, + * + * (and similar for the older RFCs) + */ +- if (iap->ia_size != i_size_read(inode)) +- iap->ia_valid |= ATTR_MTIME; +- } ++ struct iattr size_attr = { ++ .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME, ++ .ia_size = iap->ia_size, ++ }; + +- iap->ia_valid |= ATTR_CTIME; ++ host_err = notify_change(dentry, &size_attr, NULL); ++ if (host_err) ++ goto out_unlock; ++ iap->ia_valid &= ~ATTR_SIZE; + +- if (check_guard && guardtime != inode->i_ctime.tv_sec) { +- err = nfserr_notsync; +- goto out_put_write_access; ++ /* ++ * Avoid the additional setattr call below if the only other ++ * attribute that the client sends is the mtime, as we update ++ * it as part of the size change above. ++ */ ++ if ((iap->ia_valid & ~ATTR_MTIME) == 0) ++ goto out_unlock; + } + +- fh_lock(fhp); ++ iap->ia_valid |= ATTR_CTIME; + host_err = notify_change(dentry, iap, NULL); +- fh_unlock(fhp); +- err = nfserrno(host_err); + +-out_put_write_access: ++out_unlock: ++ fh_unlock(fhp); + if (size_change) + put_write_access(inode); +- if (!err) +- err = nfserrno(commit_metadata(fhp)); + out: +- return err; ++ if (!host_err) ++ host_err = commit_metadata(fhp); ++ return nfserrno(host_err); + } + + #if defined(CONFIG_NFSD_V4) +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index d49e26c6cdc7..23e129ef6726 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -153,8 +153,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) + #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) + #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) + #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) +-#define DMA_TLB_IIRG(type) ((type >> 60) & 7) +-#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) ++#define DMA_TLB_IIRG(type) ((type >> 60) & 3) ++#define DMA_TLB_IAIG(val) (((val) >> 57) & 3) + #define DMA_TLB_READ_DRAIN (((u64)1) << 49) + #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) + #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) +@@ -164,9 +164,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) + + /* INVALID_DESC */ + #define DMA_CCMD_INVL_GRANU_OFFSET 61 +-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) +-#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) +-#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) ++#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4) ++#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4) ++#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4) + #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) + #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) + #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) +@@ -316,8 +316,8 @@ enum { + #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) + #define QI_DEV_EIOTLB_GLOB(g) ((u64)g) + #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) +-#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) ++#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) ++#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) + #define QI_DEV_EIOTLB_MAX_INVS 32 + + #define QI_PGRP_IDX(idx) 
(((u64)(idx)) << 55) +diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h +index 301969552d0a..b43e64d69734 100644 +--- a/include/rdma/ib_sa.h ++++ b/include/rdma/ib_sa.h +@@ -138,12 +138,12 @@ struct ib_sa_path_rec { + union ib_gid sgid; + __be16 dlid; + __be16 slid; +- int raw_traffic; ++ u8 raw_traffic; + /* reserved */ + __be32 flow_label; + u8 hop_limit; + u8 traffic_class; +- int reversible; ++ u8 reversible; + u8 numb_path; + __be16 pkey; + __be16 qos_class; +@@ -204,7 +204,7 @@ struct ib_sa_mcmember_rec { + u8 hop_limit; + u8 scope; + u8 join_state; +- int proxy_join; ++ u8 proxy_join; + }; + + /* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */ +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h +index 4f6ba34cdee6..293b9a7f53bc 100644 +--- a/include/scsi/scsi_device.h ++++ b/include/scsi/scsi_device.h +@@ -307,6 +307,7 @@ extern void scsi_remove_device(struct scsi_device *); + extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); + void scsi_attach_vpd(struct scsi_device *sdev); + ++extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); + extern int scsi_device_get(struct scsi_device *); + extern void scsi_device_put(struct scsi_device *); + extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, +diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h +index dc10c52e0e91..393362bdb860 100644 +--- a/include/soc/at91/at91sam9_ddrsdr.h ++++ b/include/soc/at91/at91sam9_ddrsdr.h +@@ -81,6 +81,7 @@ + #define AT91_DDRSDRC_LPCB_POWER_DOWN 2 + #define AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN 3 + #define AT91_DDRSDRC_CLKFR (1 << 2) /* Clock Frozen */ ++#define AT91_DDRSDRC_LPDDR2_PWOFF (1 << 3) /* LPDDR Power Off */ + #define AT91_DDRSDRC_PASR (7 << 4) /* Partial Array Self Refresh */ + #define AT91_DDRSDRC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */ + #define AT91_DDRSDRC_DS (3 << 10) /* Drive Strength */ +@@ -96,7 +97,9 @@ + #define AT91_DDRSDRC_MD_SDR 0 + #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 + #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 ++#define AT91_DDRSDRC_MD_LPDDR3 5 + #define AT91_DDRSDRC_MD_DDR2 6 /* [SAM9 Only] */ ++#define AT91_DDRSDRC_MD_LPDDR2 7 + #define AT91_DDRSDRC_DBW (1 << 4) /* Data Bus Width */ + #define AT91_DDRSDRC_DBW_32BITS (0 << 4) + #define AT91_DDRSDRC_DBW_16BITS (1 << 4) +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 6afc6f388edf..800fe16cc36f 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -544,6 +544,7 @@ struct se_node_acl { + /* Used to signal demo mode created ACL, disabled by default */ + bool dynamic_node_acl; + bool acl_stop:1; ++ bool dynamic_stop; + u32 queue_depth; + u32 acl_index; + enum target_prot_type saved_prot_type; +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h +index ce9ea736f1d7..97069ecabe49 100644 +--- a/include/target/target_core_fabric.h ++++ b/include/target/target_core_fabric.h +@@ -168,6 +168,8 @@ void core_allocate_nexus_loss_ua(struct se_node_acl *acl); + + struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, + unsigned char *); ++bool target_tpg_has_node_acl(struct se_portal_group *tpg, ++ const char *); + struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, + unsigned char *); + int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, +diff --git a/ipc/shm.c b/ipc/shm.c +index 3174634ca4e5..4982a4e7f009 100644 +--- 
a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -1083,8 +1083,8 @@ out_unlock1:
+ * "raddr" thing points to kernel space, and there has to be a wrapper around
+ * this.
+ */
+-long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+- unsigned long shmlba)
++long do_shmat(int shmid, char __user *shmaddr, int shmflg,
++ ulong *raddr, unsigned long shmlba)
+ {
+ struct shmid_kernel *shp;
+ unsigned long addr;
+@@ -1105,8 +1105,13 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+ goto out;
+ else if ((addr = (ulong)shmaddr)) {
+ if (addr & (shmlba - 1)) {
+- if (shmflg & SHM_RND)
+- addr &= ~(shmlba - 1); /* round down */
++ /*
++ * Round down to the nearest multiple of shmlba.
++ * For sane do_mmap_pgoff() parameters, avoid
++ * round downs that trigger nil-page and MAP_FIXED.
++ */
++ if ((shmflg & SHM_RND) && addr >= shmlba)
++ addr &= ~(shmlba - 1);
+ else
+ #ifndef __ARCH_FORCE_SHMLBA
+ if (addr & ~PAGE_MASK)
+diff --git a/kernel/membarrier.c b/kernel/membarrier.c
+index 536c727a56e9..9f9284f37f8d 100644
+--- a/kernel/membarrier.c
++++ b/kernel/membarrier.c
+@@ -16,6 +16,7 @@
+ 
+ #include <linux/syscalls.h>
+ #include <linux/membarrier.h>
++#include <linux/tick.h>
+ 
+ /*
+ * Bitmask made from a "or" of all commands within enum membarrier_cmd,
+@@ -51,6 +52,9 @@
+ */
+ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
+ {
++ /* MEMBARRIER_CMD_SHARED is not compatible with nohz_full. */
++ if (tick_nohz_full_enabled())
++ return -ENOSYS;
+ if (unlikely(flags))
+ return -EINVAL;
+ switch (cmd) {
+diff --git a/mm/filemap.c b/mm/filemap.c
+index c33c31d75a2b..69f75c77c098 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -865,9 +865,12 @@ void page_endio(struct page *page, int rw, int err)
+ unlock_page(page);
+ } else { /* rw == WRITE */
+ if (err) {
++ struct address_space *mapping;
++
+ SetPageError(page);
+- if (page->mapping)
+- mapping_set_error(page->mapping, err);
++ mapping = page_mapping(page);
++ if (mapping)
++ mapping_set_error(mapping, err);
+ }
+ end_page_writeback(page);
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6a117213feb8..6f9005dcca2e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2467,7 +2467,7 @@ static bool zone_local(struct zone *local_zone, struct zone *zone)
+ 
+ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+ {
+- return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
++ return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
+ RECLAIM_DISTANCE;
+ }
+ #else /* CONFIG_NUMA */
+diff --git a/mm/vmpressure.c b/mm/vmpressure.c
+index c5afd573d7da..3fb15c25af87 100644
+--- a/mm/vmpressure.c
++++ b/mm/vmpressure.c
+@@ -112,9 +112,16 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
+ unsigned long reclaimed)
+ {
+ unsigned long scale = scanned + reclaimed;
+- unsigned long pressure;
++ unsigned long pressure = 0;
+ 
+ /*
++ * reclaimed can be greater than scanned in cases
++ * like THP, where the scanned is 1 and reclaimed
++ * could be 512
++ */
++ if (reclaimed >= scanned)
++ goto out;
++ /*
+ * We calculate the ratio (in percents) of how many pages were
+ * scanned vs. reclaimed in a given time frame (window). Note that
+ * time is in VM reclaimer's "ticks", i.e.
number of pages +@@ -124,6 +131,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, + pressure = scale - (reclaimed * scale / scanned); + pressure = pressure * 100 / scale; + ++out: + pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure, + scanned, reclaimed); + +diff --git a/samples/mic/mpssd/.gitignore b/samples/mic/mpssd/.gitignore +new file mode 100644 +index 000000000000..8b7c72f07c92 +--- /dev/null ++++ b/samples/mic/mpssd/.gitignore +@@ -0,0 +1 @@ ++mpssd +diff --git a/samples/mic/mpssd/Makefile b/samples/mic/mpssd/Makefile +new file mode 100644 +index 000000000000..3e3ef91fed6b +--- /dev/null ++++ b/samples/mic/mpssd/Makefile +@@ -0,0 +1,27 @@ ++ifndef CROSS_COMPILE ++uname_M := $(shell uname -m 2>/dev/null || echo not) ++ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) ++ ++ifeq ($(ARCH),x86) ++ ++PROGS := mpssd ++CC = $(CROSS_COMPILE)gcc ++CFLAGS := -I../../../usr/include -I../../../tools/include ++ ++ifdef DEBUG ++CFLAGS += -DDEBUG=$(DEBUG) ++endif ++ ++all: $(PROGS) ++mpssd: mpssd.c sysfs.c ++ $(CC) $(CFLAGS) mpssd.c sysfs.c -o mpssd -lpthread ++ ++install: ++ install mpssd /usr/sbin/mpssd ++ install micctrl /usr/sbin/micctrl ++ ++clean: ++ rm -fr $(PROGS) ++ ++endif ++endif +diff --git a/samples/mic/mpssd/micctrl b/samples/mic/mpssd/micctrl +new file mode 100644 +index 000000000000..8f2629b41c5f +--- /dev/null ++++ b/samples/mic/mpssd/micctrl +@@ -0,0 +1,173 @@ ++#!/bin/bash ++# Intel MIC Platform Software Stack (MPSS) ++# ++# Copyright(c) 2013 Intel Corporation. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License, version 2, as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# General Public License for more details. ++# ++# The full GNU General Public License is included in this distribution in ++# the file called "COPYING". ++# ++# Intel MIC User Space Tools. ++# ++# micctrl - Controls MIC boot/start/stop. ++# ++# chkconfig: 2345 95 05 ++# description: start MPSS stack processing. ++# ++### BEGIN INIT INFO ++# Provides: micctrl ++### END INIT INFO ++ ++# Source function library. ++. /etc/init.d/functions ++ ++sysfs="/sys/class/mic" ++ ++_status() ++{ ++ f=$sysfs/$1 ++ echo -e $1 state: "`cat $f/state`" shutdown_status: "`cat $f/shutdown_status`" ++} ++ ++status() ++{ ++ if [ "`echo $1 | head -c3`" == "mic" ]; then ++ _status $1 ++ return $? ++ fi ++ for f in $sysfs/* ++ do ++ _status `basename $f` ++ RETVAL=$? ++ [ $RETVAL -ne 0 ] && return $RETVAL ++ done ++ return 0 ++} ++ ++_reset() ++{ ++ f=$sysfs/$1 ++ echo reset > $f/state ++} ++ ++reset() ++{ ++ if [ "`echo $1 | head -c3`" == "mic" ]; then ++ _reset $1 ++ return $? ++ fi ++ for f in $sysfs/* ++ do ++ _reset `basename $f` ++ RETVAL=$? ++ [ $RETVAL -ne 0 ] && return $RETVAL ++ done ++ return 0 ++} ++ ++_boot() ++{ ++ f=$sysfs/$1 ++ echo "linux" > $f/bootmode ++ echo "mic/uos.img" > $f/firmware ++ echo "mic/$1.image" > $f/ramdisk ++ echo "boot" > $f/state ++} ++ ++boot() ++{ ++ if [ "`echo $1 | head -c3`" == "mic" ]; then ++ _boot $1 ++ return $? ++ fi ++ for f in $sysfs/* ++ do ++ _boot `basename $f` ++ RETVAL=$? 
++ [ $RETVAL -ne 0 ] && return $RETVAL ++ done ++ return 0 ++} ++ ++_shutdown() ++{ ++ f=$sysfs/$1 ++ echo shutdown > $f/state ++} ++ ++shutdown() ++{ ++ if [ "`echo $1 | head -c3`" == "mic" ]; then ++ _shutdown $1 ++ return $? ++ fi ++ for f in $sysfs/* ++ do ++ _shutdown `basename $f` ++ RETVAL=$? ++ [ $RETVAL -ne 0 ] && return $RETVAL ++ done ++ return 0 ++} ++ ++_wait() ++{ ++ f=$sysfs/$1 ++ while [ "`cat $f/state`" != "offline" -a "`cat $f/state`" != "online" ] ++ do ++ sleep 1 ++ echo -e "Waiting for $1 to go offline" ++ done ++} ++ ++wait() ++{ ++ if [ "`echo $1 | head -c3`" == "mic" ]; then ++ _wait $1 ++ return $? ++ fi ++ # Wait for the cards to go offline ++ for f in $sysfs/* ++ do ++ _wait `basename $f` ++ RETVAL=$? ++ [ $RETVAL -ne 0 ] && return $RETVAL ++ done ++ return 0 ++} ++ ++if [ ! -d "$sysfs" ]; then ++ echo -e $"Module unloaded " ++ exit 3 ++fi ++ ++case $1 in ++ -s) ++ status $2 ++ ;; ++ -r) ++ reset $2 ++ ;; ++ -b) ++ boot $2 ++ ;; ++ -S) ++ shutdown $2 ++ ;; ++ -w) ++ wait $2 ++ ;; ++ *) ++ echo $"Usage: $0 {-s (status) |-r (reset) |-b (boot) |-S (shutdown) |-w (wait)}" ++ exit 2 ++esac ++ ++exit $? +diff --git a/samples/mic/mpssd/mpss b/samples/mic/mpssd/mpss +new file mode 100644 +index 000000000000..09ea90931649 +--- /dev/null ++++ b/samples/mic/mpssd/mpss +@@ -0,0 +1,200 @@ ++#!/bin/bash ++# Intel MIC Platform Software Stack (MPSS) ++# ++# Copyright(c) 2013 Intel Corporation. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License, version 2, as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# General Public License for more details. ++# ++# The full GNU General Public License is included in this distribution in ++# the file called "COPYING". ++# ++# Intel MIC User Space Tools. ++# ++# mpss Start mpssd. ++# ++# chkconfig: 2345 95 05 ++# description: start MPSS stack processing. ++# ++### BEGIN INIT INFO ++# Provides: mpss ++# Required-Start: ++# Required-Stop: ++# Short-Description: MPSS stack control ++# Description: MPSS stack control ++### END INIT INFO ++ ++# Source function library. ++. /etc/init.d/functions ++ ++exec=/usr/sbin/mpssd ++sysfs="/sys/class/mic" ++mic_modules="mic_host mic_x100_dma scif" ++ ++start() ++{ ++ [ -x $exec ] || exit 5 ++ ++ if [ "`ps -e | awk '{print $4}' | grep mpssd | head -1`" = "mpssd" ]; then ++ echo -e $"MPSSD already running! " ++ success ++ echo ++ return 0 ++ fi ++ ++ echo -e $"Starting MPSS Stack" ++ echo -e $"Loading MIC drivers:" $mic_modules ++ ++ modprobe -a $mic_modules ++ RETVAL=$? ++ if [ $RETVAL -ne 0 ]; then ++ failure ++ echo ++ return $RETVAL ++ fi ++ ++ # Start the daemon ++ echo -n $"Starting MPSSD " ++ $exec ++ RETVAL=$? ++ if [ $RETVAL -ne 0 ]; then ++ failure ++ echo ++ return $RETVAL ++ fi ++ success ++ echo ++ ++ sleep 5 ++ ++ # Boot the cards ++ micctrl -b ++ ++ # Wait till ping works ++ for f in $sysfs/* ++ do ++ count=100 ++ ipaddr=`cat $f/cmdline` ++ ipaddr=${ipaddr#*address,} ++ ipaddr=`echo $ipaddr | cut -d, -f1 | cut -d\; -f1` ++ while [ $count -ge 0 ] ++ do ++ echo -e "Pinging "`basename $f`" " ++ ping -c 1 $ipaddr &> /dev/null ++ RETVAL=$? 
++ if [ $RETVAL -eq 0 ]; then ++ success ++ break ++ fi ++ sleep 1 ++ count=`expr $count - 1` ++ done ++ [ $RETVAL -ne 0 ] && failure || success ++ echo ++ done ++ return $RETVAL ++} ++ ++stop() ++{ ++ echo -e $"Shutting down MPSS Stack: " ++ ++ # Bail out if module is unloaded ++ if [ ! -d "$sysfs" ]; then ++ echo -n $"Module unloaded " ++ success ++ echo ++ return 0 ++ fi ++ ++ # Shut down the cards. ++ micctrl -S ++ ++ # Wait for the cards to go offline ++ for f in $sysfs/* ++ do ++ while [ "`cat $f/state`" != "ready" ] ++ do ++ sleep 1 ++ echo -e "Waiting for "`basename $f`" to become ready" ++ done ++ done ++ ++ # Display the status of the cards ++ micctrl -s ++ ++ # Kill MPSSD now ++ echo -n $"Killing MPSSD" ++ killall -9 mpssd 2>/dev/null ++ RETVAL=$? ++ [ $RETVAL -ne 0 ] && failure || success ++ echo ++ return $RETVAL ++} ++ ++restart() ++{ ++ stop ++ sleep 5 ++ start ++} ++ ++status() ++{ ++ micctrl -s ++ if [ "`ps -e | awk '{print $4}' | grep mpssd | head -n 1`" = "mpssd" ]; then ++ echo "mpssd is running" ++ else ++ echo "mpssd is stopped" ++ fi ++ return 0 ++} ++ ++unload() ++{ ++ if [ ! -d "$sysfs" ]; then ++ echo -n $"No MIC_HOST Module: " ++ success ++ echo ++ return ++ fi ++ ++ stop ++ ++ sleep 5 ++ echo -n $"Removing MIC drivers:" $mic_modules ++ modprobe -r $mic_modules ++ RETVAL=$? ++ [ $RETVAL -ne 0 ] && failure || success ++ echo ++ return $RETVAL ++} ++ ++case $1 in ++ start) ++ start ++ ;; ++ stop) ++ stop ++ ;; ++ restart) ++ restart ++ ;; ++ status) ++ status ++ ;; ++ unload) ++ unload ++ ;; ++ *) ++ echo $"Usage: $0 {start|stop|restart|status|unload}" ++ exit 2 ++esac ++ ++exit $? +diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c +new file mode 100644 +index 000000000000..c99a75968c01 +--- /dev/null ++++ b/samples/mic/mpssd/mpssd.c +@@ -0,0 +1,1826 @@ ++/* ++ * Intel MIC Platform Software Stack (MPSS) ++ * ++ * Copyright(c) 2013 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * The full GNU General Public License is included in this distribution in ++ * the file called "COPYING". ++ * ++ * Intel MIC User Space Tools. ++ */ ++ ++#define _GNU_SOURCE ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "mpssd.h" ++#include ++#include ++#include ++ ++static void *init_mic(void *arg); ++ ++static FILE *logfp; ++static struct mic_info mic_list; ++ ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++ ++#define min_t(type, x, y) ({ \ ++ type __min1 = (x); \ ++ type __min2 = (y); \ ++ __min1 < __min2 ? 
__min1 : __min2; }) ++ ++/* align addr on a size boundary - adjust address up/down if needed */ ++#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) ++#define _ALIGN_UP(addr, size) _ALIGN_DOWN(addr + size - 1, size) ++ ++/* align addr on a size boundary - adjust address up if needed */ ++#define _ALIGN(addr, size) _ALIGN_UP(addr, size) ++ ++/* to align the pointer to the (next) page boundary */ ++#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE) ++ ++#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) ++ ++#define GSO_ENABLED 1 ++#define MAX_GSO_SIZE (64 * 1024) ++#define ETH_H_LEN 14 ++#define MAX_NET_PKT_SIZE (_ALIGN_UP(MAX_GSO_SIZE + ETH_H_LEN, 64)) ++#define MIC_DEVICE_PAGE_END 0x1000 ++ ++#ifndef VIRTIO_NET_HDR_F_DATA_VALID ++#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */ ++#endif ++ ++static struct { ++ struct mic_device_desc dd; ++ struct mic_vqconfig vqconfig[2]; ++ __u32 host_features, guest_acknowledgements; ++ struct virtio_console_config cons_config; ++} virtcons_dev_page = { ++ .dd = { ++ .type = VIRTIO_ID_CONSOLE, ++ .num_vq = ARRAY_SIZE(virtcons_dev_page.vqconfig), ++ .feature_len = sizeof(virtcons_dev_page.host_features), ++ .config_len = sizeof(virtcons_dev_page.cons_config), ++ }, ++ .vqconfig[0] = { ++ .num = htole16(MIC_VRING_ENTRIES), ++ }, ++ .vqconfig[1] = { ++ .num = htole16(MIC_VRING_ENTRIES), ++ }, ++}; ++ ++static struct { ++ struct mic_device_desc dd; ++ struct mic_vqconfig vqconfig[2]; ++ __u32 host_features, guest_acknowledgements; ++ struct virtio_net_config net_config; ++} virtnet_dev_page = { ++ .dd = { ++ .type = VIRTIO_ID_NET, ++ .num_vq = ARRAY_SIZE(virtnet_dev_page.vqconfig), ++ .feature_len = sizeof(virtnet_dev_page.host_features), ++ .config_len = sizeof(virtnet_dev_page.net_config), ++ }, ++ .vqconfig[0] = { ++ .num = htole16(MIC_VRING_ENTRIES), ++ }, ++ .vqconfig[1] = { ++ .num = htole16(MIC_VRING_ENTRIES), ++ }, ++#if GSO_ENABLED ++ .host_features = htole32( ++ 1 << VIRTIO_NET_F_CSUM | ++ 1 << VIRTIO_NET_F_GSO | ++ 1 << VIRTIO_NET_F_GUEST_TSO4 | ++ 1 << VIRTIO_NET_F_GUEST_TSO6 | ++ 1 << VIRTIO_NET_F_GUEST_ECN), ++#else ++ .host_features = 0, ++#endif ++}; ++ ++static const char *mic_config_dir = "/etc/mpss"; ++static const char *virtblk_backend = "VIRTBLK_BACKEND"; ++static struct { ++ struct mic_device_desc dd; ++ struct mic_vqconfig vqconfig[1]; ++ __u32 host_features, guest_acknowledgements; ++ struct virtio_blk_config blk_config; ++} virtblk_dev_page = { ++ .dd = { ++ .type = VIRTIO_ID_BLOCK, ++ .num_vq = ARRAY_SIZE(virtblk_dev_page.vqconfig), ++ .feature_len = sizeof(virtblk_dev_page.host_features), ++ .config_len = sizeof(virtblk_dev_page.blk_config), ++ }, ++ .vqconfig[0] = { ++ .num = htole16(MIC_VRING_ENTRIES), ++ }, ++ .host_features = ++ htole32(1<name, strerror(errno)); ++ return ret; ++ } ++ } ++ if (pid < 0) { ++ mpsslog("%s fork failed errno %s\n", ++ mic->name, strerror(errno)); ++ return ret; ++ } ++ ++ ret = waitpid(pid, NULL, 0); ++ if (ret < 0) { ++ mpsslog("%s waitpid failed errno %s\n", ++ mic->name, strerror(errno)); ++ return ret; ++ } ++ ++ snprintf(ipaddr, IFNAMSIZ, "172.31.%d.254/24", mic->id + 1); ++ ++ pid = fork(); ++ if (pid == 0) { ++ ifargv[0] = "ip"; ++ ifargv[1] = "addr"; ++ ifargv[2] = "add"; ++ ifargv[3] = ipaddr; ++ ifargv[4] = "dev"; ++ ifargv[5] = dev; ++ ifargv[6] = NULL; ++ mpsslog("Configuring %s ipaddr %s\n", dev, ipaddr); ++ ret = execvp("ip", ifargv); ++ if (ret < 0) { ++ mpsslog("%s execvp failed errno %s\n", ++ mic->name, strerror(errno)); ++ return ret; ++ } ++ } ++ if (pid < 0) { ++ 
mpsslog("%s fork failed errno %s\n", ++ mic->name, strerror(errno)); ++ return ret; ++ } ++ ++ ret = waitpid(pid, NULL, 0); ++ if (ret < 0) { ++ mpsslog("%s waitpid failed errno %s\n", ++ mic->name, strerror(errno)); ++ return ret; ++ } ++ mpsslog("MIC name %s %s %d DONE!\n", ++ mic->name, __func__, __LINE__); ++ return 0; ++} ++ ++static int tun_alloc(struct mic_info *mic, char *dev) ++{ ++ struct ifreq ifr; ++ int fd, err; ++#if GSO_ENABLED ++ unsigned offload; ++#endif ++ fd = open("/dev/net/tun", O_RDWR); ++ if (fd < 0) { ++ mpsslog("Could not open /dev/net/tun %s\n", strerror(errno)); ++ goto done; ++ } ++ ++ memset(&ifr, 0, sizeof(ifr)); ++ ++ ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR; ++ if (*dev) ++ strncpy(ifr.ifr_name, dev, IFNAMSIZ); ++ ++ err = ioctl(fd, TUNSETIFF, (void *)&ifr); ++ if (err < 0) { ++ mpsslog("%s %s %d TUNSETIFF failed %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ close(fd); ++ return err; ++ } ++#if GSO_ENABLED ++ offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN; ++ ++ err = ioctl(fd, TUNSETOFFLOAD, offload); ++ if (err < 0) { ++ mpsslog("%s %s %d TUNSETOFFLOAD failed %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ close(fd); ++ return err; ++ } ++#endif ++ strcpy(dev, ifr.ifr_name); ++ mpsslog("Created TAP %s\n", dev); ++done: ++ return fd; ++} ++ ++#define NET_FD_VIRTIO_NET 0 ++#define NET_FD_TUN 1 ++#define MAX_NET_FD 2 ++ ++static void set_dp(struct mic_info *mic, int type, void *dp) ++{ ++ switch (type) { ++ case VIRTIO_ID_CONSOLE: ++ mic->mic_console.console_dp = dp; ++ return; ++ case VIRTIO_ID_NET: ++ mic->mic_net.net_dp = dp; ++ return; ++ case VIRTIO_ID_BLOCK: ++ mic->mic_virtblk.block_dp = dp; ++ return; ++ } ++ mpsslog("%s %s %d not found\n", mic->name, __func__, type); ++ assert(0); ++} ++ ++static void *get_dp(struct mic_info *mic, int type) ++{ ++ switch (type) { ++ case VIRTIO_ID_CONSOLE: ++ return mic->mic_console.console_dp; ++ case VIRTIO_ID_NET: ++ return mic->mic_net.net_dp; ++ case VIRTIO_ID_BLOCK: ++ return mic->mic_virtblk.block_dp; ++ } ++ mpsslog("%s %s %d not found\n", mic->name, __func__, type); ++ assert(0); ++ return NULL; ++} ++ ++static struct mic_device_desc *get_device_desc(struct mic_info *mic, int type) ++{ ++ struct mic_device_desc *d; ++ int i; ++ void *dp = get_dp(mic, type); ++ ++ for (i = sizeof(struct mic_bootparam); i < PAGE_SIZE; ++ i += mic_total_desc_size(d)) { ++ d = dp + i; ++ ++ /* End of list */ ++ if (d->type == 0) ++ break; ++ ++ if (d->type == -1) ++ continue; ++ ++ mpsslog("%s %s d-> type %d d %p\n", ++ mic->name, __func__, d->type, d); ++ ++ if (d->type == (__u8)type) ++ return d; ++ } ++ mpsslog("%s %s %d not found\n", mic->name, __func__, type); ++ return NULL; ++} ++ ++/* See comments in vhost.c for explanation of next_desc() */ ++static unsigned next_desc(struct vring_desc *desc) ++{ ++ unsigned int next; ++ ++ if (!(le16toh(desc->flags) & VRING_DESC_F_NEXT)) ++ return -1U; ++ next = le16toh(desc->next); ++ return next; ++} ++ ++/* Sum up all the IOVEC length */ ++static ssize_t ++sum_iovec_len(struct mic_copy_desc *copy) ++{ ++ ssize_t sum = 0; ++ int i; ++ ++ for (i = 0; i < copy->iovcnt; i++) ++ sum += copy->iov[i].iov_len; ++ return sum; ++} ++ ++static inline void verify_out_len(struct mic_info *mic, ++ struct mic_copy_desc *copy) ++{ ++ if (copy->out_len != sum_iovec_len(copy)) { ++ mpsslog("%s %s %d BUG copy->out_len 0x%x len 0x%zx\n", ++ mic->name, __func__, __LINE__, ++ copy->out_len, sum_iovec_len(copy)); ++ assert(copy->out_len == 
sum_iovec_len(copy)); ++ } ++} ++ ++/* Display an iovec */ ++static void ++disp_iovec(struct mic_info *mic, struct mic_copy_desc *copy, ++ const char *s, int line) ++{ ++ int i; ++ ++ for (i = 0; i < copy->iovcnt; i++) ++ mpsslog("%s %s %d copy->iov[%d] addr %p len 0x%zx\n", ++ mic->name, s, line, i, ++ copy->iov[i].iov_base, copy->iov[i].iov_len); ++} ++ ++static inline __u16 read_avail_idx(struct mic_vring *vr) ++{ ++ return ACCESS_ONCE(vr->info->avail_idx); ++} ++ ++static inline void txrx_prepare(int type, bool tx, struct mic_vring *vr, ++ struct mic_copy_desc *copy, ssize_t len) ++{ ++ copy->vr_idx = tx ? 0 : 1; ++ copy->update_used = true; ++ if (type == VIRTIO_ID_NET) ++ copy->iov[1].iov_len = len - sizeof(struct virtio_net_hdr); ++ else ++ copy->iov[0].iov_len = len; ++} ++ ++/* Central API which triggers the copies */ ++static int ++mic_virtio_copy(struct mic_info *mic, int fd, ++ struct mic_vring *vr, struct mic_copy_desc *copy) ++{ ++ int ret; ++ ++ ret = ioctl(fd, MIC_VIRTIO_COPY_DESC, copy); ++ if (ret) { ++ mpsslog("%s %s %d errno %s ret %d\n", ++ mic->name, __func__, __LINE__, ++ strerror(errno), ret); ++ } ++ return ret; ++} ++ ++static inline unsigned _vring_size(unsigned int num, unsigned long align) ++{ ++ return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) ++ + align - 1) & ~(align - 1)) ++ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; ++} ++ ++/* ++ * This initialization routine requires at least one ++ * vring i.e. vr0. vr1 is optional. ++ */ ++static void * ++init_vr(struct mic_info *mic, int fd, int type, ++ struct mic_vring *vr0, struct mic_vring *vr1, int num_vq) ++{ ++ int vr_size; ++ char *va; ++ ++ vr_size = PAGE_ALIGN(_vring_size(MIC_VRING_ENTRIES, ++ MIC_VIRTIO_RING_ALIGN) + ++ sizeof(struct _mic_vring_info)); ++ va = mmap(NULL, MIC_DEVICE_PAGE_END + vr_size * num_vq, ++ PROT_READ, MAP_SHARED, fd, 0); ++ if (MAP_FAILED == va) { ++ mpsslog("%s %s %d mmap failed errno %s\n", ++ mic->name, __func__, __LINE__, ++ strerror(errno)); ++ goto done; ++ } ++ set_dp(mic, type, va); ++ vr0->va = (struct mic_vring *)&va[MIC_DEVICE_PAGE_END]; ++ vr0->info = vr0->va + ++ _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN); ++ vring_init(&vr0->vr, ++ MIC_VRING_ENTRIES, vr0->va, MIC_VIRTIO_RING_ALIGN); ++ mpsslog("%s %s vr0 %p vr0->info %p vr_size 0x%x vring 0x%x ", ++ __func__, mic->name, vr0->va, vr0->info, vr_size, ++ _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); ++ mpsslog("magic 0x%x expected 0x%x\n", ++ le32toh(vr0->info->magic), MIC_MAGIC + type); ++ assert(le32toh(vr0->info->magic) == MIC_MAGIC + type); ++ if (vr1) { ++ vr1->va = (struct mic_vring *) ++ &va[MIC_DEVICE_PAGE_END + vr_size]; ++ vr1->info = vr1->va + _vring_size(MIC_VRING_ENTRIES, ++ MIC_VIRTIO_RING_ALIGN); ++ vring_init(&vr1->vr, ++ MIC_VRING_ENTRIES, vr1->va, MIC_VIRTIO_RING_ALIGN); ++ mpsslog("%s %s vr1 %p vr1->info %p vr_size 0x%x vring 0x%x ", ++ __func__, mic->name, vr1->va, vr1->info, vr_size, ++ _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); ++ mpsslog("magic 0x%x expected 0x%x\n", ++ le32toh(vr1->info->magic), MIC_MAGIC + type + 1); ++ assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1); ++ } ++done: ++ return va; ++} ++ ++static int ++wait_for_card_driver(struct mic_info *mic, int fd, int type) ++{ ++ struct pollfd pollfd; ++ int err; ++ struct mic_device_desc *desc = get_device_desc(mic, type); ++ __u8 prev_status; ++ ++ if (!desc) ++ return -ENODEV; ++ prev_status = desc->status; ++ pollfd.fd = fd; ++ mpsslog("%s %s Waiting .... 
desc-> type %d status 0x%x\n", ++ mic->name, __func__, type, desc->status); ++ ++ while (1) { ++ pollfd.events = POLLIN; ++ pollfd.revents = 0; ++ err = poll(&pollfd, 1, -1); ++ if (err < 0) { ++ mpsslog("%s %s poll failed %s\n", ++ mic->name, __func__, strerror(errno)); ++ continue; ++ } ++ ++ if (pollfd.revents) { ++ if (desc->status != prev_status) { ++ mpsslog("%s %s Waiting... desc-> type %d " ++ "status 0x%x\n", ++ mic->name, __func__, type, ++ desc->status); ++ prev_status = desc->status; ++ } ++ if (desc->status & VIRTIO_CONFIG_S_DRIVER_OK) { ++ mpsslog("%s %s poll.revents %d\n", ++ mic->name, __func__, pollfd.revents); ++ mpsslog("%s %s desc-> type %d status 0x%x\n", ++ mic->name, __func__, type, ++ desc->status); ++ break; ++ } ++ } ++ } ++ return 0; ++} ++ ++/* Spin till we have some descriptors */ ++static void ++spin_for_descriptors(struct mic_info *mic, struct mic_vring *vr) ++{ ++ __u16 avail_idx = read_avail_idx(vr); ++ ++ while (avail_idx == le16toh(ACCESS_ONCE(vr->vr.avail->idx))) { ++#ifdef DEBUG ++ mpsslog("%s %s waiting for desc avail %d info_avail %d\n", ++ mic->name, __func__, ++ le16toh(vr->vr.avail->idx), vr->info->avail_idx); ++#endif ++ sched_yield(); ++ } ++} ++ ++static void * ++virtio_net(void *arg) ++{ ++ static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)]; ++ static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __attribute__ ((aligned(64))); ++ struct iovec vnet_iov[2][2] = { ++ { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) }, ++ { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } }, ++ { { .iov_base = vnet_hdr[1], .iov_len = sizeof(vnet_hdr[1]) }, ++ { .iov_base = vnet_buf[1], .iov_len = sizeof(vnet_buf[1]) } }, ++ }; ++ struct iovec *iov0 = vnet_iov[0], *iov1 = vnet_iov[1]; ++ struct mic_info *mic = (struct mic_info *)arg; ++ char if_name[IFNAMSIZ]; ++ struct pollfd net_poll[MAX_NET_FD]; ++ struct mic_vring tx_vr, rx_vr; ++ struct mic_copy_desc copy; ++ struct mic_device_desc *desc; ++ int err; ++ ++ snprintf(if_name, IFNAMSIZ, "mic%d", mic->id); ++ mic->mic_net.tap_fd = tun_alloc(mic, if_name); ++ if (mic->mic_net.tap_fd < 0) ++ goto done; ++ ++ if (tap_configure(mic, if_name)) ++ goto done; ++ mpsslog("MIC name %s id %d\n", mic->name, mic->id); ++ ++ net_poll[NET_FD_VIRTIO_NET].fd = mic->mic_net.virtio_net_fd; ++ net_poll[NET_FD_VIRTIO_NET].events = POLLIN; ++ net_poll[NET_FD_TUN].fd = mic->mic_net.tap_fd; ++ net_poll[NET_FD_TUN].events = POLLIN; ++ ++ if (MAP_FAILED == init_vr(mic, mic->mic_net.virtio_net_fd, ++ VIRTIO_ID_NET, &tx_vr, &rx_vr, ++ virtnet_dev_page.dd.num_vq)) { ++ mpsslog("%s init_vr failed %s\n", ++ mic->name, strerror(errno)); ++ goto done; ++ } ++ ++ copy.iovcnt = 2; ++ desc = get_device_desc(mic, VIRTIO_ID_NET); ++ ++ while (1) { ++ ssize_t len; ++ ++ net_poll[NET_FD_VIRTIO_NET].revents = 0; ++ net_poll[NET_FD_TUN].revents = 0; ++ ++ /* Start polling for data from tap and virtio net */ ++ err = poll(net_poll, 2, -1); ++ if (err < 0) { ++ mpsslog("%s poll failed %s\n", ++ __func__, strerror(errno)); ++ continue; ++ } ++ if (!(desc->status & VIRTIO_CONFIG_S_DRIVER_OK)) { ++ err = wait_for_card_driver(mic, ++ mic->mic_net.virtio_net_fd, ++ VIRTIO_ID_NET); ++ if (err) { ++ mpsslog("%s %s %d Exiting...\n", ++ mic->name, __func__, __LINE__); ++ break; ++ } ++ } ++ /* ++ * Check if there is data to be read from TUN and write to ++ * virtio net fd if there is. 
++ */ ++ if (net_poll[NET_FD_TUN].revents & POLLIN) { ++ copy.iov = iov0; ++ len = readv(net_poll[NET_FD_TUN].fd, ++ copy.iov, copy.iovcnt); ++ if (len > 0) { ++ struct virtio_net_hdr *hdr ++ = (struct virtio_net_hdr *)vnet_hdr[0]; ++ ++ /* Disable checksums on the card since we are on ++ a reliable PCIe link */ ++ hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID; ++#ifdef DEBUG ++ mpsslog("%s %s %d hdr->flags 0x%x ", mic->name, ++ __func__, __LINE__, hdr->flags); ++ mpsslog("copy.out_len %d hdr->gso_type 0x%x\n", ++ copy.out_len, hdr->gso_type); ++#endif ++#ifdef DEBUG ++ disp_iovec(mic, copy, __func__, __LINE__); ++ mpsslog("%s %s %d read from tap 0x%lx\n", ++ mic->name, __func__, __LINE__, ++ len); ++#endif ++ spin_for_descriptors(mic, &tx_vr); ++ txrx_prepare(VIRTIO_ID_NET, 1, &tx_vr, &copy, ++ len); ++ ++ err = mic_virtio_copy(mic, ++ mic->mic_net.virtio_net_fd, &tx_vr, ++ &copy); ++ if (err < 0) { ++ mpsslog("%s %s %d mic_virtio_copy %s\n", ++ mic->name, __func__, __LINE__, ++ strerror(errno)); ++ } ++ if (!err) ++ verify_out_len(mic, &copy); ++#ifdef DEBUG ++ disp_iovec(mic, copy, __func__, __LINE__); ++ mpsslog("%s %s %d wrote to net 0x%lx\n", ++ mic->name, __func__, __LINE__, ++ sum_iovec_len(&copy)); ++#endif ++ /* Reinitialize IOV for next run */ ++ iov0[1].iov_len = MAX_NET_PKT_SIZE; ++ } else if (len < 0) { ++ disp_iovec(mic, &copy, __func__, __LINE__); ++ mpsslog("%s %s %d read failed %s ", mic->name, ++ __func__, __LINE__, strerror(errno)); ++ mpsslog("cnt %d sum %zd\n", ++ copy.iovcnt, sum_iovec_len(&copy)); ++ } ++ } ++ ++ /* ++ * Check if there is data to be read from virtio net and ++ * write to TUN if there is. ++ */ ++ if (net_poll[NET_FD_VIRTIO_NET].revents & POLLIN) { ++ while (rx_vr.info->avail_idx != ++ le16toh(rx_vr.vr.avail->idx)) { ++ copy.iov = iov1; ++ txrx_prepare(VIRTIO_ID_NET, 0, &rx_vr, &copy, ++ MAX_NET_PKT_SIZE ++ + sizeof(struct virtio_net_hdr)); ++ ++ err = mic_virtio_copy(mic, ++ mic->mic_net.virtio_net_fd, &rx_vr, ++ &copy); ++ if (!err) { ++#ifdef DEBUG ++ struct virtio_net_hdr *hdr ++ = (struct virtio_net_hdr *) ++ vnet_hdr[1]; ++ ++ mpsslog("%s %s %d hdr->flags 0x%x, ", ++ mic->name, __func__, __LINE__, ++ hdr->flags); ++ mpsslog("out_len %d gso_type 0x%x\n", ++ copy.out_len, ++ hdr->gso_type); ++#endif ++ /* Set the correct output iov_len */ ++ iov1[1].iov_len = copy.out_len - ++ sizeof(struct virtio_net_hdr); ++ verify_out_len(mic, &copy); ++#ifdef DEBUG ++ disp_iovec(mic, copy, __func__, ++ __LINE__); ++ mpsslog("%s %s %d ", ++ mic->name, __func__, __LINE__); ++ mpsslog("read from net 0x%lx\n", ++ sum_iovec_len(copy)); ++#endif ++ len = writev(net_poll[NET_FD_TUN].fd, ++ copy.iov, copy.iovcnt); ++ if (len != sum_iovec_len(&copy)) { ++ mpsslog("Tun write failed %s ", ++ strerror(errno)); ++ mpsslog("len 0x%zx ", len); ++ mpsslog("read_len 0x%zx\n", ++ sum_iovec_len(&copy)); ++ } else { ++#ifdef DEBUG ++ disp_iovec(mic, &copy, __func__, ++ __LINE__); ++ mpsslog("%s %s %d ", ++ mic->name, __func__, ++ __LINE__); ++ mpsslog("wrote to tap 0x%lx\n", ++ len); ++#endif ++ } ++ } else { ++ mpsslog("%s %s %d mic_virtio_copy %s\n", ++ mic->name, __func__, __LINE__, ++ strerror(errno)); ++ break; ++ } ++ } ++ } ++ if (net_poll[NET_FD_VIRTIO_NET].revents & POLLERR) ++ mpsslog("%s: %s: POLLERR\n", __func__, mic->name); ++ } ++done: ++ pthread_exit(NULL); ++} ++ ++/* virtio_console */ ++#define VIRTIO_CONSOLE_FD 0 ++#define MONITOR_FD (VIRTIO_CONSOLE_FD + 1) ++#define MAX_CONSOLE_FD (MONITOR_FD + 1) /* must be the last one + 1 */ ++#define MAX_BUFFER_SIZE PAGE_SIZE ++ ++static void * ++virtio_console(void *arg)
++{ ++ static __u8 vcons_buf[2][PAGE_SIZE]; ++ struct iovec vcons_iov[2] = { ++ { .iov_base = vcons_buf[0], .iov_len = sizeof(vcons_buf[0]) }, ++ { .iov_base = vcons_buf[1], .iov_len = sizeof(vcons_buf[1]) }, ++ }; ++ struct iovec *iov0 = &vcons_iov[0], *iov1 = &vcons_iov[1]; ++ struct mic_info *mic = (struct mic_info *)arg; ++ int err; ++ struct pollfd console_poll[MAX_CONSOLE_FD]; ++ int pty_fd; ++ char *pts_name; ++ ssize_t len; ++ struct mic_vring tx_vr, rx_vr; ++ struct mic_copy_desc copy; ++ struct mic_device_desc *desc; ++ ++ pty_fd = posix_openpt(O_RDWR); ++ if (pty_fd < 0) { ++ mpsslog("can't open a pseudoterminal master device: %s\n", ++ strerror(errno)); ++ goto _return; ++ } ++ pts_name = ptsname(pty_fd); ++ if (pts_name == NULL) { ++ mpsslog("can't get pts name\n"); ++ goto _close_pty; ++ } ++ printf("%s console message goes to %s\n", mic->name, pts_name); ++ mpsslog("%s console message goes to %s\n", mic->name, pts_name); ++ err = grantpt(pty_fd); ++ if (err < 0) { ++ mpsslog("can't grant access: %s %s\n", ++ pts_name, strerror(errno)); ++ goto _close_pty; ++ } ++ err = unlockpt(pty_fd); ++ if (err < 0) { ++ mpsslog("can't unlock a pseudoterminal: %s %s\n", ++ pts_name, strerror(errno)); ++ goto _close_pty; ++ } ++ console_poll[MONITOR_FD].fd = pty_fd; ++ console_poll[MONITOR_FD].events = POLLIN; ++ ++ console_poll[VIRTIO_CONSOLE_FD].fd = mic->mic_console.virtio_console_fd; ++ console_poll[VIRTIO_CONSOLE_FD].events = POLLIN; ++ ++ if (MAP_FAILED == init_vr(mic, mic->mic_console.virtio_console_fd, ++ VIRTIO_ID_CONSOLE, &tx_vr, &rx_vr, ++ virtcons_dev_page.dd.num_vq)) { ++ mpsslog("%s init_vr failed %s\n", ++ mic->name, strerror(errno)); ++ goto _close_pty; ++ } ++ ++ copy.iovcnt = 1; ++ desc = get_device_desc(mic, VIRTIO_ID_CONSOLE); ++ ++ for (;;) { ++ console_poll[MONITOR_FD].revents = 0; ++ console_poll[VIRTIO_CONSOLE_FD].revents = 0; ++ err = poll(console_poll, MAX_CONSOLE_FD, -1); ++ if (err < 0) { ++ mpsslog("%s %d: poll failed: %s\n", __func__, __LINE__, ++ strerror(errno)); ++ continue; ++ } ++ if (!(desc->status & VIRTIO_CONFIG_S_DRIVER_OK)) { ++ err = wait_for_card_driver(mic, ++ mic->mic_console.virtio_console_fd, ++ VIRTIO_ID_CONSOLE); ++ if (err) { ++ mpsslog("%s %s %d Exiting...\n", ++ mic->name, __func__, __LINE__); ++ break; ++ } ++ } ++ ++ if (console_poll[MONITOR_FD].revents & POLLIN) { ++ copy.iov = iov0; ++ len = readv(pty_fd, copy.iov, copy.iovcnt); ++ if (len > 0) { ++#ifdef DEBUG ++ disp_iovec(mic, copy, __func__, __LINE__); ++ mpsslog("%s %s %d read from tap 0x%lx\n", ++ mic->name, __func__, __LINE__, ++ len); ++#endif ++ spin_for_descriptors(mic, &tx_vr); ++ txrx_prepare(VIRTIO_ID_CONSOLE, 1, &tx_vr, ++ &copy, len); ++ ++ err = mic_virtio_copy(mic, ++ mic->mic_console.virtio_console_fd, ++ &tx_vr, &copy); ++ if (err < 0) { ++ mpsslog("%s %s %d mic_virtio_copy %s\n", ++ mic->name, __func__, __LINE__, ++ strerror(errno)); ++ } ++ if (!err) ++ verify_out_len(mic, &copy); ++#ifdef DEBUG ++ disp_iovec(mic, copy, __func__, __LINE__); ++ mpsslog("%s %s %d wrote to net 0x%lx\n", ++ mic->name, __func__, __LINE__, ++ sum_iovec_len(copy)); ++#endif ++ /* Reinitialize IOV for next run */ ++ iov0->iov_len = PAGE_SIZE; ++ } else if (len < 0) { ++ disp_iovec(mic, &copy, __func__, __LINE__); ++ mpsslog("%s %s %d read failed %s ", ++ mic->name, __func__, __LINE__, ++ strerror(errno)); ++ mpsslog("cnt %d sum %zd\n", ++ copy.iovcnt, sum_iovec_len(&copy)); ++ } ++ } ++ ++ if (console_poll[VIRTIO_CONSOLE_FD].revents & POLLIN) { ++ while (rx_vr.info->avail_idx != ++
le16toh(rx_vr.vr.avail->idx)) { ++ copy.iov = iov1; ++ txrx_prepare(VIRTIO_ID_CONSOLE, 0, &rx_vr, ++ &copy, PAGE_SIZE); ++ ++ err = mic_virtio_copy(mic, ++ mic->mic_console.virtio_console_fd, ++ &rx_vr, &copy); ++ if (!err) { ++ /* Set the correct output iov_len */ ++ iov1->iov_len = copy.out_len; ++ verify_out_len(mic, &copy); ++#ifdef DEBUG ++ disp_iovec(mic, copy, __func__, ++ __LINE__); ++ mpsslog("%s %s %d ", ++ mic->name, __func__, __LINE__); ++ mpsslog("read from net 0x%lx\n", ++ sum_iovec_len(copy)); ++#endif ++ len = writev(pty_fd, ++ copy.iov, copy.iovcnt); ++ if (len != sum_iovec_len(&copy)) { ++ mpsslog("Tun write failed %s ", ++ strerror(errno)); ++ mpsslog("len 0x%zx ", len); ++ mpsslog("read_len 0x%zx\n", ++ sum_iovec_len(&copy)); ++ } else { ++#ifdef DEBUG ++ disp_iovec(mic, copy, __func__, ++ __LINE__); ++ mpsslog("%s %s %d ", ++ mic->name, __func__, ++ __LINE__); ++ mpsslog("wrote to tap 0x%lx\n", ++ len); ++#endif ++ } ++ } else { ++ mpsslog("%s %s %d mic_virtio_copy %s\n", ++ mic->name, __func__, __LINE__, ++ strerror(errno)); ++ break; ++ } ++ } ++ } ++ if (console_poll[NET_FD_VIRTIO_NET].revents & POLLERR) ++ mpsslog("%s: %s: POLLERR\n", __func__, mic->name); ++ } ++_close_pty: ++ close(pty_fd); ++_return: ++ pthread_exit(NULL); ++} ++ ++static void ++add_virtio_device(struct mic_info *mic, struct mic_device_desc *dd) ++{ ++ char path[PATH_MAX]; ++ int fd, err; ++ ++ snprintf(path, PATH_MAX, "/dev/mic%d", mic->id); ++ fd = open(path, O_RDWR); ++ if (fd < 0) { ++ mpsslog("Could not open %s %s\n", path, strerror(errno)); ++ return; ++ } ++ ++ err = ioctl(fd, MIC_VIRTIO_ADD_DEVICE, dd); ++ if (err < 0) { ++ mpsslog("Could not add %d %s\n", dd->type, strerror(errno)); ++ close(fd); ++ return; ++ } ++ switch (dd->type) { ++ case VIRTIO_ID_NET: ++ mic->mic_net.virtio_net_fd = fd; ++ mpsslog("Added VIRTIO_ID_NET for %s\n", mic->name); ++ break; ++ case VIRTIO_ID_CONSOLE: ++ mic->mic_console.virtio_console_fd = fd; ++ mpsslog("Added VIRTIO_ID_CONSOLE for %s\n", mic->name); ++ break; ++ case VIRTIO_ID_BLOCK: ++ mic->mic_virtblk.virtio_block_fd = fd; ++ mpsslog("Added VIRTIO_ID_BLOCK for %s\n", mic->name); ++ break; ++ } ++} ++ ++static bool ++set_backend_file(struct mic_info *mic) ++{ ++ FILE *config; ++ char buff[PATH_MAX], *line, *evv, *p; ++ ++ snprintf(buff, PATH_MAX, "%s/mpssd%03d.conf", mic_config_dir, mic->id); ++ config = fopen(buff, "r"); ++ if (config == NULL) ++ return false; ++ do { /* look for "virtblk_backend=XXXX" */ ++ line = fgets(buff, PATH_MAX, config); ++ if (line == NULL) ++ break; ++ if (*line == '#') ++ continue; ++ p = strchr(line, '\n'); ++ if (p) ++ *p = '\0'; ++ } while (strncmp(line, virtblk_backend, strlen(virtblk_backend)) != 0); ++ fclose(config); ++ if (line == NULL) ++ return false; ++ evv = strchr(line, '='); ++ if (evv == NULL) ++ return false; ++ mic->mic_virtblk.backend_file = malloc(strlen(evv) + 1); ++ if (mic->mic_virtblk.backend_file == NULL) { ++ mpsslog("%s %d can't allocate memory\n", mic->name, mic->id); ++ return false; ++ } ++ strcpy(mic->mic_virtblk.backend_file, evv + 1); ++ return true; ++} ++ ++#define SECTOR_SIZE 512 ++static bool ++set_backend_size(struct mic_info *mic) ++{ ++ mic->mic_virtblk.backend_size = lseek(mic->mic_virtblk.backend, 0, ++ SEEK_END); ++ if (mic->mic_virtblk.backend_size < 0) { ++ mpsslog("%s: can't seek: %s\n", ++ mic->name, mic->mic_virtblk.backend_file); ++ return false; ++ } ++ virtblk_dev_page.blk_config.capacity = ++ mic->mic_virtblk.backend_size / SECTOR_SIZE; ++ if ((mic->mic_virtblk.backend_size % SECTOR_SIZE) !=
0) ++ virtblk_dev_page.blk_config.capacity++; ++ ++ virtblk_dev_page.blk_config.capacity = ++ htole64(virtblk_dev_page.blk_config.capacity); ++ ++ return true; ++} ++ ++static bool ++open_backend(struct mic_info *mic) ++{ ++ if (!set_backend_file(mic)) ++ goto _error_exit; ++ mic->mic_virtblk.backend = open(mic->mic_virtblk.backend_file, O_RDWR); ++ if (mic->mic_virtblk.backend < 0) { ++ mpsslog("%s: can't open: %s\n", mic->name, ++ mic->mic_virtblk.backend_file); ++ goto _error_free; ++ } ++ if (!set_backend_size(mic)) ++ goto _error_close; ++ mic->mic_virtblk.backend_addr = mmap(NULL, ++ mic->mic_virtblk.backend_size, ++ PROT_READ|PROT_WRITE, MAP_SHARED, ++ mic->mic_virtblk.backend, 0L); ++ if (mic->mic_virtblk.backend_addr == MAP_FAILED) { ++ mpsslog("%s: can't map: %s %s\n", ++ mic->name, mic->mic_virtblk.backend_file, ++ strerror(errno)); ++ goto _error_close; ++ } ++ return true; ++ ++ _error_close: ++ close(mic->mic_virtblk.backend); ++ _error_free: ++ free(mic->mic_virtblk.backend_file); ++ _error_exit: ++ return false; ++} ++ ++static void ++close_backend(struct mic_info *mic) ++{ ++ munmap(mic->mic_virtblk.backend_addr, mic->mic_virtblk.backend_size); ++ close(mic->mic_virtblk.backend); ++ free(mic->mic_virtblk.backend_file); ++} ++ ++static bool ++start_virtblk(struct mic_info *mic, struct mic_vring *vring) ++{ ++ if (((unsigned long)&virtblk_dev_page.blk_config % 8) != 0) { ++ mpsslog("%s: blk_config is not 8 byte aligned.\n", ++ mic->name); ++ return false; ++ } ++ add_virtio_device(mic, &virtblk_dev_page.dd); ++ if (MAP_FAILED == init_vr(mic, mic->mic_virtblk.virtio_block_fd, ++ VIRTIO_ID_BLOCK, vring, NULL, ++ virtblk_dev_page.dd.num_vq)) { ++ mpsslog("%s init_vr failed %s\n", ++ mic->name, strerror(errno)); ++ return false; ++ } ++ return true; ++} ++ ++static void ++stop_virtblk(struct mic_info *mic) ++{ ++ int vr_size, ret; ++ ++ vr_size = PAGE_ALIGN(_vring_size(MIC_VRING_ENTRIES, ++ MIC_VIRTIO_RING_ALIGN) + ++ sizeof(struct _mic_vring_info)); ++ ret = munmap(mic->mic_virtblk.block_dp, ++ MIC_DEVICE_PAGE_END + vr_size * virtblk_dev_page.dd.num_vq); ++ if (ret < 0) ++ mpsslog("%s munmap errno %d\n", mic->name, errno); ++ close(mic->mic_virtblk.virtio_block_fd); ++} ++ ++static __u8 ++header_error_check(struct vring_desc *desc) ++{ ++ if (le32toh(desc->len) != sizeof(struct virtio_blk_outhdr)) { ++ mpsslog("%s() %d: length is not sizeof(virtio_blk_outhd)\n", ++ __func__, __LINE__); ++ return -EIO; ++ } ++ if (!(le16toh(desc->flags) & VRING_DESC_F_NEXT)) { ++ mpsslog("%s() %d: alone\n", ++ __func__, __LINE__); ++ return -EIO; ++ } ++ if (le16toh(desc->flags) & VRING_DESC_F_WRITE) { ++ mpsslog("%s() %d: not read\n", ++ __func__, __LINE__); ++ return -EIO; ++ } ++ return 0; ++} ++ ++static int ++read_header(int fd, struct virtio_blk_outhdr *hdr, __u32 desc_idx) ++{ ++ struct iovec iovec; ++ struct mic_copy_desc copy; ++ ++ iovec.iov_len = sizeof(*hdr); ++ iovec.iov_base = hdr; ++ copy.iov = &iovec; ++ copy.iovcnt = 1; ++ copy.vr_idx = 0; /* only one vring on virtio_block */ ++ copy.update_used = false; /* do not update used index */ ++ return ioctl(fd, MIC_VIRTIO_COPY_DESC, &copy); ++} ++ ++static int ++transfer_blocks(int fd, struct iovec *iovec, __u32 iovcnt) ++{ ++ struct mic_copy_desc copy; ++ ++ copy.iov = iovec; ++ copy.iovcnt = iovcnt; ++ copy.vr_idx = 0; /* only one vring on virtio_block */ ++ copy.update_used = false; /* do not update used index */ ++ return ioctl(fd, MIC_VIRTIO_COPY_DESC, &copy); ++} ++ ++static __u8 ++status_error_check(struct vring_desc *desc) ++{ ++ if
(le32toh(desc->len) != sizeof(__u8)) { ++ mpsslog("%s() %d: length is not sizeof(status)\n", ++ __func__, __LINE__); ++ return -EIO; ++ } ++ return 0; ++} ++ ++static int ++write_status(int fd, __u8 *status) ++{ ++ struct iovec iovec; ++ struct mic_copy_desc copy; ++ ++ iovec.iov_base = status; ++ iovec.iov_len = sizeof(*status); ++ copy.iov = &iovec; ++ copy.iovcnt = 1; ++ copy.vr_idx = 0; /* only one vring on virtio_block */ ++ copy.update_used = true; /* Update used index */ ++ return ioctl(fd, MIC_VIRTIO_COPY_DESC, &copy); ++} ++ ++#ifndef VIRTIO_BLK_T_GET_ID ++#define VIRTIO_BLK_T_GET_ID 8 ++#endif ++ ++static void * ++virtio_block(void *arg) ++{ ++ struct mic_info *mic = (struct mic_info *)arg; ++ int ret; ++ struct pollfd block_poll; ++ struct mic_vring vring; ++ __u16 avail_idx; ++ __u32 desc_idx; ++ struct vring_desc *desc; ++ struct iovec *iovec, *piov; ++ __u8 status; ++ __u32 buffer_desc_idx; ++ struct virtio_blk_outhdr hdr; ++ void *fos; ++ ++ for (;;) { /* forever */ ++ if (!open_backend(mic)) { /* No virtblk */ ++ for (mic->mic_virtblk.signaled = 0; ++ !mic->mic_virtblk.signaled;) ++ sleep(1); ++ continue; ++ } ++ ++ /* backend file is specified. */ ++ if (!start_virtblk(mic, &vring)) ++ goto _close_backend; ++ iovec = malloc(sizeof(*iovec) * ++ le32toh(virtblk_dev_page.blk_config.seg_max)); ++ if (!iovec) { ++ mpsslog("%s: can't alloc iovec: %s\n", ++ mic->name, strerror(ENOMEM)); ++ goto _stop_virtblk; ++ } ++ ++ block_poll.fd = mic->mic_virtblk.virtio_block_fd; ++ block_poll.events = POLLIN; ++ for (mic->mic_virtblk.signaled = 0; ++ !mic->mic_virtblk.signaled;) { ++ block_poll.revents = 0; ++ /* timeout in 1 sec to see signaled */ ++ ret = poll(&block_poll, 1, 1000); ++ if (ret < 0) { ++ mpsslog("%s %d: poll failed: %s\n", ++ __func__, __LINE__, ++ strerror(errno)); ++ continue; ++ } ++ ++ if (!(block_poll.revents & POLLIN)) { ++#ifdef DEBUG ++ mpsslog("%s %d: block_poll.revents=0x%x\n", ++ __func__, __LINE__, block_poll.revents); ++#endif ++ continue; ++ } ++ ++ /* POLLIN */ ++ while (vring.info->avail_idx != ++ le16toh(vring.vr.avail->idx)) { ++ /* read header element */ ++ avail_idx = ++ vring.info->avail_idx & ++ (vring.vr.num - 1); ++ desc_idx = le16toh( ++ vring.vr.avail->ring[avail_idx]); ++ desc = &vring.vr.desc[desc_idx]; ++#ifdef DEBUG ++ mpsslog("%s() %d: avail_idx=%d ", ++ __func__, __LINE__, ++ vring.info->avail_idx); ++ mpsslog("vring.vr.num=%d desc=%p\n", ++ vring.vr.num, desc); ++#endif ++ status = header_error_check(desc); ++ ret = read_header( ++ mic->mic_virtblk.virtio_block_fd, ++ &hdr, desc_idx); ++ if (ret < 0) { ++ mpsslog("%s() %d %s: ret=%d %s\n", ++ __func__, __LINE__, ++ mic->name, ret, ++ strerror(errno)); ++ break; ++ } ++ /* buffer element */ ++ piov = iovec; ++ status = 0; ++ fos = mic->mic_virtblk.backend_addr + ++ (hdr.sector * SECTOR_SIZE); ++ buffer_desc_idx = next_desc(desc); ++ desc_idx = buffer_desc_idx; ++ for (desc = &vring.vr.desc[buffer_desc_idx]; ++ desc->flags & VRING_DESC_F_NEXT; ++ desc_idx = next_desc(desc), ++ desc = &vring.vr.desc[desc_idx]) { ++ piov->iov_len = desc->len; ++ piov->iov_base = fos; ++ piov++; ++ fos += desc->len; ++ } ++ /* Returning NULLs for VIRTIO_BLK_T_GET_ID. */ ++ if (hdr.type & ~(VIRTIO_BLK_T_OUT | ++ VIRTIO_BLK_T_GET_ID)) { ++ /* ++ VIRTIO_BLK_T_IN - does not do ++ anything. Probably for documenting. ++ VIRTIO_BLK_T_SCSI_CMD - for ++ virtio_scsi. ++ VIRTIO_BLK_T_FLUSH - turned off in ++ config space. ++ VIRTIO_BLK_T_BARRIER - defined but not ++ used in anywhere.
++ */ ++ mpsslog("%s() %d: type %x ", ++ __func__, __LINE__, ++ hdr.type); ++ mpsslog("is not supported\n"); ++ status = -ENOTSUP; ++ ++ } else { ++ ret = transfer_blocks( ++ mic->mic_virtblk.virtio_block_fd, ++ iovec, ++ piov - iovec); ++ if (ret < 0 && ++ status != 0) ++ status = ret; ++ } ++ /* write status and update used pointer */ ++ if (status != 0) ++ status = status_error_check(desc); ++ ret = write_status( ++ mic->mic_virtblk.virtio_block_fd, ++ &status); ++#ifdef DEBUG ++ mpsslog("%s() %d: write status=%d on desc=%p\n", ++ __func__, __LINE__, ++ status, desc); ++#endif ++ } ++ } ++ free(iovec); ++_stop_virtblk: ++ stop_virtblk(mic); ++_close_backend: ++ close_backend(mic); ++ } /* forever */ ++ ++ pthread_exit(NULL); ++} ++ ++static void ++reset(struct mic_info *mic) ++{ ++#define RESET_TIMEOUT 120 ++ int i = RESET_TIMEOUT; ++ setsysfs(mic->name, "state", "reset"); ++ while (i) { ++ char *state; ++ state = readsysfs(mic->name, "state"); ++ if (!state) ++ goto retry; ++ mpsslog("%s: %s %d state %s\n", ++ mic->name, __func__, __LINE__, state); ++ ++ if (!strcmp(state, "ready")) { ++ free(state); ++ break; ++ } ++ free(state); ++retry: ++ sleep(1); ++ i--; ++ } ++} ++ ++static int ++get_mic_shutdown_status(struct mic_info *mic, char *shutdown_status) ++{ ++ if (!strcmp(shutdown_status, "nop")) ++ return MIC_NOP; ++ if (!strcmp(shutdown_status, "crashed")) ++ return MIC_CRASHED; ++ if (!strcmp(shutdown_status, "halted")) ++ return MIC_HALTED; ++ if (!strcmp(shutdown_status, "poweroff")) ++ return MIC_POWER_OFF; ++ if (!strcmp(shutdown_status, "restart")) ++ return MIC_RESTART; ++ mpsslog("%s: BUG invalid status %s\n", mic->name, shutdown_status); ++ /* Invalid state */ ++ assert(0); ++}; ++ ++static int get_mic_state(struct mic_info *mic) ++{ ++ char *state = NULL; ++ enum mic_states mic_state; ++ ++ while (!state) { ++ state = readsysfs(mic->name, "state"); ++ sleep(1); ++ } ++ mpsslog("%s: %s %d state %s\n", ++ mic->name, __func__, __LINE__, state); ++ ++ if (!strcmp(state, "ready")) { ++ mic_state = MIC_READY; ++ } else if (!strcmp(state, "booting")) { ++ mic_state = MIC_BOOTING; ++ } else if (!strcmp(state, "online")) { ++ mic_state = MIC_ONLINE; ++ } else if (!strcmp(state, "shutting_down")) { ++ mic_state = MIC_SHUTTING_DOWN; ++ } else if (!strcmp(state, "reset_failed")) { ++ mic_state = MIC_RESET_FAILED; ++ } else if (!strcmp(state, "resetting")) { ++ mic_state = MIC_RESETTING; ++ } else { ++ mpsslog("%s: BUG invalid state %s\n", mic->name, state); ++ assert(0); ++ } ++ ++ free(state); ++ return mic_state; ++}; ++ ++static void mic_handle_shutdown(struct mic_info *mic) ++{ ++#define SHUTDOWN_TIMEOUT 60 ++ int i = SHUTDOWN_TIMEOUT; ++ char *shutdown_status; ++ while (i) { ++ shutdown_status = readsysfs(mic->name, "shutdown_status"); ++ if (!shutdown_status) { ++ sleep(1); ++ continue; ++ } ++ mpsslog("%s: %s %d shutdown_status %s\n", ++ mic->name, __func__, __LINE__, shutdown_status); ++ switch (get_mic_shutdown_status(mic, shutdown_status)) { ++ case MIC_RESTART: ++ mic->restart = 1; ++ case MIC_HALTED: ++ case MIC_POWER_OFF: ++ case MIC_CRASHED: ++ free(shutdown_status); ++ goto reset; ++ default: ++ break; ++ } ++ free(shutdown_status); ++ sleep(1); ++ i--; ++ } ++reset: ++ if (!i) ++ mpsslog("%s: %s %d timing out waiting for shutdown_status %s\n", ++ mic->name, __func__, __LINE__, shutdown_status); ++ reset(mic); ++} ++ ++static int open_state_fd(struct mic_info *mic) ++{ ++ char pathname[PATH_MAX]; ++ int fd; ++ ++ snprintf(pathname, PATH_MAX - 1, "%s/%s/%s", ++ 
MICSYSFSDIR, mic->name, "state"); ++ ++ fd = open(pathname, O_RDONLY); ++ if (fd < 0) ++ mpsslog("%s: opening file %s failed %s\n", ++ mic->name, pathname, strerror(errno)); ++ return fd; ++} ++ ++static int block_till_state_change(int fd, struct mic_info *mic) ++{ ++ struct pollfd ufds[1]; ++ char value[PAGE_SIZE]; ++ int ret; ++ ++ ufds[0].fd = fd; ++ ufds[0].events = POLLERR | POLLPRI; ++ ret = poll(ufds, 1, -1); ++ if (ret < 0) { ++ mpsslog("%s: %s %d poll failed %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ return ret; ++ } ++ ++ ret = lseek(fd, 0, SEEK_SET); ++ if (ret < 0) { ++ mpsslog("%s: %s %d Failed to seek to 0: %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ return ret; ++ } ++ ++ ret = read(fd, value, sizeof(value)); ++ if (ret < 0) { ++ mpsslog("%s: %s %d Failed to read sysfs entry: %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void * ++mic_config(void *arg) ++{ ++ struct mic_info *mic = (struct mic_info *)arg; ++ int fd, ret, stat = 0; ++ ++ fd = open_state_fd(mic); ++ if (fd < 0) { ++ mpsslog("%s: %s %d open state fd failed %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ goto exit; ++ } ++ ++ do { ++ ret = block_till_state_change(fd, mic); ++ if (ret < 0) { ++ mpsslog("%s: %s %d block_till_state_change error %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ goto close_exit; ++ } ++ ++ switch (get_mic_state(mic)) { ++ case MIC_SHUTTING_DOWN: ++ mic_handle_shutdown(mic); ++ break; ++ case MIC_READY: ++ case MIC_RESET_FAILED: ++ ret = kill(mic->pid, SIGTERM); ++ mpsslog("%s: %s %d kill pid %d ret %d\n", ++ mic->name, __func__, __LINE__, ++ mic->pid, ret); ++ if (!ret) { ++ ret = waitpid(mic->pid, &stat, ++ WIFSIGNALED(stat)); ++ mpsslog("%s: %s %d waitpid ret %d pid %d\n", ++ mic->name, __func__, __LINE__, ++ ret, mic->pid); ++ } ++ if (mic->boot_on_resume) { ++ setsysfs(mic->name, "state", "boot"); ++ mic->boot_on_resume = 0; ++ } ++ goto close_exit; ++ default: ++ break; ++ } ++ } while (1); ++ ++close_exit: ++ close(fd); ++exit: ++ init_mic(mic); ++ pthread_exit(NULL); ++} ++ ++static void ++set_cmdline(struct mic_info *mic) ++{ ++ char buffer[PATH_MAX]; ++ int len; ++ ++ len = snprintf(buffer, PATH_MAX, ++ "clocksource=tsc highres=off nohz=off "); ++ len += snprintf(buffer + len, PATH_MAX - len, ++ "cpufreq_on;corec6_off;pc3_off;pc6_off "); ++ len += snprintf(buffer + len, PATH_MAX - len, ++ "ifcfg=static;address,172.31.%d.1;netmask,255.255.255.0", ++ mic->id + 1); ++ ++ setsysfs(mic->name, "cmdline", buffer); ++ mpsslog("%s: Command line: \"%s\"\n", mic->name, buffer); ++ snprintf(buffer, PATH_MAX, "172.31.%d.1", mic->id + 1); ++ mpsslog("%s: IPADDR: \"%s\"\n", mic->name, buffer); ++} ++ ++static void ++set_log_buf_info(struct mic_info *mic) ++{ ++ int fd; ++ off_t len; ++ char system_map[] = "/lib/firmware/mic/System.map"; ++ char *map, *temp, log_buf[17] = {'\0'}; ++ ++ fd = open(system_map, O_RDONLY); ++ if (fd < 0) { ++ mpsslog("%s: Opening System.map failed: %d\n", ++ mic->name, errno); ++ return; ++ } ++ len = lseek(fd, 0, SEEK_END); ++ if (len < 0) { ++ mpsslog("%s: Reading System.map size failed: %d\n", ++ mic->name, errno); ++ close(fd); ++ return; ++ } ++ map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0); ++ if (map == MAP_FAILED) { ++ mpsslog("%s: mmap of System.map failed: %d\n", ++ mic->name, errno); ++ close(fd); ++ return; ++ } ++ temp = strstr(map, "__log_buf"); ++ if (!temp) { ++ mpsslog("%s: __log_buf not found: %d\n", mic->name, 
errno); ++ munmap(map, len); ++ close(fd); ++ return; ++ } ++ strncpy(log_buf, temp - 19, 16); ++ setsysfs(mic->name, "log_buf_addr", log_buf); ++ mpsslog("%s: log_buf_addr: %s\n", mic->name, log_buf); ++ temp = strstr(map, "log_buf_len"); ++ if (!temp) { ++ mpsslog("%s: log_buf_len not found: %d\n", mic->name, errno); ++ munmap(map, len); ++ close(fd); ++ return; ++ } ++ strncpy(log_buf, temp - 19, 16); ++ setsysfs(mic->name, "log_buf_len", log_buf); ++ mpsslog("%s: log_buf_len: %s\n", mic->name, log_buf); ++ munmap(map, len); ++ close(fd); ++} ++ ++static void ++change_virtblk_backend(int x, siginfo_t *siginfo, void *p) ++{ ++ struct mic_info *mic; ++ ++ for (mic = mic_list.next; mic != NULL; mic = mic->next) ++ mic->mic_virtblk.signaled = 1/* true */; ++} ++ ++static void ++set_mic_boot_params(struct mic_info *mic) ++{ ++ set_log_buf_info(mic); ++ set_cmdline(mic); ++} ++ ++static void * ++init_mic(void *arg) ++{ ++ struct mic_info *mic = (struct mic_info *)arg; ++ struct sigaction ignore = { ++ .sa_flags = 0, ++ .sa_handler = SIG_IGN ++ }; ++ struct sigaction act = { ++ .sa_flags = SA_SIGINFO, ++ .sa_sigaction = change_virtblk_backend, ++ }; ++ char buffer[PATH_MAX]; ++ int err, fd; ++ ++ /* ++ * Currently, one virtio block device is supported for each MIC card ++ * at a time. Any user (or test) can send a SIGUSR1 to the MIC daemon. ++ * The signal informs the virtio block backend about a change in the ++ * configuration file which specifies the virtio backend file name on ++ * the host. Virtio block backend then re-reads the configuration file ++ * and switches to the new block device. This signalling mechanism may ++ * not be required once multiple virtio block devices are supported by ++ * the MIC daemon. ++ */ ++ sigaction(SIGUSR1, &ignore, NULL); ++retry: ++ fd = open_state_fd(mic); ++ if (fd < 0) { ++ mpsslog("%s: %s %d open state fd failed %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ sleep(2); ++ goto retry; ++ } ++ ++ if (mic->restart) { ++ snprintf(buffer, PATH_MAX, "boot"); ++ setsysfs(mic->name, "state", buffer); ++ mpsslog("%s restarting mic %d\n", ++ mic->name, mic->restart); ++ mic->restart = 0; ++ } ++ ++ while (1) { ++ while (block_till_state_change(fd, mic)) { ++ mpsslog("%s: %s %d block_till_state_change error %s\n", ++ mic->name, __func__, __LINE__, strerror(errno)); ++ sleep(2); ++ continue; ++ } ++ ++ if (get_mic_state(mic) == MIC_BOOTING) ++ break; ++ } ++ ++ mic->pid = fork(); ++ switch (mic->pid) { ++ case 0: ++ add_virtio_device(mic, &virtcons_dev_page.dd); ++ add_virtio_device(mic, &virtnet_dev_page.dd); ++ err = pthread_create(&mic->mic_console.console_thread, NULL, ++ virtio_console, mic); ++ if (err) ++ mpsslog("%s virtcons pthread_create failed %s\n", ++ mic->name, strerror(err)); ++ err = pthread_create(&mic->mic_net.net_thread, NULL, ++ virtio_net, mic); ++ if (err) ++ mpsslog("%s virtnet pthread_create failed %s\n", ++ mic->name, strerror(err)); ++ err = pthread_create(&mic->mic_virtblk.block_thread, NULL, ++ virtio_block, mic); ++ if (err) ++ mpsslog("%s virtblk pthread_create failed %s\n", ++ mic->name, strerror(err)); ++ sigemptyset(&act.sa_mask); ++ err = sigaction(SIGUSR1, &act, NULL); ++ if (err) ++ mpsslog("%s sigaction SIGUSR1 failed %s\n", ++ mic->name, strerror(errno)); ++ while (1) ++ sleep(60); ++ case -1: ++ mpsslog("fork failed MIC name %s id %d errno %d\n", ++ mic->name, mic->id, errno); ++ break; ++ default: ++ err = pthread_create(&mic->config_thread, NULL, ++ mic_config, mic); ++ if (err) ++ mpsslog("%s mic_config 
pthread_create failed %s\n", ++ mic->name, strerror(err)); ++ } ++ ++ return NULL; ++} ++ ++static void ++start_daemon(void) ++{ ++ struct mic_info *mic; ++ int err; ++ ++ for (mic = mic_list.next; mic; mic = mic->next) { ++ set_mic_boot_params(mic); ++ err = pthread_create(&mic->init_thread, NULL, init_mic, mic); ++ if (err) ++ mpsslog("%s init_mic pthread_create failed %s\n", ++ mic->name, strerror(err)); ++ } ++ ++ while (1) ++ sleep(60); ++} ++ ++static int ++init_mic_list(void) ++{ ++ struct mic_info *mic = &mic_list; ++ struct dirent *file; ++ DIR *dp; ++ int cnt = 0; ++ ++ dp = opendir(MICSYSFSDIR); ++ if (!dp) ++ return 0; ++ ++ while ((file = readdir(dp)) != NULL) { ++ if (!strncmp(file->d_name, "mic", 3)) { ++ mic->next = calloc(1, sizeof(struct mic_info)); ++ if (mic->next) { ++ mic = mic->next; ++ mic->id = atoi(&file->d_name[3]); ++ mic->name = malloc(strlen(file->d_name) + 16); ++ if (mic->name) ++ strcpy(mic->name, file->d_name); ++ mpsslog("MIC name %s id %d\n", mic->name, ++ mic->id); ++ cnt++; ++ } ++ } ++ } ++ ++ closedir(dp); ++ return cnt; ++} ++ ++void ++mpsslog(char *format, ...) ++{ ++ va_list args; ++ char buffer[4096]; ++ char ts[52], *ts1; ++ time_t t; ++ ++ if (logfp == NULL) ++ return; ++ ++ va_start(args, format); ++ vsprintf(buffer, format, args); ++ va_end(args); ++ ++ time(&t); ++ ts1 = ctime_r(&t, ts); ++ ts1[strlen(ts1) - 1] = '\0'; ++ fprintf(logfp, "%s: %s", ts1, buffer); ++ ++ fflush(logfp); ++} ++ ++int ++main(int argc, char *argv[]) ++{ ++ int cnt; ++ pid_t pid; ++ ++ myname = argv[0]; ++ ++ logfp = fopen(LOGFILE_NAME, "a+"); ++ if (!logfp) { ++ fprintf(stderr, "cannot open logfile '%s'\n", LOGFILE_NAME); ++ exit(1); ++ } ++ pid = fork(); ++ switch (pid) { ++ case 0: ++ break; ++ case -1: ++ exit(2); ++ default: ++ exit(0); ++ } ++ ++ mpsslog("MIC Daemon start\n"); ++ ++ cnt = init_mic_list(); ++ if (cnt == 0) { ++ mpsslog("MIC module not loaded\n"); ++ exit(3); ++ } ++ mpsslog("MIC found %d devices\n", cnt); ++ ++ start_daemon(); ++ ++ exit(0); ++} +diff --git a/samples/mic/mpssd/mpssd.h b/samples/mic/mpssd/mpssd.h +new file mode 100644 +index 000000000000..8bd64944aacc +--- /dev/null ++++ b/samples/mic/mpssd/mpssd.h +@@ -0,0 +1,103 @@ ++/* ++ * Intel MIC Platform Software Stack (MPSS) ++ * ++ * Copyright(c) 2013 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * The full GNU General Public License is included in this distribution in ++ * the file called "COPYING". ++ * ++ * Intel MIC User Space Tools. 
++ */ ++#ifndef _MPSSD_H_ ++#define _MPSSD_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define MICSYSFSDIR "/sys/class/mic" ++#define LOGFILE_NAME "/var/log/mpssd" ++#define PAGE_SIZE 4096 ++ ++struct mic_console_info { ++ pthread_t console_thread; ++ int virtio_console_fd; ++ void *console_dp; ++}; ++ ++struct mic_net_info { ++ pthread_t net_thread; ++ int virtio_net_fd; ++ int tap_fd; ++ void *net_dp; ++}; ++ ++struct mic_virtblk_info { ++ pthread_t block_thread; ++ int virtio_block_fd; ++ void *block_dp; ++ volatile sig_atomic_t signaled; ++ char *backend_file; ++ int backend; ++ void *backend_addr; ++ long backend_size; ++}; ++ ++struct mic_info { ++ int id; ++ char *name; ++ pthread_t config_thread; ++ pthread_t init_thread; ++ pid_t pid; ++ struct mic_console_info mic_console; ++ struct mic_net_info mic_net; ++ struct mic_virtblk_info mic_virtblk; ++ int restart; ++ int boot_on_resume; ++ struct mic_info *next; ++}; ++ ++__attribute__((format(printf, 1, 2))) ++void mpsslog(char *format, ...); ++char *readsysfs(char *dir, char *entry); ++int setsysfs(char *dir, char *entry, char *value); ++#endif +diff --git a/samples/mic/mpssd/sysfs.c b/samples/mic/mpssd/sysfs.c +new file mode 100644 +index 000000000000..8dd326936083 +--- /dev/null ++++ b/samples/mic/mpssd/sysfs.c +@@ -0,0 +1,102 @@ ++/* ++ * Intel MIC Platform Software Stack (MPSS) ++ * ++ * Copyright(c) 2013 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * The full GNU General Public License is included in this distribution in ++ * the file called "COPYING". ++ * ++ * Intel MIC User Space Tools. 
++ */ ++ ++#include "mpssd.h" ++ ++#define PAGE_SIZE 4096 ++ ++char * ++readsysfs(char *dir, char *entry) ++{ ++ char filename[PATH_MAX]; ++ char value[PAGE_SIZE]; ++ char *string = NULL; ++ int fd; ++ int len; ++ ++ if (dir == NULL) ++ snprintf(filename, PATH_MAX, "%s/%s", MICSYSFSDIR, entry); ++ else ++ snprintf(filename, PATH_MAX, ++ "%s/%s/%s", MICSYSFSDIR, dir, entry); ++ ++ fd = open(filename, O_RDONLY); ++ if (fd < 0) { ++ mpsslog("Failed to open sysfs entry '%s': %s\n", ++ filename, strerror(errno)); ++ return NULL; ++ } ++ ++ len = read(fd, value, sizeof(value)); ++ if (len < 0) { ++ mpsslog("Failed to read sysfs entry '%s': %s\n", ++ filename, strerror(errno)); ++ goto readsys_ret; ++ } ++ if (len == 0) ++ goto readsys_ret; ++ ++ value[len - 1] = '\0'; ++ ++ string = malloc(strlen(value) + 1); ++ if (string) ++ strcpy(string, value); ++ ++readsys_ret: ++ close(fd); ++ return string; ++} ++ ++int ++setsysfs(char *dir, char *entry, char *value) ++{ ++ char filename[PATH_MAX]; ++ char *oldvalue; ++ int fd, ret = 0; ++ ++ if (dir == NULL) ++ snprintf(filename, PATH_MAX, "%s/%s", MICSYSFSDIR, entry); ++ else ++ snprintf(filename, PATH_MAX, "%s/%s/%s", ++ MICSYSFSDIR, dir, entry); ++ ++ oldvalue = readsysfs(dir, entry); ++ ++ fd = open(filename, O_RDWR); ++ if (fd < 0) { ++ ret = errno; ++ mpsslog("Failed to open sysfs entry '%s': %s\n", ++ filename, strerror(errno)); ++ goto done; ++ } ++ ++ if (!oldvalue || strcmp(value, oldvalue)) { ++ if (write(fd, value, strlen(value)) < 0) { ++ ret = errno; ++ mpsslog("Failed to write new sysfs entry '%s': %s\n", ++ filename, strerror(errno)); ++ } ++ } ++ close(fd); ++done: ++ if (oldvalue) ++ free(oldvalue); ++ return ret; ++} +diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h +index 38ee70f3cd5b..1d8de9edd858 100644 +--- a/samples/seccomp/bpf-helper.h ++++ b/samples/seccomp/bpf-helper.h +@@ -138,7 +138,7 @@ union arg64 { + #define ARG_32(idx) \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)) + +-/* Loads hi into A and lo in X */ ++/* Loads lo into M[0] and hi into M[1] and A */ + #define ARG_64(idx) \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \ + BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \ +@@ -153,88 +153,107 @@ union arg64 { + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \ + jt + +-/* Checks the lo, then swaps to check the hi. A=lo,X=hi */ ++#define JA32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JGE32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JGT32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JLE32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ ++ jt ++ ++#define JLT32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ ++ jt ++ ++/* ++ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both ++ * A and M[1]. This invariant is kept by restoring A if necessary. 
++ */ + #define JEQ64(lo, hi, jt) \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ + BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (lo != arg.lo) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JNE64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (hi != arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo != arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ +- +-#define JA32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ +- jt ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JA64(lo, hi, jt) \ ++ /* if (hi & arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo & arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + +-#define JGE32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ +- jt +- +-#define JLT32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ +- jt +- +-/* Shortcut checking if hi > arg.hi. 
*/ + #define JGE64(lo, hi, jt) \ ++ /* if (hi > arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo >= arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ +- jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ +- +-#define JLT64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + +-#define JGT32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ +- jt +- +-#define JLE32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ +- jt +- +-/* Check hi > args.hi first, then do the GE checking */ + #define JGT64(lo, hi, jt) \ ++ /* if (hi > arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo > arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JLE64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (hi < arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo <= arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ ++ jt, \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) ++ ++#define JLT64(lo, hi, jt) \ ++ /* if (hi < arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo < arg.lo) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define LOAD_SYSCALL_NR \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \ +diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c +index 1d5acbe0c08b..86240d02b530 100644 +--- a/sound/core/seq/seq_fifo.c ++++ b/sound/core/seq/seq_fifo.c +@@ -135,6 +135,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f, + f->tail = cell; + if (f->head == NULL) + f->head = cell; ++ cell->next = NULL; + f->cells++; + spin_unlock_irqrestore(&f->lock, flags); + +@@ -214,6 +215,8 @@ void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f, + spin_lock_irqsave(&f->lock, flags); + cell->next = f->head; + f->head = cell; ++ if (!f->tail) ++ f->tail = cell; + f->cells++; + 
spin_unlock_irqrestore(&f->lock, flags); + } +diff --git a/sound/core/timer.c b/sound/core/timer.c +index ae4ea2e2e7fe..278a332f97bd 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -1700,9 +1700,21 @@ static int snd_timer_user_params(struct file *file, + return -EBADFD; + if (copy_from_user(&params, _params, sizeof(params))) + return -EFAULT; +- if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { +- err = -EINVAL; +- goto _end; ++ if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) { ++ u64 resolution; ++ ++ if (params.ticks < 1) { ++ err = -EINVAL; ++ goto _end; ++ } ++ ++ /* Don't allow resolution less than 1ms */ ++ resolution = snd_timer_resolution(tu->timeri); ++ resolution *= params.ticks; ++ if (resolution < 1000000) { ++ err = -EINVAL; ++ goto _end; ++ } + } + if (params.queue_size > 0 && + (params.queue_size < 32 || params.queue_size > 1024)) { +diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c +index 9667cbfb0ca2..ab4cdab5cfa5 100644 +--- a/sound/pci/ctxfi/cthw20k1.c ++++ b/sound/pci/ctxfi/cthw20k1.c +@@ -27,12 +27,6 @@ + #include "cthw20k1.h" + #include "ct20k1reg.h" + +-#if BITS_PER_LONG == 32 +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ +-#else +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ +-#endif +- + struct hw20k1 { + struct hw hw; + spinlock_t reg_20k1_lock; +@@ -1904,19 +1898,18 @@ static int hw_card_start(struct hw *hw) + { + int err; + struct pci_dev *pci = hw->pci; ++ const unsigned int dma_bits = BITS_PER_LONG; + + err = pci_enable_device(pci); + if (err < 0) + return err; + + /* Set DMA transfer mask */ +- if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 || +- dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) { +- dev_err(hw->card->dev, +- "architecture does not support PCI busmaster DMA with mask 0x%llx\n", +- CT_XFI_DMA_MASK); +- err = -ENXIO; +- goto error1; ++ if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); ++ } else { ++ dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); + } + + if (!hw->io_base) { +diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c +index 9dc2950e1ab7..d86678c2a957 100644 +--- a/sound/pci/ctxfi/cthw20k2.c ++++ b/sound/pci/ctxfi/cthw20k2.c +@@ -26,12 +26,6 @@ + #include "cthw20k2.h" + #include "ct20k2reg.h" + +-#if BITS_PER_LONG == 32 +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ +-#else +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ +-#endif +- + struct hw20k2 { + struct hw hw; + /* for i2c */ +@@ -2029,19 +2023,18 @@ static int hw_card_start(struct hw *hw) + int err = 0; + struct pci_dev *pci = hw->pci; + unsigned int gctl; ++ const unsigned int dma_bits = BITS_PER_LONG; + + err = pci_enable_device(pci); + if (err < 0) + return err; + + /* Set DMA transfer mask */ +- if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 || +- dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) { +- dev_err(hw->card->dev, +- "architecture does not support PCI busmaster DMA with mask 0x%llx\n", +- CT_XFI_DMA_MASK); +- err = -ENXIO; +- goto error1; ++ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); ++ } else { ++ dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); + } + + if (!hw->io_base) { +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index ad4a1e9a3ae1..8f3e5e9d8bdb 100644 +--- 
a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2208,9 +2208,9 @@ static const struct pci_device_id azx_ids[] = { + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, + /* Lewisburg */ + { PCI_DEVICE(0x8086, 0xa1f0), +- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + { PCI_DEVICE(0x8086, 0xa270), +- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Lynx Point-LP */ + { PCI_DEVICE(0x8086, 0x9c20), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 00c50d58f108..cf0785ddbd14 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -5560,6 +5560,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), + SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), ++ SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), +@@ -5674,6 +5675,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), ++ SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), +@@ -6047,6 +6049,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC298_STANDARD_PINS, + {0x17, 0x90170150}), ++ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_SPK_VOLUME, ++ {0x12, 0xb7a60140}, ++ {0x13, 0xb7a60150}, ++ {0x17, 0x90170110}, ++ {0x1a, 0x03011020}, ++ {0x21, 0x03211030}), + {} + }; + diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.53-54.patch b/patch/kernel/mvebu64-default/03-patch-4.4.53-54.patch new file mode 100644 index 000000000..ecbe89670 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.53-54.patch @@ -0,0 +1,1624 @@ +diff --git a/Makefile b/Makefile +index 10aec937e9e4..7f54ac081cf3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 53 ++SUBLEVEL = 54 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h +index c1ea67db8404..c61ed7890cef 100644 +--- a/arch/s390/include/asm/processor.h ++++ b/arch/s390/include/asm/processor.h +@@ -74,7 +74,8 @@ extern void execve_tail(void); + * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. + */ + +-#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) ++#define TASK_SIZE_OF(tsk) ((tsk)->mm ? 
\ ++ (tsk)->mm->context.asce_limit : TASK_MAX_SIZE) + #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ + (1UL << 30) : (1UL << 41)) + #define TASK_SIZE TASK_SIZE_OF(current) +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c +index 1f581eb61bc2..d097d71685df 100644 +--- a/arch/s390/kernel/setup.c ++++ b/arch/s390/kernel/setup.c +@@ -805,10 +805,10 @@ static void __init setup_randomness(void) + { + struct sysinfo_3_2_2 *vmms; + +- vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL); +- if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count) +- add_device_randomness(&vmms, vmms->count); +- free_page((unsigned long) vmms); ++ vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE); ++ if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) ++ add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); ++ memblock_free((unsigned long) vmms, PAGE_SIZE); + } + + /* +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c +index 575dc123bda2..23e3f5d77a24 100644 +--- a/arch/s390/kvm/kvm-s390.c ++++ b/arch/s390/kvm/kvm-s390.c +@@ -295,6 +295,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, + struct kvm_memory_slot *memslot; + int is_dirty = 0; + ++ if (kvm_is_ucontrol(kvm)) ++ return -EINVAL; ++ + mutex_lock(&kvm->slots_lock); + + r = -EINVAL; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 64f60a48def1..3a7ae80dc49d 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -3499,7 +3499,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save) + } + + vmcs_write16(sf->selector, var.selector); +- vmcs_write32(sf->base, var.base); ++ vmcs_writel(sf->base, var.base); + vmcs_write32(sf->limit, var.limit); + vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); + } +@@ -7905,7 +7905,7 @@ static void kvm_flush_pml_buffers(struct kvm *kvm) + static void vmx_dump_sel(char *name, uint32_t sel) + { + pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", +- name, vmcs_read32(sel), ++ name, vmcs_read16(sel), + vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), + vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), + vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); +diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c +index 9735691f37f1..49ccbd9022f6 100644 +--- a/arch/xtensa/kernel/setup.c ++++ b/arch/xtensa/kernel/setup.c +@@ -133,6 +133,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag) + + __tagtable(BP_TAG_INITRD, parse_tag_initrd); + ++#endif /* CONFIG_BLK_DEV_INITRD */ ++ + #ifdef CONFIG_OF + + static int __init parse_tag_fdt(const bp_tag_t *tag) +@@ -145,8 +147,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt); + + #endif /* CONFIG_OF */ + +-#endif /* CONFIG_BLK_DEV_INITRD */ +- + static int __init parse_tag_cmdline(const bp_tag_t* tag) + { + strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE); +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c +index 0beaa52df66b..5df8e1234505 100644 +--- a/drivers/bluetooth/ath3k.c ++++ b/drivers/bluetooth/ath3k.c +@@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_table[] = { + { USB_DEVICE(0x04CA, 0x300f) }, + { USB_DEVICE(0x04CA, 0x3010) }, + { USB_DEVICE(0x04CA, 0x3014) }, ++ { USB_DEVICE(0x04CA, 0x3018) }, + { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0930, 0x021c) }, + { USB_DEVICE(0x0930, 0x0220) }, +@@ -160,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { + { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3010), .driver_info = 
BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index c306b483de60..cd6b141b9825 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -208,6 +208,7 @@ static const struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, +diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +index c161eeda417b..267749a94c5a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +@@ -3704,9 +3704,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, + default: + encoder->possible_crtcs = 0x3; + break; ++ case 3: ++ encoder->possible_crtcs = 0x7; ++ break; + case 4: + encoder->possible_crtcs = 0xf; + break; ++ case 5: ++ encoder->possible_crtcs = 0x1f; ++ break; + case 6: + encoder->possible_crtcs = 0x3f; + break; +diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c +index 810c51d92b99..30672a3df8a9 100644 +--- a/drivers/gpu/drm/ast/ast_post.c ++++ b/drivers/gpu/drm/ast/ast_post.c +@@ -58,13 +58,9 @@ bool ast_is_vga_enabled(struct drm_device *dev) + /* TODO 1180 */ + } else { + ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT); +- if (ch) { +- ast_open_key(ast); +- ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff); +- return ch & 0x04; +- } ++ return !!(ch & 0x01); + } +- return 0; ++ return false; + } + + static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; +@@ -375,8 +371,8 @@ void ast_post_gpu(struct drm_device *dev) + pci_write_config_dword(ast->dev->pdev, 0x04, reg); + + ast_enable_vga(dev); +- ast_enable_mmio(dev); + ast_open_key(ast); ++ ast_enable_mmio(dev); + ast_set_def_ext_reg(dev); + + if (ast->chip == AST2300 || ast->chip == AST2400) +@@ -1630,12 +1626,44 @@ static void ast_init_dram_2300(struct drm_device *dev) + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + ++ param.dram_freq = 396; + param.dram_type = AST_DDR3; ++ temp = ast_mindwm(ast, 0x1e6e2070); + if (temp & 0x01000000) + param.dram_type = AST_DDR2; +- param.dram_chipid = ast->dram_type; +- param.dram_freq = ast->mclk; +- param.vram_size = ast->vram_size; ++ switch (temp & 0x18000000) { ++ case 0: ++ param.dram_chipid = AST_DRAM_512Mx16; ++ break; ++ default: ++ case 0x08000000: ++ param.dram_chipid = AST_DRAM_1Gx16; ++ break; ++ case 0x10000000: ++ param.dram_chipid = AST_DRAM_2Gx16; ++ break; ++ case 0x18000000: ++ param.dram_chipid = AST_DRAM_4Gx16; ++ break; ++ } ++ switch (temp & 0x0c) { ++ default: ++ case 0x00: ++ param.vram_size = AST_VIDMEM_SIZE_8M; ++ break; ++ ++ case 0x04: ++ param.vram_size = AST_VIDMEM_SIZE_16M; ++ break; ++ ++ case 0x08: ++ param.vram_size = AST_VIDMEM_SIZE_32M; ++ break; ++ ++ case 0x0c: ++ param.vram_size = AST_VIDMEM_SIZE_64M; ++ break; ++ } + + if 
(param.dram_type == AST_DDR3) { + get_ddr3_info(ast, ¶m); +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c +index 1ac29d703c12..ea443fafb934 100644 +--- a/drivers/gpu/drm/drm_atomic_helper.c ++++ b/drivers/gpu/drm/drm_atomic_helper.c +@@ -265,7 +265,7 @@ mode_fixup(struct drm_atomic_state *state) + struct drm_connector *connector; + struct drm_connector_state *conn_state; + int i; +- bool ret; ++ int ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (!crtc_state->mode_changed && +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index 8c9ac021608f..cc1e16fd7e76 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -144,6 +144,9 @@ static struct edid_quirk { + + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, ++ ++ /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/ ++ { "ETR", 13896, EDID_QUIRK_FORCE_8BPC }, + }; + + /* +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 3f802163f7d4..e7c18519274a 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -6803,7 +6803,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) + + static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) + { +- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); ++ u32 val; ++ ++ /* ++ * On driver load, a pipe may be active and driving a DSI display. ++ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck ++ * (and never recovering) in this case. intel_dsi_post_disable() will ++ * clear it when we turn off the display. ++ */ ++ val = I915_READ(DSPCLK_GATE_D); ++ val &= DPOUNIT_CLOCK_GATE_DISABLE; ++ val |= VRHUNIT_CLOCK_GATE_DISABLE; ++ I915_WRITE(DSPCLK_GATE_D, val); + + /* + * Disable trickle feed and enable pnd deadline calculation +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c +index 4ae8b56b1847..037c38bb5333 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo.c ++++ b/drivers/gpu/drm/ttm/ttm_bo.c +@@ -1621,7 +1621,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) + struct ttm_buffer_object *bo; + int ret = -EBUSY; + int put_count; +- uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); + + spin_lock(&glob->lru_lock); + list_for_each_entry(bo, &glob->swap_lru, swap) { +@@ -1657,7 +1656,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) + if (unlikely(ret != 0)) + goto out; + +- if ((bo->mem.placement & swap_placement) != swap_placement) { ++ if (bo->mem.mem_type != TTM_PL_SYSTEM || ++ bo->ttm->caching_state != tt_cached) { + struct ttm_mem_reg evict_mem; + + evict_mem = bo->mem; +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index 89fd0113aa5c..57c191798699 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -219,7 +219,7 @@ int hv_init(void) + /* See if the hypercall page is already set */ + rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + +- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC); ++ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX); + + if (!virtaddr) + goto cleanup; +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +index 3ba7de5f9379..2018d24344de 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +@@ -1488,12 +1488,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, + + ret = ipoib_set_mode(dev, 
buf); + +- rtnl_unlock(); +- +- if (!ret) +- return count; ++ /* The assumption is that the function ipoib_set_mode returned ++ * with the rtnl held by it, if not the value -EBUSY returned, ++ * then no need to rtnl_unlock ++ */ ++ if (ret != -EBUSY) ++ rtnl_unlock(); + +- return ret; ++ return (!ret || ret == -EBUSY) ? count : ret; + } + + static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode); +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index 8a4d10452d61..8efcff1beb8f 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -464,8 +464,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) + priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; + + ipoib_flush_paths(dev); +- rtnl_lock(); +- return 0; ++ return (!rtnl_trylock()) ? -EBUSY : 0; + } + + if (!strcmp(buf, "datagram\n")) { +@@ -474,8 +473,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) + dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); + rtnl_unlock(); + ipoib_flush_paths(dev); +- rtnl_lock(); +- return 0; ++ return (!rtnl_trylock()) ? -EBUSY : 0; + } + + return -EINVAL; +@@ -628,6 +626,14 @@ void ipoib_mark_paths_invalid(struct net_device *dev) + spin_unlock_irq(&priv->lock); + } + ++static void push_pseudo_header(struct sk_buff *skb, const char *daddr) ++{ ++ struct ipoib_pseudo_header *phdr; ++ ++ phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr)); ++ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); ++} ++ + void ipoib_flush_paths(struct net_device *dev) + { + struct ipoib_dev_priv *priv = netdev_priv(dev); +@@ -852,8 +858,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, + } + if (skb_queue_len(&neigh->queue) < + IPOIB_MAX_PATH_REC_QUEUE) { +- /* put pseudoheader back on for next time */ +- skb_push(skb, IPOIB_PSEUDO_LEN); ++ push_pseudo_header(skb, neigh->daddr); + __skb_queue_tail(&neigh->queue, skb); + } else { + ipoib_warn(priv, "queue length limit %d. 
Packet drop.\n", +@@ -871,10 +876,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, + + if (!path->query && path_rec_start(dev, path)) + goto err_path; +- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) ++ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { ++ push_pseudo_header(skb, neigh->daddr); + __skb_queue_tail(&neigh->queue, skb); +- else ++ } else { + goto err_drop; ++ } + } + + spin_unlock_irqrestore(&priv->lock, flags); +@@ -910,8 +917,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, + } + if (path) { + if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { +- /* put pseudoheader back on for next time */ +- skb_push(skb, IPOIB_PSEUDO_LEN); ++ push_pseudo_header(skb, phdr->hwaddr); + __skb_queue_tail(&path->queue, skb); + } else { + ++dev->stats.tx_dropped; +@@ -943,8 +949,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, + return; + } else if ((path->query || !path_rec_start(dev, path)) && + skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { +- /* put pseudoheader back on for next time */ +- skb_push(skb, IPOIB_PSEUDO_LEN); ++ push_pseudo_header(skb, phdr->hwaddr); + __skb_queue_tail(&path->queue, skb); + } else { + ++dev->stats.tx_dropped; +@@ -1025,8 +1030,7 @@ send_using_neigh: + } + + if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { +- /* put pseudoheader back on for next time */ +- skb_push(skb, sizeof(*phdr)); ++ push_pseudo_header(skb, phdr->hwaddr); + spin_lock_irqsave(&priv->lock, flags); + __skb_queue_tail(&neigh->queue, skb); + spin_unlock_irqrestore(&priv->lock, flags); +@@ -1058,7 +1062,6 @@ static int ipoib_hard_header(struct sk_buff *skb, + unsigned short type, + const void *daddr, const void *saddr, unsigned len) + { +- struct ipoib_pseudo_header *phdr; + struct ipoib_header *header; + + header = (struct ipoib_header *) skb_push(skb, sizeof *header); +@@ -1071,8 +1074,7 @@ static int ipoib_hard_header(struct sk_buff *skb, + * destination address into skb hard header so we can figure out where + * to send the packet later. 
+ */ +- phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr)); +- memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); ++ push_pseudo_header(skb, daddr); + + return IPOIB_HARD_LEN; + } +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c +index 5f0f4fc58f43..e397f1b0af09 100644 +--- a/drivers/infiniband/ulp/srp/ib_srp.c ++++ b/drivers/infiniband/ulp/srp/ib_srp.c +@@ -1787,17 +1787,24 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) + if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += be32_to_cpu(rsp->req_lim_delta); ++ if (rsp->tag == ch->tsk_mgmt_tag) { ++ ch->tsk_mgmt_status = -1; ++ if (be32_to_cpu(rsp->resp_data_len) >= 4) ++ ch->tsk_mgmt_status = rsp->data[3]; ++ complete(&ch->tsk_mgmt_done); ++ } else { ++ shost_printk(KERN_ERR, target->scsi_host, ++ "Received tsk mgmt response too late for tag %#llx\n", ++ rsp->tag); ++ } + spin_unlock_irqrestore(&ch->lock, flags); +- +- ch->tsk_mgmt_status = -1; +- if (be32_to_cpu(rsp->resp_data_len) >= 4) +- ch->tsk_mgmt_status = rsp->data[3]; +- complete(&ch->tsk_mgmt_done); + } else { + scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); +- if (scmnd) { ++ if (scmnd && scmnd->host_scribble) { + req = (void *)scmnd->host_scribble; + scmnd = srp_claim_req(ch, req, NULL, scmnd); ++ } else { ++ scmnd = NULL; + } + if (!scmnd) { + shost_printk(KERN_ERR, target->scsi_host, +@@ -2469,19 +2476,18 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth) + } + + static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, +- u8 func) ++ u8 func, u8 *status) + { + struct srp_target_port *target = ch->target; + struct srp_rport *rport = target->rport; + struct ib_device *dev = target->srp_host->srp_dev->dev; + struct srp_iu *iu; + struct srp_tsk_mgmt *tsk_mgmt; ++ int res; + + if (!ch->connected || target->qp_in_error) + return -1; + +- init_completion(&ch->tsk_mgmt_done); +- + /* + * Lock the rport mutex to avoid that srp_create_ch_ib() is + * invoked while a task management function is being sent. +@@ -2504,10 +2510,16 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, + + tsk_mgmt->opcode = SRP_TSK_MGMT; + int_to_scsilun(lun, &tsk_mgmt->lun); +- tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; + tsk_mgmt->tsk_mgmt_func = func; + tsk_mgmt->task_tag = req_tag; + ++ spin_lock_irq(&ch->lock); ++ ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT; ++ tsk_mgmt->tag = ch->tsk_mgmt_tag; ++ spin_unlock_irq(&ch->lock); ++ ++ init_completion(&ch->tsk_mgmt_done); ++ + ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, + DMA_TO_DEVICE); + if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { +@@ -2516,13 +2528,15 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, + + return -1; + } ++ res = wait_for_completion_timeout(&ch->tsk_mgmt_done, ++ msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)); ++ if (res > 0 && status) ++ *status = ch->tsk_mgmt_status; + mutex_unlock(&rport->mutex); + +- if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, +- msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) +- return -1; ++ WARN_ON_ONCE(res < 0); + +- return 0; ++ return res > 0 ? 
0 : -1; + } + + static int srp_abort(struct scsi_cmnd *scmnd) +@@ -2548,7 +2562,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) + shost_printk(KERN_ERR, target->scsi_host, + "Sending SRP abort for tag %#x\n", tag); + if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, +- SRP_TSK_ABORT_TASK) == 0) ++ SRP_TSK_ABORT_TASK, NULL) == 0) + ret = SUCCESS; + else if (target->rport->state == SRP_RPORT_LOST) + ret = FAST_IO_FAIL; +@@ -2566,14 +2580,15 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) + struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_rdma_ch *ch; + int i; ++ u8 status; + + shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); + + ch = &target->ch[0]; + if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, +- SRP_TSK_LUN_RESET)) ++ SRP_TSK_LUN_RESET, &status)) + return FAILED; +- if (ch->tsk_mgmt_status) ++ if (status) + return FAILED; + + for (i = 0; i < target->ch_count; i++) { +diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h +index f6af531f9f32..109eea94d0f9 100644 +--- a/drivers/infiniband/ulp/srp/ib_srp.h ++++ b/drivers/infiniband/ulp/srp/ib_srp.h +@@ -168,6 +168,7 @@ struct srp_rdma_ch { + int max_ti_iu_len; + int comp_vector; + ++ u64 tsk_mgmt_tag; + struct completion tsk_mgmt_done; + u8 tsk_mgmt_status; + bool connected; +diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c +index 25aba9886990..0e67145bc418 100644 +--- a/drivers/net/ethernet/marvell/mvpp2.c ++++ b/drivers/net/ethernet/marvell/mvpp2.c +@@ -993,7 +993,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, + txq_pcpu->buffs + txq_pcpu->txq_put_index; + tx_buf->skb = skb; + tx_buf->size = tx_desc->data_size; +- tx_buf->phys = tx_desc->buf_phys_addr; ++ tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset; + txq_pcpu->txq_put_index++; + if (txq_pcpu->txq_put_index == txq_pcpu->size) + txq_pcpu->txq_put_index = 0; +diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c +index 860d4aed8274..43617ded3773 100644 +--- a/drivers/net/ieee802154/fakelb.c ++++ b/drivers/net/ieee802154/fakelb.c +@@ -30,7 +30,7 @@ + static int numlbs = 2; + + static LIST_HEAD(fakelb_phys); +-static DEFINE_SPINLOCK(fakelb_phys_lock); ++static DEFINE_MUTEX(fakelb_phys_lock); + + static LIST_HEAD(fakelb_ifup_phys); + static DEFINE_RWLOCK(fakelb_ifup_phys_lock); +@@ -180,9 +180,9 @@ static int fakelb_add_one(struct device *dev) + if (err) + goto err_reg; + +- spin_lock(&fakelb_phys_lock); ++ mutex_lock(&fakelb_phys_lock); + list_add_tail(&phy->list, &fakelb_phys); +- spin_unlock(&fakelb_phys_lock); ++ mutex_unlock(&fakelb_phys_lock); + + return 0; + +@@ -214,10 +214,10 @@ static int fakelb_probe(struct platform_device *pdev) + return 0; + + err_slave: +- spin_lock(&fakelb_phys_lock); ++ mutex_lock(&fakelb_phys_lock); + list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) + fakelb_del(phy); +- spin_unlock(&fakelb_phys_lock); ++ mutex_unlock(&fakelb_phys_lock); + return err; + } + +@@ -225,10 +225,10 @@ static int fakelb_remove(struct platform_device *pdev) + { + struct fakelb_phy *phy, *tmp; + +- spin_lock(&fakelb_phys_lock); ++ mutex_lock(&fakelb_phys_lock); + list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) + fakelb_del(phy); +- spin_unlock(&fakelb_phys_lock); ++ mutex_unlock(&fakelb_phys_lock); + return 0; + } + +diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c +index 117fccf7934a..01a6a83f625d 100644 +--- 
a/drivers/pwm/pwm-pca9685.c ++++ b/drivers/pwm/pwm-pca9685.c +@@ -65,7 +65,6 @@ + #define PCA9685_MAXCHAN 0x10 + + #define LED_FULL (1 << 4) +-#define MODE1_RESTART (1 << 7) + #define MODE1_SLEEP (1 << 4) + #define MODE2_INVRT (1 << 4) + #define MODE2_OUTDRV (1 << 2) +@@ -117,16 +116,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + udelay(500); + + pca->period_ns = period_ns; +- +- /* +- * If the duty cycle did not change, restart PWM with +- * the same duty cycle to period ratio and return. +- */ +- if (duty_ns == pca->duty_ns) { +- regmap_update_bits(pca->regmap, PCA9685_MODE1, +- MODE1_RESTART, 0x1); +- return 0; +- } + } else { + dev_err(chip->dev, + "prescaler not set: period out of bounds!\n"); +diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c +index 94a8f4ab57bc..ae1dc37e4068 100644 +--- a/drivers/s390/block/dcssblk.c ++++ b/drivers/s390/block/dcssblk.c +@@ -892,7 +892,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum, + dev_info = bdev->bd_disk->private_data; + if (!dev_info) + return -ENODEV; +- dev_sz = dev_info->end - dev_info->start; ++ dev_sz = dev_info->end - dev_info->start + 1; + offset = secnum * 512; + addr = (void *) (dev_info->start + offset); + *pfn = virt_to_phys(addr) >> PAGE_SHIFT; +diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c +index 5d06253c2a7a..30e9fbbff051 100644 +--- a/drivers/s390/cio/qdio_thinint.c ++++ b/drivers/s390/cio/qdio_thinint.c +@@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) + struct qdio_q *q; + int i; + +- for_each_input_queue(irq, q, i) { +- if (!references_shared_dsci(irq) && +- has_multiple_inq_on_dsci(irq)) +- xchg(q->irq_ptr->dsci, 0); ++ if (!references_shared_dsci(irq) && ++ has_multiple_inq_on_dsci(irq)) ++ xchg(irq->dsci, 0); + ++ for_each_input_queue(irq, q, i) { + if (q->u.in.queue_start_poll) { + /* skip if polling is enabled or already in work */ + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 356c80fbb304..bb6a6c35324a 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -77,12 +77,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) + &deve->read_bytes); + + se_lun = rcu_dereference(deve->se_lun); ++ ++ if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { ++ se_lun = NULL; ++ goto out_unlock; ++ } ++ + se_cmd->se_lun = rcu_dereference(deve->se_lun); + se_cmd->pr_res_key = deve->pr_res_key; + se_cmd->orig_fe_lun = unpacked_lun; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; +- +- percpu_ref_get(&se_lun->lun_ref); + se_cmd->lun_ref_active = true; + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && +@@ -96,6 +100,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) + goto ref_dev; + } + } ++out_unlock: + rcu_read_unlock(); + + if (!se_lun) { +@@ -826,6 +831,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) + xcopy_lun = &dev->xcopy_lun; + rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); + init_completion(&xcopy_lun->lun_ref_comp); ++ init_completion(&xcopy_lun->lun_shutdown_comp); + INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); + INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); + mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); +diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c +index 028854cda97b..2794c6ec5c3c 100644 +--- a/drivers/target/target_core_tpg.c ++++ 
b/drivers/target/target_core_tpg.c +@@ -539,7 +539,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref) + { + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + +- complete(&lun->lun_ref_comp); ++ complete(&lun->lun_shutdown_comp); + } + + int core_tpg_register( +@@ -666,6 +666,7 @@ struct se_lun *core_tpg_alloc_lun( + lun->lun_link_magic = SE_LUN_LINK_MAGIC; + atomic_set(&lun->lun_acl_count, 0); + init_completion(&lun->lun_ref_comp); ++ init_completion(&lun->lun_shutdown_comp); + INIT_LIST_HEAD(&lun->lun_deve_list); + INIT_LIST_HEAD(&lun->lun_dev_link); + atomic_set(&lun->lun_tg_pt_secondary_offline, 0); +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index befe22744802..df2059984e14 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -2680,10 +2680,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) + } + EXPORT_SYMBOL(target_wait_for_sess_cmds); + ++static void target_lun_confirm(struct percpu_ref *ref) ++{ ++ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); ++ ++ complete(&lun->lun_ref_comp); ++} ++ + void transport_clear_lun_ref(struct se_lun *lun) + { +- percpu_ref_kill(&lun->lun_ref); ++ /* ++ * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop ++ * the initial reference and schedule confirm kill to be ++ * executed after one full RCU grace period has completed. ++ */ ++ percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm); ++ /* ++ * The first completion waits for percpu_ref_switch_to_atomic_rcu() ++ * to call target_lun_confirm after lun->lun_ref has been marked ++ * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t ++ * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref ++ * fails for all new incoming I/O. ++ */ + wait_for_completion(&lun->lun_ref_comp); ++ /* ++ * The second completion waits for percpu_ref_put_many() to ++ * invoke ->release() after lun->lun_ref has switched to ++ * atomic_t mode, and lun->lun_ref.count has reached zero. ++ * ++ * At this point all target-core lun->lun_ref references have ++ * been dropped via transport_lun_remove_cmd(), and it's safe ++ * to proceed with the remaining LUN shutdown. 
++ */ ++ wait_for_completion(&lun->lun_shutdown_comp); + } + + static bool +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c +index 644ddb841d9f..6d1e2f746ab4 100644 +--- a/drivers/tty/n_hdlc.c ++++ b/drivers/tty/n_hdlc.c +@@ -114,7 +114,7 @@ + #define DEFAULT_TX_BUF_COUNT 3 + + struct n_hdlc_buf { +- struct n_hdlc_buf *link; ++ struct list_head list_item; + int count; + char buf[1]; + }; +@@ -122,8 +122,7 @@ struct n_hdlc_buf { + #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) + + struct n_hdlc_buf_list { +- struct n_hdlc_buf *head; +- struct n_hdlc_buf *tail; ++ struct list_head list; + int count; + spinlock_t spinlock; + }; +@@ -136,7 +135,6 @@ struct n_hdlc_buf_list { + * @backup_tty - TTY to use if tty gets closed + * @tbusy - reentrancy flag for tx wakeup code + * @woke_up - FIXME: describe this field +- * @tbuf - currently transmitting tx buffer + * @tx_buf_list - list of pending transmit frame buffers + * @rx_buf_list - list of received frame buffers + * @tx_free_buf_list - list unused transmit frame buffers +@@ -149,7 +147,6 @@ struct n_hdlc { + struct tty_struct *backup_tty; + int tbusy; + int woke_up; +- struct n_hdlc_buf *tbuf; + struct n_hdlc_buf_list tx_buf_list; + struct n_hdlc_buf_list rx_buf_list; + struct n_hdlc_buf_list tx_free_buf_list; +@@ -159,7 +156,8 @@ struct n_hdlc { + /* + * HDLC buffer list manipulation functions + */ +-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list); ++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list, ++ struct n_hdlc_buf *buf); + static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, + struct n_hdlc_buf *buf); + static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); +@@ -209,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty) + { + struct n_hdlc *n_hdlc = tty2n_hdlc(tty); + struct n_hdlc_buf *buf; +- unsigned long flags; + + while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) + n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); +- spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags); +- if (n_hdlc->tbuf) { +- n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf); +- n_hdlc->tbuf = NULL; +- } +- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); + } + + static struct tty_ldisc_ops n_hdlc_ldisc = { +@@ -284,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc) + } else + break; + } +- kfree(n_hdlc->tbuf); + kfree(n_hdlc); + + } /* end of n_hdlc_release() */ +@@ -403,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + n_hdlc->woke_up = 0; + spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); + +- /* get current transmit buffer or get new transmit */ +- /* buffer from list of pending transmit buffers */ +- +- tbuf = n_hdlc->tbuf; +- if (!tbuf) +- tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); +- ++ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); + while (tbuf) { + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)sending frame %p, count=%d\n", +@@ -421,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + + /* rollback was possible and has been done */ + if (actual == -ERESTARTSYS) { +- n_hdlc->tbuf = tbuf; ++ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); + break; + } + /* if transmit error, throw frame away by */ +@@ -436,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + + /* free current transmit buffer */ + n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); +- +- /* this tx buffer is done */ +- n_hdlc->tbuf = NULL; 
+- ++ + /* wait up sleeping writers */ + wake_up_interruptible(&tty->write_wait); + +@@ -449,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)frame %p pending\n", + __FILE__,__LINE__,tbuf); +- +- /* buffer not accepted by driver */ +- /* set this buffer as pending buffer */ +- n_hdlc->tbuf = tbuf; ++ ++ /* ++ * the buffer was not accepted by driver, ++ * return it back into tx queue ++ */ ++ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); + break; + } + } +@@ -750,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + int error = 0; + int count; + unsigned long flags; +- ++ struct n_hdlc_buf *buf = NULL; ++ + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", + __FILE__,__LINE__,cmd); +@@ -764,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + /* report count of read data available */ + /* in next available frame (if any) */ + spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); +- if (n_hdlc->rx_buf_list.head) +- count = n_hdlc->rx_buf_list.head->count; ++ buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list, ++ struct n_hdlc_buf, list_item); ++ if (buf) ++ count = buf->count; + else + count = 0; + spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); +@@ -777,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + count = tty_chars_in_buffer(tty); + /* add size of next output frame in queue */ + spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); +- if (n_hdlc->tx_buf_list.head) +- count += n_hdlc->tx_buf_list.head->count; ++ buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list, ++ struct n_hdlc_buf, list_item); ++ if (buf) ++ count += buf->count; + spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); + error = put_user(count, (int __user *)arg); + break; +@@ -826,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, + poll_wait(filp, &tty->write_wait, wait); + + /* set bits for operations that won't block */ +- if (n_hdlc->rx_buf_list.head) ++ if (!list_empty(&n_hdlc->rx_buf_list.list)) + mask |= POLLIN | POLLRDNORM; /* readable */ + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) + mask |= POLLHUP; + if (tty_hung_up_p(filp)) + mask |= POLLHUP; + if (!tty_is_writelocked(tty) && +- n_hdlc->tx_free_buf_list.head) ++ !list_empty(&n_hdlc->tx_free_buf_list.list)) + mask |= POLLOUT | POLLWRNORM; /* writable */ + } + return mask; +@@ -853,11 +841,16 @@ static struct n_hdlc *n_hdlc_alloc(void) + if (!n_hdlc) + return NULL; + +- n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list); +- n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list); +- n_hdlc_buf_list_init(&n_hdlc->rx_buf_list); +- n_hdlc_buf_list_init(&n_hdlc->tx_buf_list); +- ++ spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock); ++ spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); ++ spin_lock_init(&n_hdlc->rx_buf_list.spinlock); ++ spin_lock_init(&n_hdlc->tx_buf_list.spinlock); ++ ++ INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list); ++ INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list); ++ INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list); ++ INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list); ++ + /* allocate free rx buffer list */ + for(i=0;ispinlock); +-} /* end of n_hdlc_buf_list_init() */ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&buf_list->spinlock, flags); ++ ++ list_add(&buf->list_item, &buf_list->list); ++ buf_list->count++; ++ ++ 
spin_unlock_irqrestore(&buf_list->spinlock, flags); ++} + + /** + * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list +- * @list - pointer to buffer list ++ * @buf_list - pointer to buffer list + * @buf - pointer to buffer + */ +-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, ++static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list, + struct n_hdlc_buf *buf) + { + unsigned long flags; +- spin_lock_irqsave(&list->spinlock,flags); +- +- buf->link=NULL; +- if (list->tail) +- list->tail->link = buf; +- else +- list->head = buf; +- list->tail = buf; +- (list->count)++; +- +- spin_unlock_irqrestore(&list->spinlock,flags); +- ++ ++ spin_lock_irqsave(&buf_list->spinlock, flags); ++ ++ list_add_tail(&buf->list_item, &buf_list->list); ++ buf_list->count++; ++ ++ spin_unlock_irqrestore(&buf_list->spinlock, flags); + } /* end of n_hdlc_buf_put() */ + + /** + * n_hdlc_buf_get - remove and return an HDLC buffer from list +- * @list - pointer to HDLC buffer list ++ * @buf_list - pointer to HDLC buffer list + * + * Remove and return an HDLC buffer from the head of the specified HDLC buffer + * list. + * Returns a pointer to HDLC buffer if available, otherwise %NULL. + */ +-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list) ++static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list) + { + unsigned long flags; + struct n_hdlc_buf *buf; +- spin_lock_irqsave(&list->spinlock,flags); +- +- buf = list->head; ++ ++ spin_lock_irqsave(&buf_list->spinlock, flags); ++ ++ buf = list_first_entry_or_null(&buf_list->list, ++ struct n_hdlc_buf, list_item); + if (buf) { +- list->head = buf->link; +- (list->count)--; ++ list_del(&buf->list_item); ++ buf_list->count--; + } +- if (!list->head) +- list->tail = NULL; +- +- spin_unlock_irqrestore(&list->spinlock,flags); ++ ++ spin_unlock_irqrestore(&buf_list->spinlock, flags); + return buf; +- + } /* end of n_hdlc_buf_get() */ + + static char hdlc_banner[] __initdata = +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 029de3f99752..5b24ffd93649 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -2880,6 +2880,8 @@ enum pci_board_num_t { + pbn_b0_4_1152000_200, + pbn_b0_8_1152000_200, + ++ pbn_b0_4_1250000, ++ + pbn_b0_2_1843200, + pbn_b0_4_1843200, + +@@ -3113,6 +3115,13 @@ static struct pciserial_board pci_boards[] = { + .uart_offset = 0x200, + }, + ++ [pbn_b0_4_1250000] = { ++ .flags = FL_BASE0, ++ .num_ports = 4, ++ .base_baud = 1250000, ++ .uart_offset = 8, ++ }, ++ + [pbn_b0_2_1843200] = { + .flags = FL_BASE0, + .num_ports = 2, +@@ -5778,6 +5787,10 @@ static struct pci_device_id serial_pci_tbl[] = { + { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 }, + { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 }, + ++ /* MKS Tenta SCOM-080x serial cards */ ++ { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, ++ { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, ++ + /* + * These entries match devices with class COMMUNICATION_SERIAL, + * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c +index 239bc9cba28c..f54f77037d22 100644 +--- a/fs/ceph/mds_client.c ++++ b/fs/ceph/mds_client.c +@@ -644,6 +644,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc, + { + dout("__unregister_request %p tid %lld\n", req, req->r_tid); + ++ /* Never leave an unregistered request on an unsafe list! 
*/ ++ list_del_init(&req->r_unsafe_item); ++ + if (req->r_tid == mdsc->oldest_tid) { + struct rb_node *p = rb_next(&req->r_node); + mdsc->oldest_tid = 0; +@@ -1051,7 +1054,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc, + while (!list_empty(&session->s_unsafe)) { + req = list_first_entry(&session->s_unsafe, + struct ceph_mds_request, r_unsafe_item); +- list_del_init(&req->r_unsafe_item); + pr_warn_ratelimited(" dropping unsafe request %llu\n", + req->r_tid); + __unregister_request(mdsc, req); +@@ -2477,7 +2479,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) + * useful we could do with a revised return value. + */ + dout("got safe reply %llu, mds%d\n", tid, mds); +- list_del_init(&req->r_unsafe_item); + + /* last unsafe request during umount? */ + if (mdsc->stopping && !__get_oldest_req(mdsc)) +diff --git a/fs/fat/inode.c b/fs/fat/inode.c +index 509411dd3698..cf644d52c0cf 100644 +--- a/fs/fat/inode.c ++++ b/fs/fat/inode.c +@@ -1269,6 +1269,16 @@ out: + return 0; + } + ++static void fat_dummy_inode_init(struct inode *inode) ++{ ++ /* Initialize this dummy inode to work as no-op. */ ++ MSDOS_I(inode)->mmu_private = 0; ++ MSDOS_I(inode)->i_start = 0; ++ MSDOS_I(inode)->i_logstart = 0; ++ MSDOS_I(inode)->i_attrs = 0; ++ MSDOS_I(inode)->i_pos = 0; ++} ++ + static int fat_read_root(struct inode *inode) + { + struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); +@@ -1713,12 +1723,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, + fat_inode = new_inode(sb); + if (!fat_inode) + goto out_fail; +- MSDOS_I(fat_inode)->i_pos = 0; ++ fat_dummy_inode_init(fat_inode); + sbi->fat_inode = fat_inode; + + fsinfo_inode = new_inode(sb); + if (!fsinfo_inode) + goto out_fail; ++ fat_dummy_inode_init(fsinfo_inode); + fsinfo_inode->i_ino = MSDOS_FSINFO_INO; + sbi->fsinfo_inode = fsinfo_inode; + insert_inode_hash(fsinfo_inode); +diff --git a/fs/mount.h b/fs/mount.h +index 14db05d424f7..3dc7dea5a357 100644 +--- a/fs/mount.h ++++ b/fs/mount.h +@@ -86,7 +86,6 @@ static inline int is_mounted(struct vfsmount *mnt) + } + + extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *); +-extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *); + + extern int __legitimize_mnt(struct vfsmount *, unsigned); + extern bool legitimize_mnt(struct vfsmount *, unsigned); +diff --git a/fs/namespace.c b/fs/namespace.c +index da98a1bbd8b5..7df3d406d3e0 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -638,28 +638,6 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) + } + + /* +- * find the last mount at @dentry on vfsmount @mnt. +- * mount_lock must be held. +- */ +-struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) +-{ +- struct mount *p, *res = NULL; +- p = __lookup_mnt(mnt, dentry); +- if (!p) +- goto out; +- if (!(p->mnt.mnt_flags & MNT_UMOUNT)) +- res = p; +- hlist_for_each_entry_continue(p, mnt_hash) { +- if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) +- break; +- if (!(p->mnt.mnt_flags & MNT_UMOUNT)) +- res = p; +- } +-out: +- return res; +-} +- +-/* + * lookup_mnt - Return the first child mount mounted at path + * + * "First" means first mounted chronologically. 
If you create the +@@ -879,6 +857,13 @@ void mnt_set_mountpoint(struct mount *mnt, + hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list); + } + ++static void __attach_mnt(struct mount *mnt, struct mount *parent) ++{ ++ hlist_add_head_rcu(&mnt->mnt_hash, ++ m_hash(&parent->mnt, mnt->mnt_mountpoint)); ++ list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); ++} ++ + /* + * vfsmount lock must be held for write + */ +@@ -887,28 +872,45 @@ static void attach_mnt(struct mount *mnt, + struct mountpoint *mp) + { + mnt_set_mountpoint(parent, mp, mnt); +- hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry)); +- list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); ++ __attach_mnt(mnt, parent); + } + +-static void attach_shadowed(struct mount *mnt, +- struct mount *parent, +- struct mount *shadows) ++void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt) + { +- if (shadows) { +- hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash); +- list_add(&mnt->mnt_child, &shadows->mnt_child); +- } else { +- hlist_add_head_rcu(&mnt->mnt_hash, +- m_hash(&parent->mnt, mnt->mnt_mountpoint)); +- list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); +- } ++ struct mountpoint *old_mp = mnt->mnt_mp; ++ struct dentry *old_mountpoint = mnt->mnt_mountpoint; ++ struct mount *old_parent = mnt->mnt_parent; ++ ++ list_del_init(&mnt->mnt_child); ++ hlist_del_init(&mnt->mnt_mp_list); ++ hlist_del_init_rcu(&mnt->mnt_hash); ++ ++ attach_mnt(mnt, parent, mp); ++ ++ put_mountpoint(old_mp); ++ ++ /* ++ * Safely avoid even the suggestion this code might sleep or ++ * lock the mount hash by taking advantage of the knowledge that ++ * mnt_change_mountpoint will not release the final reference ++ * to a mountpoint. ++ * ++ * During mounting, the mount passed in as the parent mount will ++ * continue to use the old mountpoint and during unmounting, the ++ * old mountpoint will continue to exist until namespace_unlock, ++ * which happens well after mnt_change_mountpoint. 
++ */ ++ spin_lock(&old_mountpoint->d_lock); ++ old_mountpoint->d_lockref.count--; ++ spin_unlock(&old_mountpoint->d_lock); ++ ++ mnt_add_count(old_parent, -1); + } + + /* + * vfsmount lock must be held for write + */ +-static void commit_tree(struct mount *mnt, struct mount *shadows) ++static void commit_tree(struct mount *mnt) + { + struct mount *parent = mnt->mnt_parent; + struct mount *m; +@@ -923,7 +925,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows) + + list_splice(&head, n->list.prev); + +- attach_shadowed(mnt, parent, shadows); ++ __attach_mnt(mnt, parent); + touch_mnt_namespace(n); + } + +@@ -1718,7 +1720,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, + continue; + + for (s = r; s; s = next_mnt(s, r)) { +- struct mount *t = NULL; + if (!(flag & CL_COPY_UNBINDABLE) && + IS_MNT_UNBINDABLE(s)) { + s = skip_mnt_tree(s); +@@ -1740,14 +1741,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, + goto out; + lock_mount_hash(); + list_add_tail(&q->mnt_list, &res->mnt_list); +- mnt_set_mountpoint(parent, p->mnt_mp, q); +- if (!list_empty(&parent->mnt_mounts)) { +- t = list_last_entry(&parent->mnt_mounts, +- struct mount, mnt_child); +- if (t->mnt_mp != p->mnt_mp) +- t = NULL; +- } +- attach_shadowed(q, parent, t); ++ attach_mnt(q, parent, p->mnt_mp); + unlock_mount_hash(); + } + } +@@ -1925,10 +1919,18 @@ static int attach_recursive_mnt(struct mount *source_mnt, + struct path *parent_path) + { + HLIST_HEAD(tree_list); ++ struct mountpoint *smp; + struct mount *child, *p; + struct hlist_node *n; + int err; + ++ /* Preallocate a mountpoint in case the new mounts need ++ * to be tucked under other mounts. ++ */ ++ smp = get_mountpoint(source_mnt->mnt.mnt_root); ++ if (IS_ERR(smp)) ++ return PTR_ERR(smp); ++ + if (IS_MNT_SHARED(dest_mnt)) { + err = invent_group_ids(source_mnt, true); + if (err) +@@ -1948,16 +1950,19 @@ static int attach_recursive_mnt(struct mount *source_mnt, + touch_mnt_namespace(source_mnt->mnt_ns); + } else { + mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); +- commit_tree(source_mnt, NULL); ++ commit_tree(source_mnt); + } + + hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { + struct mount *q; + hlist_del_init(&child->mnt_hash); +- q = __lookup_mnt_last(&child->mnt_parent->mnt, +- child->mnt_mountpoint); +- commit_tree(child, q); ++ q = __lookup_mnt(&child->mnt_parent->mnt, ++ child->mnt_mountpoint); ++ if (q) ++ mnt_change_mountpoint(child, smp, q); ++ commit_tree(child); + } ++ put_mountpoint(smp); + unlock_mount_hash(); + + return 0; +@@ -1970,6 +1975,10 @@ static int attach_recursive_mnt(struct mount *source_mnt, + unlock_mount_hash(); + cleanup_group_ids(source_mnt, NULL); + out: ++ read_seqlock_excl(&mount_lock); ++ put_mountpoint(smp); ++ read_sequnlock_excl(&mount_lock); ++ + return err; + } + +diff --git a/fs/pnode.c b/fs/pnode.c +index 99899705b105..b9f2af59b9a6 100644 +--- a/fs/pnode.c ++++ b/fs/pnode.c +@@ -324,6 +324,21 @@ out: + return ret; + } + ++static struct mount *find_topper(struct mount *mnt) ++{ ++ /* If there is exactly one mount covering mnt completely return it. 
*/ ++ struct mount *child; ++ ++ if (!list_is_singular(&mnt->mnt_mounts)) ++ return NULL; ++ ++ child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child); ++ if (child->mnt_mountpoint != mnt->mnt.mnt_root) ++ return NULL; ++ ++ return child; ++} ++ + /* + * return true if the refcount is greater than count + */ +@@ -344,9 +359,8 @@ static inline int do_refcount_check(struct mount *mnt, int count) + */ + int propagate_mount_busy(struct mount *mnt, int refcnt) + { +- struct mount *m, *child; ++ struct mount *m, *child, *topper; + struct mount *parent = mnt->mnt_parent; +- int ret = 0; + + if (mnt == parent) + return do_refcount_check(mnt, refcnt); +@@ -361,12 +375,24 @@ int propagate_mount_busy(struct mount *mnt, int refcnt) + + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { +- child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); +- if (child && list_empty(&child->mnt_mounts) && +- (ret = do_refcount_check(child, 1))) +- break; ++ int count = 1; ++ child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint); ++ if (!child) ++ continue; ++ ++ /* Is there exactly one mount on the child that covers ++ * it completely whose reference should be ignored? ++ */ ++ topper = find_topper(child); ++ if (topper) ++ count += 1; ++ else if (!list_empty(&child->mnt_mounts)) ++ continue; ++ ++ if (do_refcount_check(child, count)) ++ return 1; + } +- return ret; ++ return 0; + } + + /* +@@ -383,7 +409,7 @@ void propagate_mount_unlock(struct mount *mnt) + + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { +- child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); ++ child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint); + if (child) + child->mnt.mnt_flags &= ~MNT_LOCKED; + } +@@ -401,9 +427,11 @@ static void mark_umount_candidates(struct mount *mnt) + + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { +- struct mount *child = __lookup_mnt_last(&m->mnt, ++ struct mount *child = __lookup_mnt(&m->mnt, + mnt->mnt_mountpoint); +- if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) { ++ if (!child || (child->mnt.mnt_flags & MNT_UMOUNT)) ++ continue; ++ if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) { + SET_MNT_MARK(child); + } + } +@@ -422,8 +450,8 @@ static void __propagate_umount(struct mount *mnt) + + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { +- +- struct mount *child = __lookup_mnt_last(&m->mnt, ++ struct mount *topper; ++ struct mount *child = __lookup_mnt(&m->mnt, + mnt->mnt_mountpoint); + /* + * umount the child only if the child has no children +@@ -432,6 +460,15 @@ static void __propagate_umount(struct mount *mnt) + if (!child || !IS_MNT_MARKED(child)) + continue; + CLEAR_MNT_MARK(child); ++ ++ /* If there is exactly one mount covering all of child ++ * replace child with that mount. 
++ */ ++ topper = find_topper(child); ++ if (topper) ++ mnt_change_mountpoint(child->mnt_parent, child->mnt_mp, ++ topper); ++ + if (list_empty(&child->mnt_mounts)) { + list_del_init(&child->mnt_child); + child->mnt.mnt_flags |= MNT_UMOUNT; +diff --git a/fs/pnode.h b/fs/pnode.h +index 0fcdbe7ca648..623f01772bec 100644 +--- a/fs/pnode.h ++++ b/fs/pnode.h +@@ -49,6 +49,8 @@ int get_dominating_id(struct mount *mnt, const struct path *root); + unsigned int mnt_get_count(struct mount *mnt); + void mnt_set_mountpoint(struct mount *, struct mountpoint *, + struct mount *); ++void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, ++ struct mount *mnt); + struct mount *copy_tree(struct mount *, struct dentry *, int); + bool is_path_reachable(struct mount *, struct dentry *, + const struct path *root); +diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h +index e55c08bc3a96..0abc56140c83 100644 +--- a/include/linux/ceph/osdmap.h ++++ b/include/linux/ceph/osdmap.h +@@ -49,7 +49,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) + case CEPH_POOL_TYPE_EC: + return false; + default: +- BUG_ON(1); ++ BUG(); + } + } + +diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h +index c15373894a42..b37dee3acaba 100644 +--- a/include/linux/lockd/lockd.h ++++ b/include/linux/lockd/lockd.h +@@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) + static inline int nlm_compare_locks(const struct file_lock *fl1, + const struct file_lock *fl2) + { +- return fl1->fl_pid == fl2->fl_pid ++ return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) ++ && fl1->fl_pid == fl2->fl_pid + && fl1->fl_owner == fl2->fl_owner + && fl1->fl_start == fl2->fl_start + && fl1->fl_end == fl2->fl_end +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 800fe16cc36f..ed66414b91f0 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -740,6 +740,7 @@ struct se_lun { + struct config_group lun_group; + struct se_port_stat_grps port_stat_grps; + struct completion lun_ref_comp; ++ struct completion lun_shutdown_comp; + struct percpu_ref lun_ref; + struct list_head lun_dev_link; + struct hlist_node link; +diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c +index 00a43a70e1fc..0402fa45b343 100644 +--- a/net/mac80211/pm.c ++++ b/net/mac80211/pm.c +@@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) + break; + } + ++ flush_delayed_work(&sdata->dec_tailroom_needed_wk); + drv_remove_interface(local, sdata); + } + +diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl +index d08e214ec6e7..223d88e25e05 100755 +--- a/tools/testing/ktest/ktest.pl ++++ b/tools/testing/ktest/ktest.pl +@@ -2629,7 +2629,7 @@ sub do_run_test { + } + + waitpid $child_pid, 0; +- $child_exit = $?; ++ $child_exit = $? 
>> 8; + + my $end_time = time; + $test_time = $end_time - $start_time; diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.54-55.patch b/patch/kernel/mvebu64-default/03-patch-4.4.54-55.patch new file mode 100644 index 000000000..5d29ecf30 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.54-55.patch @@ -0,0 +1,1203 @@ +diff --git a/Makefile b/Makefile +index 7f54ac081cf3..d9cc21df444d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 54 ++SUBLEVEL = 55 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig +index 57ed466e00db..2f140d75d01c 100644 +--- a/arch/mips/configs/ip22_defconfig ++++ b/arch/mips/configs/ip22_defconfig +@@ -68,8 +68,8 @@ CONFIG_NETFILTER_NETLINK_QUEUE=m + CONFIG_NF_CONNTRACK=m + CONFIG_NF_CONNTRACK_SECMARK=y + CONFIG_NF_CONNTRACK_EVENTS=y +-CONFIG_NF_CT_PROTO_DCCP=m +-CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y + CONFIG_NF_CONNTRACK_AMANDA=m + CONFIG_NF_CONNTRACK_FTP=m + CONFIG_NF_CONNTRACK_H323=m +diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig +index 48e16d98b2cc..b15508447366 100644 +--- a/arch/mips/configs/ip27_defconfig ++++ b/arch/mips/configs/ip27_defconfig +@@ -134,7 +134,7 @@ CONFIG_LIBFC=m + CONFIG_SCSI_QLOGIC_1280=y + CONFIG_SCSI_PMCRAID=m + CONFIG_SCSI_BFA_FC=m +-CONFIG_SCSI_DH=m ++CONFIG_SCSI_DH=y + CONFIG_SCSI_DH_RDAC=m + CONFIG_SCSI_DH_HP_SW=m + CONFIG_SCSI_DH_EMC=m +@@ -206,7 +206,6 @@ CONFIG_MLX4_EN=m + # CONFIG_MLX4_DEBUG is not set + CONFIG_TEHUTI=m + CONFIG_BNX2X=m +-CONFIG_QLGE=m + CONFIG_SFC=m + CONFIG_BE2NET=m + CONFIG_LIBERTAS_THINFIRM=m +diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig +index 004cf52d1b7d..c24b87819ccb 100644 +--- a/arch/mips/configs/lemote2f_defconfig ++++ b/arch/mips/configs/lemote2f_defconfig +@@ -39,7 +39,7 @@ CONFIG_HIBERNATION=y + CONFIG_PM_STD_PARTITION="/dev/hda3" + CONFIG_CPU_FREQ=y + CONFIG_CPU_FREQ_DEBUG=y +-CONFIG_CPU_FREQ_STAT=m ++CONFIG_CPU_FREQ_STAT=y + CONFIG_CPU_FREQ_STAT_DETAILS=y + CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y + CONFIG_CPU_FREQ_GOV_POWERSAVE=m +diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig +index 5afb4840aec7..739ccd0dca64 100644 +--- a/arch/mips/configs/malta_defconfig ++++ b/arch/mips/configs/malta_defconfig +@@ -59,8 +59,8 @@ CONFIG_NETFILTER=y + CONFIG_NF_CONNTRACK=m + CONFIG_NF_CONNTRACK_SECMARK=y + CONFIG_NF_CONNTRACK_EVENTS=y +-CONFIG_NF_CT_PROTO_DCCP=m +-CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y + CONFIG_NF_CONNTRACK_AMANDA=m + CONFIG_NF_CONNTRACK_FTP=m + CONFIG_NF_CONNTRACK_H323=m +diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig +index 98f13879bb8f..47f4ecf125ba 100644 +--- a/arch/mips/configs/malta_kvm_defconfig ++++ b/arch/mips/configs/malta_kvm_defconfig +@@ -60,8 +60,8 @@ CONFIG_NETFILTER=y + CONFIG_NF_CONNTRACK=m + CONFIG_NF_CONNTRACK_SECMARK=y + CONFIG_NF_CONNTRACK_EVENTS=y +-CONFIG_NF_CT_PROTO_DCCP=m +-CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y + CONFIG_NF_CONNTRACK_AMANDA=m + CONFIG_NF_CONNTRACK_FTP=m + CONFIG_NF_CONNTRACK_H323=m +diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig +index 3b5d5913f548..e79d325aa085 100644 +--- a/arch/mips/configs/malta_kvm_guest_defconfig ++++ 
b/arch/mips/configs/malta_kvm_guest_defconfig +@@ -59,8 +59,8 @@ CONFIG_NETFILTER=y + CONFIG_NF_CONNTRACK=m + CONFIG_NF_CONNTRACK_SECMARK=y + CONFIG_NF_CONNTRACK_EVENTS=y +-CONFIG_NF_CT_PROTO_DCCP=m +-CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y + CONFIG_NF_CONNTRACK_AMANDA=m + CONFIG_NF_CONNTRACK_FTP=m + CONFIG_NF_CONNTRACK_H323=m +diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig +index 732215732751..ae87ad86243b 100644 +--- a/arch/mips/configs/maltaup_xpa_defconfig ++++ b/arch/mips/configs/maltaup_xpa_defconfig +@@ -61,8 +61,8 @@ CONFIG_NETFILTER=y + CONFIG_NF_CONNTRACK=m + CONFIG_NF_CONNTRACK_SECMARK=y + CONFIG_NF_CONNTRACK_EVENTS=y +-CONFIG_NF_CT_PROTO_DCCP=m +-CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y + CONFIG_NF_CONNTRACK_AMANDA=m + CONFIG_NF_CONNTRACK_FTP=m + CONFIG_NF_CONNTRACK_H323=m +diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig +index b3d1d37f85ea..47492fee2952 100644 +--- a/arch/mips/configs/nlm_xlp_defconfig ++++ b/arch/mips/configs/nlm_xlp_defconfig +@@ -111,7 +111,7 @@ CONFIG_NETFILTER=y + CONFIG_NF_CONNTRACK=m + CONFIG_NF_CONNTRACK_SECMARK=y + CONFIG_NF_CONNTRACK_EVENTS=y +-CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CT_PROTO_UDPLITE=y + CONFIG_NF_CONNTRACK_AMANDA=m + CONFIG_NF_CONNTRACK_FTP=m + CONFIG_NF_CONNTRACK_H323=m +diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig +index 3d8016d6cf3e..472a818f1eb8 100644 +--- a/arch/mips/configs/nlm_xlr_defconfig ++++ b/arch/mips/configs/nlm_xlr_defconfig +@@ -91,7 +91,7 @@ CONFIG_NETFILTER=y + CONFIG_NF_CONNTRACK=m + CONFIG_NF_CONNTRACK_SECMARK=y + CONFIG_NF_CONNTRACK_EVENTS=y +-CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CT_PROTO_UDPLITE=y + CONFIG_NF_CONNTRACK_AMANDA=m + CONFIG_NF_CONNTRACK_FTP=m + CONFIG_NF_CONNTRACK_H323=m +diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S +index 8c6f508e59de..554d1da97743 100644 +--- a/arch/mips/dec/int-handler.S ++++ b/arch/mips/dec/int-handler.S +@@ -146,7 +146,25 @@ + /* + * Find irq with highest priority + */ +- PTR_LA t1,cpu_mask_nr_tbl ++ # open coded PTR_LA t1, cpu_mask_nr_tbl ++#if (_MIPS_SZPTR == 32) ++ # open coded la t1, cpu_mask_nr_tbl ++ lui t1, %hi(cpu_mask_nr_tbl) ++ addiu t1, %lo(cpu_mask_nr_tbl) ++ ++#endif ++#if (_MIPS_SZPTR == 64) ++ # open coded dla t1, cpu_mask_nr_tbl ++ .set push ++ .set noat ++ lui t1, %highest(cpu_mask_nr_tbl) ++ lui AT, %hi(cpu_mask_nr_tbl) ++ daddiu t1, t1, %higher(cpu_mask_nr_tbl) ++ daddiu AT, AT, %lo(cpu_mask_nr_tbl) ++ dsll t1, 32 ++ daddu t1, t1, AT ++ .set pop ++#endif + 1: lw t2,(t1) + nop + and t2,t0 +@@ -195,7 +213,25 @@ + /* + * Find irq with highest priority + */ +- PTR_LA t1,asic_mask_nr_tbl ++ # open coded PTR_LA t1,asic_mask_nr_tbl ++#if (_MIPS_SZPTR == 32) ++ # open coded la t1, asic_mask_nr_tbl ++ lui t1, %hi(asic_mask_nr_tbl) ++ addiu t1, %lo(asic_mask_nr_tbl) ++ ++#endif ++#if (_MIPS_SZPTR == 64) ++ # open coded dla t1, asic_mask_nr_tbl ++ .set push ++ .set noat ++ lui t1, %highest(asic_mask_nr_tbl) ++ lui AT, %hi(asic_mask_nr_tbl) ++ daddiu t1, t1, %higher(asic_mask_nr_tbl) ++ daddiu AT, AT, %lo(asic_mask_nr_tbl) ++ dsll t1, 32 ++ daddu t1, t1, AT ++ .set pop ++#endif + 2: lw t2,(t1) + nop + and t2,t0 +diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S +index edbab9b8691f..c474981a6c0d 100644 +--- a/arch/mips/netlogic/common/reset.S ++++ b/arch/mips/netlogic/common/reset.S +@@ 
-50,7 +50,6 @@ + #include + #include + +-#define CP0_EBASE $15 + #define SYS_CPU_COHERENT_BASE CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \ + XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \ + SYS_CPU_NONCOHERENT_MODE * 4 +@@ -92,7 +91,7 @@ + * registers. On XLPII CPUs, usual cache instructions work. + */ + .macro xlp_flush_l1_dcache +- mfc0 t0, CP0_EBASE, 0 ++ mfc0 t0, CP0_PRID + andi t0, t0, PRID_IMP_MASK + slt t1, t0, 0x1200 + beqz t1, 15f +@@ -171,7 +170,7 @@ FEXPORT(nlm_reset_entry) + nop + + 1: /* Entry point on core wakeup */ +- mfc0 t0, CP0_EBASE, 0 /* processor ID */ ++ mfc0 t0, CP0_PRID /* processor ID */ + andi t0, PRID_IMP_MASK + li t1, 0x1500 /* XLP 9xx */ + beq t0, t1, 2f /* does not need to set coherent */ +@@ -182,8 +181,8 @@ FEXPORT(nlm_reset_entry) + nop + + /* set bit in SYS coherent register for the core */ +- mfc0 t0, CP0_EBASE, 1 +- mfc0 t1, CP0_EBASE, 1 ++ mfc0 t0, CP0_EBASE ++ mfc0 t1, CP0_EBASE + srl t1, 5 + andi t1, 0x3 /* t1 <- node */ + li t2, 0x40000 +@@ -232,7 +231,7 @@ EXPORT(nlm_boot_siblings) + + * NOTE: All GPR contents are lost after the mtcr above! + */ +- mfc0 v0, CP0_EBASE, 1 ++ mfc0 v0, CP0_EBASE + andi v0, 0x3ff /* v0 <- node/core */ + + /* +diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S +index 805355b0bd05..f0cc4c9de2bb 100644 +--- a/arch/mips/netlogic/common/smpboot.S ++++ b/arch/mips/netlogic/common/smpboot.S +@@ -48,8 +48,6 @@ + #include + #include + +-#define CP0_EBASE $15 +- + .set noreorder + .set noat + .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ +@@ -86,7 +84,7 @@ NESTED(nlm_boot_secondary_cpus, 16, sp) + PTR_L gp, 0(t1) + + /* a0 has the processor id */ +- mfc0 a0, CP0_EBASE, 1 ++ mfc0 a0, CP0_EBASE + andi a0, 0x3ff /* a0 <- node/core */ + PTR_LA t0, nlm_early_init_secondary + jalr t0 +diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c +index 39a9142f71be..7ecb4af79b7b 100644 +--- a/arch/mips/ralink/prom.c ++++ b/arch/mips/ralink/prom.c +@@ -30,8 +30,10 @@ const char *get_system_type(void) + return soc_info.sys_type; + } + +-static __init void prom_init_cmdline(int argc, char **argv) ++static __init void prom_init_cmdline(void) + { ++ int argc; ++ char **argv; + int i; + + pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n", +@@ -60,14 +62,11 @@ static __init void prom_init_cmdline(int argc, char **argv) + + void __init prom_init(void) + { +- int argc; +- char **argv; +- + prom_soc_init(&soc_info); + + pr_info("SoC Type: %s\n", get_system_type()); + +- prom_init_cmdline(argc, argv); ++ prom_init_cmdline(); + } + + void __init prom_free_prom_memory(void) +diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c +index 844f5cd55c8f..15506a1ff22a 100644 +--- a/arch/mips/ralink/rt288x.c ++++ b/arch/mips/ralink/rt288x.c +@@ -40,16 +40,6 @@ static struct rt2880_pmx_group rt2880_pinmux_data_act[] = { + { 0 } + }; + +-static void rt288x_wdt_reset(void) +-{ +- u32 t; +- +- /* enable WDT reset output on pin SRAM_CS_N */ +- t = rt_sysc_r32(SYSC_REG_CLKCFG); +- t |= CLKCFG_SRAM_CS_N_WDT; +- rt_sysc_w32(t, SYSC_REG_CLKCFG); +-} +- + void __init ralink_clk_init(void) + { + unsigned long cpu_rate, wmac_rate = 40000000; +diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c +index 9e4572592065..15b32cd01906 100644 +--- a/arch/mips/ralink/rt305x.c ++++ b/arch/mips/ralink/rt305x.c +@@ -89,17 +89,6 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = { + { 0 } + }; + +-static void rt305x_wdt_reset(void) +-{ +- u32 t; +- +- /* enable WDT reset output on pin SRAM_CS_N 
*/ +- t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); +- t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT << +- RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT; +- rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); +-} +- + static unsigned long rt5350_get_mem_size(void) + { + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); +diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c +index 582995aaaf4e..f42834c7f007 100644 +--- a/arch/mips/ralink/rt3883.c ++++ b/arch/mips/ralink/rt3883.c +@@ -63,16 +63,6 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = { + { 0 } + }; + +-static void rt3883_wdt_reset(void) +-{ +- u32 t; +- +- /* enable WDT reset output on GPIO 2 */ +- t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1); +- t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT; +- rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1); +-} +- + void __init ralink_clk_init(void) + { + unsigned long cpu_rate, sys_rate; +diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform +index b7a4b7e04c38..e8f6b3a42a48 100644 +--- a/arch/mips/sgi-ip22/Platform ++++ b/arch/mips/sgi-ip22/Platform +@@ -25,7 +25,7 @@ endif + # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys + # + ifdef CONFIG_SGI_IP28 +- ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n) ++ ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n) + $(error gcc doesn't support needed option -mr10k-cache-barrier=store) + endif + endif +diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c +index dc885b30f7a6..4014881e9843 100644 +--- a/arch/powerpc/lib/sstep.c ++++ b/arch/powerpc/lib/sstep.c +@@ -1806,8 +1806,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) + goto instr_done; + + case LARX: +- if (regs->msr & MSR_LE) +- return 0; + if (op.ea & (size - 1)) + break; /* can't handle misaligned */ + err = -EFAULT; +@@ -1829,8 +1827,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) + goto ldst_done; + + case STCX: +- if (regs->msr & MSR_LE) +- return 0; + if (op.ea & (size - 1)) + break; /* can't handle misaligned */ + err = -EFAULT; +@@ -1854,8 +1850,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) + goto ldst_done; + + case LOAD: +- if (regs->msr & MSR_LE) +- return 0; + err = read_mem(®s->gpr[op.reg], op.ea, size, regs); + if (!err) { + if (op.type & SIGNEXT) +@@ -1867,8 +1861,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) + + #ifdef CONFIG_PPC_FPU + case LOAD_FP: +- if (regs->msr & MSR_LE) +- return 0; + if (size == 4) + err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); + else +@@ -1877,15 +1869,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) + #endif + #ifdef CONFIG_ALTIVEC + case LOAD_VMX: +- if (regs->msr & MSR_LE) +- return 0; + err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); + goto ldst_done; + #endif + #ifdef CONFIG_VSX + case LOAD_VSX: +- if (regs->msr & MSR_LE) +- return 0; + err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); + goto ldst_done; + #endif +@@ -1908,8 +1896,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) + goto instr_done; + + case STORE: +- if (regs->msr & MSR_LE) +- return 0; + if ((op.type & UPDATE) && size == sizeof(long) && + op.reg == 1 && op.update_reg == 1 && + !(regs->msr & MSR_PR) && +@@ -1922,8 +1908,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) + + #ifdef CONFIG_PPC_FPU + case STORE_FP: +- if (regs->msr & MSR_LE) +- return 0; + if (size == 4) + err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); 
+ else +@@ -1932,15 +1916,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) + #endif + #ifdef CONFIG_ALTIVEC + case STORE_VMX: +- if (regs->msr & MSR_LE) +- return 0; + err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); + goto ldst_done; + #endif + #ifdef CONFIG_VSX + case STORE_VSX: +- if (regs->msr & MSR_LE) +- return 0; + err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); + goto ldst_done; + #endif +diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c +index 171e09bb8ea2..f7c3a61040bd 100644 +--- a/arch/s390/kernel/crash_dump.c ++++ b/arch/s390/kernel/crash_dump.c +@@ -23,6 +23,8 @@ + #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) + #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) + ++#define LINUX_NOTE_NAME "LINUX" ++ + static struct memblock_region oldmem_region; + + static struct memblock_type oldmem_type = { +@@ -312,7 +314,7 @@ static void *nt_fpregset(void *ptr, struct save_area *sa) + static void *nt_s390_timer(void *ptr, struct save_area *sa) + { + return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer), +- KEXEC_CORE_NOTE_NAME); ++ LINUX_NOTE_NAME); + } + + /* +@@ -321,7 +323,7 @@ static void *nt_s390_timer(void *ptr, struct save_area *sa) + static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa) + { + return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp, +- sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME); ++ sizeof(sa->clk_cmp), LINUX_NOTE_NAME); + } + + /* +@@ -330,7 +332,7 @@ static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa) + static void *nt_s390_tod_preg(void *ptr, struct save_area *sa) + { + return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg, +- sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME); ++ sizeof(sa->tod_reg), LINUX_NOTE_NAME); + } + + /* +@@ -339,7 +341,7 @@ static void *nt_s390_tod_preg(void *ptr, struct save_area *sa) + static void *nt_s390_ctrs(void *ptr, struct save_area *sa) + { + return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs, +- sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME); ++ sizeof(sa->ctrl_regs), LINUX_NOTE_NAME); + } + + /* +@@ -348,7 +350,7 @@ static void *nt_s390_ctrs(void *ptr, struct save_area *sa) + static void *nt_s390_prefix(void *ptr, struct save_area *sa) + { + return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg, +- sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME); ++ sizeof(sa->pref_reg), LINUX_NOTE_NAME); + } + + /* +@@ -357,7 +359,7 @@ static void *nt_s390_prefix(void *ptr, struct save_area *sa) + static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs) + { + return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16], +- 16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME); ++ 16 * sizeof(__vector128), LINUX_NOTE_NAME); + } + + /* +@@ -370,12 +372,12 @@ static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs) + int i; + + note = (Elf64_Nhdr *)ptr; +- note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1; ++ note->n_namesz = strlen(LINUX_NOTE_NAME) + 1; + note->n_descsz = 16 * 8; + note->n_type = NT_S390_VXRS_LOW; + len = sizeof(Elf64_Nhdr); + +- memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz); ++ memcpy(ptr + len, LINUX_NOTE_NAME, note->n_namesz); + len = roundup(len + note->n_namesz, 4); + + ptr += len; +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c +index 8345ae1f117d..05ae254f84cf 100644 +--- a/arch/s390/mm/pgtable.c ++++ b/arch/s390/mm/pgtable.c +@@ -1237,11 +1237,28 @@ EXPORT_SYMBOL_GPL(s390_reset_cmma); + */ + bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) + { ++ pgd_t *pgd; ++ pud_t 
*pud; ++ pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + bool dirty = false; + +- pte = get_locked_pte(gmap->mm, address, &ptl); ++ pgd = pgd_offset(gmap->mm, address); ++ pud = pud_alloc(gmap->mm, pgd, address); ++ if (!pud) ++ return false; ++ pmd = pmd_alloc(gmap->mm, pud, address); ++ if (!pmd) ++ return false; ++ /* We can't run guests backed by huge pages, but userspace can ++ * still set them up and then try to migrate them without any ++ * migration support. ++ */ ++ if (pmd_large(*pmd)) ++ return true; ++ ++ pte = pte_alloc_map_lock(gmap->mm, pmd, address, &ptl); + if (unlikely(!pte)) + return false; + +diff --git a/crypto/Makefile b/crypto/Makefile +index 82fbff180ad3..03e66097eb0c 100644 +--- a/crypto/Makefile ++++ b/crypto/Makefile +@@ -62,6 +62,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o + obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o + obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o + obj-$(CONFIG_CRYPTO_WP512) += wp512.o ++CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 + obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o + obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o + obj-$(CONFIG_CRYPTO_ECB) += ecb.o +@@ -85,6 +86,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o + obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o + obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o + obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o ++CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 + obj-$(CONFIG_CRYPTO_AES) += aes_generic.o + obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o + obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o +diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c +index c097f477c74c..14c2a07c9f3f 100644 +--- a/drivers/acpi/nfit.c ++++ b/drivers/acpi/nfit.c +@@ -965,7 +965,7 @@ static size_t sizeof_nfit_set_info(int num_mappings) + + num_mappings * sizeof(struct nfit_set_info_map); + } + +-static int cmp_map(const void *m0, const void *m1) ++static int cmp_map_compat(const void *m0, const void *m1) + { + const struct nfit_set_info_map *map0 = m0; + const struct nfit_set_info_map *map1 = m1; +@@ -974,6 +974,14 @@ static int cmp_map(const void *m0, const void *m1) + sizeof(u64)); + } + ++static int cmp_map(const void *m0, const void *m1) ++{ ++ const struct nfit_set_info_map *map0 = m0; ++ const struct nfit_set_info_map *map1 = m1; ++ ++ return map0->region_offset - map1->region_offset; ++} ++ + /* Retrieve the nth entry referencing this spa */ + static struct acpi_nfit_memory_map *memdev_from_spa( + struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) +@@ -1029,6 +1037,12 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, + sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), + cmp_map, NULL); + nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); ++ ++ /* support namespaces created with the wrong sort order */ ++ sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), ++ cmp_map_compat, NULL); ++ nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); ++ + ndr_desc->nd_set = nd_set; + devm_kfree(dev, info); + +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 3384a3eef917..397f0454100b 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -1467,11 +1467,62 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) + } + EXPORT_SYMBOL_GPL(dm_accept_partial_bio); + ++/* ++ * Flush current->bio_list when the target map method blocks. 
++ * This fixes deadlocks in snapshot and possibly in other targets. ++ */ ++struct dm_offload { ++ struct blk_plug plug; ++ struct blk_plug_cb cb; ++}; ++ ++static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule) ++{ ++ struct dm_offload *o = container_of(cb, struct dm_offload, cb); ++ struct bio_list list; ++ struct bio *bio; ++ ++ INIT_LIST_HEAD(&o->cb.list); ++ ++ if (unlikely(!current->bio_list)) ++ return; ++ ++ list = *current->bio_list; ++ bio_list_init(current->bio_list); ++ ++ while ((bio = bio_list_pop(&list))) { ++ struct bio_set *bs = bio->bi_pool; ++ if (unlikely(!bs) || bs == fs_bio_set) { ++ bio_list_add(current->bio_list, bio); ++ continue; ++ } ++ ++ spin_lock(&bs->rescue_lock); ++ bio_list_add(&bs->rescue_list, bio); ++ queue_work(bs->rescue_workqueue, &bs->rescue_work); ++ spin_unlock(&bs->rescue_lock); ++ } ++} ++ ++static void dm_offload_start(struct dm_offload *o) ++{ ++ blk_start_plug(&o->plug); ++ o->cb.callback = flush_current_bio_list; ++ list_add(&o->cb.list, ¤t->plug->cb_list); ++} ++ ++static void dm_offload_end(struct dm_offload *o) ++{ ++ list_del(&o->cb.list); ++ blk_finish_plug(&o->plug); ++} ++ + static void __map_bio(struct dm_target_io *tio) + { + int r; + sector_t sector; + struct mapped_device *md; ++ struct dm_offload o; + struct bio *clone = &tio->clone; + struct dm_target *ti = tio->ti; + +@@ -1484,7 +1535,11 @@ static void __map_bio(struct dm_target_io *tio) + */ + atomic_inc(&tio->io->io_count); + sector = clone->bi_iter.bi_sector; ++ ++ dm_offload_start(&o); + r = ti->type->map(ti, clone); ++ dm_offload_end(&o); ++ + if (r == DM_MAPIO_REMAPPED) { + /* the bio has been remapped so dispatch it */ + +diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c +index f9fa3fad728e..2051f28ddac6 100644 +--- a/drivers/mtd/maps/pmcmsp-flash.c ++++ b/drivers/mtd/maps/pmcmsp-flash.c +@@ -139,15 +139,13 @@ static int __init init_msp_flash(void) + } + + msp_maps[i].bankwidth = 1; +- msp_maps[i].name = kmalloc(7, GFP_KERNEL); ++ msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL); + if (!msp_maps[i].name) { + iounmap(msp_maps[i].virt); + kfree(msp_parts[i]); + goto cleanup_loop; + } + +- msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7); +- + for (j = 0; j < pcnt; j++) { + part_name[5] = '0' + i; + part_name[7] = '0' + j; +diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c +index d52ea3008946..7e8bce46e6b4 100644 +--- a/drivers/net/ethernet/ti/cpmac.c ++++ b/drivers/net/ethernet/ti/cpmac.c +@@ -1237,7 +1237,7 @@ int cpmac_init(void) + goto fail_alloc; + } + +-#warning FIXME: unhardcode gpio&reset bits ++ /* FIXME: unhardcode gpio&reset bits */ + ar7_gpio_disable(26); + ar7_gpio_disable(27); + ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c +index 62120c38d56b..aae7379af4e4 100644 +--- a/drivers/nvdimm/namespace_devs.c ++++ b/drivers/nvdimm/namespace_devs.c +@@ -1534,6 +1534,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) + static int find_pmem_label_set(struct nd_region *nd_region, + struct nd_namespace_pmem *nspm) + { ++ u64 altcookie = nd_region_interleave_set_altcookie(nd_region); + u64 cookie = nd_region_interleave_set_cookie(nd_region); + struct nd_namespace_label *nd_label; + u8 select_id[NSLABEL_UUID_LEN]; +@@ -1542,8 +1543,10 @@ static int find_pmem_label_set(struct nd_region *nd_region, + int rc = -ENODEV, l; + u16 i; + +- if (cookie == 0) ++ if (cookie == 0) { ++ 
dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n"); + return -ENXIO; ++ } + + /* + * Find a complete set of labels by uuid. By definition we can start +@@ -1552,13 +1555,24 @@ static int find_pmem_label_set(struct nd_region *nd_region, + for_each_label(l, nd_label, nd_region->mapping[0].labels) { + u64 isetcookie = __le64_to_cpu(nd_label->isetcookie); + +- if (isetcookie != cookie) +- continue; ++ if (isetcookie != cookie) { ++ dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", ++ nd_label->uuid); ++ if (isetcookie != altcookie) ++ continue; ++ ++ dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", ++ nd_label->uuid); ++ } ++ ++ for (i = 0; nd_region->ndr_mappings; i++) { ++ if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i)) ++ continue; ++ if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i)) ++ continue; ++ break; ++ } + +- for (i = 0; nd_region->ndr_mappings; i++) +- if (!has_uuid_at_pos(nd_region, nd_label->uuid, +- cookie, i)) +- break; + if (i < nd_region->ndr_mappings) { + /* + * Give up if we don't find an instance of a +diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h +index 417e521d299c..fc870e55bb66 100644 +--- a/drivers/nvdimm/nd.h ++++ b/drivers/nvdimm/nd.h +@@ -245,6 +245,7 @@ struct nd_region *to_nd_region(struct device *dev); + int nd_region_to_nstype(struct nd_region *nd_region); + int nd_region_register_namespaces(struct nd_region *nd_region, int *err); + u64 nd_region_interleave_set_cookie(struct nd_region *nd_region); ++u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); + void nvdimm_bus_lock(struct device *dev); + void nvdimm_bus_unlock(struct device *dev); + bool is_nvdimm_bus_locked(struct device *dev); +diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c +index 9521696c9385..dc2e919daa39 100644 +--- a/drivers/nvdimm/region_devs.c ++++ b/drivers/nvdimm/region_devs.c +@@ -379,6 +379,15 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region) + return 0; + } + ++u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) ++{ ++ struct nd_interleave_set *nd_set = nd_region->nd_set; ++ ++ if (nd_set) ++ return nd_set->altcookie; ++ return 0; ++} ++ + /* + * Upon successful probe/remove, take/release a reference on the + * associated interleave set (if present), and plant new btt + namespace +diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c +index 9c780740fb82..e712fe745955 100644 +--- a/drivers/scsi/mvsas/mv_sas.c ++++ b/drivers/scsi/mvsas/mv_sas.c +@@ -737,8 +737,8 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf + mv_dprintk("device %016llx not ready.\n", + SAS_ADDR(dev->sas_addr)); + +- rc = SAS_PHY_DOWN; +- return rc; ++ rc = SAS_PHY_DOWN; ++ return rc; + } + tei.port = dev->port->lldd_port; + if (tei.port && !tei.port->port_attached && !tmf) { +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c +index 237ef5573c18..6deb06147202 100644 +--- a/drivers/tty/serial/samsung.c ++++ b/drivers/tty/serial/samsung.c +@@ -1030,8 +1030,10 @@ static int s3c64xx_serial_startup(struct uart_port *port) + if (ourport->dma) { + ret = s3c24xx_serial_request_dma(ourport); + if (ret < 0) { +- dev_warn(port->dev, "DMA request failed\n"); +- return ret; ++ dev_warn(port->dev, ++ "DMA request failed, DMA will not be used\n"); ++ devm_kfree(port->dev, ourport->dma); ++ ourport->dma = NULL; + } + } + +diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h +index 18ae3eaa8b6f..ccd9694f8e36 100644 
+--- a/drivers/usb/dwc3/gadget.h ++++ b/drivers/usb/dwc3/gadget.h +@@ -28,23 +28,23 @@ struct dwc3; + #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) + + /* DEPCFG parameter 1 */ +-#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0) ++#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0) + #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) + #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) + #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) + #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) + #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) +-#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16) ++#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) + #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) +-#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25) ++#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) + #define DWC3_DEPCFG_BULK_BASED (1 << 30) + #define DWC3_DEPCFG_FIFO_BASED (1 << 31) + + /* DEPCFG parameter 0 */ +-#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1) +-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3) +-#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17) +-#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22) ++#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1) ++#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3) ++#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17) ++#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22) + #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) + /* This applies for core versions earlier than 1.94a */ + #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index cfda1a1c0ab6..9ad5145d3103 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -1643,11 +1643,14 @@ static int ffs_func_eps_enable(struct ffs_function *func) + spin_lock_irqsave(&func->ffs->eps_lock, flags); + do { + struct usb_endpoint_descriptor *ds; ++ struct usb_ss_ep_comp_descriptor *comp_desc = NULL; ++ int needs_comp_desc = false; + int desc_idx; + +- if (ffs->gadget->speed == USB_SPEED_SUPER) ++ if (ffs->gadget->speed == USB_SPEED_SUPER) { + desc_idx = 2; +- else if (ffs->gadget->speed == USB_SPEED_HIGH) ++ needs_comp_desc = true; ++ } else if (ffs->gadget->speed == USB_SPEED_HIGH) + desc_idx = 1; + else + desc_idx = 0; +@@ -1664,6 +1667,14 @@ static int ffs_func_eps_enable(struct ffs_function *func) + + ep->ep->driver_data = ep; + ep->ep->desc = ds; ++ ++ comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + ++ USB_DT_ENDPOINT_SIZE); ++ ep->ep->maxburst = comp_desc->bMaxBurst + 1; ++ ++ if (needs_comp_desc) ++ ep->ep->comp_desc = comp_desc; ++ + ret = usb_ep_enable(ep->ep); + if (likely(!ret)) { + epfile->ep = ep; +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c +index 22d067cd5aa3..6610f7a023d3 100644 +--- a/drivers/usb/gadget/udc/dummy_hcd.c ++++ b/drivers/usb/gadget/udc/dummy_hcd.c +@@ -1033,6 +1033,8 @@ static int dummy_udc_probe(struct platform_device *pdev) + int rc; + + dum = *((void **)dev_get_platdata(&pdev->dev)); ++ /* Clear usb_gadget region for new registration to udc-core */ ++ memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); + dum->gadget.name = gadget_name; + dum->gadget.ops = &dummy_ops; + dum->gadget.max_speed = USB_SPEED_SUPER; +diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c +index 74c42f722678..3425154baf8b 100644 +--- a/drivers/usb/host/xhci-dbg.c ++++ b/drivers/usb/host/xhci-dbg.c +@@ -111,7 +111,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci) + xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & 
RTSOFF_MASK); + + /* xhci 1.1 controllers have the HCCPARAMS2 register */ +- if (hci_version > 100) { ++ if (hci_version > 0x100) { + temp = readl(&xhci->cap_regs->hcc_params2); + xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp); + xhci_dbg(xhci, " HC %s Force save context capability", +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c +index 1950e87b4219..775690bed4c0 100644 +--- a/drivers/usb/misc/iowarrior.c ++++ b/drivers/usb/misc/iowarrior.c +@@ -787,12 +787,6 @@ static int iowarrior_probe(struct usb_interface *interface, + iface_desc = interface->cur_altsetting; + dev->product_id = le16_to_cpu(udev->descriptor.idProduct); + +- if (iface_desc->desc.bNumEndpoints < 1) { +- dev_err(&interface->dev, "Invalid number of endpoints\n"); +- retval = -EINVAL; +- goto error; +- } +- + /* set up the endpoint information */ + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; +@@ -803,6 +797,21 @@ static int iowarrior_probe(struct usb_interface *interface, + /* this one will match for the IOWarrior56 only */ + dev->int_out_endpoint = endpoint; + } ++ ++ if (!dev->int_in_endpoint) { ++ dev_err(&interface->dev, "no interrupt-in endpoint found\n"); ++ retval = -ENODEV; ++ goto error; ++ } ++ ++ if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) { ++ if (!dev->int_out_endpoint) { ++ dev_err(&interface->dev, "no interrupt-out endpoint found\n"); ++ retval = -ENODEV; ++ goto error; ++ } ++ } ++ + /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ + dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); + if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c +index 3df7b7ec178e..e0b1fe2f60e1 100644 +--- a/drivers/usb/serial/digi_acceleport.c ++++ b/drivers/usb/serial/digi_acceleport.c +@@ -1483,16 +1483,20 @@ static int digi_read_oob_callback(struct urb *urb) + struct usb_serial *serial = port->serial; + struct tty_struct *tty; + struct digi_port *priv = usb_get_serial_port_data(port); ++ unsigned char *buf = urb->transfer_buffer; + int opcode, line, status, val; + int i; + unsigned int rts; + ++ if (urb->actual_length < 4) ++ return -1; ++ + /* handle each oob command */ +- for (i = 0; i < urb->actual_length - 3;) { +- opcode = ((unsigned char *)urb->transfer_buffer)[i++]; +- line = ((unsigned char *)urb->transfer_buffer)[i++]; +- status = ((unsigned char *)urb->transfer_buffer)[i++]; +- val = ((unsigned char *)urb->transfer_buffer)[i++]; ++ for (i = 0; i < urb->actual_length - 3; i += 4) { ++ opcode = buf[i]; ++ line = buf[i + 1]; ++ status = buf[i + 2]; ++ val = buf[i + 3]; + + dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n", + opcode, line, status, val); +diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c +index c02808a30436..f1a8fdcd8674 100644 +--- a/drivers/usb/serial/io_ti.c ++++ b/drivers/usb/serial/io_ti.c +@@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb) + function = TIUMP_GET_FUNC_FROM_CODE(data[0]); + dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, + port_number, function, data[1]); ++ ++ if (port_number >= edge_serial->serial->num_ports) { ++ dev_err(dev, "bad port number %d\n", port_number); ++ goto exit; ++ } ++ + port = edge_serial->serial->port[port_number]; + edge_port = usb_get_serial_port_data(port); + if (!edge_port) { +@@ -1755,7 
+1761,7 @@ static void edge_bulk_in_callback(struct urb *urb) + + port_number = edge_port->port->port_number; + +- if (edge_port->lsr_event) { ++ if (urb->actual_length > 0 && edge_port->lsr_event) { + edge_port->lsr_event = 0; + dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", + __func__, port_number, edge_port->lsr_mask, *data); +diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c +index a180b17d2432..76564b3bebb9 100644 +--- a/drivers/usb/serial/omninet.c ++++ b/drivers/usb/serial/omninet.c +@@ -142,12 +142,6 @@ static int omninet_port_remove(struct usb_serial_port *port) + + static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port) + { +- struct usb_serial *serial = port->serial; +- struct usb_serial_port *wport; +- +- wport = serial->port[1]; +- tty_port_tty_set(&wport->port, tty); +- + return usb_serial_generic_open(tty, port); + } + +diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c +index b2dff0f14743..236ea43f7815 100644 +--- a/drivers/usb/serial/safe_serial.c ++++ b/drivers/usb/serial/safe_serial.c +@@ -205,6 +205,11 @@ static void safe_process_read_urb(struct urb *urb) + if (!safe) + goto out; + ++ if (length < 2) { ++ dev_err(&port->dev, "malformed packet\n"); ++ return; ++ } ++ + fcs = fcs_compute10(data, length, CRC10_INITFCS); + if (fcs) { + dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index e0f862146793..7dcc97eadb12 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -3573,6 +3573,10 @@ static int ext4_block_truncate_page(handle_t *handle, + unsigned blocksize; + struct inode *inode = mapping->host; + ++ /* If we are processing an encrypted inode during orphan list handling */ ++ if (ext4_encrypted_inode(inode) && !ext4_has_encryption_key(inode)) ++ return 0; ++ + blocksize = inode->i_sb->s_blocksize; + length = blocksize - (offset & (blocksize - 1)); + +diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h +index 3f021dc5da8c..30201b9be7bc 100644 +--- a/include/linux/libnvdimm.h ++++ b/include/linux/libnvdimm.h +@@ -83,6 +83,8 @@ struct nd_cmd_desc { + + struct nd_interleave_set { + u64 cookie; ++ /* compatibility with initial buggy Linux implementation */ ++ u64 altcookie; + }; + + struct nd_region_desc { +diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h +index 14e49c798135..b35533b94277 100644 +--- a/include/trace/events/syscalls.h ++++ b/include/trace/events/syscalls.h +@@ -1,5 +1,6 @@ + #undef TRACE_SYSTEM + #define TRACE_SYSTEM raw_syscalls ++#undef TRACE_INCLUDE_FILE + #define TRACE_INCLUDE_FILE syscalls + + #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 43eefe9d834c..e25b93a4267d 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -4150,24 +4150,6 @@ static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) + atomic_add(n, &memcg->id.ref); + } + +-static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) +-{ +- while (!atomic_inc_not_zero(&memcg->id.ref)) { +- /* +- * The root cgroup cannot be destroyed, so it's refcount must +- * always be >= 1. 
+- */ +- if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { +- VM_BUG_ON(1); +- break; +- } +- memcg = parent_mem_cgroup(memcg); +- if (!memcg) +- memcg = root_mem_cgroup; +- } +- return memcg; +-} +- + static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) + { + if (atomic_sub_and_test(n, &memcg->id.ref)) { +@@ -5751,6 +5733,24 @@ static int __init mem_cgroup_init(void) + subsys_initcall(mem_cgroup_init); + + #ifdef CONFIG_MEMCG_SWAP ++static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) ++{ ++ while (!atomic_inc_not_zero(&memcg->id.ref)) { ++ /* ++ * The root cgroup cannot be destroyed, so it's refcount must ++ * always be >= 1. ++ */ ++ if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { ++ VM_BUG_ON(1); ++ break; ++ } ++ memcg = parent_mem_cgroup(memcg); ++ if (!memcg) ++ memcg = root_mem_cgroup; ++ } ++ return memcg; ++} ++ + /** + * mem_cgroup_swapout - transfer a memsw charge to swap + * @page: page whose memsw charge to transfer diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.55-56.patch b/patch/kernel/mvebu64-default/03-patch-4.4.55-56.patch new file mode 100644 index 000000000..cf1f3dfb0 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.55-56.patch @@ -0,0 +1,2116 @@ +diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt +deleted file mode 100644 +index 54f10478e8e3..000000000000 +--- a/Documentation/networking/netlink_mmap.txt ++++ /dev/null +@@ -1,332 +0,0 @@ +-This file documents how to use memory mapped I/O with netlink. +- +-Author: Patrick McHardy +- +-Overview +--------- +- +-Memory mapped netlink I/O can be used to increase throughput and decrease +-overhead of unicast receive and transmit operations. Some netlink subsystems +-require high throughput, these are mainly the netfilter subsystems +-nfnetlink_queue and nfnetlink_log, but it can also help speed up large +-dump operations of f.i. the routing database. +- +-Memory mapped netlink I/O used two circular ring buffers for RX and TX which +-are mapped into the processes address space. +- +-The RX ring is used by the kernel to directly construct netlink messages into +-user-space memory without copying them as done with regular socket I/O, +-additionally as long as the ring contains messages no recvmsg() or poll() +-syscalls have to be issued by user-space to get more message. +- +-The TX ring is used to process messages directly from user-space memory, the +-kernel processes all messages contained in the ring using a single sendmsg() +-call. +- +-Usage overview +--------------- +- +-In order to use memory mapped netlink I/O, user-space needs three main changes: +- +-- ring setup +-- conversion of the RX path to get messages from the ring instead of recvmsg() +-- conversion of the TX path to construct messages into the ring +- +-Ring setup is done using setsockopt() to provide the ring parameters to the +-kernel, then a call to mmap() to map the ring into the processes address space: +- +-- setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, ¶ms, sizeof(params)); +-- setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, ¶ms, sizeof(params)); +-- ring = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0) +- +-Usage of either ring is optional, but even if only the RX ring is used the +-mapping still needs to be writable in order to update the frame status after +-processing. 
+- +-Conversion of the reception path involves calling poll() on the file +-descriptor, once the socket is readable the frames from the ring are +-processed in order until no more messages are available, as indicated by +-a status word in the frame header. +- +-On kernel side, in order to make use of memory mapped I/O on receive, the +-originating netlink subsystem needs to support memory mapped I/O, otherwise +-it will use an allocated socket buffer as usual and the contents will be +- copied to the ring on transmission, nullifying most of the performance gains. +-Dumps of kernel databases automatically support memory mapped I/O. +- +-Conversion of the transmit path involves changing message construction to +-use memory from the TX ring instead of (usually) a buffer declared on the +-stack and setting up the frame header appropriately. Optionally poll() can +-be used to wait for free frames in the TX ring. +- +-Structured and definitions for using memory mapped I/O are contained in +-. +- +-RX and TX rings +----------------- +- +-Each ring contains a number of continuous memory blocks, containing frames of +-fixed size dependent on the parameters used for ring setup. +- +-Ring: [ block 0 ] +- [ frame 0 ] +- [ frame 1 ] +- [ block 1 ] +- [ frame 2 ] +- [ frame 3 ] +- ... +- [ block n ] +- [ frame 2 * n ] +- [ frame 2 * n + 1 ] +- +-The blocks are only visible to the kernel, from the point of view of user-space +-the ring just contains the frames in a continuous memory zone. +- +-The ring parameters used for setting up the ring are defined as follows: +- +-struct nl_mmap_req { +- unsigned int nm_block_size; +- unsigned int nm_block_nr; +- unsigned int nm_frame_size; +- unsigned int nm_frame_nr; +-}; +- +-Frames are grouped into blocks, where each block is a continuous region of memory +-and holds nm_block_size / nm_frame_size frames. The total number of frames in +-the ring is nm_frame_nr. The following invariants hold: +- +-- frames_per_block = nm_block_size / nm_frame_size +- +-- nm_frame_nr = frames_per_block * nm_block_nr +- +-Some parameters are constrained, specifically: +- +-- nm_block_size must be a multiple of the architectures memory page size. +- The getpagesize() function can be used to get the page size. +- +-- nm_frame_size must be equal or larger to NL_MMAP_HDRLEN, IOW a frame must be +- able to hold at least the frame header +- +-- nm_frame_size must be smaller or equal to nm_block_size +- +-- nm_frame_size must be a multiple of NL_MMAP_MSG_ALIGNMENT +- +-- nm_frame_nr must equal the actual number of frames as specified above. +- +-When the kernel can't allocate physically continuous memory for a ring block, +-it will fall back to use physically discontinuous memory. This might affect +-performance negatively, in order to avoid this the nm_frame_size parameter +-should be chosen to be as small as possible for the required frame size and +-the number of blocks should be increased instead. +- +-Ring frames +------------- +- +-Each frames contain a frame header, consisting of a synchronization word and some +-meta-data, and the message itself. 
+- +-Frame: [ header message ] +- +-The frame header is defined as follows: +- +-struct nl_mmap_hdr { +- unsigned int nm_status; +- unsigned int nm_len; +- __u32 nm_group; +- /* credentials */ +- __u32 nm_pid; +- __u32 nm_uid; +- __u32 nm_gid; +-}; +- +-- nm_status is used for synchronizing processing between the kernel and user- +- space and specifies ownership of the frame as well as the operation to perform +- +-- nm_len contains the length of the message contained in the data area +- +-- nm_group specified the destination multicast group of message +- +-- nm_pid, nm_uid and nm_gid contain the netlink pid, UID and GID of the sending +- process. These values correspond to the data available using SOCK_PASSCRED in +- the SCM_CREDENTIALS cmsg. +- +-The possible values in the status word are: +- +-- NL_MMAP_STATUS_UNUSED: +- RX ring: frame belongs to the kernel and contains no message +- for user-space. Approriate action is to invoke poll() +- to wait for new messages. +- +- TX ring: frame belongs to user-space and can be used for +- message construction. +- +-- NL_MMAP_STATUS_RESERVED: +- RX ring only: frame is currently used by the kernel for message +- construction and contains no valid message yet. +- Appropriate action is to invoke poll() to wait for +- new messages. +- +-- NL_MMAP_STATUS_VALID: +- RX ring: frame contains a valid message. Approriate action is +- to process the message and release the frame back to +- the kernel by setting the status to +- NL_MMAP_STATUS_UNUSED or queue the frame by setting the +- status to NL_MMAP_STATUS_SKIP. +- +- TX ring: the frame contains a valid message from user-space to +- be processed by the kernel. After completing processing +- the kernel will release the frame back to user-space by +- setting the status to NL_MMAP_STATUS_UNUSED. +- +-- NL_MMAP_STATUS_COPY: +- RX ring only: a message is ready to be processed but could not be +- stored in the ring, either because it exceeded the +- frame size or because the originating subsystem does +- not support memory mapped I/O. Appropriate action is +- to invoke recvmsg() to receive the message and release +- the frame back to the kernel by setting the status to +- NL_MMAP_STATUS_UNUSED. +- +-- NL_MMAP_STATUS_SKIP: +- RX ring only: user-space queued the message for later processing, but +- processed some messages following it in the ring. The +- kernel should skip this frame when looking for unused +- frames. +- +-The data area of a frame begins at a offset of NL_MMAP_HDRLEN relative to the +-frame header. +- +-TX limitations +--------------- +- +-As of Jan 2015 the message is always copied from the ring frame to an +-allocated buffer due to unresolved security concerns. +-See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX."). +- +-Example +-------- +- +-Ring setup: +- +- unsigned int block_size = 16 * getpagesize(); +- struct nl_mmap_req req = { +- .nm_block_size = block_size, +- .nm_block_nr = 64, +- .nm_frame_size = 16384, +- .nm_frame_nr = 64 * block_size / 16384, +- }; +- unsigned int ring_size; +- void *rx_ring, *tx_ring; +- +- /* Configure ring parameters */ +- if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0) +- exit(1); +- if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0) +- exit(1) +- +- /* Calculate size of each individual ring */ +- ring_size = req.nm_block_nr * req.nm_block_size; +- +- /* Map RX/TX rings. 
The TX ring is located after the RX ring */ +- rx_ring = mmap(NULL, 2 * ring_size, PROT_READ | PROT_WRITE, +- MAP_SHARED, fd, 0); +- if ((long)rx_ring == -1L) +- exit(1); +- tx_ring = rx_ring + ring_size: +- +-Message reception: +- +-This example assumes some ring parameters of the ring setup are available. +- +- unsigned int frame_offset = 0; +- struct nl_mmap_hdr *hdr; +- struct nlmsghdr *nlh; +- unsigned char buf[16384]; +- ssize_t len; +- +- while (1) { +- struct pollfd pfds[1]; +- +- pfds[0].fd = fd; +- pfds[0].events = POLLIN | POLLERR; +- pfds[0].revents = 0; +- +- if (poll(pfds, 1, -1) < 0 && errno != -EINTR) +- exit(1); +- +- /* Check for errors. Error handling omitted */ +- if (pfds[0].revents & POLLERR) +- +- +- /* If no new messages, poll again */ +- if (!(pfds[0].revents & POLLIN)) +- continue; +- +- /* Process all frames */ +- while (1) { +- /* Get next frame header */ +- hdr = rx_ring + frame_offset; +- +- if (hdr->nm_status == NL_MMAP_STATUS_VALID) { +- /* Regular memory mapped frame */ +- nlh = (void *)hdr + NL_MMAP_HDRLEN; +- len = hdr->nm_len; +- +- /* Release empty message immediately. May happen +- * on error during message construction. +- */ +- if (len == 0) +- goto release; +- } else if (hdr->nm_status == NL_MMAP_STATUS_COPY) { +- /* Frame queued to socket receive queue */ +- len = recv(fd, buf, sizeof(buf), MSG_DONTWAIT); +- if (len <= 0) +- break; +- nlh = buf; +- } else +- /* No more messages to process, continue polling */ +- break; +- +- process_msg(nlh); +-release: +- /* Release frame back to the kernel */ +- hdr->nm_status = NL_MMAP_STATUS_UNUSED; +- +- /* Advance frame offset to next frame */ +- frame_offset = (frame_offset + frame_size) % ring_size; +- } +- } +- +-Message transmission: +- +-This example assumes some ring parameters of the ring setup are available. +-A single message is constructed and transmitted, to send multiple messages +-at once they would be constructed in consecutive frames before a final call +-to sendto(). +- +- unsigned int frame_offset = 0; +- struct nl_mmap_hdr *hdr; +- struct nlmsghdr *nlh; +- struct sockaddr_nl addr = { +- .nl_family = AF_NETLINK, +- }; +- +- hdr = tx_ring + frame_offset; +- if (hdr->nm_status != NL_MMAP_STATUS_UNUSED) +- /* No frame available. Use poll() to avoid. 
*/ +- exit(1); +- +- nlh = (void *)hdr + NL_MMAP_HDRLEN; +- +- /* Build message */ +- build_message(nlh); +- +- /* Fill frame header: length and status need to be set */ +- hdr->nm_len = nlh->nlmsg_len; +- hdr->nm_status = NL_MMAP_STATUS_VALID; +- +- if (sendto(fd, NULL, 0, 0, &addr, sizeof(addr)) < 0) +- exit(1); +- +- /* Advance frame offset to next frame */ +- frame_offset = (frame_offset + frame_size) % ring_size; +diff --git a/Makefile b/Makefile +index d9cc21df444d..cf9303a5d621 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 55 ++SUBLEVEL = 56 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c +index 1a8256dd6729..5b2f2306fbcc 100644 +--- a/arch/x86/kernel/cpu/perf_event.c ++++ b/arch/x86/kernel/cpu/perf_event.c +@@ -1996,8 +1996,8 @@ static int x86_pmu_event_init(struct perf_event *event) + + static void refresh_pce(void *ignored) + { +- if (current->mm) +- load_mm_cr4(current->mm); ++ if (current->active_mm) ++ load_mm_cr4(current->active_mm); + } + + static void x86_pmu_event_mapped(struct perf_event *event) +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c +index f129a9af6357..b6b0077da1af 100644 +--- a/arch/x86/kernel/head64.c ++++ b/arch/x86/kernel/head64.c +@@ -4,6 +4,7 @@ + * Copyright (C) 2000 Andrea Arcangeli SuSE + */ + ++#define DISABLE_BRANCH_PROFILING + #include + #include + #include +diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c +index d470cf219a2d..4e5ac46adc9d 100644 +--- a/arch/x86/mm/kasan_init_64.c ++++ b/arch/x86/mm/kasan_init_64.c +@@ -1,3 +1,4 @@ ++#define DISABLE_BRANCH_PROFILING + #define pr_fmt(fmt) "kasan: " fmt + #include + #include +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c +index d6b619667f1a..349aecbc210a 100644 +--- a/drivers/net/vrf.c ++++ b/drivers/net/vrf.c +@@ -345,6 +345,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) + + static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) + { ++ int len = skb->len; + netdev_tx_t ret = is_ip_tx_frame(skb, dev); + + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { +@@ -352,7 +353,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) + + u64_stats_update_begin(&dstats->syncp); + dstats->tx_pkts++; +- dstats->tx_bytes += skb->len; ++ dstats->tx_bytes += len; + u64_stats_update_end(&dstats->syncp); + } else { + this_cpu_inc(dev->dstats->tx_drps); +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 6fa8e165878e..590750ab6564 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -2600,7 +2600,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) + + if (data[IFLA_VXLAN_ID]) { + __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); +- if (id >= VXLAN_VID_MASK) ++ if (id >= VXLAN_N_VID) + return -ERANGE; + } + +diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c +index 8a9feb341f31..dd561f916f0b 100644 +--- a/fs/ext4/crypto_policy.c ++++ b/fs/ext4/crypto_policy.c +@@ -156,6 +156,12 @@ int ext4_is_child_context_consistent_with_parent(struct inode *parent, + WARN_ON(1); /* Should never happen */ + return 0; + } ++ ++ /* No restrictions on file types which are never encrypted */ ++ if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && ++ !S_ISLNK(child->i_mode)) ++ return 1; ++ + /* no restrictions if the parent directory is not encrypted */ + if (!ext4_encrypted_inode(parent)) + return 1; +diff --git 
a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c +index 1fb12f9c97a6..789e2d6724a9 100644 +--- a/fs/ext4/ioctl.c ++++ b/fs/ext4/ioctl.c +@@ -633,8 +633,12 @@ resizefs_out: + if (err) + goto encryption_policy_out; + ++ mutex_lock(&inode->i_mutex); ++ + err = ext4_process_policy(&policy, inode); + ++ mutex_unlock(&inode->i_mutex); ++ + mnt_drop_write_file(filp); + encryption_policy_out: + return err; +diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c +index e504f548b64e..5bbd1989d5e6 100644 +--- a/fs/f2fs/crypto_policy.c ++++ b/fs/f2fs/crypto_policy.c +@@ -149,6 +149,11 @@ int f2fs_is_child_context_consistent_with_parent(struct inode *parent, + BUG_ON(1); + } + ++ /* No restrictions on file types which are never encrypted */ ++ if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && ++ !S_ISLNK(child->i_mode)) ++ return 1; ++ + /* no restrictions if the parent directory is not encrypted */ + if (!f2fs_encrypted_inode(parent)) + return 1; +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index a197215ad52b..4b449d263333 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -1535,12 +1535,19 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) + #ifdef CONFIG_F2FS_FS_ENCRYPTION + struct f2fs_encryption_policy policy; + struct inode *inode = file_inode(filp); ++ int err; + + if (copy_from_user(&policy, (struct f2fs_encryption_policy __user *)arg, + sizeof(policy))) + return -EFAULT; + +- return f2fs_process_policy(&policy, inode); ++ mutex_lock(&inode->i_mutex); ++ ++ err = f2fs_process_policy(&policy, inode); ++ ++ mutex_unlock(&inode->i_mutex); ++ ++ return err; + #else + return -EOPNOTSUPP; + #endif +diff --git a/include/linux/dccp.h b/include/linux/dccp.h +index 61d042bbbf60..68449293c4b6 100644 +--- a/include/linux/dccp.h ++++ b/include/linux/dccp.h +@@ -163,6 +163,7 @@ struct dccp_request_sock { + __u64 dreq_isr; + __u64 dreq_gsr; + __be32 dreq_service; ++ spinlock_t dreq_lock; + struct list_head dreq_featneg; + __u32 dreq_timestamp_echo; + __u32 dreq_timestamp_time; +diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h +index f095155d8749..0dba4e4ed2be 100644 +--- a/include/uapi/linux/netlink.h ++++ b/include/uapi/linux/netlink.h +@@ -107,8 +107,10 @@ struct nlmsgerr { + #define NETLINK_PKTINFO 3 + #define NETLINK_BROADCAST_ERROR 4 + #define NETLINK_NO_ENOBUFS 5 ++#ifndef __KERNEL__ + #define NETLINK_RX_RING 6 + #define NETLINK_TX_RING 7 ++#endif + #define NETLINK_LISTEN_ALL_NSID 8 + #define NETLINK_LIST_MEMBERSHIPS 9 + #define NETLINK_CAP_ACK 10 +@@ -134,6 +136,7 @@ struct nl_mmap_hdr { + __u32 nm_gid; + }; + ++#ifndef __KERNEL__ + enum nl_mmap_status { + NL_MMAP_STATUS_UNUSED, + NL_MMAP_STATUS_RESERVED, +@@ -145,6 +148,7 @@ enum nl_mmap_status { + #define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO + #define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT) + #define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr)) ++#endif + + #define NET_MAJOR 36 /* Major 36 is reserved for networking */ + +diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h +index f2159d30d1f5..d79399394b46 100644 +--- a/include/uapi/linux/netlink_diag.h ++++ b/include/uapi/linux/netlink_diag.h +@@ -48,6 +48,8 @@ enum { + + #define NDIAG_SHOW_MEMINFO 0x00000001 /* show memory info of a socket */ + #define NDIAG_SHOW_GROUPS 0x00000002 /* show groups of a netlink socket */ ++#ifndef __KERNEL__ + #define NDIAG_SHOW_RING_CFG 0x00000004 /* show ring configuration */ ++#endif + + #endif +diff --git 
a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h +index d08c63f3dd6f..0c5d5dd61b6a 100644 +--- a/include/uapi/linux/packet_diag.h ++++ b/include/uapi/linux/packet_diag.h +@@ -64,7 +64,7 @@ struct packet_diag_mclist { + __u32 pdmc_count; + __u16 pdmc_type; + __u16 pdmc_alen; +- __u8 pdmc_addr[MAX_ADDR_LEN]; ++ __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */ + }; + + struct packet_diag_ring { +diff --git a/kernel/futex.c b/kernel/futex.c +index 9d251dc3ec40..3057dabf726f 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -2690,7 +2690,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + { + struct hrtimer_sleeper timeout, *to = NULL; + struct rt_mutex_waiter rt_waiter; +- struct rt_mutex *pi_mutex = NULL; + struct futex_hash_bucket *hb; + union futex_key key2 = FUTEX_KEY_INIT; + struct futex_q q = futex_q_init; +@@ -2774,6 +2773,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (q.pi_state && (q.pi_state->owner != current)) { + spin_lock(q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); ++ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) ++ rt_mutex_unlock(&q.pi_state->pi_mutex); + /* + * Drop the reference to the pi state which + * the requeue_pi() code acquired for us. +@@ -2782,6 +2783,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + spin_unlock(q.lock_ptr); + } + } else { ++ struct rt_mutex *pi_mutex; ++ + /* + * We have been woken up by futex_unlock_pi(), a timeout, or a + * signal. futex_unlock_pi() will not destroy the lock_ptr nor +@@ -2805,18 +2808,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (res) + ret = (res < 0) ? res : 0; + ++ /* ++ * If fixup_pi_state_owner() faulted and was unable to handle ++ * the fault, unlock the rt_mutex and return the fault to ++ * userspace. ++ */ ++ if (ret && rt_mutex_owner(pi_mutex) == current) ++ rt_mutex_unlock(pi_mutex); ++ + /* Unqueue and drop the lock. */ + unqueue_me_pi(&q); + } + +- /* +- * If fixup_pi_state_owner() faulted and was unable to handle the +- * fault, unlock the rt_mutex and return the fault to userspace. +- */ +- if (ret == -EFAULT) { +- if (pi_mutex && rt_mutex_owner(pi_mutex) == current) +- rt_mutex_unlock(pi_mutex); +- } else if (ret == -EINTR) { ++ if (ret == -EINTR) { + /* + * We've already been requeued, but cannot restart by calling + * futex_lock_pi() directly. We could restart this syscall, but +diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c +index f7fba74108a9..e24754a0e052 100644 +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -29,6 +29,7 @@ EXPORT_SYMBOL(br_should_route_hook); + static int + br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) + { ++ br_drop_fake_rtable(skb); + return netif_receive_skb(skb); + } + +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index 7ddbe7ec81d6..97fc19f001bf 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -516,21 +516,6 @@ static unsigned int br_nf_pre_routing(void *priv, + } + + +-/* PF_BRIDGE/LOCAL_IN ************************************************/ +-/* The packet is locally destined, which requires a real +- * dst_entry, so detach the fake one. On the way up, the +- * packet would pass through PRE_ROUTING again (which already +- * took place when the packet entered the bridge), but we +- * register an IPv4 PRE_ROUTING 'sabotage' hook that will +- * prevent this from happening. 
*/ +-static unsigned int br_nf_local_in(void *priv, +- struct sk_buff *skb, +- const struct nf_hook_state *state) +-{ +- br_drop_fake_rtable(skb); +- return NF_ACCEPT; +-} +- + /* PF_BRIDGE/FORWARD *************************************************/ + static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) + { +@@ -901,12 +886,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = { + .priority = NF_BR_PRI_BRNF, + }, + { +- .hook = br_nf_local_in, +- .pf = NFPROTO_BRIDGE, +- .hooknum = NF_BR_LOCAL_IN, +- .priority = NF_BR_PRI_BRNF, +- }, +- { + .hook = br_nf_forward_ip, + .pf = NFPROTO_BRIDGE, + .hooknum = NF_BR_FORWARD, +diff --git a/net/core/dev.c b/net/core/dev.c +index 08215a85c742..48399d8ce614 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1677,27 +1677,54 @@ EXPORT_SYMBOL_GPL(net_dec_ingress_queue); + static struct static_key netstamp_needed __read_mostly; + #ifdef HAVE_JUMP_LABEL + static atomic_t netstamp_needed_deferred; ++static atomic_t netstamp_wanted; + static void netstamp_clear(struct work_struct *work) + { + int deferred = atomic_xchg(&netstamp_needed_deferred, 0); ++ int wanted; + +- while (deferred--) +- static_key_slow_dec(&netstamp_needed); ++ wanted = atomic_add_return(deferred, &netstamp_wanted); ++ if (wanted > 0) ++ static_key_enable(&netstamp_needed); ++ else ++ static_key_disable(&netstamp_needed); + } + static DECLARE_WORK(netstamp_work, netstamp_clear); + #endif + + void net_enable_timestamp(void) + { ++#ifdef HAVE_JUMP_LABEL ++ int wanted; ++ ++ while (1) { ++ wanted = atomic_read(&netstamp_wanted); ++ if (wanted <= 0) ++ break; ++ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) ++ return; ++ } ++ atomic_inc(&netstamp_needed_deferred); ++ schedule_work(&netstamp_work); ++#else + static_key_slow_inc(&netstamp_needed); ++#endif + } + EXPORT_SYMBOL(net_enable_timestamp); + + void net_disable_timestamp(void) + { + #ifdef HAVE_JUMP_LABEL +- /* net_disable_timestamp() can be called from non process context */ +- atomic_inc(&netstamp_needed_deferred); ++ int wanted; ++ ++ while (1) { ++ wanted = atomic_read(&netstamp_wanted); ++ if (wanted <= 1) ++ break; ++ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) ++ return; ++ } ++ atomic_dec(&netstamp_needed_deferred); + schedule_work(&netstamp_work); + #else + static_key_slow_dec(&netstamp_needed); +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 4968b5ddea69..73dfd7729bc9 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -3678,13 +3678,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, + if (!skb_may_tx_timestamp(sk, false)) + return; + +- /* take a reference to prevent skb_orphan() from freeing the socket */ +- sock_hold(sk); +- +- *skb_hwtstamps(skb) = *hwtstamps; +- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); +- +- sock_put(sk); ++ /* Take a reference to prevent skb_orphan() from freeing the socket, ++ * but only if the socket refcount is not zero. 
++ */ ++ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { ++ *skb_hwtstamps(skb) = *hwtstamps; ++ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); ++ sock_put(sk); ++ } + } + EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); + +@@ -3735,7 +3736,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) + { + struct sock *sk = skb->sk; + struct sock_exterr_skb *serr; +- int err; ++ int err = 1; + + skb->wifi_acked_valid = 1; + skb->wifi_acked = acked; +@@ -3745,14 +3746,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; + +- /* take a reference to prevent skb_orphan() from freeing the socket */ +- sock_hold(sk); +- +- err = sock_queue_err_skb(sk, skb); ++ /* Take a reference to prevent skb_orphan() from freeing the socket, ++ * but only if the socket refcount is not zero. ++ */ ++ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { ++ err = sock_queue_err_skb(sk, skb); ++ sock_put(sk); ++ } + if (err) + kfree_skb(skb); +- +- sock_put(sk); + } + EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); + +diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c +index f053198e730c..5e3a7302f774 100644 +--- a/net/dccp/ccids/ccid2.c ++++ b/net/dccp/ccids/ccid2.c +@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk) + for (i = 0; i < hc->tx_seqbufc; i++) + kfree(hc->tx_seqbuf[i]); + hc->tx_seqbufc = 0; ++ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); + } + + static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c +index 0759f5b9180e..6467bf392e1b 100644 +--- a/net/dccp/ipv4.c ++++ b/net/dccp/ipv4.c +@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) + + switch (type) { + case ICMP_REDIRECT: +- dccp_do_redirect(skb, sk); ++ if (!sock_owned_by_user(sk)) ++ dccp_do_redirect(skb, sk); + goto out; + case ICMP_SOURCE_QUENCH: + /* Just silently ignore these. */ +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 27c4e81efa24..8113ad58fcb4 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + np = inet6_sk(sk); + + if (type == NDISC_REDIRECT) { +- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); ++ if (!sock_owned_by_user(sk)) { ++ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); + +- if (dst) +- dst->ops->redirect(dst, sk, skb); ++ if (dst) ++ dst->ops->redirect(dst, sk, skb); ++ } + goto out; + } + +diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c +index 1994f8af646b..68eed344b471 100644 +--- a/net/dccp/minisocks.c ++++ b/net/dccp/minisocks.c +@@ -122,6 +122,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, + /* It is still raw copy of parent, so invalidate + * destructor and make plain sk_free() */ + newsk->sk_destruct = NULL; ++ bh_unlock_sock(newsk); + sk_free(newsk); + return NULL; + } +@@ -145,6 +146,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, + struct dccp_request_sock *dreq = dccp_rsk(req); + bool own_req; + ++ /* TCP/DCCP listeners became lockless. ++ * DCCP stores complex state in its request_sock, so we need ++ * a protection for them, now this code runs without being protected ++ * by the parent (listener) lock. 
++ */ ++ spin_lock_bh(&dreq->dreq_lock); ++ + /* Check for retransmitted REQUEST */ + if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { + +@@ -159,7 +167,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, + inet_rtx_syn_ack(sk, req); + } + /* Network Duplicate, discard packet */ +- return NULL; ++ goto out; + } + + DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; +@@ -185,20 +193,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, + + child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, + req, &own_req); +- if (!child) +- goto listen_overflow; +- +- return inet_csk_complete_hashdance(sk, child, req, own_req); ++ if (child) { ++ child = inet_csk_complete_hashdance(sk, child, req, own_req); ++ goto out; ++ } + +-listen_overflow: +- dccp_pr_debug("listen_overflow!\n"); + DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; + drop: + if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) + req->rsk_ops->send_reset(sk, skb); + + inet_csk_reqsk_queue_drop(sk, req); +- return NULL; ++out: ++ spin_unlock_bh(&dreq->dreq_lock); ++ return child; + } + + EXPORT_SYMBOL_GPL(dccp_check_req); +@@ -249,6 +257,7 @@ int dccp_reqsk_init(struct request_sock *req, + { + struct dccp_request_sock *dreq = dccp_rsk(req); + ++ spin_lock_init(&dreq->dreq_lock); + inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; + inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); + inet_rsk(req)->acked = 0; +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index ef2f527a119b..da4d68d78590 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1958,6 +1958,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, + { + int res; + ++ tos &= IPTOS_RT_MASK; + rcu_read_lock(); + + /* Multicast recognition logic is moved from route cache to here. +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index b58a38eea059..198fc2314c82 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -271,10 +271,13 @@ EXPORT_SYMBOL(tcp_v4_connect); + */ + void tcp_v4_mtu_reduced(struct sock *sk) + { +- struct dst_entry *dst; + struct inet_sock *inet = inet_sk(sk); +- u32 mtu = tcp_sk(sk)->mtu_info; ++ struct dst_entry *dst; ++ u32 mtu; + ++ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) ++ return; ++ mtu = tcp_sk(sk)->mtu_info; + dst = inet_csk_update_pmtu(sk, mtu); + if (!dst) + return; +@@ -420,7 +423,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) + + switch (type) { + case ICMP_REDIRECT: +- do_redirect(icmp_skb, sk); ++ if (!sock_owned_by_user(sk)) ++ do_redirect(icmp_skb, sk); + goto out; + case ICMP_SOURCE_QUENCH: + /* Just silently ignore these. 
*/ +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c +index 193ba1fa8a9a..ebb34d0c5e80 100644 +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -223,7 +223,8 @@ void tcp_delack_timer_handler(struct sock *sk) + + sk_mem_reclaim_partial(sk); + +- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) ++ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || ++ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) + goto out; + + if (time_after(icsk->icsk_ack.timeout, jiffies)) { +@@ -504,7 +505,8 @@ void tcp_write_timer_handler(struct sock *sk) + struct inet_connection_sock *icsk = inet_csk(sk); + int event; + +- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) ++ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || ++ !icsk->icsk_pending) + goto out; + + if (time_after(icsk->icsk_timeout, jiffies)) { +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index 34cf46d74554..85bf86458706 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -903,6 +903,8 @@ add: + ins = &rt->dst.rt6_next; + iter = *ins; + while (iter) { ++ if (iter->rt6i_metric > rt->rt6i_metric) ++ break; + if (rt6_qualify_for_ecmp(iter)) { + *ins = iter->dst.rt6_next; + fib6_purge_rt(iter, fn, info->nl_net); +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 58900c21e4e4..8004532fa882 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -742,13 +742,14 @@ slow_path: + * Fragment the datagram. + */ + +- *prevhdr = NEXTHDR_FRAGMENT; + troom = rt->dst.dev->needed_tailroom; + + /* + * Keep copying data until we run out. + */ + while (left > 0) { ++ u8 *fragnexthdr_offset; ++ + len = left; + /* IF: it doesn't fit, use 'mtu' - the data space left */ + if (len > mtu) +@@ -793,6 +794,10 @@ slow_path: + */ + skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); + ++ fragnexthdr_offset = skb_network_header(frag); ++ fragnexthdr_offset += prevhdr - skb_network_header(skb); ++ *fragnexthdr_offset = NEXTHDR_FRAGMENT; ++ + /* + * Build fragment header. 
+ */ +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c +index 0a8610b33d79..bdcc4d9cedd3 100644 +--- a/net/ipv6/ip6_vti.c ++++ b/net/ipv6/ip6_vti.c +@@ -680,6 +680,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p) + u->link = p->link; + u->i_key = p->i_key; + u->o_key = p->o_key; ++ if (u->i_key) ++ u->i_flags |= GRE_KEY; ++ if (u->o_key) ++ u->o_flags |= GRE_KEY; + u->proto = p->proto; + + memcpy(u->name, p->name, sizeof(u->name)); +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 76a8c8057a23..1a63c4deef26 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -376,10 +376,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + np = inet6_sk(sk); + + if (type == NDISC_REDIRECT) { +- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); ++ if (!sock_owned_by_user(sk)) { ++ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); + +- if (dst) +- dst->ops->redirect(dst, sk, skb); ++ if (dst) ++ dst->ops->redirect(dst, sk, skb); ++ } + goto out; + } + +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c +index 445b7cd0826a..48ab93842322 100644 +--- a/net/l2tp/l2tp_ip.c ++++ b/net/l2tp/l2tp_ip.c +@@ -383,7 +383,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) + drop: + IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); + kfree_skb(skb); +- return -1; ++ return 0; + } + + /* Userspace will call sendmsg() on the tunnel socket to send L2TP +diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c +index 881bc2072809..52cfc4478511 100644 +--- a/net/mpls/af_mpls.c ++++ b/net/mpls/af_mpls.c +@@ -1567,6 +1567,7 @@ static void mpls_net_exit(struct net *net) + for (index = 0; index < platform_labels; index++) { + struct mpls_route *rt = rtnl_dereference(platform_label[index]); + RCU_INIT_POINTER(platform_label[index], NULL); ++ mpls_notify_route(net, index, rt, NULL, NULL); + mpls_rt_free(rt); + } + rtnl_unlock(); +diff --git a/net/netlink/Kconfig b/net/netlink/Kconfig +index 2c5e95e9bfbd..5d6e8c05b3d4 100644 +--- a/net/netlink/Kconfig ++++ b/net/netlink/Kconfig +@@ -2,15 +2,6 @@ + # Netlink Sockets + # + +-config NETLINK_MMAP +- bool "NETLINK: mmaped IO" +- ---help--- +- This option enables support for memory mapped netlink IO. This +- reduces overhead by avoiding copying data between kernel- and +- userspace. +- +- If unsure, say N. 
+- + config NETLINK_DIAG + tristate "NETLINK: socket monitoring interface" + default n +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 360700a2f46c..8e33019d8e7b 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -225,7 +225,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, + + dev_hold(dev); + +- if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head)) ++ if (is_vmalloc_addr(skb->head)) + nskb = netlink_to_full_skb(skb, GFP_ATOMIC); + else + nskb = skb_clone(skb, GFP_ATOMIC); +@@ -300,610 +300,8 @@ static void netlink_rcv_wake(struct sock *sk) + wake_up_interruptible(&nlk->wait); + } + +-#ifdef CONFIG_NETLINK_MMAP +-static bool netlink_rx_is_mmaped(struct sock *sk) +-{ +- return nlk_sk(sk)->rx_ring.pg_vec != NULL; +-} +- +-static bool netlink_tx_is_mmaped(struct sock *sk) +-{ +- return nlk_sk(sk)->tx_ring.pg_vec != NULL; +-} +- +-static __pure struct page *pgvec_to_page(const void *addr) +-{ +- if (is_vmalloc_addr(addr)) +- return vmalloc_to_page(addr); +- else +- return virt_to_page(addr); +-} +- +-static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len) +-{ +- unsigned int i; +- +- for (i = 0; i < len; i++) { +- if (pg_vec[i] != NULL) { +- if (is_vmalloc_addr(pg_vec[i])) +- vfree(pg_vec[i]); +- else +- free_pages((unsigned long)pg_vec[i], order); +- } +- } +- kfree(pg_vec); +-} +- +-static void *alloc_one_pg_vec_page(unsigned long order) +-{ +- void *buffer; +- gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | +- __GFP_NOWARN | __GFP_NORETRY; +- +- buffer = (void *)__get_free_pages(gfp_flags, order); +- if (buffer != NULL) +- return buffer; +- +- buffer = vzalloc((1 << order) * PAGE_SIZE); +- if (buffer != NULL) +- return buffer; +- +- gfp_flags &= ~__GFP_NORETRY; +- return (void *)__get_free_pages(gfp_flags, order); +-} +- +-static void **alloc_pg_vec(struct netlink_sock *nlk, +- struct nl_mmap_req *req, unsigned int order) +-{ +- unsigned int block_nr = req->nm_block_nr; +- unsigned int i; +- void **pg_vec; +- +- pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL); +- if (pg_vec == NULL) +- return NULL; +- +- for (i = 0; i < block_nr; i++) { +- pg_vec[i] = alloc_one_pg_vec_page(order); +- if (pg_vec[i] == NULL) +- goto err1; +- } +- +- return pg_vec; +-err1: +- free_pg_vec(pg_vec, order, block_nr); +- return NULL; +-} +- +- +-static void +-__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, +- unsigned int order) +-{ +- struct netlink_sock *nlk = nlk_sk(sk); +- struct sk_buff_head *queue; +- struct netlink_ring *ring; +- +- queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; +- ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; +- +- spin_lock_bh(&queue->lock); +- +- ring->frame_max = req->nm_frame_nr - 1; +- ring->head = 0; +- ring->frame_size = req->nm_frame_size; +- ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; +- +- swap(ring->pg_vec_len, req->nm_block_nr); +- swap(ring->pg_vec_order, order); +- swap(ring->pg_vec, pg_vec); +- +- __skb_queue_purge(queue); +- spin_unlock_bh(&queue->lock); +- +- WARN_ON(atomic_read(&nlk->mapped)); +- +- if (pg_vec) +- free_pg_vec(pg_vec, order, req->nm_block_nr); +-} +- +-static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, +- bool tx_ring) +-{ +- struct netlink_sock *nlk = nlk_sk(sk); +- struct netlink_ring *ring; +- void **pg_vec = NULL; +- unsigned int order = 0; +- +- ring = tx_ring ? 
&nlk->tx_ring : &nlk->rx_ring; +- +- if (atomic_read(&nlk->mapped)) +- return -EBUSY; +- if (atomic_read(&ring->pending)) +- return -EBUSY; +- +- if (req->nm_block_nr) { +- if (ring->pg_vec != NULL) +- return -EBUSY; +- +- if ((int)req->nm_block_size <= 0) +- return -EINVAL; +- if (!PAGE_ALIGNED(req->nm_block_size)) +- return -EINVAL; +- if (req->nm_frame_size < NL_MMAP_HDRLEN) +- return -EINVAL; +- if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT)) +- return -EINVAL; +- +- ring->frames_per_block = req->nm_block_size / +- req->nm_frame_size; +- if (ring->frames_per_block == 0) +- return -EINVAL; +- if (ring->frames_per_block * req->nm_block_nr != +- req->nm_frame_nr) +- return -EINVAL; +- +- order = get_order(req->nm_block_size); +- pg_vec = alloc_pg_vec(nlk, req, order); +- if (pg_vec == NULL) +- return -ENOMEM; +- } else { +- if (req->nm_frame_nr) +- return -EINVAL; +- } +- +- mutex_lock(&nlk->pg_vec_lock); +- if (atomic_read(&nlk->mapped) == 0) { +- __netlink_set_ring(sk, req, tx_ring, pg_vec, order); +- mutex_unlock(&nlk->pg_vec_lock); +- return 0; +- } +- +- mutex_unlock(&nlk->pg_vec_lock); +- +- if (pg_vec) +- free_pg_vec(pg_vec, order, req->nm_block_nr); +- +- return -EBUSY; +-} +- +-static void netlink_mm_open(struct vm_area_struct *vma) +-{ +- struct file *file = vma->vm_file; +- struct socket *sock = file->private_data; +- struct sock *sk = sock->sk; +- +- if (sk) +- atomic_inc(&nlk_sk(sk)->mapped); +-} +- +-static void netlink_mm_close(struct vm_area_struct *vma) +-{ +- struct file *file = vma->vm_file; +- struct socket *sock = file->private_data; +- struct sock *sk = sock->sk; +- +- if (sk) +- atomic_dec(&nlk_sk(sk)->mapped); +-} +- +-static const struct vm_operations_struct netlink_mmap_ops = { +- .open = netlink_mm_open, +- .close = netlink_mm_close, +-}; +- +-static int netlink_mmap(struct file *file, struct socket *sock, +- struct vm_area_struct *vma) +-{ +- struct sock *sk = sock->sk; +- struct netlink_sock *nlk = nlk_sk(sk); +- struct netlink_ring *ring; +- unsigned long start, size, expected; +- unsigned int i; +- int err = -EINVAL; +- +- if (vma->vm_pgoff) +- return -EINVAL; +- +- mutex_lock(&nlk->pg_vec_lock); +- +- expected = 0; +- for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) { +- if (ring->pg_vec == NULL) +- continue; +- expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE; +- } +- +- if (expected == 0) +- goto out; +- +- size = vma->vm_end - vma->vm_start; +- if (size != expected) +- goto out; +- +- start = vma->vm_start; +- for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) { +- if (ring->pg_vec == NULL) +- continue; +- +- for (i = 0; i < ring->pg_vec_len; i++) { +- struct page *page; +- void *kaddr = ring->pg_vec[i]; +- unsigned int pg_num; +- +- for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) { +- page = pgvec_to_page(kaddr); +- err = vm_insert_page(vma, start, page); +- if (err < 0) +- goto out; +- start += PAGE_SIZE; +- kaddr += PAGE_SIZE; +- } +- } +- } +- +- atomic_inc(&nlk->mapped); +- vma->vm_ops = &netlink_mmap_ops; +- err = 0; +-out: +- mutex_unlock(&nlk->pg_vec_lock); +- return err; +-} +- +-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len) +-{ +-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 +- struct page *p_start, *p_end; +- +- /* First page is flushed through netlink_{get,set}_status */ +- p_start = pgvec_to_page(hdr + PAGE_SIZE); +- p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1); +- while (p_start <= p_end) { +- flush_dcache_page(p_start); +- 
p_start++; +- } +-#endif +-} +- +-static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr) +-{ +- smp_rmb(); +- flush_dcache_page(pgvec_to_page(hdr)); +- return hdr->nm_status; +-} +- +-static void netlink_set_status(struct nl_mmap_hdr *hdr, +- enum nl_mmap_status status) +-{ +- smp_mb(); +- hdr->nm_status = status; +- flush_dcache_page(pgvec_to_page(hdr)); +-} +- +-static struct nl_mmap_hdr * +-__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos) +-{ +- unsigned int pg_vec_pos, frame_off; +- +- pg_vec_pos = pos / ring->frames_per_block; +- frame_off = pos % ring->frames_per_block; +- +- return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size); +-} +- +-static struct nl_mmap_hdr * +-netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos, +- enum nl_mmap_status status) +-{ +- struct nl_mmap_hdr *hdr; +- +- hdr = __netlink_lookup_frame(ring, pos); +- if (netlink_get_status(hdr) != status) +- return NULL; +- +- return hdr; +-} +- +-static struct nl_mmap_hdr * +-netlink_current_frame(const struct netlink_ring *ring, +- enum nl_mmap_status status) +-{ +- return netlink_lookup_frame(ring, ring->head, status); +-} +- +-static void netlink_increment_head(struct netlink_ring *ring) +-{ +- ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0; +-} +- +-static void netlink_forward_ring(struct netlink_ring *ring) +-{ +- unsigned int head = ring->head; +- const struct nl_mmap_hdr *hdr; +- +- do { +- hdr = __netlink_lookup_frame(ring, ring->head); +- if (hdr->nm_status == NL_MMAP_STATUS_UNUSED) +- break; +- if (hdr->nm_status != NL_MMAP_STATUS_SKIP) +- break; +- netlink_increment_head(ring); +- } while (ring->head != head); +-} +- +-static bool netlink_has_valid_frame(struct netlink_ring *ring) +-{ +- unsigned int head = ring->head, pos = head; +- const struct nl_mmap_hdr *hdr; +- +- do { +- hdr = __netlink_lookup_frame(ring, pos); +- if (hdr->nm_status == NL_MMAP_STATUS_VALID) +- return true; +- pos = pos != 0 ? pos - 1 : ring->frame_max; +- } while (pos != head); +- +- return false; +-} +- +-static bool netlink_dump_space(struct netlink_sock *nlk) +-{ +- struct netlink_ring *ring = &nlk->rx_ring; +- struct nl_mmap_hdr *hdr; +- unsigned int n; +- +- hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED); +- if (hdr == NULL) +- return false; +- +- n = ring->head + ring->frame_max / 2; +- if (n > ring->frame_max) +- n -= ring->frame_max; +- +- hdr = __netlink_lookup_frame(ring, n); +- +- return hdr->nm_status == NL_MMAP_STATUS_UNUSED; +-} +- +-static unsigned int netlink_poll(struct file *file, struct socket *sock, +- poll_table *wait) +-{ +- struct sock *sk = sock->sk; +- struct netlink_sock *nlk = nlk_sk(sk); +- unsigned int mask; +- int err; +- +- if (nlk->rx_ring.pg_vec != NULL) { +- /* Memory mapped sockets don't call recvmsg(), so flow control +- * for dumps is performed here. A dump is allowed to continue +- * if at least half the ring is unused. +- */ +- while (nlk->cb_running && netlink_dump_space(nlk)) { +- err = netlink_dump(sk); +- if (err < 0) { +- sk->sk_err = -err; +- sk->sk_error_report(sk); +- break; +- } +- } +- netlink_rcv_wake(sk); +- } +- +- mask = datagram_poll(file, sock, wait); +- +- /* We could already have received frames in the normal receive +- * queue, that will show up as NL_MMAP_STATUS_COPY in the ring, +- * so if mask contains pollin/etc already, there's no point +- * walking the ring. 
+- */ +- if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) { +- spin_lock_bh(&sk->sk_receive_queue.lock); +- if (nlk->rx_ring.pg_vec) { +- if (netlink_has_valid_frame(&nlk->rx_ring)) +- mask |= POLLIN | POLLRDNORM; +- } +- spin_unlock_bh(&sk->sk_receive_queue.lock); +- } +- +- spin_lock_bh(&sk->sk_write_queue.lock); +- if (nlk->tx_ring.pg_vec) { +- if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED)) +- mask |= POLLOUT | POLLWRNORM; +- } +- spin_unlock_bh(&sk->sk_write_queue.lock); +- +- return mask; +-} +- +-static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb) +-{ +- return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN); +-} +- +-static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk, +- struct netlink_ring *ring, +- struct nl_mmap_hdr *hdr) +-{ +- unsigned int size; +- void *data; +- +- size = ring->frame_size - NL_MMAP_HDRLEN; +- data = (void *)hdr + NL_MMAP_HDRLEN; +- +- skb->head = data; +- skb->data = data; +- skb_reset_tail_pointer(skb); +- skb->end = skb->tail + size; +- skb->len = 0; +- +- skb->destructor = netlink_skb_destructor; +- NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED; +- NETLINK_CB(skb).sk = sk; +-} +- +-static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg, +- u32 dst_portid, u32 dst_group, +- struct scm_cookie *scm) +-{ +- struct netlink_sock *nlk = nlk_sk(sk); +- struct netlink_ring *ring; +- struct nl_mmap_hdr *hdr; +- struct sk_buff *skb; +- unsigned int maxlen; +- int err = 0, len = 0; +- +- mutex_lock(&nlk->pg_vec_lock); +- +- ring = &nlk->tx_ring; +- maxlen = ring->frame_size - NL_MMAP_HDRLEN; +- +- do { +- unsigned int nm_len; +- +- hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID); +- if (hdr == NULL) { +- if (!(msg->msg_flags & MSG_DONTWAIT) && +- atomic_read(&nlk->tx_ring.pending)) +- schedule(); +- continue; +- } +- +- nm_len = ACCESS_ONCE(hdr->nm_len); +- if (nm_len > maxlen) { +- err = -EINVAL; +- goto out; +- } +- +- netlink_frame_flush_dcache(hdr, nm_len); +- +- skb = alloc_skb(nm_len, GFP_KERNEL); +- if (skb == NULL) { +- err = -ENOBUFS; +- goto out; +- } +- __skb_put(skb, nm_len); +- memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len); +- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED); +- +- netlink_increment_head(ring); +- +- NETLINK_CB(skb).portid = nlk->portid; +- NETLINK_CB(skb).dst_group = dst_group; +- NETLINK_CB(skb).creds = scm->creds; +- +- err = security_netlink_send(sk, skb); +- if (err) { +- kfree_skb(skb); +- goto out; +- } +- +- if (unlikely(dst_group)) { +- atomic_inc(&skb->users); +- netlink_broadcast(sk, skb, dst_portid, dst_group, +- GFP_KERNEL); +- } +- err = netlink_unicast(sk, skb, dst_portid, +- msg->msg_flags & MSG_DONTWAIT); +- if (err < 0) +- goto out; +- len += err; +- +- } while (hdr != NULL || +- (!(msg->msg_flags & MSG_DONTWAIT) && +- atomic_read(&nlk->tx_ring.pending))); +- +- if (len > 0) +- err = len; +-out: +- mutex_unlock(&nlk->pg_vec_lock); +- return err; +-} +- +-static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb) +-{ +- struct nl_mmap_hdr *hdr; +- +- hdr = netlink_mmap_hdr(skb); +- hdr->nm_len = skb->len; +- hdr->nm_group = NETLINK_CB(skb).dst_group; +- hdr->nm_pid = NETLINK_CB(skb).creds.pid; +- hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); +- hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); +- netlink_frame_flush_dcache(hdr, hdr->nm_len); +- netlink_set_status(hdr, NL_MMAP_STATUS_VALID); +- +- NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED; +- kfree_skb(skb); +-} +- 
+-static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) +-{ +- struct netlink_sock *nlk = nlk_sk(sk); +- struct netlink_ring *ring = &nlk->rx_ring; +- struct nl_mmap_hdr *hdr; +- +- spin_lock_bh(&sk->sk_receive_queue.lock); +- hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED); +- if (hdr == NULL) { +- spin_unlock_bh(&sk->sk_receive_queue.lock); +- kfree_skb(skb); +- netlink_overrun(sk); +- return; +- } +- netlink_increment_head(ring); +- __skb_queue_tail(&sk->sk_receive_queue, skb); +- spin_unlock_bh(&sk->sk_receive_queue.lock); +- +- hdr->nm_len = skb->len; +- hdr->nm_group = NETLINK_CB(skb).dst_group; +- hdr->nm_pid = NETLINK_CB(skb).creds.pid; +- hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); +- hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); +- netlink_set_status(hdr, NL_MMAP_STATUS_COPY); +-} +- +-#else /* CONFIG_NETLINK_MMAP */ +-#define netlink_rx_is_mmaped(sk) false +-#define netlink_tx_is_mmaped(sk) false +-#define netlink_mmap sock_no_mmap +-#define netlink_poll datagram_poll +-#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm) 0 +-#endif /* CONFIG_NETLINK_MMAP */ +- + static void netlink_skb_destructor(struct sk_buff *skb) + { +-#ifdef CONFIG_NETLINK_MMAP +- struct nl_mmap_hdr *hdr; +- struct netlink_ring *ring; +- struct sock *sk; +- +- /* If a packet from the kernel to userspace was freed because of an +- * error without being delivered to userspace, the kernel must reset +- * the status. In the direction userspace to kernel, the status is +- * always reset here after the packet was processed and freed. +- */ +- if (netlink_skb_is_mmaped(skb)) { +- hdr = netlink_mmap_hdr(skb); +- sk = NETLINK_CB(skb).sk; +- +- if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) { +- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED); +- ring = &nlk_sk(sk)->tx_ring; +- } else { +- if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) { +- hdr->nm_len = 0; +- netlink_set_status(hdr, NL_MMAP_STATUS_VALID); +- } +- ring = &nlk_sk(sk)->rx_ring; +- } +- +- WARN_ON(atomic_read(&ring->pending) == 0); +- atomic_dec(&ring->pending); +- sock_put(sk); +- +- skb->head = NULL; +- } +-#endif + if (is_vmalloc_addr(skb->head)) { + if (!skb->cloned || + !atomic_dec_return(&(skb_shinfo(skb)->dataref))) +@@ -936,18 +334,6 @@ static void netlink_sock_destruct(struct sock *sk) + } + + skb_queue_purge(&sk->sk_receive_queue); +-#ifdef CONFIG_NETLINK_MMAP +- if (1) { +- struct nl_mmap_req req; +- +- memset(&req, 0, sizeof(req)); +- if (nlk->rx_ring.pg_vec) +- __netlink_set_ring(sk, &req, false, NULL, 0); +- memset(&req, 0, sizeof(req)); +- if (nlk->tx_ring.pg_vec) +- __netlink_set_ring(sk, &req, true, NULL, 0); +- } +-#endif /* CONFIG_NETLINK_MMAP */ + + if (!sock_flag(sk, SOCK_DEAD)) { + printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); +@@ -1201,9 +587,6 @@ static int __netlink_create(struct net *net, struct socket *sock, + mutex_init(nlk->cb_mutex); + } + init_waitqueue_head(&nlk->wait); +-#ifdef CONFIG_NETLINK_MMAP +- mutex_init(&nlk->pg_vec_lock); +-#endif + + sk->sk_destruct = netlink_sock_destruct; + sk->sk_protocol = protocol; +@@ -1745,8 +1128,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, + nlk = nlk_sk(sk); + + if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || +- test_bit(NETLINK_S_CONGESTED, &nlk->state)) && +- !netlink_skb_is_mmaped(skb)) { ++ test_bit(NETLINK_S_CONGESTED, &nlk->state))) { + DECLARE_WAITQUEUE(wait, current); + if (!*timeo) { + if (!ssk || netlink_is_kernel(ssk)) +@@ -1784,14 +1166,7 @@ 
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) + + netlink_deliver_tap(skb); + +-#ifdef CONFIG_NETLINK_MMAP +- if (netlink_skb_is_mmaped(skb)) +- netlink_queue_mmaped_skb(sk, skb); +- else if (netlink_rx_is_mmaped(sk)) +- netlink_ring_set_copied(sk, skb); +- else +-#endif /* CONFIG_NETLINK_MMAP */ +- skb_queue_tail(&sk->sk_receive_queue, skb); ++ skb_queue_tail(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk); + return len; + } +@@ -1815,9 +1190,6 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) + int delta; + + WARN_ON(skb->sk != NULL); +- if (netlink_skb_is_mmaped(skb)) +- return skb; +- + delta = skb->end - skb->tail; + if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize) + return skb; +@@ -1897,71 +1269,6 @@ struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size, + unsigned int ldiff, u32 dst_portid, + gfp_t gfp_mask) + { +-#ifdef CONFIG_NETLINK_MMAP +- unsigned int maxlen, linear_size; +- struct sock *sk = NULL; +- struct sk_buff *skb; +- struct netlink_ring *ring; +- struct nl_mmap_hdr *hdr; +- +- sk = netlink_getsockbyportid(ssk, dst_portid); +- if (IS_ERR(sk)) +- goto out; +- +- ring = &nlk_sk(sk)->rx_ring; +- /* fast-path without atomic ops for common case: non-mmaped receiver */ +- if (ring->pg_vec == NULL) +- goto out_put; +- +- /* We need to account the full linear size needed as a ring +- * slot cannot have non-linear parts. +- */ +- linear_size = size + ldiff; +- if (ring->frame_size - NL_MMAP_HDRLEN < linear_size) +- goto out_put; +- +- skb = alloc_skb_head(gfp_mask); +- if (skb == NULL) +- goto err1; +- +- spin_lock_bh(&sk->sk_receive_queue.lock); +- /* check again under lock */ +- if (ring->pg_vec == NULL) +- goto out_free; +- +- /* check again under lock */ +- maxlen = ring->frame_size - NL_MMAP_HDRLEN; +- if (maxlen < linear_size) +- goto out_free; +- +- netlink_forward_ring(ring); +- hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED); +- if (hdr == NULL) +- goto err2; +- +- netlink_ring_setup_skb(skb, sk, ring, hdr); +- netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED); +- atomic_inc(&ring->pending); +- netlink_increment_head(ring); +- +- spin_unlock_bh(&sk->sk_receive_queue.lock); +- return skb; +- +-err2: +- kfree_skb(skb); +- spin_unlock_bh(&sk->sk_receive_queue.lock); +- netlink_overrun(sk); +-err1: +- sock_put(sk); +- return NULL; +- +-out_free: +- kfree_skb(skb); +- spin_unlock_bh(&sk->sk_receive_queue.lock); +-out_put: +- sock_put(sk); +-out: +-#endif + return alloc_skb(size, gfp_mask); + } + EXPORT_SYMBOL_GPL(__netlink_alloc_skb); +@@ -2242,8 +1549,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, + if (level != SOL_NETLINK) + return -ENOPROTOOPT; + +- if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING && +- optlen >= sizeof(int) && ++ if (optlen >= sizeof(int) && + get_user(val, (unsigned int __user *)optval)) + return -EFAULT; + +@@ -2296,25 +1602,6 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, + } + err = 0; + break; +-#ifdef CONFIG_NETLINK_MMAP +- case NETLINK_RX_RING: +- case NETLINK_TX_RING: { +- struct nl_mmap_req req; +- +- /* Rings might consume more memory than queue limits, require +- * CAP_NET_ADMIN. 
+- */ +- if (!capable(CAP_NET_ADMIN)) +- return -EPERM; +- if (optlen < sizeof(req)) +- return -EINVAL; +- if (copy_from_user(&req, optval, sizeof(req))) +- return -EFAULT; +- err = netlink_set_ring(sk, &req, +- optname == NETLINK_TX_RING); +- break; +- } +-#endif /* CONFIG_NETLINK_MMAP */ + case NETLINK_LISTEN_ALL_NSID: + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST)) + return -EPERM; +@@ -2484,18 +1771,6 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + smp_rmb(); + } + +- /* It's a really convoluted way for userland to ask for mmaped +- * sendmsg(), but that's what we've got... +- */ +- if (netlink_tx_is_mmaped(sk) && +- iter_is_iovec(&msg->msg_iter) && +- msg->msg_iter.nr_segs == 1 && +- msg->msg_iter.iov->iov_base == NULL) { +- err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, +- &scm); +- goto out; +- } +- + err = -EMSGSIZE; + if (len > sk->sk_sndbuf - 32) + goto out; +@@ -2812,8 +2087,7 @@ static int netlink_dump(struct sock *sk) + goto errout_skb; + } + +- if (!netlink_rx_is_mmaped(sk) && +- atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) ++ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) + goto errout_skb; + + /* NLMSG_GOODSIZE is small to avoid high order allocations being +@@ -2902,16 +2176,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + struct netlink_sock *nlk; + int ret; + +- /* Memory mapped dump requests need to be copied to avoid looping +- * on the pending state in netlink_mmap_sendmsg() while the CB hold +- * a reference to the skb. +- */ +- if (netlink_skb_is_mmaped(skb)) { +- skb = skb_copy(skb, GFP_KERNEL); +- if (skb == NULL) +- return -ENOBUFS; +- } else +- atomic_inc(&skb->users); ++ atomic_inc(&skb->users); + + sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid); + if (sk == NULL) { +@@ -3255,7 +2520,7 @@ static const struct proto_ops netlink_ops = { + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = netlink_getname, +- .poll = netlink_poll, ++ .poll = datagram_poll, + .ioctl = sock_no_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, +@@ -3263,7 +2528,7 @@ static const struct proto_ops netlink_ops = { + .getsockopt = netlink_getsockopt, + .sendmsg = netlink_sendmsg, + .recvmsg = netlink_recvmsg, +- .mmap = netlink_mmap, ++ .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, + }; + +diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h +index df32cb92d9fc..ea4600aea6b0 100644 +--- a/net/netlink/af_netlink.h ++++ b/net/netlink/af_netlink.h +@@ -45,12 +45,6 @@ struct netlink_sock { + int (*netlink_bind)(struct net *net, int group); + void (*netlink_unbind)(struct net *net, int group); + struct module *module; +-#ifdef CONFIG_NETLINK_MMAP +- struct mutex pg_vec_lock; +- struct netlink_ring rx_ring; +- struct netlink_ring tx_ring; +- atomic_t mapped; +-#endif /* CONFIG_NETLINK_MMAP */ + + struct rhash_head node; + struct rcu_head rcu; +@@ -62,15 +56,6 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk) + return container_of(sk, struct netlink_sock, sk); + } + +-static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb) +-{ +-#ifdef CONFIG_NETLINK_MMAP +- return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; +-#else +- return false; +-#endif /* CONFIG_NETLINK_MMAP */ +-} +- + struct netlink_table { + struct rhashtable hash; + struct hlist_head mc_list; +diff --git a/net/netlink/diag.c b/net/netlink/diag.c +index 3ee63a3cff30..8dd836a8dd60 100644 +--- a/net/netlink/diag.c ++++ 
b/net/netlink/diag.c +@@ -8,41 +8,6 @@ + + #include "af_netlink.h" + +-#ifdef CONFIG_NETLINK_MMAP +-static int sk_diag_put_ring(struct netlink_ring *ring, int nl_type, +- struct sk_buff *nlskb) +-{ +- struct netlink_diag_ring ndr; +- +- ndr.ndr_block_size = ring->pg_vec_pages << PAGE_SHIFT; +- ndr.ndr_block_nr = ring->pg_vec_len; +- ndr.ndr_frame_size = ring->frame_size; +- ndr.ndr_frame_nr = ring->frame_max + 1; +- +- return nla_put(nlskb, nl_type, sizeof(ndr), &ndr); +-} +- +-static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb) +-{ +- struct netlink_sock *nlk = nlk_sk(sk); +- int ret; +- +- mutex_lock(&nlk->pg_vec_lock); +- ret = sk_diag_put_ring(&nlk->rx_ring, NETLINK_DIAG_RX_RING, nlskb); +- if (!ret) +- ret = sk_diag_put_ring(&nlk->tx_ring, NETLINK_DIAG_TX_RING, +- nlskb); +- mutex_unlock(&nlk->pg_vec_lock); +- +- return ret; +-} +-#else +-static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb) +-{ +- return 0; +-} +-#endif +- + static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb) + { + struct netlink_sock *nlk = nlk_sk(sk); +@@ -87,10 +52,6 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, + sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO)) + goto out_nlmsg_trim; + +- if ((req->ndiag_show & NDIAG_SHOW_RING_CFG) && +- sk_diag_put_rings_cfg(sk, skb)) +- goto out_nlmsg_trim; +- + nlmsg_end(skb, nlh); + return 0; + +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index d805cd577a60..3975ac809934 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -3021,7 +3021,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, + int addr_len) + { + struct sock *sk = sock->sk; +- char name[15]; ++ char name[sizeof(uaddr->sa_data) + 1]; + + /* + * Check legality +@@ -3029,7 +3029,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, + + if (addr_len != sizeof(struct sockaddr)) + return -EINVAL; +- strlcpy(name, uaddr->sa_data, sizeof(name)); ++ /* uaddr->sa_data comes from the userspace, it's not guaranteed to be ++ * zero-terminated. 
++ */ ++ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); ++ name[sizeof(uaddr->sa_data)] = 0; + + return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); + } +diff --git a/net/sched/act_api.c b/net/sched/act_api.c +index 06e7c4a37245..694a06f1e0d5 100644 +--- a/net/sched/act_api.c ++++ b/net/sched/act_api.c +@@ -820,10 +820,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, + goto out_module_put; + + err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a); +- if (err < 0) ++ if (err <= 0) + goto out_module_put; +- if (err == 0) +- goto noflush_out; + + nla_nest_end(skb, nest); + +@@ -840,7 +838,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, + out_module_put: + module_put(a.ops->owner); + err_out: +-noflush_out: + kfree_skb(skb); + return err; + } +diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c +index bb41699c6c49..7ecb14f3db54 100644 +--- a/net/sched/act_connmark.c ++++ b/net/sched/act_connmark.c +@@ -109,6 +109,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, + if (ret < 0) + return ret; + ++ if (!tb[TCA_CONNMARK_PARMS]) ++ return -EINVAL; ++ + parm = nla_data(tb[TCA_CONNMARK_PARMS]); + + if (!tcf_hash_check(parm->index, a, bind)) { diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.56-57.patch b/patch/kernel/mvebu64-default/03-patch-4.4.56-57.patch new file mode 100644 index 000000000..d28c5d6c4 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.56-57.patch @@ -0,0 +1,1172 @@ +diff --git a/Makefile b/Makefile +index cf9303a5d621..841675e63a38 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 56 ++SUBLEVEL = 57 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S +index 861e72109df2..f080abfc2f83 100644 +--- a/arch/powerpc/boot/zImage.lds.S ++++ b/arch/powerpc/boot/zImage.lds.S +@@ -68,6 +68,7 @@ SECTIONS + } + + #ifdef CONFIG_PPC64_BOOT_WRAPPER ++ . 
= ALIGN(256); + .got : + { + __toc_start = .; +diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c +index 5cc2e7af3a7b..b379146de55b 100644 +--- a/arch/powerpc/kvm/emulate.c ++++ b/arch/powerpc/kvm/emulate.c +@@ -302,7 +302,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) + advance = 0; + printk(KERN_ERR "Couldn't emulate instruction 0x%08x " + "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); +- kvmppc_core_queue_program(vcpu, 0); + } + } + +diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c +index 3a40f718baef..4004e03267cd 100644 +--- a/arch/s390/pci/pci_dma.c ++++ b/arch/s390/pci/pci_dma.c +@@ -455,7 +455,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) + zdev->dma_table = dma_alloc_cpu_table(); + if (!zdev->dma_table) { + rc = -ENOMEM; +- goto out_clean; ++ goto out; + } + + /* +@@ -475,18 +475,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev) + zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); + if (!zdev->iommu_bitmap) { + rc = -ENOMEM; +- goto out_reg; ++ goto free_dma_table; + } + + rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, + (u64) zdev->dma_table); + if (rc) +- goto out_reg; +- return 0; ++ goto free_bitmap; + +-out_reg: ++ return 0; ++free_bitmap: ++ vfree(zdev->iommu_bitmap); ++ zdev->iommu_bitmap = NULL; ++free_dma_table: + dma_free_cpu_table(zdev->dma_table); +-out_clean: ++ zdev->dma_table = NULL; ++out: + return rc; + } + +diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c +index 440df0c7a2ee..a69321a77783 100644 +--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c ++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c +@@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req) + } + } + ++static int ghash_async_import(struct ahash_request *req, const void *in) ++{ ++ struct ahash_request *cryptd_req = ahash_request_ctx(req); ++ struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ++ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ++ ++ ghash_async_init(req); ++ memcpy(dctx, in, sizeof(*dctx)); ++ return 0; ++ ++} ++ ++static int ghash_async_export(struct ahash_request *req, void *out) ++{ ++ struct ahash_request *cryptd_req = ahash_request_ctx(req); ++ struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ++ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ++ ++ memcpy(out, dctx, sizeof(*dctx)); ++ return 0; ++ ++} ++ + static int ghash_async_digest(struct ahash_request *req) + { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); +@@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = { + .final = ghash_async_final, + .setkey = ghash_async_setkey, + .digest = ghash_async_digest, ++ .export = ghash_async_export, ++ .import = ghash_async_import, + .halg = { + .digestsize = GHASH_DIGEST_SIZE, ++ .statesize = sizeof(struct ghash_desc_ctx), + .base = { + .cra_name = "ghash", + .cra_driver_name = "ghash-clmulni", +diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c +index 9e2ba5c6e1dd..f42e78de1e10 100644 +--- a/arch/x86/xen/spinlock.c ++++ b/arch/x86/xen/spinlock.c +@@ -27,6 +27,12 @@ static bool xen_pvspin = true; + + static void xen_qlock_kick(int cpu) + { ++ int irq = per_cpu(lock_kicker_irq, cpu); ++ ++ /* Don't kick if the target's kicker interrupt is not initialized. 
*/ ++ if (irq == -1) ++ return; ++ + xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); + } + +diff --git a/crypto/cryptd.c b/crypto/cryptd.c +index e7aa904cb20b..26a504db3f53 100644 +--- a/crypto/cryptd.c ++++ b/crypto/cryptd.c +@@ -642,6 +642,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + inst->alg.halg.base.cra_flags = type; + + inst->alg.halg.digestsize = salg->digestsize; ++ inst->alg.halg.statesize = salg->statesize; + inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); + + inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; +diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c +index a0ceb41d5ccc..b4f3930266b1 100644 +--- a/crypto/mcryptd.c ++++ b/crypto/mcryptd.c +@@ -531,6 +531,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + inst->alg.halg.base.cra_flags = type; + + inst->alg.halg.digestsize = salg->digestsize; ++ inst->alg.halg.statesize = salg->statesize; + inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); + + inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; +diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c +index 5fdac394207a..549cdbed7b0e 100644 +--- a/drivers/acpi/acpi_video.c ++++ b/drivers/acpi/acpi_video.c +@@ -1211,6 +1211,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video) + union acpi_object *dod = NULL; + union acpi_object *obj; + ++ if (!video->cap._DOD) ++ return AE_NOT_EXIST; ++ + status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer); + if (!ACPI_SUCCESS(status)) { + ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD")); +diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c +index 65f7eecc45b0..f10a107614b4 100644 +--- a/drivers/char/tpm/tpm_tis.c ++++ b/drivers/char/tpm/tpm_tis.c +@@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip) + iowrite32(intmask, + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); +- free_irq(chip->vendor.irq, chip); ++ devm_free_irq(chip->pdev, chip->vendor.irq, chip); + chip->vendor.irq = 0; + } + +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 8412ce5f93a7..86fa9fdc8323 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -626,9 +626,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, + char *buf) + { + unsigned int cur_freq = __cpufreq_get(policy); +- if (!cur_freq) +- return sprintf(buf, ""); +- return sprintf(buf, "%u\n", cur_freq); ++ ++ if (cur_freq) ++ return sprintf(buf, "%u\n", cur_freq); ++ ++ return sprintf(buf, "\n"); + } + + /** +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +index 7c42ff670080..a0924330d125 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +@@ -25,6 +25,7 @@ + * Alex Deucher + * Jerome Glisse + */ ++#include + #include + #include + #include +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index 57c191798699..ddbf7e7e0d98 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -274,7 +274,7 @@ cleanup: + * + * This routine is called normally during driver unloading or exiting. 
+ */ +-void hv_cleanup(void) ++void hv_cleanup(bool crash) + { + union hv_x64_msr_hypercall_contents hypercall_msr; + +@@ -284,7 +284,8 @@ void hv_cleanup(void) + if (hv_context.hypercall_page) { + hypercall_msr.as_uint64 = 0; + wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); +- vfree(hv_context.hypercall_page); ++ if (!crash) ++ vfree(hv_context.hypercall_page); + hv_context.hypercall_page = NULL; + } + +@@ -304,7 +305,8 @@ void hv_cleanup(void) + + hypercall_msr.as_uint64 = 0; + wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); +- vfree(hv_context.tsc_page); ++ if (!crash) ++ vfree(hv_context.tsc_page); + hv_context.tsc_page = NULL; + } + #endif +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +index b853b4b083bd..43af91362be5 100644 +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -714,7 +714,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) + * If the pfn range we are dealing with is not in the current + * "hot add block", move on. + */ +- if ((start_pfn >= has->end_pfn)) ++ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) + continue; + /* + * If the current hot add-request extends beyond +@@ -768,7 +768,7 @@ static unsigned long handle_pg_range(unsigned long pg_start, + * If the pfn range we are dealing with is not in the current + * "hot add block", move on. + */ +- if ((start_pfn >= has->end_pfn)) ++ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) + continue; + + old_covered_state = has->covered_end_pfn; +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h +index 12156db2e88e..75e383e6d03d 100644 +--- a/drivers/hv/hyperv_vmbus.h ++++ b/drivers/hv/hyperv_vmbus.h +@@ -581,7 +581,7 @@ struct hv_ring_buffer_debug_info { + + extern int hv_init(void); + +-extern void hv_cleanup(void); ++extern void hv_cleanup(bool crash); + + extern int hv_post_message(union hv_connection_id connection_id, + enum hv_message_type message_type, +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index 509ed9731630..802dcb409030 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -889,7 +889,7 @@ err_alloc: + bus_unregister(&hv_bus); + + err_cleanup: +- hv_cleanup(); ++ hv_cleanup(false); + + return ret; + } +@@ -1254,7 +1254,7 @@ static void hv_kexec_handler(void) + vmbus_initiate_unload(); + for_each_online_cpu(cpu) + smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); +- hv_cleanup(); ++ hv_cleanup(false); + }; + + static void hv_crash_handler(struct pt_regs *regs) +@@ -1266,7 +1266,7 @@ static void hv_crash_handler(struct pt_regs *regs) + * for kdump. 
+ */ + hv_synic_cleanup(NULL); +- hv_cleanup(); ++ hv_cleanup(true); + }; + + static int __init hv_acpi_init(void) +@@ -1330,7 +1330,7 @@ static void __exit vmbus_exit(void) + &hyperv_panic_block); + } + bus_unregister(&hv_bus); +- hv_cleanup(); ++ hv_cleanup(false); + for_each_online_cpu(cpu) { + tasklet_kill(hv_context.event_dpc[cpu]); + smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); +diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c +index aecec6d32463..7f1c625b08ec 100644 +--- a/drivers/isdn/gigaset/bas-gigaset.c ++++ b/drivers/isdn/gigaset/bas-gigaset.c +@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface, + return -ENODEV; + } + ++ if (hostif->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + dev_info(&udev->dev, + "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", + __func__, le16_to_cpu(udev->descriptor.idVendor), +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index ebb0dd612ebd..122af340a531 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -1477,7 +1477,25 @@ static void make_request(struct mddev *mddev, struct bio *bio) + split = bio; + } + ++ /* ++ * If a bio is splitted, the first part of bio will pass ++ * barrier but the bio is queued in current->bio_list (see ++ * generic_make_request). If there is a raise_barrier() called ++ * here, the second part of bio can't pass barrier. But since ++ * the first part bio isn't dispatched to underlaying disks ++ * yet, the barrier is never released, hence raise_barrier will ++ * alays wait. We have a deadlock. ++ * Note, this only happens in read path. For write path, the ++ * first part of bio is dispatched in a schedule() call ++ * (because of blk plug) or offloaded to raid10d. ++ * Quitting from the function immediately can change the bio ++ * order queued in bio_list and avoid the deadlock. 
++ */ + __make_request(mddev, split); ++ if (split != bio && bio_data_dir(bio) == READ) { ++ generic_make_request(bio); ++ break; ++ } + } while (split != bio); + + /* In case raid10d snuck in to freeze_array */ +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index e8a09ff9e724..c8a7802d2953 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -197,65 +197,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, + return ppi; + } + +-union sub_key { +- u64 k; +- struct { +- u8 pad[3]; +- u8 kb; +- u32 ka; +- }; +-}; +- +-/* Toeplitz hash function +- * data: network byte order +- * return: host byte order +- */ +-static u32 comp_hash(u8 *key, int klen, void *data, int dlen) +-{ +- union sub_key subk; +- int k_next = 4; +- u8 dt; +- int i, j; +- u32 ret = 0; +- +- subk.k = 0; +- subk.ka = ntohl(*(u32 *)key); +- +- for (i = 0; i < dlen; i++) { +- subk.kb = key[k_next]; +- k_next = (k_next + 1) % klen; +- dt = ((u8 *)data)[i]; +- for (j = 0; j < 8; j++) { +- if (dt & 0x80) +- ret ^= subk.ka; +- dt <<= 1; +- subk.k <<= 1; +- } +- } +- +- return ret; +-} +- +-static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) +-{ +- struct flow_keys flow; +- int data_len; +- +- if (!skb_flow_dissect_flow_keys(skb, &flow, 0) || +- !(flow.basic.n_proto == htons(ETH_P_IP) || +- flow.basic.n_proto == htons(ETH_P_IPV6))) +- return false; +- +- if (flow.basic.ip_proto == IPPROTO_TCP) +- data_len = 12; +- else +- data_len = 8; +- +- *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len); +- +- return true; +-} +- + static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) + { +@@ -268,11 +209,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, + if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) + return 0; + +- if (netvsc_set_hash(&hash, skb)) { +- q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % +- ndev->real_num_tx_queues; +- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); +- } ++ hash = skb_get_hash(skb); ++ q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % ++ ndev->real_num_tx_queues; + + return q_idx; + } +diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c +index a009ae34c5ef..930f0f25c1ce 100644 +--- a/drivers/pinctrl/intel/pinctrl-cherryview.c ++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c +@@ -1466,12 +1466,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) + offset += range->npins; + } + +- /* Mask and clear all interrupts */ +- chv_writel(0, pctrl->regs + CHV_INTMASK); ++ /* Clear all interrupts */ + chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); + + ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, +- handle_simple_irq, IRQ_TYPE_NONE); ++ handle_bad_irq, IRQ_TYPE_NONE); + if (ret) { + dev_err(pctrl->dev, "failed to add IRQ chip\n"); + goto fail; +diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h +index 5ada9268a450..a8ac4c0a1493 100644 +--- a/drivers/scsi/cxlflash/common.h ++++ b/drivers/scsi/cxlflash/common.h +@@ -34,7 +34,6 @@ extern const struct file_operations cxlflash_cxl_fops; + sectors + */ + +-#define NUM_RRQ_ENTRY 16 /* for master issued cmds */ + #define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry)) + + /* AFU command retry limit */ +@@ -48,9 +47,12 @@ extern const struct file_operations cxlflash_cxl_fops; + index derivation + */ + +-#define CXLFLASH_MAX_CMDS 
16 ++#define CXLFLASH_MAX_CMDS 256 + #define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS + ++/* RRQ for master issued cmds */ ++#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS ++ + + static inline void check_sizes(void) + { +@@ -149,7 +151,7 @@ struct afu_cmd { + struct afu { + /* Stuff requiring alignment go first. */ + +- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */ ++ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ + /* + * Command & data for AFU commands. + */ +diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c +index c86847c68448..2882bcac918a 100644 +--- a/drivers/scsi/cxlflash/main.c ++++ b/drivers/scsi/cxlflash/main.c +@@ -2305,7 +2305,7 @@ static struct scsi_host_template driver_template = { + .eh_device_reset_handler = cxlflash_eh_device_reset_handler, + .eh_host_reset_handler = cxlflash_eh_host_reset_handler, + .change_queue_depth = cxlflash_change_queue_depth, +- .cmd_per_lun = 16, ++ .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, + .can_queue = CXLFLASH_MAX_CMDS, + .this_id = -1, + .sg_tablesize = SG_NONE, /* No scatter gather support */ +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index 6bffd91b973a..c1ccf1ee99ea 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) + WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); + task->state = state; + +- if (!list_empty(&task->running)) ++ spin_lock_bh(&conn->taskqueuelock); ++ if (!list_empty(&task->running)) { ++ pr_debug_once("%s while task on list", __func__); + list_del_init(&task->running); ++ } ++ spin_unlock_bh(&conn->taskqueuelock); + + if (conn->task == task) + conn->task = NULL; +@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + if (session->tt->xmit_task(task)) + goto free_task; + } else { ++ spin_lock_bh(&conn->taskqueuelock); + list_add_tail(&task->running, &conn->mgmtqueue); ++ spin_unlock_bh(&conn->taskqueuelock); + iscsi_conn_queue_work(conn); + } + +@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task) + * this may be on the requeue list already if the xmit_task callout + * is handling the r2ts while we are adding new ones + */ ++ spin_lock_bh(&conn->taskqueuelock); + if (list_empty(&task->running)) + list_add_tail(&task->running, &conn->requeue); ++ spin_unlock_bh(&conn->taskqueuelock); + iscsi_conn_queue_work(conn); + } + EXPORT_SYMBOL_GPL(iscsi_requeue_task); +@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) + * only have one nop-out as a ping from us and targets should not + * overflow us with nop-ins + */ ++ spin_lock_bh(&conn->taskqueuelock); + check_mgmt: + while (!list_empty(&conn->mgmtqueue)) { + conn->task = list_entry(conn->mgmtqueue.next, + struct iscsi_task, running); + list_del_init(&conn->task->running); ++ spin_unlock_bh(&conn->taskqueuelock); + if (iscsi_prep_mgmt_task(conn, conn->task)) { + /* regular RX path uses back_lock */ + spin_lock_bh(&conn->session->back_lock); + __iscsi_put_task(conn->task); + spin_unlock_bh(&conn->session->back_lock); + conn->task = NULL; ++ spin_lock_bh(&conn->taskqueuelock); + continue; + } + rc = iscsi_xmit_task(conn); + if (rc) + goto done; ++ spin_lock_bh(&conn->taskqueuelock); + } + + /* process pending command queue */ +@@ -1535,19 +1547,24 @@ check_mgmt: + conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, + running); + list_del_init(&conn->task->running); ++ spin_unlock_bh(&conn->taskqueuelock); + if (conn->session->state == 
ISCSI_STATE_LOGGING_OUT) { + fail_scsi_task(conn->task, DID_IMM_RETRY); ++ spin_lock_bh(&conn->taskqueuelock); + continue; + } + rc = iscsi_prep_scsi_cmd_pdu(conn->task); + if (rc) { + if (rc == -ENOMEM || rc == -EACCES) { ++ spin_lock_bh(&conn->taskqueuelock); + list_add_tail(&conn->task->running, + &conn->cmdqueue); + conn->task = NULL; ++ spin_unlock_bh(&conn->taskqueuelock); + goto done; + } else + fail_scsi_task(conn->task, DID_ABORT); ++ spin_lock_bh(&conn->taskqueuelock); + continue; + } + rc = iscsi_xmit_task(conn); +@@ -1558,6 +1575,7 @@ check_mgmt: + * we need to check the mgmt queue for nops that need to + * be sent to aviod starvation + */ ++ spin_lock_bh(&conn->taskqueuelock); + if (!list_empty(&conn->mgmtqueue)) + goto check_mgmt; + } +@@ -1577,12 +1595,15 @@ check_mgmt: + conn->task = task; + list_del_init(&conn->task->running); + conn->task->state = ISCSI_TASK_RUNNING; ++ spin_unlock_bh(&conn->taskqueuelock); + rc = iscsi_xmit_task(conn); + if (rc) + goto done; ++ spin_lock_bh(&conn->taskqueuelock); + if (!list_empty(&conn->mgmtqueue)) + goto check_mgmt; + } ++ spin_unlock_bh(&conn->taskqueuelock); + spin_unlock_bh(&conn->session->frwd_lock); + return -ENODATA; + +@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) + goto prepd_reject; + } + } else { ++ spin_lock_bh(&conn->taskqueuelock); + list_add_tail(&task->running, &conn->cmdqueue); ++ spin_unlock_bh(&conn->taskqueuelock); + iscsi_conn_queue_work(conn); + } + +@@ -2900,6 +2923,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, + INIT_LIST_HEAD(&conn->mgmtqueue); + INIT_LIST_HEAD(&conn->cmdqueue); + INIT_LIST_HEAD(&conn->requeue); ++ spin_lock_init(&conn->taskqueuelock); + INIT_WORK(&conn->xmitwork, iscsi_xmitworker); + + /* allocate login_task used for the login/text sequences */ +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index c14ab6c3ae40..60c21093f865 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -11387,6 +11387,7 @@ static struct pci_driver lpfc_driver = { + .id_table = lpfc_id_table, + .probe = lpfc_pci_probe_one, + .remove = lpfc_pci_remove_one, ++ .shutdown = lpfc_pci_remove_one, + .suspend = lpfc_pci_suspend_one, + .resume = lpfc_pci_resume_one, + .err_handler = &lpfc_err_handler, +diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c +index de18790eb21c..d72a4058fd08 100644 +--- a/drivers/target/target_core_pscsi.c ++++ b/drivers/target/target_core_pscsi.c +@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, + + buf = kzalloc(12, GFP_KERNEL); + if (!buf) +- return; ++ goto out_free; + + memset(cdb, 0, MAX_COMMAND_SIZE); + cdb[0] = MODE_SENSE; +@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, + * If MODE_SENSE still returns zero, set the default value to 1024. 
+ */ + sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); ++out_free: + if (!sdev->sector_size) + sdev->sector_size = 1024; +-out_free: ++ + kfree(buf); + } + +@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, + sd->lun, sd->queue_depth); + } + +- dev->dev_attrib.hw_block_size = sd->sector_size; ++ dev->dev_attrib.hw_block_size = ++ min_not_zero((int)sd->sector_size, 512); + dev->dev_attrib.hw_max_sectors = +- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); ++ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); + dev->dev_attrib.hw_queue_depth = sd->queue_depth; + + /* +@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, + /* + * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. + */ +- if (sd->type == TYPE_TAPE) ++ if (sd->type == TYPE_TAPE) { + pscsi_tape_read_blocksize(dev, sd); ++ dev->dev_attrib.hw_block_size = sd->sector_size; ++ } + return 0; + } + +@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) + /* + * Called with struct Scsi_Host->host_lock called. + */ +-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) ++static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) + __releases(sh->host_lock) + { + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; +@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) + return 0; + } + +-/* +- * Called with struct Scsi_Host->host_lock called. +- */ +-static int pscsi_create_type_other(struct se_device *dev, +- struct scsi_device *sd) +- __releases(sh->host_lock) +-{ +- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; +- struct Scsi_Host *sh = sd->host; +- int ret; +- +- spin_unlock_irq(sh->host_lock); +- ret = pscsi_add_device_to_list(dev, sd); +- if (ret) +- return ret; +- +- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", +- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, +- sd->channel, sd->id, sd->lun); +- return 0; +-} +- + static int pscsi_configure_device(struct se_device *dev) + { + struct se_hba *hba = dev->se_hba; +@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev) + case TYPE_DISK: + ret = pscsi_create_type_disk(dev, sd); + break; +- case TYPE_ROM: +- ret = pscsi_create_type_rom(dev, sd); +- break; + default: +- ret = pscsi_create_type_other(dev, sd); ++ ret = pscsi_create_type_nondisk(dev, sd); + break; + } + +@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev) + else if (pdv->pdv_lld_host) + scsi_host_put(pdv->pdv_lld_host); + +- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) +- scsi_device_put(sd); ++ scsi_device_put(sd); + + pdv->pdv_sd = NULL; + } +@@ -1088,7 +1066,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) + if (pdv->pdv_bd && pdv->pdv_bd->bd_part) + return pdv->pdv_bd->bd_part->nr_sects; + +- dump_stack(); + return 0; + } + +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index 2e27b1034ede..90c5dffc9fa4 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -1096,9 +1096,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + return ret; + break; + case VERIFY: ++ case VERIFY_16: + size = 0; +- sectors = transport_get_sectors_10(cdb); +- cmd->t_task_lba = transport_lba_32(cdb); ++ if (cdb[0] == VERIFY) { ++ sectors = transport_get_sectors_10(cdb); ++ cmd->t_task_lba = transport_lba_32(cdb); ++ 
} else { ++ sectors = transport_get_sectors_16(cdb); ++ cmd->t_task_lba = transport_lba_64(cdb); ++ } + cmd->execute_cmd = sbc_emulate_noop; + goto check_lba; + case REZERO_UNIT: +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index f44ce09367bc..5724d7c41e29 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -966,7 +966,7 @@ static void usb_bus_init (struct usb_bus *bus) + bus->bandwidth_allocated = 0; + bus->bandwidth_int_reqs = 0; + bus->bandwidth_isoc_reqs = 0; +- mutex_init(&bus->usb_address0_mutex); ++ mutex_init(&bus->devnum_next_mutex); + + INIT_LIST_HEAD (&bus->bus_list); + } +@@ -2497,6 +2497,14 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, + return NULL; + } + if (primary_hcd == NULL) { ++ hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex), ++ GFP_KERNEL); ++ if (!hcd->address0_mutex) { ++ kfree(hcd); ++ dev_dbg(dev, "hcd address0 mutex alloc failed\n"); ++ return NULL; ++ } ++ mutex_init(hcd->address0_mutex); + hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), + GFP_KERNEL); + if (!hcd->bandwidth_mutex) { +@@ -2508,6 +2516,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, + dev_set_drvdata(dev, hcd); + } else { + mutex_lock(&usb_port_peer_mutex); ++ hcd->address0_mutex = primary_hcd->address0_mutex; + hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex; + hcd->primary_hcd = primary_hcd; + primary_hcd->primary_hcd = primary_hcd; +@@ -2564,24 +2573,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd); + * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is + * deallocated. + * +- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is +- * freed. When hcd_release() is called for either hcd in a peer set +- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to +- * block new peering attempts ++ * Make sure to deallocate the bandwidth_mutex only when the last HCD is ++ * freed. When hcd_release() is called for either hcd in a peer set, ++ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers. 
+ */ + static void hcd_release(struct kref *kref) + { + struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref); + + mutex_lock(&usb_port_peer_mutex); +- if (usb_hcd_is_primary_hcd(hcd)) +- kfree(hcd->bandwidth_mutex); + if (hcd->shared_hcd) { + struct usb_hcd *peer = hcd->shared_hcd; + + peer->shared_hcd = NULL; +- if (peer->primary_hcd == hcd) +- peer->primary_hcd = NULL; ++ peer->primary_hcd = NULL; ++ } else { ++ kfree(hcd->address0_mutex); ++ kfree(hcd->bandwidth_mutex); + } + mutex_unlock(&usb_port_peer_mutex); + kfree(hcd); +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 780db8bb2262..f52d8abf6979 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -1980,7 +1980,7 @@ static void choose_devnum(struct usb_device *udev) + struct usb_bus *bus = udev->bus; + + /* be safe when more hub events are proceed in parallel */ +- mutex_lock(&bus->usb_address0_mutex); ++ mutex_lock(&bus->devnum_next_mutex); + if (udev->wusb) { + devnum = udev->portnum + 1; + BUG_ON(test_bit(devnum, bus->devmap.devicemap)); +@@ -1998,7 +1998,7 @@ static void choose_devnum(struct usb_device *udev) + set_bit(devnum, bus->devmap.devicemap); + udev->devnum = devnum; + } +- mutex_unlock(&bus->usb_address0_mutex); ++ mutex_unlock(&bus->devnum_next_mutex); + } + + static void release_devnum(struct usb_device *udev) +@@ -4262,7 +4262,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, + if (oldspeed == USB_SPEED_LOW) + delay = HUB_LONG_RESET_TIME; + +- mutex_lock(&hdev->bus->usb_address0_mutex); ++ mutex_lock(hcd->address0_mutex); + + /* Reset the device; full speed may morph to high speed */ + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ +@@ -4548,7 +4548,7 @@ fail: + hub_port_disable(hub, port1, 0); + update_devnum(udev, devnum); /* for disconnect processing */ + } +- mutex_unlock(&hdev->bus->usb_address0_mutex); ++ mutex_unlock(hcd->address0_mutex); + return retval; + } + +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 6fe8e30eeb99..68345a9e59b8 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -3666,7 +3666,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / + EXT4_DESC_PER_BLOCK(sb); + if (ext4_has_feature_meta_bg(sb)) { +- if (le32_to_cpu(es->s_first_meta_bg) >= db_count) { ++ if (le32_to_cpu(es->s_first_meta_bg) > db_count) { + ext4_msg(sb, KERN_WARNING, + "first meta block group too large: %u " + "(group descriptor block count %u)", +diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h +index de7b4f97ac75..be519416c112 100644 +--- a/fs/gfs2/incore.h ++++ b/fs/gfs2/incore.h +@@ -207,7 +207,7 @@ struct lm_lockname { + struct gfs2_sbd *ln_sbd; + u64 ln_number; + unsigned int ln_type; +-}; ++} __packed __aligned(sizeof(int)); + + #define lm_name_equal(name1, name2) \ + (((name1)->ln_number == (name2)->ln_number) && \ +diff --git a/include/linux/log2.h b/include/linux/log2.h +index fd7ff3d91e6a..f38fae23bdac 100644 +--- a/include/linux/log2.h ++++ b/include/linux/log2.h +@@ -16,12 +16,6 @@ + #include + + /* +- * deal with unrepresentable constant logarithms +- */ +-extern __attribute__((const, noreturn)) +-int ____ilog2_NaN(void); +- +-/* + * non-constant log of base 2 calculators + * - the arch may override these in asm/bitops.h if they can be implemented + * more efficiently than using fls() and fls64() +@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) + #define ilog2(n) \ + ( \ + 
__builtin_constant_p(n) ? ( \ +- (n) < 1 ? ____ilog2_NaN() : \ ++ (n) < 2 ? 0 : \ + (n) & (1ULL << 63) ? 63 : \ + (n) & (1ULL << 62) ? 62 : \ + (n) & (1ULL << 61) ? 61 : \ +@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) + (n) & (1ULL << 4) ? 4 : \ + (n) & (1ULL << 3) ? 3 : \ + (n) & (1ULL << 2) ? 2 : \ +- (n) & (1ULL << 1) ? 1 : \ +- (n) & (1ULL << 0) ? 0 : \ +- ____ilog2_NaN() \ +- ) : \ ++ 1 ) : \ + (sizeof(n) <= 4) ? \ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ +diff --git a/include/linux/usb.h b/include/linux/usb.h +index 12891ffd4bf0..8c75af6b7d5b 100644 +--- a/include/linux/usb.h ++++ b/include/linux/usb.h +@@ -371,14 +371,13 @@ struct usb_bus { + + int devnum_next; /* Next open device number in + * round-robin allocation */ ++ struct mutex devnum_next_mutex; /* devnum_next mutex */ + + struct usb_devmap devmap; /* device address allocation map */ + struct usb_device *root_hub; /* Root hub */ + struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ + struct list_head bus_list; /* list of busses */ + +- struct mutex usb_address0_mutex; /* unaddressed device mutex */ +- + int bandwidth_allocated; /* on this bus: how much of the time + * reserved for periodic (intr/iso) + * requests is used, on average? +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h +index f89c24bd53a4..3993b21f3d11 100644 +--- a/include/linux/usb/hcd.h ++++ b/include/linux/usb/hcd.h +@@ -180,6 +180,7 @@ struct usb_hcd { + * bandwidth_mutex should be dropped after a successful control message + * to the device, or resetting the bandwidth after a failed attempt. + */ ++ struct mutex *address0_mutex; + struct mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; +diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h +index 4d1c46aac331..c7b1dc713cdd 100644 +--- a/include/scsi/libiscsi.h ++++ b/include/scsi/libiscsi.h +@@ -196,6 +196,7 @@ struct iscsi_conn { + struct iscsi_task *task; /* xmit task in progress */ + + /* xmit */ ++ spinlock_t taskqueuelock; /* protects the next three lists */ + struct list_head mgmtqueue; /* mgmt (control) xmit queue */ + struct list_head cmdqueue; /* data-path cmd queue */ + struct list_head requeue; /* tasks needing another run */ +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 9bbe9ac23cf2..e4b5494f05f8 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -9230,7 +9230,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) + ret = inherit_task_group(event, parent, parent_ctx, + child, ctxn, &inherited_all); + if (ret) +- break; ++ goto out_unlock; + } + + /* +@@ -9246,7 +9246,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) + ret = inherit_task_group(event, parent, parent_ctx, + child, ctxn, &inherited_all); + if (ret) +- break; ++ goto out_unlock; + } + + raw_spin_lock_irqsave(&parent_ctx->lock, flags); +@@ -9274,6 +9274,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) + } + + raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); ++out_unlock: + mutex_unlock(&parent_ctx->mutex); + + perf_unpin_context(parent_ctx); +diff --git a/kernel/fork.c b/kernel/fork.c +index 2e55b53399de..278a2ddad351 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -331,13 +331,14 @@ void set_task_stack_end_magic(struct task_struct *tsk) + *stackend = STACK_END_MAGIC; /* for overflow detection */ + } + +-static struct task_struct *dup_task_struct(struct task_struct *orig) ++static struct task_struct 
*dup_task_struct(struct task_struct *orig, int node) + { + struct task_struct *tsk; + struct thread_info *ti; +- int node = tsk_fork_get_node(orig); + int err; + ++ if (node == NUMA_NO_NODE) ++ node = tsk_fork_get_node(orig); + tsk = alloc_task_struct_node(node); + if (!tsk) + return NULL; +@@ -1270,7 +1271,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, + int __user *child_tidptr, + struct pid *pid, + int trace, +- unsigned long tls) ++ unsigned long tls, ++ int node) + { + int retval; + struct task_struct *p; +@@ -1323,7 +1325,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, + goto fork_out; + + retval = -ENOMEM; +- p = dup_task_struct(current); ++ p = dup_task_struct(current, node); + if (!p) + goto fork_out; + +@@ -1699,7 +1701,8 @@ static inline void init_idle_pids(struct pid_link *links) + struct task_struct *fork_idle(int cpu) + { + struct task_struct *task; +- task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0); ++ task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0, ++ cpu_to_node(cpu)); + if (!IS_ERR(task)) { + init_idle_pids(task->pids); + init_idle(task, cpu); +@@ -1744,7 +1747,7 @@ long _do_fork(unsigned long clone_flags, + } + + p = copy_process(clone_flags, stack_start, stack_size, +- child_tidptr, NULL, trace, tls); ++ child_tidptr, NULL, trace, tls, NUMA_NO_NODE); + /* + * Do this prior waking up the new thread - the thread pointer + * might get invalid after that point, if the thread exits quickly. +diff --git a/mm/percpu.c b/mm/percpu.c +index 1f376bce413c..ef6353f0adbd 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -1012,8 +1012,11 @@ area_found: + mutex_unlock(&pcpu_alloc_mutex); + } + +- if (chunk != pcpu_reserved_chunk) ++ if (chunk != pcpu_reserved_chunk) { ++ spin_lock_irqsave(&pcpu_lock, flags); + pcpu_nr_empty_pop_pages -= occ_pages; ++ spin_unlock_irqrestore(&pcpu_lock, flags); ++ } + + if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) + pcpu_schedule_balance_work(); +diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h +index 41446668ccce..d5677d39c1e4 100644 +--- a/tools/include/linux/log2.h ++++ b/tools/include/linux/log2.h +@@ -13,12 +13,6 @@ + #define _TOOLS_LINUX_LOG2_H + + /* +- * deal with unrepresentable constant logarithms +- */ +-extern __attribute__((const, noreturn)) +-int ____ilog2_NaN(void); +- +-/* + * non-constant log of base 2 calculators + * - the arch may override these in asm/bitops.h if they can be implemented + * more efficiently than using fls() and fls64() +@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) + #define ilog2(n) \ + ( \ + __builtin_constant_p(n) ? ( \ +- (n) < 1 ? ____ilog2_NaN() : \ ++ (n) < 2 ? 0 : \ + (n) & (1ULL << 63) ? 63 : \ + (n) & (1ULL << 62) ? 62 : \ + (n) & (1ULL << 61) ? 61 : \ +@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) + (n) & (1ULL << 4) ? 4 : \ + (n) & (1ULL << 3) ? 3 : \ + (n) & (1ULL << 2) ? 2 : \ +- (n) & (1ULL << 1) ? 1 : \ +- (n) & (1ULL << 0) ? 0 : \ +- ____ilog2_NaN() \ +- ) : \ ++ 1 ) : \ + (sizeof(n) <= 4) ? 
\ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.57-58.patch b/patch/kernel/mvebu64-default/03-patch-4.4.57-58.patch new file mode 100644 index 000000000..628aea197 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.57-58.patch @@ -0,0 +1,2653 @@ +diff --git a/Makefile b/Makefile +index 841675e63a38..3efe2ea99e2d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 57 ++SUBLEVEL = 58 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi +index 4dfca8fc49b3..1bc61ece2589 100644 +--- a/arch/arm/boot/dts/sama5d2.dtsi ++++ b/arch/arm/boot/dts/sama5d2.dtsi +@@ -856,6 +856,13 @@ + compatible = "atmel,at91sam9260-usart"; + reg = <0xf801c000 0x100>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH 7>; ++ dmas = <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(35))>, ++ <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(36))>; ++ dma-names = "tx", "rx"; + clocks = <&uart0_clk>; + clock-names = "usart"; + status = "disabled"; +@@ -865,6 +872,13 @@ + compatible = "atmel,at91sam9260-usart"; + reg = <0xf8020000 0x100>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH 7>; ++ dmas = <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(37))>, ++ <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(38))>; ++ dma-names = "tx", "rx"; + clocks = <&uart1_clk>; + clock-names = "usart"; + status = "disabled"; +@@ -874,6 +888,13 @@ + compatible = "atmel,at91sam9260-usart"; + reg = <0xf8024000 0x100>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH 7>; ++ dmas = <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(39))>, ++ <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(40))>; ++ dma-names = "tx", "rx"; + clocks = <&uart2_clk>; + clock-names = "usart"; + status = "disabled"; +@@ -985,6 +1006,13 @@ + compatible = "atmel,at91sam9260-usart"; + reg = <0xfc008000 0x100>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH 7>; ++ dmas = <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(41))>, ++ <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(42))>; ++ dma-names = "tx", "rx"; + clocks = <&uart3_clk>; + clock-names = "usart"; + status = "disabled"; +@@ -993,6 +1021,13 @@ + uart4: serial@fc00c000 { + compatible = "atmel,at91sam9260-usart"; + reg = <0xfc00c000 0x100>; ++ dmas = <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(43))>, ++ <&dma0 ++ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ++ AT91_XDMAC_DT_PERID(44))>; ++ dma-names = "tx", "rx"; + interrupts = <28 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&uart4_clk>; + clock-names = "usart"; +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c +index 23726fb31741..d687f860a2da 100644 +--- a/arch/arm/mach-at91/pm.c ++++ b/arch/arm/mach-at91/pm.c +@@ -286,6 +286,22 @@ static void at91_ddr_standby(void) + at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); + } + ++static void sama5d3_ddr_standby(void) ++{ ++ u32 lpr0; ++ u32 saved_lpr0; ++ ++ saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR); ++ lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB; ++ lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN; ++ ++ at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0); ++ ++ cpu_do_idle(); ++ ++ at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0); ++} ++ + /* We 
manage both DDRAM/SDRAM controllers, we need more than one value to + * remember. + */ +@@ -320,7 +336,7 @@ static const struct of_device_id const ramc_ids[] __initconst = { + { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, + { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, + { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, +- { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby }, ++ { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby }, + { /*sentinel*/ } + }; + +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c +index cfc4a966e2b9..83b5f7a323a9 100644 +--- a/arch/x86/kernel/cpu/mshyperv.c ++++ b/arch/x86/kernel/cpu/mshyperv.c +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + struct ms_hyperv_info ms_hyperv; + EXPORT_SYMBOL_GPL(ms_hyperv); +@@ -157,6 +158,26 @@ static unsigned char hv_get_nmi_reason(void) + return 0; + } + ++#ifdef CONFIG_X86_LOCAL_APIC ++/* ++ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes ++ * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle ++ * unknown NMI on the first CPU which gets it. ++ */ ++static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs) ++{ ++ static atomic_t nmi_cpu = ATOMIC_INIT(-1); ++ ++ if (!unknown_nmi_panic) ++ return NMI_DONE; ++ ++ if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1) ++ return NMI_HANDLED; ++ ++ return NMI_DONE; ++} ++#endif ++ + static void __init ms_hyperv_init_platform(void) + { + /* +@@ -182,6 +203,9 @@ static void __init ms_hyperv_init_platform(void) + printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n", + lapic_timer_frequency); + } ++ ++ register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST, ++ "hv_nmi_unknown"); + #endif + + if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c +index c6d6efed392a..7575f0798194 100644 +--- a/arch/x86/pci/xen.c ++++ b/arch/x86/pci/xen.c +@@ -231,23 +231,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + return 1; + + for_each_pci_msi_entry(msidesc, dev) { +- __pci_read_msi_msg(msidesc, &msg); +- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | +- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); +- if (msg.data != XEN_PIRQ_MSI_DATA || +- xen_irq_from_pirq(pirq) < 0) { +- pirq = xen_allocate_pirq_msi(dev, msidesc); +- if (pirq < 0) { +- irq = -ENODEV; +- goto error; +- } +- xen_msi_compose_msg(dev, pirq, &msg); +- __pci_write_msi_msg(msidesc, &msg); +- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); +- } else { +- dev_dbg(&dev->dev, +- "xen: msi already bound to pirq=%d\n", pirq); ++ pirq = xen_allocate_pirq_msi(dev, msidesc); ++ if (pirq < 0) { ++ irq = -ENODEV; ++ goto error; + } ++ xen_msi_compose_msg(dev, pirq, &msg); ++ __pci_write_msi_msg(msidesc, &msg); ++ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, + (type == PCI_CAP_ID_MSI) ? nvec : 1, + (type == PCI_CAP_ID_MSIX) ? 
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c +index 0774799942e0..c6fee7437be4 100644 +--- a/block/scsi_ioctl.c ++++ b/block/scsi_ioctl.c +@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) + __set_bit(WRITE_16, filter->write_ok); + __set_bit(WRITE_LONG, filter->write_ok); + __set_bit(WRITE_LONG_2, filter->write_ok); ++ __set_bit(WRITE_SAME, filter->write_ok); ++ __set_bit(WRITE_SAME_16, filter->write_ok); ++ __set_bit(WRITE_SAME_32, filter->write_ok); + __set_bit(ERASE, filter->write_ok); + __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok); + __set_bit(MODE_SELECT, filter->write_ok); +diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c +index 68a5ceaa04c8..8d8b3eeba725 100644 +--- a/crypto/algif_hash.c ++++ b/crypto/algif_hash.c +@@ -184,7 +184,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags) + struct alg_sock *ask = alg_sk(sk); + struct hash_ctx *ctx = ask->private; + struct ahash_request *req = &ctx->req; +- char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))]; ++ char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1]; + struct sock *sk2; + struct alg_sock *ask2; + struct hash_ctx *ctx2; +diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c +index 96809cd99ace..2f24b578bcaf 100644 +--- a/drivers/acpi/blacklist.c ++++ b/drivers/acpi/blacklist.c +@@ -346,6 +346,34 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"), + }, + }, ++ { ++ .callback = dmi_enable_rev_override, ++ .ident = "DELL Precision 5520", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"), ++ }, ++ }, ++ { ++ .callback = dmi_enable_rev_override, ++ .ident = "DELL Precision 3520", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"), ++ }, ++ }, ++ /* ++ * Resolves a quirk with the Dell Latitude 3350 that ++ * causes the ethernet adapter to not function. 
++ */ ++ { ++ .callback = dmi_enable_rev_override, ++ .ident = "DELL Latitude 3350", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"), ++ }, ++ }, + #endif + {} + }; +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 86fa9fdc8323..38b363f4316b 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -1186,6 +1186,9 @@ static int cpufreq_online(unsigned int cpu) + for_each_cpu(j, policy->related_cpus) + per_cpu(cpufreq_cpu_data, j) = policy; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ } else { ++ policy->min = policy->user_policy.min; ++ policy->max = policy->user_policy.max; + } + + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { +diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c +index 0470fc843d4e..9b6854607d73 100644 +--- a/drivers/iio/adc/ti_am335x_adc.c ++++ b/drivers/iio/adc/ti_am335x_adc.c +@@ -151,7 +151,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private) + { + struct iio_dev *indio_dev = private; + struct tiadc_device *adc_dev = iio_priv(indio_dev); +- unsigned int status, config; ++ unsigned int status, config, adc_fsm; ++ unsigned short count = 0; ++ + status = tiadc_readl(adc_dev, REG_IRQSTATUS); + + /* +@@ -165,6 +167,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private) + tiadc_writel(adc_dev, REG_CTRL, config); + tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN + | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES); ++ ++ /* wait for idle state. ++ * ADC needs to finish the current conversion ++ * before disabling the module ++ */ ++ do { ++ adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM); ++ } while (adc_fsm != 0x10 && count++ < 100); ++ + tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB)); + return IRQ_HANDLED; + } else if (status & IRQENB_FIFO1THRES) { +diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +index 595511022795..0a86ef43e781 100644 +--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c ++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +@@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + st->report_state.report_id, + st->report_state.index, + HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); +- +- poll_value = hid_sensor_read_poll_value(st); + } else { + int val; + +@@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + sensor_hub_get_feature(st->hsdev, st->power_state.report_id, + st->power_state.index, + sizeof(state_val), &state_val); +- if (state && poll_value) ++ if (state) ++ poll_value = hid_sensor_read_poll_value(st); ++ if (poll_value > 0) + msleep_interruptible(poll_value * 2); + + return 0; +diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c +index d96aa27dfcdc..db64adfbe1af 100644 +--- a/drivers/input/joystick/iforce/iforce-usb.c ++++ b/drivers/input/joystick/iforce/iforce-usb.c +@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf, + + interface = intf->cur_altsetting; + ++ if (interface->desc.bNumEndpoints < 2) ++ return -ENODEV; ++ + epirq = &interface->endpoint[0].desc; + epout = &interface->endpoint[1].desc; + +diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c +index 9365535ba7f1..50a7faa504f7 100644 +--- a/drivers/input/misc/cm109.c ++++ b/drivers/input/misc/cm109.c +@@ -675,6 +675,10 @@ static int 
cm109_usb_probe(struct usb_interface *intf, + int error = -ENOMEM; + + interface = intf->cur_altsetting; ++ ++ if (interface->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + endpoint = &interface->endpoint[0].desc; + + if (!usb_endpoint_is_int_in(endpoint)) +diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c +index 9c0ea36913b4..f4e8fbec6a94 100644 +--- a/drivers/input/misc/ims-pcu.c ++++ b/drivers/input/misc/ims-pcu.c +@@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc + return -EINVAL; + + alt = pcu->ctrl_intf->cur_altsetting; ++ ++ if (alt->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + pcu->ep_ctrl = &alt->endpoint[0].desc; + pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); + +diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c +index 79c964c075f1..6e7ff9561d92 100644 +--- a/drivers/input/misc/yealink.c ++++ b/drivers/input/misc/yealink.c +@@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id) + int ret, pipe, i; + + interface = intf->cur_altsetting; ++ ++ if (interface->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + endpoint = &interface->endpoint[0].desc; + if (!usb_endpoint_is_int_in(endpoint)) + return -ENODEV; +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index ed1935f300a7..da5458dfb1e3 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data) + + static int elan_check_ASUS_special_fw(struct elan_tp_data *data) + { +- if (data->ic_type != 0x0E) +- return false; +- +- switch (data->product_id) { +- case 0x05 ... 0x07: +- case 0x09: +- case 0x13: ++ if (data->ic_type == 0x0E) { ++ switch (data->product_id) { ++ case 0x05 ... 
0x07: ++ case 0x09: ++ case 0x13: ++ return true; ++ } ++ } else if (data->ic_type == 0x08 && data->product_id == 0x26) { ++ /* ASUS EeeBook X205TA */ + return true; +- default: +- return false; + } ++ ++ return false; + } + + static int __elan_initialize(struct elan_tp_data *data) +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 0cdd95801a25..25eab453f2b2 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + }, + }, + { ++ /* Dell Embedded Box PC 3000 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), ++ }, ++ }, ++ { + /* OQO Model 01 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "OQO"), +diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c +index cd852059b99e..df4bea96d7ed 100644 +--- a/drivers/input/tablet/hanwang.c ++++ b/drivers/input/tablet/hanwang.c +@@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id + int error; + int i; + ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!hanwang || !input_dev) { +diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c +index d2ac7c2b5b82..2812f9236b7d 100644 +--- a/drivers/input/tablet/kbtab.c ++++ b/drivers/input/tablet/kbtab.c +@@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i + struct input_dev *input_dev; + int error = -ENOMEM; + ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!kbtab || !input_dev) +diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c +index 45b466e3bbe8..0146e2c74649 100644 +--- a/drivers/input/touchscreen/sur40.c ++++ b/drivers/input/touchscreen/sur40.c +@@ -500,6 +500,9 @@ static int sur40_probe(struct usb_interface *interface, + if (iface_desc->desc.bInterfaceClass != 0xFF) + return -ENODEV; + ++ if (iface_desc->desc.bNumEndpoints < 5) ++ return -ENODEV; ++ + /* Use endpoint #4 (0x86). */ + endpoint = &iface_desc->endpoint[4].desc; + if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index f0fc6f7b5d98..0628372f3591 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -908,7 +908,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf + * which we used for the IOMMU lookup. Strictly speaking + * we could do this for all PCI devices; we only need to + * get the BDF# from the scope table for ACPI matches. */ +- if (pdev->is_virtfn) ++ if (pdev && pdev->is_virtfn) + goto got_pdev; + + *bus = drhd->devices[i].bus; +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 122af340a531..a92979e704e3 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -1072,6 +1072,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio) + int max_sectors; + int sectors; + ++ md_write_start(mddev, bio); ++ + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. 
+@@ -1455,8 +1457,6 @@ static void make_request(struct mddev *mddev, struct bio *bio) + return; + } + +- md_write_start(mddev, bio); +- + do { + + /* +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index 5cefca95734e..885f689ac870 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -1595,6 +1595,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain) + return buffer; + } + ++static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev) ++{ ++ struct uvc_video_chain *chain; ++ ++ chain = kzalloc(sizeof(*chain), GFP_KERNEL); ++ if (chain == NULL) ++ return NULL; ++ ++ INIT_LIST_HEAD(&chain->entities); ++ mutex_init(&chain->ctrl_mutex); ++ chain->dev = dev; ++ v4l2_prio_init(&chain->prio); ++ ++ return chain; ++} ++ ++/* ++ * Fallback heuristic for devices that don't connect units and terminals in a ++ * valid chain. ++ * ++ * Some devices have invalid baSourceID references, causing uvc_scan_chain() ++ * to fail, but if we just take the entities we can find and put them together ++ * in the most sensible chain we can think of, turns out they do work anyway. ++ * Note: This heuristic assumes there is a single chain. ++ * ++ * At the time of writing, devices known to have such a broken chain are ++ * - Acer Integrated Camera (5986:055a) ++ * - Realtek rtl157a7 (0bda:57a7) ++ */ ++static int uvc_scan_fallback(struct uvc_device *dev) ++{ ++ struct uvc_video_chain *chain; ++ struct uvc_entity *iterm = NULL; ++ struct uvc_entity *oterm = NULL; ++ struct uvc_entity *entity; ++ struct uvc_entity *prev; ++ ++ /* ++ * Start by locating the input and output terminals. We only support ++ * devices with exactly one of each for now. ++ */ ++ list_for_each_entry(entity, &dev->entities, list) { ++ if (UVC_ENTITY_IS_ITERM(entity)) { ++ if (iterm) ++ return -EINVAL; ++ iterm = entity; ++ } ++ ++ if (UVC_ENTITY_IS_OTERM(entity)) { ++ if (oterm) ++ return -EINVAL; ++ oterm = entity; ++ } ++ } ++ ++ if (iterm == NULL || oterm == NULL) ++ return -EINVAL; ++ ++ /* Allocate the chain and fill it. */ ++ chain = uvc_alloc_chain(dev); ++ if (chain == NULL) ++ return -ENOMEM; ++ ++ if (uvc_scan_chain_entity(chain, oterm) < 0) ++ goto error; ++ ++ prev = oterm; ++ ++ /* ++ * Add all Processing and Extension Units with two pads. The order ++ * doesn't matter much, use reverse list traversal to connect units in ++ * UVC descriptor order as we build the chain from output to input. This ++ * leads to units appearing in the order meant by the manufacturer for ++ * the cameras known to require this heuristic. ++ */ ++ list_for_each_entry_reverse(entity, &dev->entities, list) { ++ if (entity->type != UVC_VC_PROCESSING_UNIT && ++ entity->type != UVC_VC_EXTENSION_UNIT) ++ continue; ++ ++ if (entity->num_pads != 2) ++ continue; ++ ++ if (uvc_scan_chain_entity(chain, entity) < 0) ++ goto error; ++ ++ prev->baSourceID[0] = entity->id; ++ prev = entity; ++ } ++ ++ if (uvc_scan_chain_entity(chain, iterm) < 0) ++ goto error; ++ ++ prev->baSourceID[0] = iterm->id; ++ ++ list_add_tail(&chain->list, &dev->chains); ++ ++ uvc_trace(UVC_TRACE_PROBE, ++ "Found a video chain by fallback heuristic (%s).\n", ++ uvc_print_chain(chain)); ++ ++ return 0; ++ ++error: ++ kfree(chain); ++ return -EINVAL; ++} ++ + /* + * Scan the device for video chains and register video devices. 
+ * +@@ -1617,15 +1725,10 @@ static int uvc_scan_device(struct uvc_device *dev) + if (term->chain.next || term->chain.prev) + continue; + +- chain = kzalloc(sizeof(*chain), GFP_KERNEL); ++ chain = uvc_alloc_chain(dev); + if (chain == NULL) + return -ENOMEM; + +- INIT_LIST_HEAD(&chain->entities); +- mutex_init(&chain->ctrl_mutex); +- chain->dev = dev; +- v4l2_prio_init(&chain->prio); +- + term->flags |= UVC_ENTITY_FLAG_DEFAULT; + + if (uvc_scan_chain(chain, term) < 0) { +@@ -1639,6 +1742,9 @@ static int uvc_scan_device(struct uvc_device *dev) + list_add_tail(&chain->list, &dev->chains); + } + ++ if (list_empty(&dev->chains)) ++ uvc_scan_fallback(dev); ++ + if (list_empty(&dev->chains)) { + uvc_printk(KERN_INFO, "No valid video chain found.\n"); + return -1; +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index bda164089904..62d37d2ac557 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -1274,7 +1274,9 @@ clock_set: + return; + } + timeout--; +- mdelay(1); ++ spin_unlock_irq(&host->lock); ++ usleep_range(900, 1100); ++ spin_lock_irq(&host->lock); + } + + clk |= SDHCI_CLOCK_CARD_EN; +diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c +index d2c386f09d69..1d843357422e 100644 +--- a/drivers/mmc/host/ushc.c ++++ b/drivers/mmc/host/ushc.c +@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id + struct ushc_data *ushc; + int ret; + ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); + if (mmc == NULL) + return -ENOMEM; +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h +index b6fa89102526..66ba1e0ff37e 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h +@@ -913,8 +913,8 @@ + #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 + #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 + #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 +-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 ++#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2 ++#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1 + #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 + #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 + #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 +@@ -923,6 +923,8 @@ + #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 + #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 + #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 ++#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 ++#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 + + #define RX_NORMAL_DESC0_OVT_INDEX 0 + #define RX_NORMAL_DESC0_OVT_WIDTH 16 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +index f6a7161e3b85..5e6238e0b2bd 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +@@ -1658,10 +1658,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel) + + /* Get the header length */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { ++ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, ++ FIRST, 1); + rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, + RX_NORMAL_DESC2, HL); + if (rdata->rx.hdr_len) + pdata->ext_stats.rx_split_header_packets++; ++ } else { ++ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, ++ FIRST, 0); + } + + /* Get the RSS hash */ +@@ -1684,19 +1689,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel) + } + } + +- 
/* Get the packet length */ +- rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); +- +- if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { +- /* Not all the data has been transferred for this packet */ +- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, +- INCOMPLETE, 1); ++ /* Not all the data has been transferred for this packet */ ++ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) + return 0; +- } + + /* This is the last of the data for this packet */ + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, +- INCOMPLETE, 0); ++ LAST, 1); ++ ++ /* Get the packet length */ ++ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); + + /* Set checksum done indicator as appropriate */ + if (netdev->features & NETIF_F_RXCSUM) +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +index 53ce1222b11d..865b7e0b133b 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +@@ -1760,13 +1760,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, + { + struct sk_buff *skb; + u8 *packet; +- unsigned int copy_len; + + skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); + if (!skb) + return NULL; + +- /* Start with the header buffer which may contain just the header ++ /* Pull in the header buffer which may contain just the header + * or the header plus data + */ + dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, +@@ -1775,30 +1774,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, + + packet = page_address(rdata->rx.hdr.pa.pages) + + rdata->rx.hdr.pa.pages_offset; +- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len; +- copy_len = min(rdata->rx.hdr.dma_len, copy_len); +- skb_copy_to_linear_data(skb, packet, copy_len); +- skb_put(skb, copy_len); +- +- len -= copy_len; +- if (len) { +- /* Add the remaining data as a frag */ +- dma_sync_single_range_for_cpu(pdata->dev, +- rdata->rx.buf.dma_base, +- rdata->rx.buf.dma_off, +- rdata->rx.buf.dma_len, +- DMA_FROM_DEVICE); +- +- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, +- rdata->rx.buf.pa.pages, +- rdata->rx.buf.pa.pages_offset, +- len, rdata->rx.buf.dma_len); +- rdata->rx.buf.pa.pages = NULL; +- } ++ skb_copy_to_linear_data(skb, packet, len); ++ skb_put(skb, len); + + return skb; + } + ++static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata, ++ struct xgbe_packet_data *packet) ++{ ++ /* Always zero if not the first descriptor */ ++ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) ++ return 0; ++ ++ /* First descriptor with split header, return header length */ ++ if (rdata->rx.hdr_len) ++ return rdata->rx.hdr_len; ++ ++ /* First descriptor but not the last descriptor and no split header, ++ * so the full buffer was used ++ */ ++ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) ++ return rdata->rx.hdr.dma_len; ++ ++ /* First descriptor and last descriptor and no split header, so ++ * calculate how much of the buffer was used ++ */ ++ return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len); ++} ++ ++static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata, ++ struct xgbe_packet_data *packet, ++ unsigned int len) ++{ ++ /* Always the full buffer if not the last descriptor */ ++ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) ++ return rdata->rx.buf.dma_len; ++ ++ /* Last descriptor so calculate how much of the buffer was used ++ * for the last bit 
of data ++ */ ++ return rdata->rx.len - len; ++} ++ + static int xgbe_tx_poll(struct xgbe_channel *channel) + { + struct xgbe_prv_data *pdata = channel->pdata; +@@ -1881,8 +1899,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) + struct napi_struct *napi; + struct sk_buff *skb; + struct skb_shared_hwtstamps *hwtstamps; +- unsigned int incomplete, error, context_next, context; +- unsigned int len, rdesc_len, max_len; ++ unsigned int last, error, context_next, context; ++ unsigned int len, buf1_len, buf2_len, max_len; + unsigned int received = 0; + int packet_count = 0; + +@@ -1892,7 +1910,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) + if (!ring) + return 0; + +- incomplete = 0; ++ last = 0; + context_next = 0; + + napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; +@@ -1926,9 +1944,8 @@ read_again: + received++; + ring->cur++; + +- incomplete = XGMAC_GET_BITS(packet->attributes, +- RX_PACKET_ATTRIBUTES, +- INCOMPLETE); ++ last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, ++ LAST); + context_next = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, + CONTEXT_NEXT); +@@ -1937,7 +1954,7 @@ read_again: + CONTEXT); + + /* Earlier error, just drain the remaining data */ +- if ((incomplete || context_next) && error) ++ if ((!last || context_next) && error) + goto read_again; + + if (error || packet->errors) { +@@ -1949,16 +1966,22 @@ read_again: + } + + if (!context) { +- /* Length is cumulative, get this descriptor's length */ +- rdesc_len = rdata->rx.len - len; +- len += rdesc_len; ++ /* Get the data length in the descriptor buffers */ ++ buf1_len = xgbe_rx_buf1_len(rdata, packet); ++ len += buf1_len; ++ buf2_len = xgbe_rx_buf2_len(rdata, packet, len); ++ len += buf2_len; + +- if (rdesc_len && !skb) { ++ if (!skb) { + skb = xgbe_create_skb(pdata, napi, rdata, +- rdesc_len); +- if (!skb) ++ buf1_len); ++ if (!skb) { + error = 1; +- } else if (rdesc_len) { ++ goto skip_data; ++ } ++ } ++ ++ if (buf2_len) { + dma_sync_single_range_for_cpu(pdata->dev, + rdata->rx.buf.dma_base, + rdata->rx.buf.dma_off, +@@ -1968,13 +1991,14 @@ read_again: + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rdata->rx.buf.pa.pages, + rdata->rx.buf.pa.pages_offset, +- rdesc_len, ++ buf2_len, + rdata->rx.buf.dma_len); + rdata->rx.buf.pa.pages = NULL; + } + } + +- if (incomplete || context_next) ++skip_data: ++ if (!last || context_next) + goto read_again; + + if (!skb) +@@ -2033,7 +2057,7 @@ next_packet: + } + + /* Check if we need to save state before leaving */ +- if (received && (incomplete || context_next)) { ++ if (received && (!last || context_next)) { + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + rdata->state_saved = 1; + rdata->state.skb = skb; +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index 91627561c58d..f971d92f7b41 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -3495,7 +3495,8 @@ static int bcmgenet_suspend(struct device *d) + + bcmgenet_netif_stop(dev); + +- phy_suspend(priv->phydev); ++ if (!device_may_wakeup(d)) ++ phy_suspend(priv->phydev); + + netif_device_detach(dev); + +@@ -3592,7 +3593,8 @@ static int bcmgenet_resume(struct device *d) + + netif_device_attach(dev); + +- phy_resume(priv->phydev); ++ if (!device_may_wakeup(d)) ++ phy_resume(priv->phydev); + + if (priv->eee.eee_enabled) + bcmgenet_eee_enable_set(dev, true); +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c 
b/drivers/net/ethernet/broadcom/genet/bcmmii.c +index 8bdfe53754ba..e96d1f95bb47 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c +@@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) + udelay(60); + } + +-static void bcmgenet_internal_phy_setup(struct net_device *dev) +-{ +- struct bcmgenet_priv *priv = netdev_priv(dev); +- u32 reg; +- +- /* Power up PHY */ +- bcmgenet_phy_power_set(dev, true); +- /* enable APD */ +- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); +- reg |= EXT_PWR_DN_EN_LD; +- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); +- bcmgenet_mii_reset(dev); +-} +- + static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) + { + u32 reg; +@@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev) + + if (priv->internal_phy) { + phy_name = "internal PHY"; +- bcmgenet_internal_phy_setup(dev); + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phy_name = "MoCA"; + bcmgenet_moca_phy_setup(priv); +diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c +index 23ec28f43f6d..afaa98d1d4e4 100644 +--- a/drivers/net/ethernet/intel/igb/e1000_phy.c ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c +@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw) + s32 ret_val = 0; + u16 phy_id; + ++ /* ensure PHY page selection to fix misconfigured i210 */ ++ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) ++ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); ++ + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index cf0098596e85..e9408f5e2a1d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -197,6 +197,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, + if (lro_num_seg > 1) { + mlx5e_lro_update_hdr(skb, cqe); + skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); ++ /* Subtract one since we already counted this as one ++ * "regular" packet in mlx5e_complete_rx_cqe() ++ */ ++ rq->stats.packets += lro_num_seg - 1; + rq->stats.lro_packets++; + rq->stats.lro_bytes += cqe_bcnt; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index ba115ec7aa92..1e611980cf99 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -85,7 +85,7 @@ static struct mlx5_profile profile[] = { + [2] = { + .mask = MLX5_PROF_MASK_QP_SIZE | + MLX5_PROF_MASK_MR_CACHE, +- .log_max_qp = 17, ++ .log_max_qp = 18, + .mr_cache[0] = { + .size = 500, + .limit = 250 +diff --git a/drivers/parport/share.c b/drivers/parport/share.c +index 5ce5ef211bdb..754f21fd9768 100644 +--- a/drivers/parport/share.c ++++ b/drivers/parport/share.c +@@ -936,8 +936,10 @@ parport_register_dev_model(struct parport *port, const char *name, + * pardevice fields. 
-arca + */ + port->ops->init_state(par_dev, par_dev->state); +- port->proc_device = par_dev; +- parport_device_proc_register(par_dev); ++ if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { ++ port->proc_device = par_dev; ++ parport_device_proc_register(par_dev); ++ } + + return par_dev; + +diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c +index 31f31d460fc9..357527712539 100644 +--- a/drivers/pci/iov.c ++++ b/drivers/pci/iov.c +@@ -303,13 +303,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) + return rc; + } + +- pci_iov_set_numvfs(dev, nr_virtfn); +- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; +- pci_cfg_access_lock(dev); +- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); +- msleep(100); +- pci_cfg_access_unlock(dev); +- + iov->initial_VFs = initial; + if (nr_virtfn < initial) + initial = nr_virtfn; +@@ -320,6 +313,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) + goto err_pcibios; + } + ++ pci_iov_set_numvfs(dev, nr_virtfn); ++ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; ++ pci_cfg_access_lock(dev); ++ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); ++ msleep(100); ++ pci_cfg_access_unlock(dev); ++ + for (i = 0; i < initial; i++) { + rc = virtfn_add(dev, i, 0); + if (rc) +@@ -555,21 +555,61 @@ void pci_iov_release(struct pci_dev *dev) + } + + /** +- * pci_iov_resource_bar - get position of the SR-IOV BAR ++ * pci_iov_update_resource - update a VF BAR + * @dev: the PCI device + * @resno: the resource number + * +- * Returns position of the BAR encapsulated in the SR-IOV capability. ++ * Update a VF BAR in the SR-IOV capability of a PF. + */ +-int pci_iov_resource_bar(struct pci_dev *dev, int resno) ++void pci_iov_update_resource(struct pci_dev *dev, int resno) + { +- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END) +- return 0; ++ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL; ++ struct resource *res = dev->resource + resno; ++ int vf_bar = resno - PCI_IOV_RESOURCES; ++ struct pci_bus_region region; ++ u16 cmd; ++ u32 new; ++ int reg; ++ ++ /* ++ * The generic pci_restore_bars() path calls this for all devices, ++ * including VFs and non-SR-IOV devices. If this is not a PF, we ++ * have nothing to do. ++ */ ++ if (!iov) ++ return; ++ ++ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd); ++ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) { ++ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n", ++ vf_bar, res); ++ return; ++ } ++ ++ /* ++ * Ignore unimplemented BARs, unused resource slots for 64-bit ++ * BARs, and non-movable resources, e.g., those described via ++ * Enhanced Allocation. 
++ */ ++ if (!res->flags) ++ return; ++ ++ if (res->flags & IORESOURCE_UNSET) ++ return; ++ ++ if (res->flags & IORESOURCE_PCI_FIXED) ++ return; + +- BUG_ON(!dev->is_physfn); ++ pcibios_resource_to_bus(dev->bus, ®ion, res); ++ new = region.start; ++ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; + +- return dev->sriov->pos + PCI_SRIOV_BAR + +- 4 * (resno - PCI_IOV_RESOURCES); ++ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar; ++ pci_write_config_dword(dev, reg, new); ++ if (res->flags & IORESOURCE_MEM_64) { ++ new = region.start >> 16 >> 16; ++ pci_write_config_dword(dev, reg + 4, new); ++ } + } + + resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev, +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index e311a9bf2c90..0e53488f8ec1 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -519,10 +519,6 @@ static void pci_restore_bars(struct pci_dev *dev) + { + int i; + +- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ +- if (dev->is_virtfn) +- return; +- + for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) + pci_update_resource(dev, i); + } +@@ -4472,36 +4468,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags) + } + EXPORT_SYMBOL(pci_select_bars); + +-/** +- * pci_resource_bar - get position of the BAR associated with a resource +- * @dev: the PCI device +- * @resno: the resource number +- * @type: the BAR type to be filled in +- * +- * Returns BAR position in config space, or 0 if the BAR is invalid. +- */ +-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) +-{ +- int reg; +- +- if (resno < PCI_ROM_RESOURCE) { +- *type = pci_bar_unknown; +- return PCI_BASE_ADDRESS_0 + 4 * resno; +- } else if (resno == PCI_ROM_RESOURCE) { +- *type = pci_bar_mem32; +- return dev->rom_base_reg; +- } else if (resno < PCI_BRIDGE_RESOURCES) { +- /* device specific resource */ +- *type = pci_bar_unknown; +- reg = pci_iov_resource_bar(dev, resno); +- if (reg) +- return reg; +- } +- +- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno); +- return 0; +-} +- + /* Some architectures require additional programming to enable VGA */ + static arch_set_vga_state_t arch_set_vga_state; + +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h +index d390fc1475ec..c43e448873ca 100644 +--- a/drivers/pci/pci.h ++++ b/drivers/pci/pci.h +@@ -232,7 +232,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, + int pci_setup_device(struct pci_dev *dev); + int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, + struct resource *res, unsigned int reg); +-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); + void pci_configure_ari(struct pci_dev *dev); + void __pci_bus_size_bridges(struct pci_bus *bus, + struct list_head *realloc_head); +@@ -276,7 +275,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev) + #ifdef CONFIG_PCI_IOV + int pci_iov_init(struct pci_dev *dev); + void pci_iov_release(struct pci_dev *dev); +-int pci_iov_resource_bar(struct pci_dev *dev, int resno); ++void pci_iov_update_resource(struct pci_dev *dev, int resno); + resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); + void pci_restore_iov_state(struct pci_dev *dev); + int pci_iov_bus_range(struct pci_bus *bus); +@@ -290,10 +289,6 @@ static inline void pci_iov_release(struct pci_dev *dev) + + { + } +-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno) +-{ +- return 0; +-} + static inline void pci_restore_iov_state(struct pci_dev *dev) + { + } +diff --git a/drivers/pci/probe.c 
b/drivers/pci/probe.c +index 71d9a6d1bd56..b83df942794f 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -226,7 +226,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, + mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK; + } + } else { +- res->flags |= (l & IORESOURCE_ROM_ENABLE); ++ if (l & PCI_ROM_ADDRESS_ENABLE) ++ res->flags |= IORESOURCE_ROM_ENABLE; + l64 = l & PCI_ROM_ADDRESS_MASK; + sz64 = sz & PCI_ROM_ADDRESS_MASK; + mask64 = (u32)PCI_ROM_ADDRESS_MASK; +diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c +index eb0ad530dc43..3eea7fc5e1a2 100644 +--- a/drivers/pci/rom.c ++++ b/drivers/pci/rom.c +@@ -31,6 +31,11 @@ int pci_enable_rom(struct pci_dev *pdev) + if (!res->flags) + return -1; + ++ /* ++ * Ideally pci_update_resource() would update the ROM BAR address, ++ * and we would only set the enable bit here. But apparently some ++ * devices have buggy ROM BARs that read as zero when disabled. ++ */ + pcibios_resource_to_bus(pdev->bus, ®ion, res); + pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); + rom_addr &= ~PCI_ROM_ADDRESS_MASK; +diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c +index 604011e047d6..25062966cbfa 100644 +--- a/drivers/pci/setup-res.c ++++ b/drivers/pci/setup-res.c +@@ -25,21 +25,18 @@ + #include + #include "pci.h" + +- +-void pci_update_resource(struct pci_dev *dev, int resno) ++static void pci_std_update_resource(struct pci_dev *dev, int resno) + { + struct pci_bus_region region; + bool disable; + u16 cmd; + u32 new, check, mask; + int reg; +- enum pci_bar_type type; + struct resource *res = dev->resource + resno; + +- if (dev->is_virtfn) { +- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno); ++ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ ++ if (dev->is_virtfn) + return; +- } + + /* + * Ignore resources for unimplemented BARs and unused resource slots +@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno) + return; + + pcibios_resource_to_bus(dev->bus, ®ion, res); ++ new = region.start; + +- new = region.start | (res->flags & PCI_REGION_FLAG_MASK); +- if (res->flags & IORESOURCE_IO) ++ if (res->flags & IORESOURCE_IO) { + mask = (u32)PCI_BASE_ADDRESS_IO_MASK; +- else ++ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; ++ } else if (resno == PCI_ROM_RESOURCE) { ++ mask = (u32)PCI_ROM_ADDRESS_MASK; ++ } else { + mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; ++ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; ++ } + +- reg = pci_resource_bar(dev, resno, &type); +- if (!reg) +- return; +- if (type != pci_bar_unknown) { ++ if (resno < PCI_ROM_RESOURCE) { ++ reg = PCI_BASE_ADDRESS_0 + 4 * resno; ++ } else if (resno == PCI_ROM_RESOURCE) { ++ ++ /* ++ * Apparently some Matrox devices have ROM BARs that read ++ * as zero when disabled, so don't update ROM BARs unless ++ * they're enabled. See https://lkml.org/lkml/2005/8/30/138. 
++ */ + if (!(res->flags & IORESOURCE_ROM_ENABLE)) + return; ++ ++ reg = dev->rom_base_reg; + new |= PCI_ROM_ADDRESS_ENABLE; +- } ++ } else ++ return; + + /* + * We can't update a 64-bit BAR atomically, so when possible, +@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno) + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + ++void pci_update_resource(struct pci_dev *dev, int resno) ++{ ++ if (resno <= PCI_ROM_RESOURCE) ++ pci_std_update_resource(dev, resno); ++#ifdef CONFIG_PCI_IOV ++ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) ++ pci_iov_update_resource(dev, resno); ++#endif ++} ++ + int pci_claim_resource(struct pci_dev *dev, int resource) + { + struct resource *res = &dev->resource[resource]; +diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c +index 24ec282e15d8..7c3b8d3516e3 100644 +--- a/drivers/s390/crypto/ap_bus.c ++++ b/drivers/s390/crypto/ap_bus.c +@@ -1651,6 +1651,9 @@ static void ap_scan_bus(struct work_struct *unused) + ap_dev->queue_depth = queue_depth; + ap_dev->raw_hwtype = device_type; + ap_dev->device_type = device_type; ++ /* CEX6 toleration: map to CEX5 */ ++ if (device_type == AP_DEVICE_TYPE_CEX6) ++ ap_dev->device_type = AP_DEVICE_TYPE_CEX5; + ap_dev->functions = device_functions; + spin_lock_init(&ap_dev->lock); + INIT_LIST_HEAD(&ap_dev->pendingq); +diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h +index 6adcbdf225d1..cc741e948170 100644 +--- a/drivers/s390/crypto/ap_bus.h ++++ b/drivers/s390/crypto/ap_bus.h +@@ -105,6 +105,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) + #define AP_DEVICE_TYPE_CEX3C 9 + #define AP_DEVICE_TYPE_CEX4 10 + #define AP_DEVICE_TYPE_CEX5 11 ++#define AP_DEVICE_TYPE_CEX6 12 + + /* + * Known function facilities +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 5b24ffd93649..83ff1724ec79 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -57,6 +57,7 @@ struct serial_private { + unsigned int nr; + void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES]; + struct pci_serial_quirk *quirk; ++ const struct pciserial_board *board; + int line[0]; + }; + +@@ -4058,6 +4059,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) + } + } + priv->nr = i; ++ priv->board = board; + return priv; + + err_deinit: +@@ -4068,7 +4070,7 @@ err_out: + } + EXPORT_SYMBOL_GPL(pciserial_init_ports); + +-void pciserial_remove_ports(struct serial_private *priv) ++void pciserial_detach_ports(struct serial_private *priv) + { + struct pci_serial_quirk *quirk; + int i; +@@ -4088,7 +4090,11 @@ void pciserial_remove_ports(struct serial_private *priv) + quirk = find_quirk(priv->dev); + if (quirk->exit) + quirk->exit(priv->dev); ++} + ++void pciserial_remove_ports(struct serial_private *priv) ++{ ++ pciserial_detach_ports(priv); + kfree(priv); + } + EXPORT_SYMBOL_GPL(pciserial_remove_ports); +@@ -5819,7 +5825,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, + return PCI_ERS_RESULT_DISCONNECT; + + if (priv) +- pciserial_suspend_ports(priv); ++ pciserial_detach_ports(priv); + + pci_disable_device(dev); + +@@ -5844,9 +5850,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) + static void serial8250_io_resume(struct pci_dev *dev) + { + struct serial_private *priv = pci_get_drvdata(dev); ++ const struct pciserial_board *board; + +- if (priv) +- pciserial_resume_ports(priv); ++ if (!priv) ++ return; ++ 
++ board = priv->board;
++ kfree(priv);
++ priv = pciserial_init_ports(dev, board);
++
++ if (!IS_ERR(priv)) {
++ pci_set_drvdata(dev, priv);
++ }
+ }
+
+ static const struct pci_error_handlers serial8250_err_handler = {
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index deaddb950c20..24337ac3323f 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -1105,7 +1105,7 @@ static int usbtmc_probe(struct usb_interface *intf,
+
+ dev_dbg(&intf->dev, "%s called\n", __func__);
+
+- data = kmalloc(sizeof(*data), GFP_KERNEL);
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+@@ -1163,6 +1163,12 @@ static int usbtmc_probe(struct usb_interface *intf,
+ }
+ }
+
++ if (!data->bulk_out || !data->bulk_in) {
++ dev_err(&intf->dev, "bulk endpoints not found\n");
++ retcode = -ENODEV;
++ goto err_put;
++ }
++
+ retcode = get_capabilities(data);
+ if (retcode)
+ dev_err(&intf->dev, "can't read capabilities\n");
+@@ -1186,6 +1192,7 @@ static int usbtmc_probe(struct usb_interface *intf,
+ error_register:
+ sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
+ sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
++err_put:
+ kref_put(&data->kref, usbtmc_delete);
+ return retcode;
+ }
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index ac30a051ad71..325cbc9c35d8 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -246,6 +246,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+
+ /*
+ * Adjust bInterval for quirked devices.
++ */
++ /*
++ * This quirk fixes bIntervals reported in ms.
++ */
++ if (to_usb_device(ddev)->quirks &
++ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
++ n = clamp(fls(d->bInterval) + 3, i, j);
++ i = j = n;
++ }
++ /*
+ * This quirk fixes bIntervals reported in
+ * linear microframes.
+ */ +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index f52d8abf6979..9e62c93af96e 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -4199,7 +4199,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) + struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); + int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; + +- if (!udev->usb2_hw_lpm_capable) ++ if (!udev->usb2_hw_lpm_capable || !udev->bos) + return; + + if (hub) +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 24f9f98968a5..96b21b0dac1e 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = { + /* M-Systems Flash Disk Pioneers */ + { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, + ++ /* Baum Vario Ultra */ ++ { USB_DEVICE(0x0904, 0x6101), .driver_info = ++ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, ++ { USB_DEVICE(0x0904, 0x6102), .driver_info = ++ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, ++ { USB_DEVICE(0x0904, 0x6103), .driver_info = ++ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, ++ + /* Keytouch QWERTY Panel keyboard */ + { USB_DEVICE(0x0926, 0x3333), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, +diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c +index 2fa1e80a3ce7..67e474b13fca 100644 +--- a/drivers/usb/gadget/function/f_acm.c ++++ b/drivers/usb/gadget/function/f_acm.c +@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm) + { + struct usb_composite_dev *cdev = acm->port.func.config->cdev; + int status; ++ __le16 serial_state; + + spin_lock(&acm->lock); + if (acm->notify_req) { + dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", + acm->port_num, acm->serial_state); ++ serial_state = cpu_to_le16(acm->serial_state); + status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, +- 0, &acm->serial_state, sizeof(acm->serial_state)); ++ 0, &serial_state, sizeof(acm->serial_state)); + } else { + acm->pending = true; + status = 0; +diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c +index 29b41b5dee04..c7689d05356c 100644 +--- a/drivers/usb/gadget/function/f_uvc.c ++++ b/drivers/usb/gadget/function/f_uvc.c +@@ -625,7 +625,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) + uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; + uvc_ss_streaming_comp.wBytesPerInterval = + cpu_to_le16(max_packet_size * max_packet_mult * +- opts->streaming_maxburst); ++ (opts->streaming_maxburst + 1)); + + /* Allocate endpoints. 
*/ + ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); +diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c +index 4e38683c653c..6d4e75785710 100644 +--- a/drivers/usb/misc/idmouse.c ++++ b/drivers/usb/misc/idmouse.c +@@ -346,6 +346,9 @@ static int idmouse_probe(struct usb_interface *interface, + if (iface_desc->desc.bInterfaceClass != 0x0A) + return -ENODEV; + ++ if (iface_desc->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + /* allocate memory for our device state and initialize it */ + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (dev == NULL) +diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c +index 86b4e4b2ab9a..383fa007348f 100644 +--- a/drivers/usb/misc/lvstest.c ++++ b/drivers/usb/misc/lvstest.c +@@ -370,6 +370,10 @@ static int lvs_rh_probe(struct usb_interface *intf, + + hdev = interface_to_usbdev(intf); + desc = intf->cur_altsetting; ++ ++ if (desc->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + endpoint = &desc->endpoint[0].desc; + + /* valid only for SS root hub */ +diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c +index bbd029c9c725..442b6631162e 100644 +--- a/drivers/usb/misc/uss720.c ++++ b/drivers/usb/misc/uss720.c +@@ -711,6 +711,11 @@ static int uss720_probe(struct usb_interface *intf, + + interface = intf->cur_altsetting; + ++ if (interface->desc.bNumEndpoints < 3) { ++ usb_put_dev(usbdev); ++ return -ENODEV; ++ } ++ + /* + * Allocate parport interface + */ +diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c +index e499b862a946..88f26ac2a185 100644 +--- a/drivers/usb/musb/musb_cppi41.c ++++ b/drivers/usb/musb/musb_cppi41.c +@@ -250,8 +250,27 @@ static void cppi41_dma_callback(void *private_data) + transferred < cppi41_channel->packet_sz) + cppi41_channel->prog_len = 0; + +- if (cppi41_channel->is_tx) +- empty = musb_is_tx_fifo_empty(hw_ep); ++ if (cppi41_channel->is_tx) { ++ u8 type; ++ ++ if (is_host_active(musb)) ++ type = hw_ep->out_qh->type; ++ else ++ type = hw_ep->ep_in.type; ++ ++ if (type == USB_ENDPOINT_XFER_ISOC) ++ /* ++ * Don't use the early-TX-interrupt workaround below ++ * for Isoch transfter. Since Isoch are periodic ++ * transfer, by the time the next transfer is ++ * scheduled, the current one should be done already. ++ * ++ * This avoids audio playback underrun issue. 
++ */ ++ empty = true; ++ else ++ empty = musb_is_tx_fifo_empty(hw_ep); ++ } + + if (!cppi41_channel->is_tx || empty) { + cppi41_trans_done(cppi41_channel); +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 42cc72e54c05..af67a0de6b5d 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb); + #define BANDRICH_PRODUCT_1012 0x1012 + + #define QUALCOMM_VENDOR_ID 0x05C6 ++/* These Quectel products use Qualcomm's vendor ID */ ++#define QUECTEL_PRODUCT_UC20 0x9003 ++#define QUECTEL_PRODUCT_UC15 0x9090 ++ ++#define QUECTEL_VENDOR_ID 0x2c7c ++/* These Quectel products use Quectel's vendor ID */ ++#define QUECTEL_PRODUCT_EC21 0x0121 ++#define QUECTEL_PRODUCT_EC25 0x0125 + + #define CMOTECH_VENDOR_ID 0x16d8 + #define CMOTECH_PRODUCT_6001 0x6001 +@@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ +- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ ++ /* Quectel products using Qualcomm vendor ID */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, ++ /* Quectel products using Quectel vendor ID */ ++ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, ++ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 696458db7e3c..38b3f0d8cd58 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ ++ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ ++ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ + + /* Huawei devices */ + {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ +diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c +index 252c7bd9218a..d01496fd27fe 100644 +--- a/drivers/usb/wusbcore/wa-hc.c ++++ b/drivers/usb/wusbcore/wa-hc.c +@@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface, + int result; + struct device *dev = &iface->dev; + ++ if (iface->cur_altsetting->desc.bNumEndpoints < 3) ++ return -ENODEV; ++ + result = wa_rpipes_create(wa); + if (result < 0) + goto error_rpipes_create; +diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c +index 0257f35cfb9d..e75bbe5a10cd 100644 +--- a/drivers/uwb/hwa-rc.c ++++ b/drivers/uwb/hwa-rc.c +@@ -825,6 +825,9 @@ static int hwarc_probe(struct usb_interface *iface, + struct hwarc *hwarc; + struct device *dev = &iface->dev; + ++ if (iface->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + result = -ENOMEM; + uwb_rc = uwb_rc_alloc(); + if (uwb_rc == NULL) { +diff 
--git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c +index 2bfc846ac071..6345e85822a4 100644 +--- a/drivers/uwb/i1480/dfu/usb.c ++++ b/drivers/uwb/i1480/dfu/usb.c +@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) + result); + } + ++ if (iface->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + result = -ENOMEM; + i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); + if (i1480_usb == NULL) { +diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c +index 0582b72ef377..1a9f18b40be6 100644 +--- a/drivers/vfio/vfio_iommu_spapr_tce.c ++++ b/drivers/vfio/vfio_iommu_spapr_tce.c +@@ -511,6 +511,12 @@ static long tce_iommu_build_v2(struct tce_container *container, + unsigned long hpa; + enum dma_data_direction dirtmp; + ++ if (!tbl->it_userspace) { ++ ret = tce_iommu_userspace_view_alloc(tbl); ++ if (ret) ++ return ret; ++ } ++ + for (i = 0; i < pages; ++i) { + struct mm_iommu_table_group_mem_t *mem = NULL; + unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, +@@ -584,15 +590,6 @@ static long tce_iommu_create_table(struct tce_container *container, + WARN_ON(!ret && !(*ptbl)->it_ops->free); + WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size)); + +- if (!ret && container->v2) { +- ret = tce_iommu_userspace_view_alloc(*ptbl); +- if (ret) +- (*ptbl)->it_ops->free(*ptbl); +- } +- +- if (ret) +- decrement_locked_vm(table_size >> PAGE_SHIFT); +- + return ret; + } + +@@ -1064,10 +1061,7 @@ static int tce_iommu_take_ownership(struct tce_container *container, + if (!tbl || !tbl->it_map) + continue; + +- rc = tce_iommu_userspace_view_alloc(tbl); +- if (!rc) +- rc = iommu_take_ownership(tbl); +- ++ rc = iommu_take_ownership(tbl); + if (rc) { + for (j = 0; j < i; ++j) + iommu_release_ownership( +diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c +index 6e92917ba77a..4e3c78d88832 100644 +--- a/drivers/video/console/fbcon.c ++++ b/drivers/video/console/fbcon.c +@@ -1168,6 +1168,8 @@ static void fbcon_free_font(struct display *p, bool freefont) + p->userfont = 0; + } + ++static void set_vc_hi_font(struct vc_data *vc, bool set); ++ + static void fbcon_deinit(struct vc_data *vc) + { + struct display *p = &fb_display[vc->vc_num]; +@@ -1203,6 +1205,9 @@ finished: + if (free_font) + vc->vc_font.data = NULL; + ++ if (vc->vc_hi_font_mask) ++ set_vc_hi_font(vc, false); ++ + if (!con_is_bound(&fb_con)) + fbcon_exit(); + +@@ -2439,32 +2444,10 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) + return 0; + } + +-static int fbcon_do_set_font(struct vc_data *vc, int w, int h, +- const u8 * data, int userfont) ++/* set/clear vc_hi_font_mask and update vc attrs accordingly */ ++static void set_vc_hi_font(struct vc_data *vc, bool set) + { +- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; +- struct fbcon_ops *ops = info->fbcon_par; +- struct display *p = &fb_display[vc->vc_num]; +- int resize; +- int cnt; +- char *old_data = NULL; +- +- if (CON_IS_VISIBLE(vc) && softback_lines) +- fbcon_set_origin(vc); +- +- resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); +- if (p->userfont) +- old_data = vc->vc_font.data; +- if (userfont) +- cnt = FNTCHARCNT(data); +- else +- cnt = 256; +- vc->vc_font.data = (void *)(p->fontdata = data); +- if ((p->userfont = userfont)) +- REFCOUNT(data)++; +- vc->vc_font.width = w; +- vc->vc_font.height = h; +- if (vc->vc_hi_font_mask && cnt == 256) { ++ if (!set) { + vc->vc_hi_font_mask = 0; + if 
(vc->vc_can_do_color) { + vc->vc_complement_mask >>= 1; +@@ -2487,7 +2470,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + ((c & 0xfe00) >> 1) | (c & 0xff); + vc->vc_attr >>= 1; + } +- } else if (!vc->vc_hi_font_mask && cnt == 512) { ++ } else { + vc->vc_hi_font_mask = 0x100; + if (vc->vc_can_do_color) { + vc->vc_complement_mask <<= 1; +@@ -2519,8 +2502,38 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + } else + vc->vc_video_erase_char = c & ~0x100; + } +- + } ++} ++ ++static int fbcon_do_set_font(struct vc_data *vc, int w, int h, ++ const u8 * data, int userfont) ++{ ++ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; ++ struct fbcon_ops *ops = info->fbcon_par; ++ struct display *p = &fb_display[vc->vc_num]; ++ int resize; ++ int cnt; ++ char *old_data = NULL; ++ ++ if (CON_IS_VISIBLE(vc) && softback_lines) ++ fbcon_set_origin(vc); ++ ++ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); ++ if (p->userfont) ++ old_data = vc->vc_font.data; ++ if (userfont) ++ cnt = FNTCHARCNT(data); ++ else ++ cnt = 256; ++ vc->vc_font.data = (void *)(p->fontdata = data); ++ if ((p->userfont = userfont)) ++ REFCOUNT(data)++; ++ vc->vc_font.width = w; ++ vc->vc_font.height = h; ++ if (vc->vc_hi_font_mask && cnt == 256) ++ set_vc_hi_font(vc, false); ++ else if (!vc->vc_hi_font_mask && cnt == 512) ++ set_vc_hi_font(vc, true); + + if (resize) { + int cols, rows; +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c +index 611f9c11da85..2e319d0c395d 100644 +--- a/drivers/xen/xen-acpi-processor.c ++++ b/drivers/xen/xen-acpi-processor.c +@@ -27,10 +27,10 @@ + #include + #include + #include ++#include + #include + #include + #include +-#include + #include + #include + +@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void) + return rc; + } + +-static int xen_acpi_processor_resume(struct notifier_block *nb, +- unsigned long action, void *data) ++static void xen_acpi_processor_resume_worker(struct work_struct *dummy) + { ++ int rc; ++ + bitmap_zero(acpi_ids_done, nr_acpi_bits); +- return xen_upload_processor_pm_data(); ++ ++ rc = xen_upload_processor_pm_data(); ++ if (rc != 0) ++ pr_info("ACPI data upload failed, error = %d\n", rc); ++} ++ ++static void xen_acpi_processor_resume(void) ++{ ++ static DECLARE_WORK(wq, xen_acpi_processor_resume_worker); ++ ++ /* ++ * xen_upload_processor_pm_data() calls non-atomic code. ++ * However, the context for xen_acpi_processor_resume is syscore ++ * with only the boot CPU online and in an atomic context. ++ * ++ * So defer the upload for some point safer. 
++ */ ++ schedule_work(&wq); + } + +-struct notifier_block xen_acpi_processor_resume_nb = { +- .notifier_call = xen_acpi_processor_resume, ++static struct syscore_ops xap_syscore_ops = { ++ .resume = xen_acpi_processor_resume, + }; + + static int __init xen_acpi_processor_init(void) +@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void) + if (rc) + goto err_unregister; + +- xen_resume_notifier_register(&xen_acpi_processor_resume_nb); ++ register_syscore_ops(&xap_syscore_ops); + + return 0; + err_unregister: +@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void) + { + int i; + +- xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb); ++ unregister_syscore_ops(&xap_syscore_ops); + kfree(acpi_ids_done); + kfree(acpi_id_present); + kfree(acpi_id_cst_present); +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index d4be4e23bc21..dad8e7bdf0a6 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -1158,10 +1158,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, + set_buffer_uptodate(dir_block); + err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); + if (err) +- goto out; ++ return err; + set_buffer_verified(dir_block); +-out: +- return err; ++ return ext4_mark_inode_dirty(handle, inode); + } + + static int ext4_convert_inline_data_nolock(handle_t *handle, +diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c +index 1aabfda669b0..7183b7ea065b 100644 +--- a/fs/xfs/libxfs/xfs_inode_buf.c ++++ b/fs/xfs/libxfs/xfs_inode_buf.c +@@ -299,6 +299,14 @@ xfs_dinode_verify( + if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) + return false; + ++ /* don't allow invalid i_size */ ++ if (be64_to_cpu(dip->di_size) & (1ULL << 63)) ++ return false; ++ ++ /* No zero-length symlinks. */ ++ if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0) ++ return false; ++ + /* only version 3 or greater inodes are extensively verified here */ + if (dip->di_version < 3) + return true; +diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c +index dbae6490a79a..832764ee035a 100644 +--- a/fs/xfs/xfs_bmap_util.c ++++ b/fs/xfs/xfs_bmap_util.c +@@ -1713,6 +1713,7 @@ xfs_swap_extents( + xfs_trans_t *tp; + xfs_bstat_t *sbp = &sxp->sx_stat; + xfs_ifork_t *tempifp, *ifp, *tifp; ++ xfs_extnum_t nextents; + int src_log_flags, target_log_flags; + int error = 0; + int aforkblks = 0; +@@ -1899,7 +1900,8 @@ xfs_swap_extents( + * pointer. Otherwise it's already NULL or + * pointing to the extent. + */ +- if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) { ++ nextents = ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t); ++ if (nextents <= XFS_INLINE_EXTS) { + ifp->if_u1.if_extents = + ifp->if_u2.if_inline_ext; + } +@@ -1918,7 +1920,8 @@ xfs_swap_extents( + * pointer. Otherwise it's already NULL or + * pointing to the extent. 
+ */ +- if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) { ++ nextents = tip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t); ++ if (nextents <= XFS_INLINE_EXTS) { + tifp->if_u1.if_extents = + tifp->if_u2.if_inline_ext; + } +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c +index eb1b8c8acfcb..8146b0cf20ce 100644 +--- a/fs/xfs/xfs_buf.c ++++ b/fs/xfs/xfs_buf.c +@@ -375,6 +375,7 @@ retry: + out_free_pages: + for (i = 0; i < bp->b_page_count; i++) + __free_page(bp->b_pages[i]); ++ bp->b_flags &= ~_XBF_PAGES; + return error; + } + +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h +index 1d0043dc34e4..de2a722fe3cf 100644 +--- a/include/linux/usb/quirks.h ++++ b/include/linux/usb/quirks.h +@@ -50,4 +50,10 @@ + /* device can't handle Link Power Management */ + #define USB_QUIRK_NO_LPM BIT(10) + ++/* ++ * Device reports its bInterval as linear frames instead of the ++ * USB 2.0 calculation. ++ */ ++#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) ++ + #endif /* __LINUX_USB_QUIRKS_H */ +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c +index ddc3573894b0..bc95e48d5cfb 100644 +--- a/net/ceph/osdmap.c ++++ b/net/ceph/osdmap.c +@@ -1265,7 +1265,6 @@ static int decode_new_up_state_weight(void **p, void *end, + if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && + (xorstate & CEPH_OSD_EXISTS)) { + pr_info("osd%d does not exist\n", osd); +- map->osd_weight[osd] = CEPH_OSD_IN; + ret = set_primary_affinity(map, osd, + CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); + if (ret) +diff --git a/net/core/sock.c b/net/core/sock.c +index f4c0917e66b5..9c708a5fb751 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1459,6 +1459,11 @@ void sk_destruct(struct sock *sk) + pr_debug("%s: optmem leakage (%d bytes) detected\n", + __func__, atomic_read(&sk->sk_omem_alloc)); + ++ if (sk->sk_frag.page) { ++ put_page(sk->sk_frag.page); ++ sk->sk_frag.page = NULL; ++ } ++ + if (sk->sk_peer_cred) + put_cred(sk->sk_peer_cred); + put_pid(sk->sk_peer_pid); +@@ -1552,6 +1557,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) + is_charged = sk_filter_charge(newsk, filter); + + if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { ++ /* We need to make sure that we don't uncharge the new ++ * socket if we couldn't charge it in the first place ++ * as otherwise we uncharge the parent's filter. 
++ */
++ if (!is_charged)
++ RCU_INIT_POINTER(newsk->sk_filter, NULL);
+ /* It is still raw copy of parent, so invalidate
+ * destructor and make plain sk_free() */
+ newsk->sk_destruct = NULL;
+@@ -2691,11 +2702,6 @@ void sk_common_release(struct sock *sk)
+
+ sk_refcnt_debug_release(sk);
+
+- if (sk->sk_frag.page) {
+- put_page(sk->sk_frag.page);
+- sk->sk_frag.page = NULL;
+- }
+-
+ sock_put(sk);
+ }
+ EXPORT_SYMBOL(sk_common_release);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 4e60dae86df5..1adba44f8fbc 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1080,7 +1080,8 @@ static void nl_fib_input(struct sk_buff *skb)
+
+ net = sock_net(skb->sk);
+ nlh = nlmsg_hdr(skb);
+- if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
++ if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
++ skb->len < nlh->nlmsg_len ||
+ nlmsg_len(nlh) < sizeof(*frn))
+ return;
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 7cc0f8aac28f..818630cec54f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5435,6 +5435,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ tcp_set_state(sk, TCP_ESTABLISHED);
++ icsk->icsk_ack.lrcvtime = tcp_time_stamp;
+
+ if (skb) {
+ icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
+@@ -5647,7 +5648,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ * to stand against the temptation 8) --ANK
+ */
+ inet_csk_schedule_ack(sk);
+- icsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ tcp_enter_quickack_mode(sk);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ TCP_DELACK_MAX, TCP_RTO_MAX);
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 9475a2748a9a..019db68bdb9f 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -472,6 +472,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
+ newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+ newtp->rtt_min[0].rtt = ~0U;
+ newicsk->icsk_rto = TCP_TIMEOUT_INIT;
++ newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
+
+ newtp->packets_out = 0;
+ newtp->retrans_out = 0;
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index d1bd4a45ca2d..d26b28def310 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -588,7 +588,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
+ ipv4 = true;
+ break;
+ case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
+- SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
++ SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
+ nla_get_in6_addr(a), is_mask);
+ ipv6 = true;
+ break;
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 6a0d48525fcf..c36757e72844 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
++ BUG_ON(!atomic_long_read(&u->inflight));
+ BUG_ON(list_empty(&u->link));
+
+ if (atomic_long_dec_and_test(&u->inflight))
+@@ -341,6 +342,14 @@ void unix_gc(void)
+ }
+ list_del(&cursor);
+
++ /* Now gc_candidates contains only garbage. Restore original
++ * inflight counters for these as well, and remove the skbuffs
++ * which are creating the cycle(s).
++ */
++ skb_queue_head_init(&hitlist);
++ list_for_each_entry(u, &gc_candidates, link)
++ scan_children(&u->sk, inc_inflight, &hitlist);
++
+ /* not_cycle_list contains those sockets which do not make up a
+ * cycle.
Restore these to the inflight list. + */ +@@ -350,14 +359,6 @@ void unix_gc(void) + list_move_tail(&u->link, &gc_inflight_list); + } + +- /* Now gc_candidates contains only garbage. Restore original +- * inflight counters for these as well, and remove the skbuffs +- * which are creating the cycle(s). +- */ +- skb_queue_head_init(&hitlist); +- list_for_each_entry(u, &gc_candidates, link) +- scan_children(&u->sk, inc_inflight, &hitlist); +- + spin_unlock(&unix_gc_lock); + + /* Here we are. Hitlist is filled. Die. */ +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 1f0de6d74daa..9d0953e5734f 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -492,21 +492,17 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, + { + int err; + +- rtnl_lock(); +- + if (!cb->args[0]) { + err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, + nl80211_fam.attrbuf, nl80211_fam.maxattr, + nl80211_policy); + if (err) +- goto out_unlock; ++ return err; + + *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), + nl80211_fam.attrbuf); +- if (IS_ERR(*wdev)) { +- err = PTR_ERR(*wdev); +- goto out_unlock; +- } ++ if (IS_ERR(*wdev)) ++ return PTR_ERR(*wdev); + *rdev = wiphy_to_rdev((*wdev)->wiphy); + /* 0 is the first index - add 1 to parse only once */ + cb->args[0] = (*rdev)->wiphy_idx + 1; +@@ -516,10 +512,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, + struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); + struct wireless_dev *tmp; + +- if (!wiphy) { +- err = -ENODEV; +- goto out_unlock; +- } ++ if (!wiphy) ++ return -ENODEV; + *rdev = wiphy_to_rdev(wiphy); + *wdev = NULL; + +@@ -530,21 +524,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, + } + } + +- if (!*wdev) { +- err = -ENODEV; +- goto out_unlock; +- } ++ if (!*wdev) ++ return -ENODEV; + } + + return 0; +- out_unlock: +- rtnl_unlock(); +- return err; +-} +- +-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev) +-{ +- rtnl_unlock(); + } + + /* IE validation */ +@@ -3884,9 +3868,10 @@ static int nl80211_dump_station(struct sk_buff *skb, + int sta_idx = cb->args[2]; + int err; + ++ rtnl_lock(); + err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + if (err) +- return err; ++ goto out_err; + + if (!wdev->netdev) { + err = -EINVAL; +@@ -3922,7 +3907,7 @@ static int nl80211_dump_station(struct sk_buff *skb, + cb->args[2] = sta_idx; + err = skb->len; + out_err: +- nl80211_finish_wdev_dump(rdev); ++ rtnl_unlock(); + + return err; + } +@@ -4639,9 +4624,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb, + int path_idx = cb->args[2]; + int err; + ++ rtnl_lock(); + err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + if (err) +- return err; ++ goto out_err; + + if (!rdev->ops->dump_mpath) { + err = -EOPNOTSUPP; +@@ -4675,7 +4661,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb, + cb->args[2] = path_idx; + err = skb->len; + out_err: +- nl80211_finish_wdev_dump(rdev); ++ rtnl_unlock(); + return err; + } + +@@ -4835,9 +4821,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb, + int path_idx = cb->args[2]; + int err; + ++ rtnl_lock(); + err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + if (err) +- return err; ++ goto out_err; + + if (!rdev->ops->dump_mpp) { + err = -EOPNOTSUPP; +@@ -4870,7 +4857,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb, + cb->args[2] = path_idx; + err = skb->len; + out_err: +- nl80211_finish_wdev_dump(rdev); ++ rtnl_unlock(); + return err; + } + +@@ -6806,9 +6793,12 @@ static int 
nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) + int start = cb->args[2], idx = 0; + int err; + ++ rtnl_lock(); + err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); +- if (err) ++ if (err) { ++ rtnl_unlock(); + return err; ++ } + + wdev_lock(wdev); + spin_lock_bh(&rdev->bss_lock); +@@ -6831,7 +6821,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) + wdev_unlock(wdev); + + cb->args[2] = idx; +- nl80211_finish_wdev_dump(rdev); ++ rtnl_unlock(); + + return skb->len; + } +@@ -6915,9 +6905,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) + int res; + bool radio_stats; + ++ rtnl_lock(); + res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + if (res) +- return res; ++ goto out_err; + + /* prepare_wdev_dump parsed the attributes */ + radio_stats = nl80211_fam.attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; +@@ -6958,7 +6949,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) + cb->args[2] = survey_idx; + res = skb->len; + out_err: +- nl80211_finish_wdev_dump(rdev); ++ rtnl_unlock(); + return res; + } + +@@ -10158,17 +10149,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, + void *data = NULL; + unsigned int data_len = 0; + +- rtnl_lock(); +- + if (cb->args[0]) { + /* subtract the 1 again here */ + struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); + struct wireless_dev *tmp; + +- if (!wiphy) { +- err = -ENODEV; +- goto out_unlock; +- } ++ if (!wiphy) ++ return -ENODEV; + *rdev = wiphy_to_rdev(wiphy); + *wdev = NULL; + +@@ -10189,13 +10176,11 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, + nl80211_fam.attrbuf, nl80211_fam.maxattr, + nl80211_policy); + if (err) +- goto out_unlock; ++ return err; + + if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] || +- !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { +- err = -EINVAL; +- goto out_unlock; +- } ++ !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) ++ return -EINVAL; + + *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), + nl80211_fam.attrbuf); +@@ -10204,10 +10189,8 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, + + *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), + nl80211_fam.attrbuf); +- if (IS_ERR(*rdev)) { +- err = PTR_ERR(*rdev); +- goto out_unlock; +- } ++ if (IS_ERR(*rdev)) ++ return PTR_ERR(*rdev); + + vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]); + subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); +@@ -10220,19 +10203,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, + if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) + continue; + +- if (!vcmd->dumpit) { +- err = -EOPNOTSUPP; +- goto out_unlock; +- } ++ if (!vcmd->dumpit) ++ return -EOPNOTSUPP; + + vcmd_idx = i; + break; + } + +- if (vcmd_idx < 0) { +- err = -EOPNOTSUPP; +- goto out_unlock; +- } ++ if (vcmd_idx < 0) ++ return -EOPNOTSUPP; + + if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) { + data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]); +@@ -10249,9 +10228,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, + + /* keep rtnl locked in successful case */ + return 0; +- out_unlock: +- rtnl_unlock(); +- return err; + } + + static int nl80211_vendor_cmd_dump(struct sk_buff *skb, +@@ -10266,9 +10242,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb, + int err; + struct nlattr *vendor_data; + ++ rtnl_lock(); + err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); + if (err) +- return 
err; ++ goto out; + + vcmd_idx = cb->args[2]; + data = (void *)cb->args[3]; +@@ -10277,18 +10254,26 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb, + + if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV)) { +- if (!wdev) +- return -EINVAL; ++ if (!wdev) { ++ err = -EINVAL; ++ goto out; ++ } + if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && +- !wdev->netdev) +- return -EINVAL; ++ !wdev->netdev) { ++ err = -EINVAL; ++ goto out; ++ } + + if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { + if (wdev->netdev && +- !netif_running(wdev->netdev)) +- return -ENETDOWN; +- if (!wdev->netdev && !wdev->p2p_started) +- return -ENETDOWN; ++ !netif_running(wdev->netdev)) { ++ err = -ENETDOWN; ++ goto out; ++ } ++ if (!wdev->netdev && !wdev->p2p_started) { ++ err = -ENETDOWN; ++ goto out; ++ } + } + } + +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c +index 58e79e02f217..c67f9c212dd1 100644 +--- a/sound/core/seq/seq_clientmgr.c ++++ b/sound/core/seq/seq_clientmgr.c +@@ -1921,6 +1921,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, + info.output_pool != client->pool->size)) { + if (snd_seq_write_pool_allocated(client)) { + /* remove all existing cells */ ++ snd_seq_pool_mark_closing(client->pool); + snd_seq_queue_client_leave_cells(client->number); + snd_seq_pool_done(client->pool); + } +diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c +index 86240d02b530..3f4efcb85df5 100644 +--- a/sound/core/seq/seq_fifo.c ++++ b/sound/core/seq/seq_fifo.c +@@ -70,6 +70,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo) + return; + *fifo = NULL; + ++ if (f->pool) ++ snd_seq_pool_mark_closing(f->pool); ++ + snd_seq_fifo_clear(f); + + /* wake up clients if any */ +diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c +index dfa5156f3585..5847c4475bf3 100644 +--- a/sound/core/seq/seq_memory.c ++++ b/sound/core/seq/seq_memory.c +@@ -414,6 +414,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool) + return 0; + } + ++/* refuse the further insertion to the pool */ ++void snd_seq_pool_mark_closing(struct snd_seq_pool *pool) ++{ ++ unsigned long flags; ++ ++ if (snd_BUG_ON(!pool)) ++ return; ++ spin_lock_irqsave(&pool->lock, flags); ++ pool->closing = 1; ++ spin_unlock_irqrestore(&pool->lock, flags); ++} ++ + /* remove events */ + int snd_seq_pool_done(struct snd_seq_pool *pool) + { +@@ -424,10 +436,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool) + return -EINVAL; + + /* wait for closing all threads */ +- spin_lock_irqsave(&pool->lock, flags); +- pool->closing = 1; +- spin_unlock_irqrestore(&pool->lock, flags); +- + if (waitqueue_active(&pool->output_sleep)) + wake_up(&pool->output_sleep); + +@@ -484,6 +492,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool) + *ppool = NULL; + if (pool == NULL) + return 0; ++ snd_seq_pool_mark_closing(pool); + snd_seq_pool_done(pool); + kfree(pool); + return 0; +diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h +index 4a2ec779b8a7..32f959c17786 100644 +--- a/sound/core/seq/seq_memory.h ++++ b/sound/core/seq/seq_memory.h +@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool) + int snd_seq_pool_init(struct snd_seq_pool *pool); + + /* done pool - free events */ ++void snd_seq_pool_mark_closing(struct snd_seq_pool *pool); + int snd_seq_pool_done(struct snd_seq_pool *pool); + + /* create pool */ +diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c +index ab4cdab5cfa5..79edd88d5cd0 
100644 +--- a/sound/pci/ctxfi/cthw20k1.c ++++ b/sound/pci/ctxfi/cthw20k1.c +@@ -1905,7 +1905,7 @@ static int hw_card_start(struct hw *hw) + return err; + + /* Set DMA transfer mask */ +- if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { ++ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); + } else { + dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index cf0785ddbd14..1d4f34379f56 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -6040,6 +6040,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + ALC295_STANDARD_PINS, + {0x17, 0x21014040}, + {0x18, 0x21a19050}), ++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, ++ ALC295_STANDARD_PINS), + SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC298_STANDARD_PINS, + {0x17, 0x90170110}), diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.58-59.patch b/patch/kernel/mvebu64-default/03-patch-4.4.58-59.patch new file mode 100644 index 000000000..49788f7d8 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.58-59.patch @@ -0,0 +1,548 @@ +diff --git a/Makefile b/Makefile +index 3efe2ea99e2d..083724c6ca4d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 58 ++SUBLEVEL = 59 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c +index 3c494e84444d..a511ac16a8e3 100644 +--- a/arch/c6x/kernel/ptrace.c ++++ b/arch/c6x/kernel/ptrace.c +@@ -69,46 +69,6 @@ static int gpr_get(struct task_struct *target, + 0, sizeof(*regs)); + } + +-static int gpr_set(struct task_struct *target, +- const struct user_regset *regset, +- unsigned int pos, unsigned int count, +- const void *kbuf, const void __user *ubuf) +-{ +- int ret; +- struct pt_regs *regs = task_pt_regs(target); +- +- /* Don't copyin TSR or CSR */ +- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, +- ®s, +- 0, PT_TSR * sizeof(long)); +- if (ret) +- return ret; +- +- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, +- PT_TSR * sizeof(long), +- (PT_TSR + 1) * sizeof(long)); +- if (ret) +- return ret; +- +- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, +- ®s, +- (PT_TSR + 1) * sizeof(long), +- PT_CSR * sizeof(long)); +- if (ret) +- return ret; +- +- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, +- PT_CSR * sizeof(long), +- (PT_CSR + 1) * sizeof(long)); +- if (ret) +- return ret; +- +- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, +- ®s, +- (PT_CSR + 1) * sizeof(long), -1); +- return ret; +-} +- + enum c6x_regset { + REGSET_GPR, + }; +@@ -120,7 +80,6 @@ static const struct user_regset c6x_regsets[] = { + .size = sizeof(u32), + .align = sizeof(u32), + .get = gpr_get, +- .set = gpr_set + }, + }; + +diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c +index 92075544a19a..0dc1c8f622bc 100644 +--- a/arch/h8300/kernel/ptrace.c ++++ b/arch/h8300/kernel/ptrace.c +@@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target, + long *reg = (long *)®s; + + /* build user regs in buffer */ +- for (r = 0; r < ARRAY_SIZE(register_offset); r++) ++ BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0); ++ for (r = 0; r < sizeof(regs) / sizeof(long); r++) + *reg++ = h8300_get_reg(target, r); + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, +@@ -113,7 +114,8 @@ static int regs_set(struct 
task_struct *target, + long *reg; + + /* build user regs in buffer */ +- for (reg = (long *)®s, r = 0; r < ARRAY_SIZE(register_offset); r++) ++ BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0); ++ for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++) + *reg++ = h8300_get_reg(target, r); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, +@@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target, + return ret; + + /* write back to pt_regs */ +- for (reg = (long *)®s, r = 0; r < ARRAY_SIZE(register_offset); r++) ++ for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++) + h8300_put_reg(target, r, *reg++); + return 0; + } +diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c +index 7563628822bd..5e2dc7defd2c 100644 +--- a/arch/metag/kernel/ptrace.c ++++ b/arch/metag/kernel/ptrace.c +@@ -24,6 +24,16 @@ + * user_regset definitions. + */ + ++static unsigned long user_txstatus(const struct pt_regs *regs) ++{ ++ unsigned long data = (unsigned long)regs->ctx.Flags; ++ ++ if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) ++ data |= USER_GP_REGS_STATUS_CATCH_BIT; ++ ++ return data; ++} ++ + int metag_gp_regs_copyout(const struct pt_regs *regs, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +@@ -62,9 +72,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs, + if (ret) + goto out; + /* TXSTATUS */ +- data = (unsigned long)regs->ctx.Flags; +- if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) +- data |= USER_GP_REGS_STATUS_CATCH_BIT; ++ data = user_txstatus(regs); + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &data, 4*25, 4*26); + if (ret) +@@ -119,6 +127,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs, + if (ret) + goto out; + /* TXSTATUS */ ++ data = user_txstatus(regs); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &data, 4*25, 4*26); + if (ret) +@@ -244,6 +253,8 @@ int metag_rp_state_copyin(struct pt_regs *regs, + unsigned long long *ptr; + int ret, i; + ++ if (count < 4*13) ++ return -EINVAL; + /* Read the entire pipeline before making any changes */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &rp, 0, 4*13); +@@ -303,7 +314,7 @@ static int metag_tls_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + int ret; +- void __user *tls; ++ void __user *tls = target->thread.tls_ptr; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); + if (ret) +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index 74d581569778..c95bf18260f8 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -485,7 +485,8 @@ static int fpr_set(struct task_struct *target, + &target->thread.fpu, + 0, sizeof(elf_fpregset_t)); + +- for (i = 0; i < NUM_FPU_REGS; i++) { ++ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); ++ for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) { + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &fpr_val, i * sizeof(elf_fpreg_t), + (i + 1) * sizeof(elf_fpreg_t)); +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index 9ddc4928a089..c1566170964f 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -311,7 +311,7 @@ static int genregs64_set(struct task_struct *target, + } + + if (!ret) { +- unsigned long y; ++ unsigned long y = regs->y; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &y, +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index 146264a41ec8..9736f9be5447 100644 +--- 
a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -597,10 +597,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d) + + spin_lock_irqsave(&pctrl->lock, flags); + +- val = readl(pctrl->regs + g->intr_status_reg); +- val &= ~BIT(g->intr_status_bit); +- writel(val, pctrl->regs + g->intr_status_reg); +- + val = readl(pctrl->regs + g->intr_cfg_reg); + val |= BIT(g->intr_enable_bit); + writel(val, pctrl->regs + g->intr_cfg_reg); +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c +index 56f7e2521202..01d15dca940e 100644 +--- a/drivers/virtio/virtio_balloon.c ++++ b/drivers/virtio/virtio_balloon.c +@@ -416,6 +416,8 @@ static int init_vqs(struct virtio_balloon *vb) + * Prime this virtqueue with one buffer so the hypervisor can + * use it to signal us later (it can't be broken yet!). + */ ++ update_balloon_stats(vb); ++ + sg_init_one(&sg, vb->stats, sizeof vb->stats); + if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) + < 0) +diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c +index 9a16d1e75a49..505f8afde57c 100644 +--- a/fs/ext4/crypto_key.c ++++ b/fs/ext4/crypto_key.c +@@ -88,8 +88,6 @@ void ext4_free_crypt_info(struct ext4_crypt_info *ci) + if (!ci) + return; + +- if (ci->ci_keyring_key) +- key_put(ci->ci_keyring_key); + crypto_free_ablkcipher(ci->ci_ctfm); + kmem_cache_free(ext4_crypt_info_cachep, ci); + } +@@ -111,7 +109,7 @@ void ext4_free_encryption_info(struct inode *inode, + ext4_free_crypt_info(ci); + } + +-int _ext4_get_encryption_info(struct inode *inode) ++int ext4_get_encryption_info(struct inode *inode) + { + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_crypt_info *crypt_info; +@@ -128,22 +126,15 @@ int _ext4_get_encryption_info(struct inode *inode) + char mode; + int res; + ++ if (ei->i_crypt_info) ++ return 0; ++ + if (!ext4_read_workqueue) { + res = ext4_init_crypto(); + if (res) + return res; + } + +-retry: +- crypt_info = ACCESS_ONCE(ei->i_crypt_info); +- if (crypt_info) { +- if (!crypt_info->ci_keyring_key || +- key_validate(crypt_info->ci_keyring_key) == 0) +- return 0; +- ext4_free_encryption_info(inode, crypt_info); +- goto retry; +- } +- + res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, + &ctx, sizeof(ctx)); +@@ -166,7 +157,6 @@ retry: + crypt_info->ci_data_mode = ctx.contents_encryption_mode; + crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; + crypt_info->ci_ctfm = NULL; +- crypt_info->ci_keyring_key = NULL; + memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, + sizeof(crypt_info->ci_master_key)); + if (S_ISREG(inode->i_mode)) +@@ -206,7 +196,6 @@ retry: + keyring_key = NULL; + goto out; + } +- crypt_info->ci_keyring_key = keyring_key; + if (keyring_key->type != &key_type_logon) { + printk_once(KERN_WARNING + "ext4: key type must be logon\n"); +@@ -253,16 +242,13 @@ got_key: + ext4_encryption_key_size(mode)); + if (res) + goto out; +- memzero_explicit(raw_key, sizeof(raw_key)); +- if (cmpxchg(&ei->i_crypt_info, NULL, crypt_info) != NULL) { +- ext4_free_crypt_info(crypt_info); +- goto retry; +- } +- return 0; + ++ if (cmpxchg(&ei->i_crypt_info, NULL, crypt_info) == NULL) ++ crypt_info = NULL; + out: + if (res == -ENOKEY) + res = 0; ++ key_put(keyring_key); + ext4_free_crypt_info(crypt_info); + memzero_explicit(raw_key, sizeof(raw_key)); + return res; +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index cd5914495ad7..362d59b24f1d 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -2330,23 +2330,11 @@ 
static inline void ext4_fname_free_filename(struct ext4_filename *fname) { } + /* crypto_key.c */ + void ext4_free_crypt_info(struct ext4_crypt_info *ci); + void ext4_free_encryption_info(struct inode *inode, struct ext4_crypt_info *ci); +-int _ext4_get_encryption_info(struct inode *inode); + + #ifdef CONFIG_EXT4_FS_ENCRYPTION + int ext4_has_encryption_key(struct inode *inode); + +-static inline int ext4_get_encryption_info(struct inode *inode) +-{ +- struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info; +- +- if (!ci || +- (ci->ci_keyring_key && +- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | +- (1 << KEY_FLAG_REVOKED) | +- (1 << KEY_FLAG_DEAD))))) +- return _ext4_get_encryption_info(inode); +- return 0; +-} ++int ext4_get_encryption_info(struct inode *inode); + + static inline struct ext4_crypt_info *ext4_encryption_info(struct inode *inode) + { +diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h +index ac7d4e813796..1b17b05b9f4d 100644 +--- a/fs/ext4/ext4_crypto.h ++++ b/fs/ext4/ext4_crypto.h +@@ -78,7 +78,6 @@ struct ext4_crypt_info { + char ci_filename_mode; + char ci_flags; + struct crypto_ablkcipher *ci_ctfm; +- struct key *ci_keyring_key; + char ci_master_key[EXT4_KEY_DESCRIPTOR_SIZE]; + }; + +diff --git a/fs/f2fs/crypto_key.c b/fs/f2fs/crypto_key.c +index 5de2d866a25c..18595d7a0efc 100644 +--- a/fs/f2fs/crypto_key.c ++++ b/fs/f2fs/crypto_key.c +@@ -92,7 +92,6 @@ static void f2fs_free_crypt_info(struct f2fs_crypt_info *ci) + if (!ci) + return; + +- key_put(ci->ci_keyring_key); + crypto_free_ablkcipher(ci->ci_ctfm); + kmem_cache_free(f2fs_crypt_info_cachep, ci); + } +@@ -113,7 +112,7 @@ void f2fs_free_encryption_info(struct inode *inode, struct f2fs_crypt_info *ci) + f2fs_free_crypt_info(ci); + } + +-int _f2fs_get_encryption_info(struct inode *inode) ++int f2fs_get_encryption_info(struct inode *inode) + { + struct f2fs_inode_info *fi = F2FS_I(inode); + struct f2fs_crypt_info *crypt_info; +@@ -129,18 +128,12 @@ int _f2fs_get_encryption_info(struct inode *inode) + char mode; + int res; + ++ if (fi->i_crypt_info) ++ return 0; ++ + res = f2fs_crypto_initialize(); + if (res) + return res; +-retry: +- crypt_info = ACCESS_ONCE(fi->i_crypt_info); +- if (crypt_info) { +- if (!crypt_info->ci_keyring_key || +- key_validate(crypt_info->ci_keyring_key) == 0) +- return 0; +- f2fs_free_encryption_info(inode, crypt_info); +- goto retry; +- } + + res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION, + F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, +@@ -159,7 +152,6 @@ retry: + crypt_info->ci_data_mode = ctx.contents_encryption_mode; + crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; + crypt_info->ci_ctfm = NULL; +- crypt_info->ci_keyring_key = NULL; + memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, + sizeof(crypt_info->ci_master_key)); + if (S_ISREG(inode->i_mode)) +@@ -197,7 +189,6 @@ retry: + keyring_key = NULL; + goto out; + } +- crypt_info->ci_keyring_key = keyring_key; + BUG_ON(keyring_key->type != &key_type_logon); + ukp = user_key_payload(keyring_key); + if (ukp->datalen != sizeof(struct f2fs_encryption_key)) { +@@ -230,17 +221,12 @@ retry: + if (res) + goto out; + +- memzero_explicit(raw_key, sizeof(raw_key)); +- if (cmpxchg(&fi->i_crypt_info, NULL, crypt_info) != NULL) { +- f2fs_free_crypt_info(crypt_info); +- goto retry; +- } +- return 0; +- ++ if (cmpxchg(&fi->i_crypt_info, NULL, crypt_info) == NULL) ++ crypt_info = NULL; + out: + if (res == -ENOKEY && !S_ISREG(inode->i_mode)) + res = 0; +- ++ key_put(keyring_key); + 
f2fs_free_crypt_info(crypt_info); + memzero_explicit(raw_key, sizeof(raw_key)); + return res; +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 9db5500d63d9..b1aeca83f4be 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -2149,7 +2149,6 @@ void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *, struct bio *); + + /* crypto_key.c */ + void f2fs_free_encryption_info(struct inode *, struct f2fs_crypt_info *); +-int _f2fs_get_encryption_info(struct inode *inode); + + /* crypto_fname.c */ + bool f2fs_valid_filenames_enc_mode(uint32_t); +@@ -2170,18 +2169,7 @@ void f2fs_exit_crypto(void); + + int f2fs_has_encryption_key(struct inode *); + +-static inline int f2fs_get_encryption_info(struct inode *inode) +-{ +- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info; +- +- if (!ci || +- (ci->ci_keyring_key && +- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | +- (1 << KEY_FLAG_REVOKED) | +- (1 << KEY_FLAG_DEAD))))) +- return _f2fs_get_encryption_info(inode); +- return 0; +-} ++int f2fs_get_encryption_info(struct inode *inode); + + void f2fs_fname_crypto_free_buffer(struct f2fs_str *); + int f2fs_fname_setup_filename(struct inode *, const struct qstr *, +diff --git a/fs/f2fs/f2fs_crypto.h b/fs/f2fs/f2fs_crypto.h +index c2c1c2b63b25..f113f1a1e8c1 100644 +--- a/fs/f2fs/f2fs_crypto.h ++++ b/fs/f2fs/f2fs_crypto.h +@@ -79,7 +79,6 @@ struct f2fs_crypt_info { + char ci_filename_mode; + char ci_flags; + struct crypto_ablkcipher *ci_ctfm; +- struct key *ci_keyring_key; + char ci_master_key[F2FS_KEY_DESCRIPTOR_SIZE]; + }; + +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 8b0a15e285f9..e984f059e5fc 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -1771,12 +1771,11 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) + #ifdef CONFIG_SMP + if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) + queue_push_tasks(rq); +-#else ++#endif + if (dl_task(rq->curr)) + check_preempt_curr_dl(rq, p, 0); + else + resched_curr(rq); +-#endif + } + } + +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 8ec86abe0ea1..78ae5c1d9412 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -2136,10 +2136,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) + #ifdef CONFIG_SMP + if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) + queue_push_tasks(rq); +-#else ++#endif /* CONFIG_SMP */ + if (p->prio < rq->curr->prio) + resched_curr(rq); +-#endif /* CONFIG_SMP */ + } + } + +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index b5e665b3cfb0..36a50ef9295d 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -3030,6 +3030,11 @@ static int __net_init xfrm_net_init(struct net *net) + { + int rv; + ++ /* Initialize the per-net locks here */ ++ spin_lock_init(&net->xfrm.xfrm_state_lock); ++ rwlock_init(&net->xfrm.xfrm_policy_lock); ++ mutex_init(&net->xfrm.xfrm_cfg_mutex); ++ + rv = xfrm_statistics_init(net); + if (rv < 0) + goto out_statistics; +@@ -3046,11 +3051,6 @@ static int __net_init xfrm_net_init(struct net *net) + if (rv < 0) + goto out; + +- /* Initialize the per-net locks here */ +- spin_lock_init(&net->xfrm.xfrm_state_lock); +- rwlock_init(&net->xfrm.xfrm_policy_lock); +- mutex_init(&net->xfrm.xfrm_cfg_mutex); +- + return 0; + + out: +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index 805681a7d356..7a5a64e70b4d 100644 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es + up = 
nla_data(rp); + ulen = xfrm_replay_state_esn_len(up); + +- if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) ++ /* Check the overall length and the internal bitmap length to avoid ++ * potential overflow. */ ++ if (nla_len(rp) < ulen || ++ xfrm_replay_state_esn_len(replay_esn) != ulen || ++ replay_esn->bmp_len != up->bmp_len) ++ return -EINVAL; ++ ++ if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) + return -EINVAL; + + return 0; diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.59-60.patch b/patch/kernel/mvebu64-default/03-patch-4.4.59-60.patch new file mode 100644 index 000000000..ad96b856c --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.59-60.patch @@ -0,0 +1,1175 @@ +diff --git a/Makefile b/Makefile +index 083724c6ca4d..fb7c2b40753d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 59 ++SUBLEVEL = 60 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c +index 2e7f60c9fc5d..51cdc46a87e2 100644 +--- a/arch/mips/lantiq/irq.c ++++ b/arch/mips/lantiq/irq.c +@@ -269,6 +269,11 @@ static void ltq_hw5_irqdispatch(void) + DEFINE_HWx_IRQDISPATCH(5) + #endif + ++static void ltq_hw_irq_handler(struct irq_desc *desc) ++{ ++ ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); ++} ++ + #ifdef CONFIG_MIPS_MT_SMP + void __init arch_init_ipiirq(int irq, struct irqaction *action) + { +@@ -313,23 +318,19 @@ static struct irqaction irq_call = { + asmlinkage void plat_irq_dispatch(void) + { + unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; +- unsigned int i; +- +- if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) { +- do_IRQ(MIPS_CPU_TIMER_IRQ); +- goto out; +- } else { +- for (i = 0; i < MAX_IM; i++) { +- if (pending & (CAUSEF_IP2 << i)) { +- ltq_hw_irqdispatch(i); +- goto out; +- } +- } ++ int irq; ++ ++ if (!pending) { ++ spurious_interrupt(); ++ return; + } +- pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status()); + +-out: +- return; ++ pending >>= CAUSEB_IP; ++ while (pending) { ++ irq = fls(pending) - 1; ++ do_IRQ(MIPS_CPU_IRQ_BASE + irq); ++ pending &= ~BIT(irq); ++ } + } + + static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) +@@ -354,11 +355,6 @@ static const struct irq_domain_ops irq_domain_ops = { + .map = icu_map, + }; + +-static struct irqaction cascade = { +- .handler = no_action, +- .name = "cascade", +-}; +- + int __init icu_of_init(struct device_node *node, struct device_node *parent) + { + struct device_node *eiu_node; +@@ -390,7 +386,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) + mips_cpu_irq_init(); + + for (i = 0; i < MAX_IM; i++) +- setup_irq(i + 2, &cascade); ++ irq_set_chained_handler(i + 2, ltq_hw_irq_handler); + + if (cpu_has_vint) { + pr_info("Setting up vectored interrupts\n"); +diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c +index e345891450c3..df8844a1853a 100644 +--- a/arch/x86/xen/setup.c ++++ b/arch/x86/xen/setup.c +@@ -713,10 +713,9 @@ static void __init xen_reserve_xen_mfnlist(void) + size = PFN_PHYS(xen_start_info->nr_p2m_frames); + } + +- if (!xen_is_e820_reserved(start, size)) { +- memblock_reserve(start, size); ++ memblock_reserve(start, size); ++ if (!xen_is_e820_reserved(start, size)) + return; +- } + + #ifdef CONFIG_X86_32 + /* +@@ -727,6 +726,7 @@ static void __init xen_reserve_xen_mfnlist(void) + BUG(); + #else + xen_relocate_p2m(); ++ memblock_free(start, size); + #endif + } + +diff --git a/block/bio.c 
b/block/bio.c
+index 46e2cc1d4016..14263fab94d3 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -373,10 +373,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
+ bio_list_init(&punt);
+ bio_list_init(&nopunt);
+
+- while ((bio = bio_list_pop(current->bio_list)))
++ while ((bio = bio_list_pop(&current->bio_list[0])))
+ bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
++ current->bio_list[0] = nopunt;
+
+- *current->bio_list = nopunt;
++ bio_list_init(&nopunt);
++ while ((bio = bio_list_pop(&current->bio_list[1])))
++ bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
++ current->bio_list[1] = nopunt;
+
+ spin_lock(&bs->rescue_lock);
+ bio_list_merge(&bs->rescue_list, &punt);
+@@ -464,7 +468,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
+ * we retry with the original gfp_flags.
+ */
+
+- if (current->bio_list && !bio_list_empty(current->bio_list))
++ if (current->bio_list &&
++ (!bio_list_empty(&current->bio_list[0]) ||
++ !bio_list_empty(&current->bio_list[1])))
+ gfp_mask &= ~__GFP_DIRECT_RECLAIM;
+
+ p = mempool_alloc(bs->bio_pool, gfp_mask);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 4fab5d610805..ef083e7a37c5 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2021,7 +2021,14 @@ end_io:
+ */
+ blk_qc_t generic_make_request(struct bio *bio)
+ {
+- struct bio_list bio_list_on_stack;
++ /*
++ * bio_list_on_stack[0] contains bios submitted by the current
++ * make_request_fn.
++ * bio_list_on_stack[1] contains bios that were submitted before
++ * the current make_request_fn, but that haven't been processed
++ * yet.
++ */
++ struct bio_list bio_list_on_stack[2];
+ blk_qc_t ret = BLK_QC_T_NONE;
+
+ if (!generic_make_request_checks(bio))
+@@ -2038,7 +2045,7 @@ blk_qc_t generic_make_request(struct bio *bio)
+ * should be added at the tail
+ */
+ if (current->bio_list) {
+- bio_list_add(current->bio_list, bio);
++ bio_list_add(&current->bio_list[0], bio);
+ goto out;
+ }
+
+@@ -2057,24 +2064,39 @@ blk_qc_t generic_make_request(struct bio *bio)
+ * bio_list, and call into ->make_request() again.
+ */ + BUG_ON(bio->bi_next); +- bio_list_init(&bio_list_on_stack); +- current->bio_list = &bio_list_on_stack; ++ bio_list_init(&bio_list_on_stack[0]); ++ current->bio_list = bio_list_on_stack; + do { + struct request_queue *q = bdev_get_queue(bio->bi_bdev); + + if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) { ++ struct bio_list lower, same; ++ ++ /* Create a fresh bio_list for all subordinate requests */ ++ bio_list_on_stack[1] = bio_list_on_stack[0]; ++ bio_list_init(&bio_list_on_stack[0]); + + ret = q->make_request_fn(q, bio); + + blk_queue_exit(q); +- +- bio = bio_list_pop(current->bio_list); ++ /* sort new bios into those for a lower level ++ * and those for the same level ++ */ ++ bio_list_init(&lower); ++ bio_list_init(&same); ++ while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) ++ if (q == bdev_get_queue(bio->bi_bdev)) ++ bio_list_add(&same, bio); ++ else ++ bio_list_add(&lower, bio); ++ /* now assemble so we handle the lowest level first */ ++ bio_list_merge(&bio_list_on_stack[0], &lower); ++ bio_list_merge(&bio_list_on_stack[0], &same); ++ bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]); + } else { +- struct bio *bio_next = bio_list_pop(current->bio_list); +- + bio_io_error(bio); +- bio = bio_next; + } ++ bio = bio_list_pop(&bio_list_on_stack[0]); + } while (bio); + current->bio_list = NULL; /* deactivate */ + +diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile +index 675eaf337178..b9cebca376f9 100644 +--- a/drivers/acpi/Makefile ++++ b/drivers/acpi/Makefile +@@ -2,7 +2,6 @@ + # Makefile for the Linux ACPI interpreter + # + +-ccflags-y := -Os + ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT + + # +diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c +index 296b7a14893a..5365ff6e69c1 100644 +--- a/drivers/acpi/acpi_platform.c ++++ b/drivers/acpi/acpi_platform.c +@@ -24,9 +24,11 @@ + ACPI_MODULE_NAME("platform"); + + static const struct acpi_device_id forbidden_id_list[] = { +- {"PNP0000", 0}, /* PIC */ +- {"PNP0100", 0}, /* Timer */ +- {"PNP0200", 0}, /* AT DMA Controller */ ++ {"PNP0000", 0}, /* PIC */ ++ {"PNP0100", 0}, /* Timer */ ++ {"PNP0200", 0}, /* AT DMA Controller */ ++ {"ACPI0009", 0}, /* IOxAPIC */ ++ {"ACPI000A", 0}, /* IOAPIC */ + {"", 0}, + }; + +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index 35310336dd0a..d684e2b79d2b 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, + rbo->placement.num_busy_placement = 0; + for (i = 0; i < rbo->placement.num_placement; i++) { + if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { +- if (rbo->placements[0].fpfn < fpfn) +- rbo->placements[0].fpfn = fpfn; ++ if (rbo->placements[i].fpfn < fpfn) ++ rbo->placements[i].fpfn = fpfn; + } else { + rbo->placement.busy_placement = + &rbo->placements[i]; +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 397f0454100b..320eb3c4bb6b 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -1481,26 +1481,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule) + struct dm_offload *o = container_of(cb, struct dm_offload, cb); + struct bio_list list; + struct bio *bio; ++ int i; + + INIT_LIST_HEAD(&o->cb.list); + + if (unlikely(!current->bio_list)) + return; + +- list = *current->bio_list; +- bio_list_init(current->bio_list); +- +- while ((bio = bio_list_pop(&list))) { +- struct bio_set *bs = bio->bi_pool; +- if (unlikely(!bs) || bs 
== fs_bio_set) {
+- bio_list_add(current->bio_list, bio);
+- continue;
++ for (i = 0; i < 2; i++) {
++ list = current->bio_list[i];
++ bio_list_init(&current->bio_list[i]);
++
++ while ((bio = bio_list_pop(&list))) {
++ struct bio_set *bs = bio->bi_pool;
++ if (unlikely(!bs) || bs == fs_bio_set) {
++ bio_list_add(&current->bio_list[i], bio);
++ continue;
++ }
++
++ spin_lock(&bs->rescue_lock);
++ bio_list_add(&bs->rescue_list, bio);
++ queue_work(bs->rescue_workqueue, &bs->rescue_work);
++ spin_unlock(&bs->rescue_lock);
+ }
+-
+- spin_lock(&bs->rescue_lock);
+- bio_list_add(&bs->rescue_list, bio);
+- queue_work(bs->rescue_workqueue, &bs->rescue_work);
+- spin_unlock(&bs->rescue_lock);
+ }
+ }
+
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 515554c7365b..9be39988bf06 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -877,7 +877,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
+ ((conf->start_next_window <
+ conf->next_resync + RESYNC_SECTORS) &&
+ current->bio_list &&
+- !bio_list_empty(current->bio_list))),
++ (!bio_list_empty(&current->bio_list[0]) ||
++ !bio_list_empty(&current->bio_list[1])))),
+ conf->resync_lock);
+ conf->nr_waiting--;
+ }
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index a92979e704e3..e5ee4e9e0ea5 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -946,7 +946,8 @@ static void wait_barrier(struct r10conf *conf)
+ !conf->barrier ||
+ (conf->nr_pending &&
+ current->bio_list &&
+- !bio_list_empty(current->bio_list)),
++ (!bio_list_empty(&current->bio_list[0]) ||
++ !bio_list_empty(&current->bio_list[1]))),
+ conf->resync_lock);
+ conf->nr_waiting--;
+ }
+diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
+index e9e24df35f26..2579f025b90b 100644
+--- a/drivers/power/reset/at91-poweroff.c
++++ b/drivers/power/reset/at91-poweroff.c
+@@ -14,9 +14,12 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+
++#include
++
+ #define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
+ #define AT91_SHDW_SHDW BIT(0) /* Shut Down command */
+ #define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */
+@@ -50,6 +53,7 @@ static const char *shdwc_wakeup_modes[] = {
+
+ static void __iomem *at91_shdwc_base;
+ static struct clk *sclk;
++static void __iomem *mpddrc_base;
+
+ static void __init at91_wakeup_status(void)
+ {
+@@ -73,6 +77,29 @@ static void at91_poweroff(void)
+ writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
+ }
+
++static void at91_lpddr_poweroff(void)
++{
++ asm volatile(
++ /* Align to cache lines */
++ ".balign 32\n\t"
++
++ /* Ensure AT91_SHDW_CR is in the TLB by reading it */
++ " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
++
++ /* Power down SDRAM0 */
++ " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
++ /* Shutdown CPU */
++ " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
++
++ " b .\n\t"
++ :
++ : "r" (mpddrc_base),
++ "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
++ "r" (at91_shdwc_base),
++ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
++ : "r0");
++}
++
+ static int at91_poweroff_get_wakeup_mode(struct device_node *np)
+ {
+ const char *pm;
+@@ -124,6 +151,8 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
+ static int __init at91_poweroff_probe(struct platform_device *pdev)
+ {
+ struct resource *res;
++ struct device_node *np;
++ u32 ddr_type;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -150,12 +179,30 @@ static int __init at91_poweroff_probe(struct platform_device *pdev)
+
+ pm_power_off = at91_poweroff; + ++ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc"); ++ if (!np) ++ return 0; ++ ++ mpddrc_base = of_iomap(np, 0); ++ of_node_put(np); ++ ++ if (!mpddrc_base) ++ return 0; ++ ++ ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD; ++ if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) || ++ (ddr_type == AT91_DDRSDRC_MD_LPDDR3)) ++ pm_power_off = at91_lpddr_poweroff; ++ else ++ iounmap(mpddrc_base); ++ + return 0; + } + + static int __exit at91_poweroff_remove(struct platform_device *pdev) + { +- if (pm_power_off == at91_poweroff) ++ if (pm_power_off == at91_poweroff || ++ pm_power_off == at91_lpddr_poweroff) + pm_power_off = NULL; + + clk_disable_unprepare(sclk); +@@ -163,6 +210,11 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev) + return 0; + } + ++static const struct of_device_id at91_ramc_of_match[] = { ++ { .compatible = "atmel,sama5d3-ddramc", }, ++ { /* sentinel */ } ++}; ++ + static const struct of_device_id at91_poweroff_of_match[] = { + { .compatible = "atmel,at91sam9260-shdwc", }, + { .compatible = "atmel,at91sam9rl-shdwc", }, +diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c +index f40afdd0e5f5..00662dd28d66 100644 +--- a/drivers/rtc/rtc-s35390a.c ++++ b/drivers/rtc/rtc-s35390a.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #define S35390A_CMD_STATUS1 0 + #define S35390A_CMD_STATUS2 1 +@@ -34,10 +35,14 @@ + #define S35390A_ALRM_BYTE_HOURS 1 + #define S35390A_ALRM_BYTE_MINS 2 + ++/* flags for STATUS1 */ + #define S35390A_FLAG_POC 0x01 + #define S35390A_FLAG_BLD 0x02 ++#define S35390A_FLAG_INT2 0x04 + #define S35390A_FLAG_24H 0x40 + #define S35390A_FLAG_RESET 0x80 ++ ++/* flag for STATUS2 */ + #define S35390A_FLAG_TEST 0x01 + + #define S35390A_INT2_MODE_MASK 0xF0 +@@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) + return 0; + } + +-static int s35390a_reset(struct s35390a *s35390a) ++/* ++ * Returns <0 on error, 0 if rtc is setup fine and 1 if the chip was reset. ++ * To keep the information if an irq is pending, pass the value read from ++ * STATUS1 to the caller. ++ */ ++static int s35390a_reset(struct s35390a *s35390a, char *status1) + { +- char buf[1]; +- +- if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) +- return -EIO; +- +- if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) ++ char buf; ++ int ret; ++ unsigned initcount = 0; ++ ++ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1); ++ if (ret < 0) ++ return ret; ++ ++ if (*status1 & S35390A_FLAG_POC) ++ /* ++ * Do not communicate for 0.5 seconds since the power-on ++ * detection circuit is in operation. ++ */ ++ msleep(500); ++ else if (!(*status1 & S35390A_FLAG_BLD)) ++ /* ++ * If both POC and BLD are unset everything is fine. ++ */ + return 0; + +- buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); +- buf[0] &= 0xf0; +- return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); ++ /* ++ * At least one of POC and BLD are set, so reinitialise chip. Keeping ++ * this information in the hardware to know later that the time isn't ++ * valid is unfortunately not possible because POC and BLD are cleared ++ * on read. So the reset is best done now. ++ * ++ * The 24H bit is kept over reset, so set it already here. 
++ */ ++initialize: ++ *status1 = S35390A_FLAG_24H; ++ buf = S35390A_FLAG_RESET | S35390A_FLAG_24H; ++ ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1); ++ ++ if (ret < 0) ++ return ret; ++ ++ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1); ++ if (ret < 0) ++ return ret; ++ ++ if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) { ++ /* Try up to five times to reset the chip */ ++ if (initcount < 5) { ++ ++initcount; ++ goto initialize; ++ } else ++ return -EIO; ++ } ++ ++ return 1; + } + + static int s35390a_disable_test_mode(struct s35390a *s35390a) +@@ -242,6 +291,8 @@ static int s35390a_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alm) + + if (alm->time.tm_wday != -1) + buf[S35390A_ALRM_BYTE_WDAY] = bin2bcd(alm->time.tm_wday) | 0x80; ++ else ++ buf[S35390A_ALRM_BYTE_WDAY] = 0; + + buf[S35390A_ALRM_BYTE_HOURS] = s35390a_hr2reg(s35390a, + alm->time.tm_hour) | 0x80; +@@ -265,27 +316,61 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm) + char buf[3], sts; + int i, err; + ++ /* ++ * initialize all members to -1 to signal the core that they are not ++ * defined by the hardware. ++ */ ++ alm->time.tm_sec = -1; ++ alm->time.tm_min = -1; ++ alm->time.tm_hour = -1; ++ alm->time.tm_mday = -1; ++ alm->time.tm_mon = -1; ++ alm->time.tm_year = -1; ++ alm->time.tm_wday = -1; ++ alm->time.tm_yday = -1; ++ alm->time.tm_isdst = -1; ++ + err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts)); + if (err < 0) + return err; + +- if (bitrev8(sts) != S35390A_INT2_MODE_ALARM) +- return -EINVAL; ++ if ((bitrev8(sts) & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) { ++ /* ++ * When the alarm isn't enabled, the register to configure ++ * the alarm time isn't accessible. ++ */ ++ alm->enabled = 0; ++ return 0; ++ } else { ++ alm->enabled = 1; ++ } + + err = s35390a_get_reg(s35390a, S35390A_CMD_INT2_REG1, buf, sizeof(buf)); + if (err < 0) + return err; + + /* This chip returns the bits of each byte in reverse order */ +- for (i = 0; i < 3; ++i) { ++ for (i = 0; i < 3; ++i) + buf[i] = bitrev8(buf[i]); +- buf[i] &= ~0x80; +- } + +- alm->time.tm_wday = bcd2bin(buf[S35390A_ALRM_BYTE_WDAY]); +- alm->time.tm_hour = s35390a_reg2hr(s35390a, +- buf[S35390A_ALRM_BYTE_HOURS]); +- alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS]); ++ /* ++ * B0 of the three matching registers is an enable flag. Iff it is set ++ * the configured value is used for matching. 
++ */ ++ if (buf[S35390A_ALRM_BYTE_WDAY] & 0x80) ++ alm->time.tm_wday = ++ bcd2bin(buf[S35390A_ALRM_BYTE_WDAY] & ~0x80); ++ ++ if (buf[S35390A_ALRM_BYTE_HOURS] & 0x80) ++ alm->time.tm_hour = ++ s35390a_reg2hr(s35390a, ++ buf[S35390A_ALRM_BYTE_HOURS] & ~0x80); ++ ++ if (buf[S35390A_ALRM_BYTE_MINS] & 0x80) ++ alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS] & ~0x80); ++ ++ /* alarm triggers always at s=0 */ ++ alm->time.tm_sec = 0; + + dev_dbg(&client->dev, "%s: alm is mins=%d, hours=%d, wday=%d\n", + __func__, alm->time.tm_min, alm->time.tm_hour, +@@ -327,11 +412,11 @@ static struct i2c_driver s35390a_driver; + static int s35390a_probe(struct i2c_client *client, + const struct i2c_device_id *id) + { +- int err; ++ int err, err_reset; + unsigned int i; + struct s35390a *s35390a; + struct rtc_time tm; +- char buf[1]; ++ char buf, status1; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + err = -ENODEV; +@@ -360,29 +445,35 @@ static int s35390a_probe(struct i2c_client *client, + } + } + +- err = s35390a_reset(s35390a); +- if (err < 0) { ++ err_reset = s35390a_reset(s35390a, &status1); ++ if (err_reset < 0) { ++ err = err_reset; + dev_err(&client->dev, "error resetting chip\n"); + goto exit_dummy; + } + +- err = s35390a_disable_test_mode(s35390a); +- if (err < 0) { +- dev_err(&client->dev, "error disabling test mode\n"); +- goto exit_dummy; +- } +- +- err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); +- if (err < 0) { +- dev_err(&client->dev, "error checking 12/24 hour mode\n"); +- goto exit_dummy; +- } +- if (buf[0] & S35390A_FLAG_24H) ++ if (status1 & S35390A_FLAG_24H) + s35390a->twentyfourhour = 1; + else + s35390a->twentyfourhour = 0; + +- if (s35390a_get_datetime(client, &tm) < 0) ++ if (status1 & S35390A_FLAG_INT2) { ++ /* disable alarm (and maybe test mode) */ ++ buf = 0; ++ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1); ++ if (err < 0) { ++ dev_err(&client->dev, "error disabling alarm"); ++ goto exit_dummy; ++ } ++ } else { ++ err = s35390a_disable_test_mode(s35390a); ++ if (err < 0) { ++ dev_err(&client->dev, "error disabling test mode\n"); ++ goto exit_dummy; ++ } ++ } ++ ++ if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0) + dev_warn(&client->dev, "clock needs to be set\n"); + + device_set_wakeup_capable(&client->dev, 1); +@@ -395,6 +486,10 @@ static int s35390a_probe(struct i2c_client *client, + err = PTR_ERR(s35390a->rtc); + goto exit_dummy; + } ++ ++ if (status1 & S35390A_FLAG_INT2) ++ rtc_update_irq(s35390a->rtc, 1, RTC_AF); ++ + return 0; + + exit_dummy: +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c +index 9c706d8c1441..6f5e2720ffad 100644 +--- a/drivers/scsi/libsas/sas_ata.c ++++ b/drivers/scsi/libsas/sas_ata.c +@@ -218,7 +218,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) + task->num_scatter = qc->n_elem; + } else { + for_each_sg(qc->sg, sg, qc->n_elem, si) +- xfer += sg->length; ++ xfer += sg_dma_len(sg); + + task->total_xfer_len = xfer; + task->num_scatter = si; +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h +index 92648a5ea2d2..63f5965acc89 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.h ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h +@@ -390,6 +390,7 @@ struct MPT3SAS_TARGET { + * @eedp_enable: eedp support enable bit + * @eedp_type: 0(type_1), 1(type_2), 2(type_3) + * @eedp_block_length: block size ++ * @ata_command_pending: SATL passthrough outstanding for device + */ + struct MPT3SAS_DEVICE { + struct 
MPT3SAS_TARGET *sas_target; +@@ -398,6 +399,17 @@ struct MPT3SAS_DEVICE { + u8 configured_lun; + u8 block; + u8 tlr_snoop_check; ++ /* ++ * Bug workaround for SATL handling: the mpt2/3sas firmware ++ * doesn't return BUSY or TASK_SET_FULL for subsequent ++ * commands while a SATL pass through is in operation as the ++ * spec requires, it simply does nothing with them until the ++ * pass through completes, causing them possibly to timeout if ++ * the passthrough is a long executing command (like format or ++ * secure erase). This variable allows us to do the right ++ * thing while a SATL command is pending. ++ */ ++ unsigned long ata_command_pending; + }; + + #define MPT3_CMD_NOT_USED 0x8000 /* free */ +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index f6a8e9958e75..8a5fbdb45cfd 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -3707,9 +3707,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, + } + } + +-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) ++static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) + { +- return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); ++ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; ++ ++ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) ++ return 0; ++ ++ if (pending) ++ return test_and_set_bit(0, &priv->ata_command_pending); ++ ++ clear_bit(0, &priv->ata_command_pending); ++ return 0; + } + + /** +@@ -3733,9 +3742,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) + if (!scmd) + continue; + count++; +- if (ata_12_16_cmd(scmd)) +- scsi_internal_device_unblock(scmd->device, +- SDEV_RUNNING); ++ _scsih_set_satl_pending(scmd, false); + mpt3sas_base_free_smid(ioc, smid); + scsi_dma_unmap(scmd); + if (ioc->pci_error_recovery) +@@ -3866,13 +3873,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) + if (ioc->logging_level & MPT_DEBUG_SCSI) + scsi_print_command(scmd); + +- /* +- * Lock the device for any subsequent command until command is +- * done. +- */ +- if (ata_12_16_cmd(scmd)) +- scsi_internal_device_block(scmd->device); +- + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; +@@ -3886,6 +3886,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) + return 0; + } + ++ /* ++ * Bug work around for firmware SATL handling. 
The loop ++ * is based on atomic operations and ensures consistency ++ * since we're lockless at this point ++ */ ++ do { ++ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { ++ scmd->result = SAM_STAT_BUSY; ++ scmd->scsi_done(scmd); ++ return 0; ++ } ++ } while (_scsih_set_satl_pending(scmd, true)); ++ + sas_target_priv_data = sas_device_priv_data->sas_target; + + /* invalid device handle */ +@@ -4445,8 +4458,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + if (scmd == NULL) + return 1; + +- if (ata_12_16_cmd(scmd)) +- scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); ++ _scsih_set_satl_pending(scmd, false); + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index dedcff9cabb5..6514636431ab 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -1008,6 +1008,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + result = get_user(val, ip); + if (result) + return result; ++ if (val > SG_MAX_CDB_SIZE) ++ return -ENOMEM; + sfp->next_cmd_len = (val > 0) ? val : 0; + return 0; + case SG_GET_VERSION_NUM: +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index a0f911641b04..a15070a7fcd6 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -1987,6 +1987,11 @@ static void atmel_flush_buffer(struct uart_port *port) + atmel_uart_writel(port, ATMEL_PDC_TCR, 0); + atmel_port->pdc_tx.ofs = 0; + } ++ /* ++ * in uart_flush_buffer(), the xmit circular buffer has just ++ * been cleared, so we have to reset tx_len accordingly. ++ */ ++ atmel_port->tx_len = 0; + } + + /* +@@ -2499,6 +2504,9 @@ static void atmel_console_write(struct console *co, const char *s, u_int count) + pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); + ++ /* Make sure that tx path is actually able to send characters */ ++ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); ++ + uart_console_write(port, s, count, atmel_console_putchar); + + /* +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index 5724d7c41e29..ca2cbdb3aa67 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -499,8 +499,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) + */ + tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength); + tbuf = kzalloc(tbuf_size, GFP_KERNEL); +- if (!tbuf) +- return -ENOMEM; ++ if (!tbuf) { ++ status = -ENOMEM; ++ goto err_alloc; ++ } + + bufp = tbuf; + +@@ -705,6 +707,7 @@ error: + } + + kfree(tbuf); ++ err_alloc: + + /* any errors get returned through the urb completion */ + spin_lock_irq(&hcd_root_hub_lock); +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index c923350ca20a..d7ce4e3280db 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -182,8 +182,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, + int len, void *val); + int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + int len, struct kvm_io_device *dev); +-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, +- struct kvm_io_device *dev); ++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, ++ struct kvm_io_device *dev); + + #ifdef CONFIG_KVM_ASYNC_PF + struct kvm_async_pf { +diff --git a/kernel/padata.c b/kernel/padata.c +index b38bea9c466a..401227e3967c 100644 +--- a/kernel/padata.c 
++++ b/kernel/padata.c +@@ -189,19 +189,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) + + reorder = &next_queue->reorder; + ++ spin_lock(&reorder->lock); + if (!list_empty(&reorder->list)) { + padata = list_entry(reorder->list.next, + struct padata_priv, list); + +- spin_lock(&reorder->lock); + list_del_init(&padata->list); + atomic_dec(&pd->reorder_objects); +- spin_unlock(&reorder->lock); + + pd->processed++; + ++ spin_unlock(&reorder->lock); + goto out; + } ++ spin_unlock(&reorder->lock); + + if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { + padata = ERR_PTR(-ENODATA); +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index ea11123a9249..7294301d8495 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -4362,6 +4362,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, + { + struct page *page = NULL; + spinlock_t *ptl; ++ pte_t pte; + retry: + ptl = pmd_lockptr(mm, pmd); + spin_lock(ptl); +@@ -4371,12 +4372,13 @@ retry: + */ + if (!pmd_huge(*pmd)) + goto out; +- if (pmd_present(*pmd)) { ++ pte = huge_ptep_get((pte_t *)pmd); ++ if (pte_present(pte)) { + page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); + if (flags & FOLL_GET) + get_page(page); + } else { +- if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) { ++ if (is_hugetlb_entry_migration(pte)) { + spin_unlock(ptl); + __migration_entry_wait(mm, (pte_t *)pmd, ptl); + goto retry; +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index b8d927c56494..a6b2f2138c9d 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -478,11 +479,16 @@ static int ceph_tcp_connect(struct ceph_connection *con) + { + struct sockaddr_storage *paddr = &con->peer_addr.in_addr; + struct socket *sock; ++ unsigned int noio_flag; + int ret; + + BUG_ON(con->sock); ++ ++ /* sock_create_kern() allocates with GFP_KERNEL */ ++ noio_flag = memalloc_noio_save(); + ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, + SOCK_STREAM, IPPROTO_TCP, &sock); ++ memalloc_noio_restore(noio_flag); + if (ret) + return ret; + sock->sk->sk_allocation = GFP_NOFS; +diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c +index 3f4efcb85df5..3490d21ab9e7 100644 +--- a/sound/core/seq/seq_fifo.c ++++ b/sound/core/seq/seq_fifo.c +@@ -265,6 +265,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize) + /* NOTE: overflow flag is not cleared */ + spin_unlock_irqrestore(&f->lock, flags); + ++ /* close the old pool and wait until all users are gone */ ++ snd_seq_pool_mark_closing(oldpool); ++ snd_use_lock_sync(&f->use_lock); ++ + /* release cells in old pool */ + for (cell = oldhead; cell; cell = next) { + next = cell->next; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 1d4f34379f56..46a34039ecdc 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -4831,6 +4831,7 @@ enum { + ALC292_FIXUP_DISABLE_AAMIX, + ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, + ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ++ ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, + ALC275_FIXUP_DELL_XPS, + ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, + ALC293_FIXUP_LENOVO_SPK_NOISE, +@@ -5429,6 +5430,15 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE + }, ++ [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x18, 0x01a1913c }, /* 
use as headset mic, without its own jack detect */ ++ { } ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MODE ++ }, + [ALC275_FIXUP_DELL_XPS] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { +@@ -5501,7 +5511,7 @@ static const struct hda_fixup alc269_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc298_fixup_speaker_volume, + .chained = true, +- .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ++ .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, + }, + [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { + .type = HDA_FIXUP_PINS, +diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c +index 8276675730ef..78a985629607 100644 +--- a/sound/soc/atmel/atmel-classd.c ++++ b/sound/soc/atmel/atmel-classd.c +@@ -343,7 +343,7 @@ static int atmel_classd_codec_dai_digital_mute(struct snd_soc_dai *codec_dai, + } + + #define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8) +-#define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8) ++#define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8) + + static struct { + int rate; +diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c +index 46dbc0a7dfc1..49001fa84ead 100644 +--- a/virt/kvm/eventfd.c ++++ b/virt/kvm/eventfd.c +@@ -868,7 +868,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, + continue; + + kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); +- kvm->buses[bus_idx]->ioeventfd_count--; ++ if (kvm->buses[bus_idx]) ++ kvm->buses[bus_idx]->ioeventfd_count--; + ioeventfd_release(p); + ret = 0; + break; +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 336ed267c407..cb092bd9965b 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -654,8 +654,11 @@ static void kvm_destroy_vm(struct kvm *kvm) + list_del(&kvm->vm_list); + spin_unlock(&kvm_lock); + kvm_free_irq_routing(kvm); +- for (i = 0; i < KVM_NR_BUSES; i++) +- kvm_io_bus_destroy(kvm->buses[i]); ++ for (i = 0; i < KVM_NR_BUSES; i++) { ++ if (kvm->buses[i]) ++ kvm_io_bus_destroy(kvm->buses[i]); ++ kvm->buses[i] = NULL; ++ } + kvm_coalesced_mmio_free(kvm); + #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) + mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); +@@ -3271,6 +3274,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, + }; + + bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); ++ if (!bus) ++ return -ENOMEM; + r = __kvm_io_bus_write(vcpu, bus, &range, val); + return r < 0 ? r : 0; + } +@@ -3288,6 +3293,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, + }; + + bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); ++ if (!bus) ++ return -ENOMEM; + + /* First try the device referenced by cookie. */ + if ((cookie >= 0) && (cookie < bus->dev_count) && +@@ -3338,6 +3345,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, + }; + + bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); ++ if (!bus) ++ return -ENOMEM; + r = __kvm_io_bus_read(vcpu, bus, &range, val); + return r < 0 ? 
r : 0;
+ }
+@@ -3350,6 +3359,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ struct kvm_io_bus *new_bus, *bus;
+
+ bus = kvm->buses[bus_idx];
++ if (!bus)
++ return -ENOMEM;
++
+ /* exclude ioeventfd which is limited by maximum fd */
+ if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
+ return -ENOSPC;
+@@ -3369,37 +3381,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ }
+
+ /* Caller must hold slots_lock. */
+-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+- struct kvm_io_device *dev)
++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++ struct kvm_io_device *dev)
+ {
+- int i, r;
++ int i;
+ struct kvm_io_bus *new_bus, *bus;
+
+ bus = kvm->buses[bus_idx];
+- r = -ENOENT;
++ if (!bus)
++ return;
++
+ for (i = 0; i < bus->dev_count; i++)
+ if (bus->range[i].dev == dev) {
+- r = 0;
+ break;
+ }
+
+- if (r)
+- return r;
++ if (i == bus->dev_count)
++ return;
+
+ new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ sizeof(struct kvm_io_range)), GFP_KERNEL);
+- if (!new_bus)
+- return -ENOMEM;
++ if (!new_bus) {
++ pr_err("kvm: failed to shrink bus, removing it completely\n");
++ goto broken;
++ }
+
+ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ new_bus->dev_count--;
+ memcpy(new_bus->range + i, bus->range + i + 1,
+ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+
++broken:
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);
+- return r;
++ return;
+ }
+
+ static struct notifier_block kvm_cpu_notifier = {
diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.60-61.patch b/patch/kernel/mvebu64-default/03-patch-4.4.60-61.patch
new file mode 100644
index 000000000..642a5bd6b
--- /dev/null
+++ b/patch/kernel/mvebu64-default/03-patch-4.4.60-61.patch
@@ -0,0 +1,1527 @@
+diff --git a/Makefile b/Makefile
+index fb7c2b40753d..ef5045b8201d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 60
++SUBLEVEL = 61
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 11b6595c2672..f91ee2f27b41 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -796,6 +796,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
++ down_read(&current->mm->mmap_sem);
+ spin_lock(&kvm->mmu_lock);
+
+ slots = kvm_memslots(kvm);
+@@ -803,6 +804,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+ stage2_unmap_memslot(kvm, memslot);
+
+ spin_unlock(&kvm->mmu_lock);
++ up_read(&current->mm->mmap_sem);
+ srcu_read_unlock(&kvm->srcu, idx);
+ }
+
+@@ -1759,6 +1761,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ (KVM_PHYS_SIZE >> PAGE_SHIFT))
+ return -EFAULT;
+
++ down_read(&current->mm->mmap_sem);
+ /*
+ * A memory region could potentially cover multiple VMAs, and any holes
+ * between them, so iterate over all of them to find out if we can map
+@@ -1802,8 +1805,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ pa += vm_start - vma->vm_start;
+
+ /* IO region dirty page logging not allowed */
+- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
+- return -EINVAL;
++ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+ vm_end - vm_start,
+@@ -1815,7 +1820,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ } while (hva < reg_end);
+
+ if (change == KVM_MR_FLAGS_ONLY)
+- return ret;
++
goto out; + + spin_lock(&kvm->mmu_lock); + if (ret) +@@ -1823,6 +1828,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, + else + stage2_flush_memslot(kvm, memslot); + spin_unlock(&kvm->mmu_lock); ++out: ++ up_read(¤t->mm->mmap_sem); + return ret; + } + +diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h +index 273e61225c27..07238b39638c 100644 +--- a/arch/metag/include/asm/uaccess.h ++++ b/arch/metag/include/asm/uaccess.h +@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count); + + #define strlen_user(str) strnlen_user(str, 32767) + +-extern unsigned long __must_check __copy_user_zeroing(void *to, +- const void __user *from, +- unsigned long n); ++extern unsigned long raw_copy_from_user(void *to, const void __user *from, ++ unsigned long n); + + static inline unsigned long + copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ unsigned long res = n; + if (likely(access_ok(VERIFY_READ, from, n))) +- return __copy_user_zeroing(to, from, n); +- memset(to, 0, n); +- return n; ++ res = raw_copy_from_user(to, from, n); ++ if (unlikely(res)) ++ memset(to + (n - res), 0, res); ++ return res; + } + +-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) ++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n) + #define __copy_from_user_inatomic __copy_from_user + + extern unsigned long __must_check __copy_user(void __user *to, +diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c +index b3ebfe9c8e88..2792fc621088 100644 +--- a/arch/metag/lib/usercopy.c ++++ b/arch/metag/lib/usercopy.c +@@ -29,7 +29,6 @@ + COPY \ + "1:\n" \ + " .section .fixup,\"ax\"\n" \ +- " MOV D1Ar1,#0\n" \ + FIXUP \ + " MOVT D1Ar1,#HI(1b)\n" \ + " JUMP D1Ar1,#LO(1b)\n" \ +@@ -260,27 +259,31 @@ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "22:\n" \ + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ +- "SUB %3, %3, #32\n" \ + "23:\n" \ +- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ ++ "SUB %3, %3, #32\n" \ + "24:\n" \ ++ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ ++ "25:\n" \ + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "26:\n" \ + "SUB %3, %3, #32\n" \ + "DCACHE [%1+#-64], D0Ar6\n" \ + "BR $Lloop"id"\n" \ + \ + "MOV RAPF, %1\n" \ +- "25:\n" \ ++ "27:\n" \ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "26:\n" \ ++ "28:\n" \ + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "29:\n" \ + "SUB %3, %3, #32\n" \ +- "27:\n" \ ++ "30:\n" \ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "28:\n" \ ++ "31:\n" \ + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "32:\n" \ + "SUB %0, %0, #8\n" \ +- "29:\n" \ ++ "33:\n" \ + "SETL [%0++], D0.7, D1.7\n" \ + "SUB %3, %3, #32\n" \ + "1:" \ +@@ -312,11 +315,15 @@ + " .long 26b,3b\n" \ + " .long 27b,3b\n" \ + " .long 28b,3b\n" \ +- " .long 29b,4b\n" \ ++ " .long 29b,3b\n" \ ++ " .long 30b,3b\n" \ ++ " .long 31b,3b\n" \ ++ " .long 32b,3b\n" \ ++ " .long 33b,4b\n" \ + " .previous\n" \ + : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ + : "0" (to), "1" (from), "2" (ret), "3" (n) \ +- : "D1Ar1", "D0Ar2", "memory") ++ : "D1Ar1", "D0Ar2", "cc", "memory") + + /* rewind 'to' and 'from' pointers when a fault occurs + * +@@ -342,7 +349,7 @@ + #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ + __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ + "LSR D0Ar2, D0Ar2, #8\n" \ +- "AND D0Ar2, D0Ar2, #0x7\n" \ ++ "ANDS D0Ar2, D0Ar2, #0x7\n" \ + "ADDZ D0Ar2, D0Ar2, #4\n" \ + "SUB D0Ar2, D0Ar2, #1\n" \ + "MOV D1Ar1, #4\n" \ +@@ -403,47 +410,55 @@ + 
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "22:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ +- "SUB %3, %3, #16\n" \ + "23:\n" \ +- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "24:\n" \ +- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "SUB %3, %3, #16\n" \ +- "25:\n" \ ++ "24:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "26:\n" \ ++ "25:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "26:\n" \ + "SUB %3, %3, #16\n" \ + "27:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "28:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "29:\n" \ ++ "SUB %3, %3, #16\n" \ ++ "30:\n" \ ++ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ ++ "31:\n" \ ++ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "32:\n" \ + "SUB %3, %3, #16\n" \ + "DCACHE [%1+#-64], D0Ar6\n" \ + "BR $Lloop"id"\n" \ + \ + "MOV RAPF, %1\n" \ +- "29:\n" \ ++ "33:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "30:\n" \ ++ "34:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "35:\n" \ + "SUB %3, %3, #16\n" \ +- "31:\n" \ ++ "36:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "32:\n" \ ++ "37:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "38:\n" \ + "SUB %3, %3, #16\n" \ +- "33:\n" \ ++ "39:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "34:\n" \ ++ "40:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "41:\n" \ + "SUB %3, %3, #16\n" \ +- "35:\n" \ ++ "42:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "36:\n" \ ++ "43:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "44:\n" \ + "SUB %0, %0, #4\n" \ +- "37:\n" \ ++ "45:\n" \ + "SETD [%0++], D0.7\n" \ + "SUB %3, %3, #16\n" \ + "1:" \ +@@ -483,11 +498,19 @@ + " .long 34b,3b\n" \ + " .long 35b,3b\n" \ + " .long 36b,3b\n" \ +- " .long 37b,4b\n" \ ++ " .long 37b,3b\n" \ ++ " .long 38b,3b\n" \ ++ " .long 39b,3b\n" \ ++ " .long 40b,3b\n" \ ++ " .long 41b,3b\n" \ ++ " .long 42b,3b\n" \ ++ " .long 43b,3b\n" \ ++ " .long 44b,3b\n" \ ++ " .long 45b,4b\n" \ + " .previous\n" \ + : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ + : "0" (to), "1" (from), "2" (ret), "3" (n) \ +- : "D1Ar1", "D0Ar2", "memory") ++ : "D1Ar1", "D0Ar2", "cc", "memory") + + /* rewind 'to' and 'from' pointers when a fault occurs + * +@@ -513,7 +536,7 @@ + #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ + __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ + "LSR D0Ar2, D0Ar2, #8\n" \ +- "AND D0Ar2, D0Ar2, #0x7\n" \ ++ "ANDS D0Ar2, D0Ar2, #0x7\n" \ + "ADDZ D0Ar2, D0Ar2, #4\n" \ + "SUB D0Ar2, D0Ar2, #1\n" \ + "MOV D1Ar1, #4\n" \ +@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + if ((unsigned long) src & 1) { + __asm_copy_to_user_1(dst, src, retn); + n--; ++ if (retn) ++ return retn + n; + } + if ((unsigned long) dst & 1) { + /* Worst case - byte copy */ + while (n > 0) { + __asm_copy_to_user_1(dst, src, retn); + n--; ++ if (retn) ++ return retn + n; + } + } + if (((unsigned long) src & 2) && n >= 2) { + __asm_copy_to_user_2(dst, src, retn); + n -= 2; ++ if (retn) ++ return retn + n; + } + if ((unsigned long) dst & 2) { + /* Second worst case - word copy */ + while (n >= 2) { + __asm_copy_to_user_2(dst, src, retn); + n -= 2; ++ if (retn) ++ return retn + n; + } + } + +@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + while (n >= 8) { + __asm_copy_to_user_8x64(dst, src, retn); + n -= 8; ++ if (retn) ++ return retn + n; + } + } + if (n >= RAPF_MIN_BUF_SIZE) { +@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + while (n 
>= 8) { + __asm_copy_to_user_8x64(dst, src, retn); + n -= 8; ++ if (retn) ++ return retn + n; + } + } + #endif +@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + while (n >= 16) { + __asm_copy_to_user_16(dst, src, retn); + n -= 16; ++ if (retn) ++ return retn + n; + } + + while (n >= 4) { + __asm_copy_to_user_4(dst, src, retn); + n -= 4; ++ if (retn) ++ return retn + n; + } + + switch (n) { +@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + break; + } + ++ /* ++ * If we get here, retn correctly reflects the number of failing ++ * bytes. ++ */ + return retn; + } + EXPORT_SYMBOL(__copy_user); +@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user); + __asm_copy_user_cont(to, from, ret, \ + " GETB D1Ar1,[%1++]\n" \ + "2: SETB [%0++],D1Ar1\n", \ +- "3: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ ++ "3: ADD %2,%2,#1\n", \ + " .long 2b,3b\n") + + #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ + __asm_copy_user_cont(to, from, ret, \ + " GETW D1Ar1,[%1++]\n" \ + "2: SETW [%0++],D1Ar1\n" COPY, \ +- "3: ADD %2,%2,#2\n" \ +- " SETW [%0++],D1Ar1\n" FIXUP, \ ++ "3: ADD %2,%2,#2\n" FIXUP, \ + " .long 2b,3b\n" TENTRY) + + #define __asm_copy_from_user_2(to, from, ret) \ +@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user); + __asm_copy_from_user_2x_cont(to, from, ret, \ + " GETB D1Ar1,[%1++]\n" \ + "4: SETB [%0++],D1Ar1\n", \ +- "5: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ ++ "5: ADD %2,%2,#1\n", \ + " .long 4b,5b\n") + + #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ + __asm_copy_user_cont(to, from, ret, \ + " GETD D1Ar1,[%1++]\n" \ + "2: SETD [%0++],D1Ar1\n" COPY, \ +- "3: ADD %2,%2,#4\n" \ +- " SETD [%0++],D1Ar1\n" FIXUP, \ ++ "3: ADD %2,%2,#4\n" FIXUP, \ + " .long 2b,3b\n" TENTRY) + + #define __asm_copy_from_user_4(to, from, ret) \ + __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") + +-#define __asm_copy_from_user_5(to, from, ret) \ +- __asm_copy_from_user_4x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "4: SETB [%0++],D1Ar1\n", \ +- "5: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 4b,5b\n") +- +-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_4x_cont(to, from, ret, \ +- " GETW D1Ar1,[%1++]\n" \ +- "4: SETW [%0++],D1Ar1\n" COPY, \ +- "5: ADD %2,%2,#2\n" \ +- " SETW [%0++],D1Ar1\n" FIXUP, \ +- " .long 4b,5b\n" TENTRY) +- +-#define __asm_copy_from_user_6(to, from, ret) \ +- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_7(to, from, ret) \ +- __asm_copy_from_user_6x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "6: SETB [%0++],D1Ar1\n", \ +- "7: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 6b,7b\n") +- +-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_4x_cont(to, from, ret, \ +- " GETD D1Ar1,[%1++]\n" \ +- "4: SETD [%0++],D1Ar1\n" COPY, \ +- "5: ADD %2,%2,#4\n" \ +- " SETD [%0++],D1Ar1\n" FIXUP, \ +- " .long 4b,5b\n" TENTRY) +- +-#define __asm_copy_from_user_8(to, from, ret) \ +- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_9(to, from, ret) \ +- __asm_copy_from_user_8x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "6: SETB [%0++],D1Ar1\n", \ +- "7: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 6b,7b\n") +- +-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_8x_cont(to, from, 
ret, \ +- " GETW D1Ar1,[%1++]\n" \ +- "6: SETW [%0++],D1Ar1\n" COPY, \ +- "7: ADD %2,%2,#2\n" \ +- " SETW [%0++],D1Ar1\n" FIXUP, \ +- " .long 6b,7b\n" TENTRY) +- +-#define __asm_copy_from_user_10(to, from, ret) \ +- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_11(to, from, ret) \ +- __asm_copy_from_user_10x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "8: SETB [%0++],D1Ar1\n", \ +- "9: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 8b,9b\n") +- +-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_8x_cont(to, from, ret, \ +- " GETD D1Ar1,[%1++]\n" \ +- "6: SETD [%0++],D1Ar1\n" COPY, \ +- "7: ADD %2,%2,#4\n" \ +- " SETD [%0++],D1Ar1\n" FIXUP, \ +- " .long 6b,7b\n" TENTRY) +- +-#define __asm_copy_from_user_12(to, from, ret) \ +- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_13(to, from, ret) \ +- __asm_copy_from_user_12x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "8: SETB [%0++],D1Ar1\n", \ +- "9: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 8b,9b\n") +- +-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_12x_cont(to, from, ret, \ +- " GETW D1Ar1,[%1++]\n" \ +- "8: SETW [%0++],D1Ar1\n" COPY, \ +- "9: ADD %2,%2,#2\n" \ +- " SETW [%0++],D1Ar1\n" FIXUP, \ +- " .long 8b,9b\n" TENTRY) +- +-#define __asm_copy_from_user_14(to, from, ret) \ +- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_15(to, from, ret) \ +- __asm_copy_from_user_14x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "10: SETB [%0++],D1Ar1\n", \ +- "11: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 10b,11b\n") +- +-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_12x_cont(to, from, ret, \ +- " GETD D1Ar1,[%1++]\n" \ +- "8: SETD [%0++],D1Ar1\n" COPY, \ +- "9: ADD %2,%2,#4\n" \ +- " SETD [%0++],D1Ar1\n" FIXUP, \ +- " .long 8b,9b\n" TENTRY) +- +-#define __asm_copy_from_user_16(to, from, ret) \ +- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "") +- + #define __asm_copy_from_user_8x64(to, from, ret) \ + asm volatile ( \ + " GETL D0Ar2,D1Ar1,[%1++]\n" \ + "2: SETL [%0++],D0Ar2,D1Ar1\n" \ + "1:\n" \ + " .section .fixup,\"ax\"\n" \ +- " MOV D1Ar1,#0\n" \ +- " MOV D0Ar2,#0\n" \ + "3: ADD %2,%2,#8\n" \ +- " SETL [%0++],D0Ar2,D1Ar1\n" \ + " MOVT D0Ar2,#HI(1b)\n" \ + " JUMP D0Ar2,#LO(1b)\n" \ + " .previous\n" \ +@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user); + * + * Rationale: + * A fault occurs while reading from user buffer, which is the +- * source. Since the fault is at a single address, we only +- * need to rewind by 8 bytes. ++ * source. + * Since we don't write to kernel buffer until we read first, + * the kernel buffer is at the right state and needn't be +- * corrected. ++ * corrected, but the source must be rewound to the beginning of ++ * the block, which is LSM_STEP*8 bytes. ++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read ++ * and stored in D0Ar2 ++ * ++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL ++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if ++ * a fault happens at the 4th write, LSM_STEP will be 0 ++ * instead of 4. The code copes with that. 
+ */ + #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ + __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ +- "SUB %1, %1, #8\n") ++ "LSR D0Ar2, D0Ar2, #5\n" \ ++ "ANDS D0Ar2, D0Ar2, #0x38\n" \ ++ "ADDZ D0Ar2, D0Ar2, #32\n" \ ++ "SUB %1, %1, D0Ar2\n") + + /* rewind 'from' pointer when a fault occurs + * + * Rationale: + * A fault occurs while reading from user buffer, which is the +- * source. Since the fault is at a single address, we only +- * need to rewind by 4 bytes. ++ * source. + * Since we don't write to kernel buffer until we read first, + * the kernel buffer is at the right state and needn't be +- * corrected. ++ * corrected, but the source must be rewound to the beginning of ++ * the block, which is LSM_STEP*4 bytes. ++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read ++ * and stored in D0Ar2 ++ * ++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL ++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if ++ * a fault happens at the 4th write, LSM_STEP will be 0 ++ * instead of 4. The code copes with that. + */ + #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ + __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ +- "SUB %1, %1, #4\n") ++ "LSR D0Ar2, D0Ar2, #6\n" \ ++ "ANDS D0Ar2, D0Ar2, #0x1c\n" \ ++ "ADDZ D0Ar2, D0Ar2, #16\n" \ ++ "SUB %1, %1, D0Ar2\n") + + +-/* Copy from user to kernel, zeroing the bytes that were inaccessible in +- userland. The return-value is the number of bytes that were +- inaccessible. */ +-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, +- unsigned long n) ++/* ++ * Copy from user to kernel. The return-value is the number of bytes that were ++ * inaccessible. ++ */ ++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc, ++ unsigned long n) + { + register char *dst asm ("A0.2") = pdst; + register const char __user *src asm ("A1.2") = psrc; +@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + if ((unsigned long) src & 1) { + __asm_copy_from_user_1(dst, src, retn); + n--; ++ if (retn) ++ return retn + n; + } + if ((unsigned long) dst & 1) { + /* Worst case - byte copy */ +@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_1(dst, src, retn); + n--; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + } + if (((unsigned long) src & 2) && n >= 2) { + __asm_copy_from_user_2(dst, src, retn); + n -= 2; ++ if (retn) ++ return retn + n; + } + if ((unsigned long) dst & 2) { + /* Second worst case - word copy */ +@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_2(dst, src, retn); + n -= 2; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + } + +- /* We only need one check after the unalignment-adjustments, +- because if both adjustments were done, either both or +- neither reference had an exception. 
*/ +- if (retn != 0) +- goto copy_exception_bytes; +- + #ifdef USE_RAPF + /* 64 bit copy loop */ + if (!(((unsigned long) src | (unsigned long) dst) & 7)) { +@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_8x64(dst, src, retn); + n -= 8; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + } + +@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_8x64(dst, src, retn); + n -= 8; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + } + #endif +@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + n -= 4; + + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + + /* If we get here, there were no memory read faults. */ +@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + /* If we get here, retn correctly reflects the number of failing + bytes. */ + return retn; +- +- copy_exception_bytes: +- /* We already have "retn" bytes cleared, and need to clear the +- remaining "n" bytes. A non-optimized simple byte-for-byte in-line +- memset is preferred here, since this isn't speed-critical code and +- we'd rather have this a leaf-function than calling memset. */ +- { +- char *endp; +- for (endp = dst + n; dst < endp; dst++) +- *dst = 0; +- } +- +- return retn + n; + } +-EXPORT_SYMBOL(__copy_user_zeroing); ++EXPORT_SYMBOL(raw_copy_from_user); + + #define __asm_clear_8x64(to, ret) \ + asm volatile ( \ +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig +index db459612de44..75bfca69e418 100644 +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -1412,7 +1412,7 @@ config CPU_MIPS32_R6 + select CPU_SUPPORTS_MSA + select GENERIC_CSUM + select HAVE_KVM +- select MIPS_O32_FP64_SUPPORT ++ select MIPS_O32_FP64_SUPPORT if 32BIT + help + Choose this option to build a kernel for release 6 or later of the + MIPS32 architecture. 
New MIPS processors, starting with the Warrior +diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h +index 40196bebe849..2365ce0ad8f2 100644 +--- a/arch/mips/include/asm/spinlock.h ++++ b/arch/mips/include/asm/spinlock.h +@@ -112,7 +112,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) + " andi %[ticket], %[ticket], 0xffff \n" + " bne %[ticket], %[my_ticket], 4f \n" + " subu %[ticket], %[my_ticket], %[ticket] \n" +- "2: \n" ++ "2: .insn \n" + " .subsection 2 \n" + "4: andi %[ticket], %[ticket], 0xffff \n" + " sll %[ticket], 5 \n" +@@ -187,7 +187,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) + " sc %[ticket], %[ticket_ptr] \n" + " beqz %[ticket], 1b \n" + " li %[ticket], 1 \n" +- "2: \n" ++ "2: .insn \n" + " .subsection 2 \n" + "3: b 2b \n" + " li %[ticket], 0 \n" +@@ -367,7 +367,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) + " .set reorder \n" + __WEAK_LLSC_MB + " li %2, 1 \n" +- "2: \n" ++ "2: .insn \n" + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF_SMALL_ASM() (rw->lock) + : "memory"); +@@ -407,7 +407,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) + " lui %1, 0x8000 \n" + " sc %1, %0 \n" + " li %2, 1 \n" +- "2: \n" ++ "2: .insn \n" + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), + "=&r" (ret) + : GCC_OFF_SMALL_ASM() (rw->lock) +diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c +index 3e390a4e3897..daf580ce5ca2 100644 +--- a/arch/mips/lantiq/xway/sysctrl.c ++++ b/arch/mips/lantiq/xway/sysctrl.c +@@ -467,7 +467,7 @@ void __init ltq_soc_init(void) + + if (!np_xbar) + panic("Failed to load xbar nodes from devicetree"); +- if (of_address_to_resource(np_pmu, 0, &res_xbar)) ++ if (of_address_to_resource(np_xbar, 0, &res_xbar)) + panic("Failed to get xbar resources"); + if (request_mem_region(res_xbar.start, resource_size(&res_xbar), + res_xbar.name) < 0) +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c +index 29f73e00253d..63b7d6f82d24 100644 +--- a/arch/mips/mm/tlbex.c ++++ b/arch/mips/mm/tlbex.c +@@ -757,7 +757,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte, + static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, + struct uasm_label **l, + unsigned int pte, +- unsigned int ptr) ++ unsigned int ptr, ++ unsigned int flush) + { + #ifdef CONFIG_SMP + UASM_i_SC(p, pte, 0, ptr); +@@ -766,6 +767,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, + #else + UASM_i_SW(p, pte, 0, ptr); + #endif ++ if (cpu_has_ftlb && flush) { ++ BUG_ON(!cpu_has_tlbinv); ++ ++ UASM_i_MFC0(p, ptr, C0_ENTRYHI); ++ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); ++ UASM_i_MTC0(p, ptr, C0_ENTRYHI); ++ build_tlb_write_entry(p, l, r, tlb_indexed); ++ ++ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); ++ UASM_i_MTC0(p, ptr, C0_ENTRYHI); ++ build_huge_update_entries(p, pte, ptr); ++ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0); ++ ++ return; ++ } ++ + build_huge_update_entries(p, pte, ptr); + build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); + } +@@ -2082,7 +2099,7 @@ static void build_r4000_tlb_load_handler(void) + uasm_l_tlbl_goaround2(&l, p); + } + uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); +- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); ++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); + #endif + + uasm_l_nopage_tlbl(&l, p); +@@ -2137,7 +2154,7 @@ static void build_r4000_tlb_store_handler(void) + build_tlb_probe_entry(&p); + uasm_i_ori(&p, wr.r1, wr.r1, + 
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); +- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); ++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); + #endif + + uasm_l_nopage_tlbs(&l, p); +@@ -2193,7 +2210,7 @@ static void build_r4000_tlb_modify_handler(void) + build_tlb_probe_entry(&p); + uasm_i_ori(&p, wr.r1, wr.r1, + _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); +- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); ++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0); + #endif + + uasm_l_nopage_tlbm(&l, p); +diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c +index f42834c7f007..3c575093f8f1 100644 +--- a/arch/mips/ralink/rt3883.c ++++ b/arch/mips/ralink/rt3883.c +@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; + static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; + static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; + static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; +-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; ++static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) }; + static struct rt2880_pmx_func pci_func[] = { + FUNC("pci-dev", 0, 40, 32), + FUNC("pci-host2", 1, 40, 32), +@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[] = { + FUNC("pci-fnc", 3, 40, 32) + }; + static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; +-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; ++static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) }; + + static struct rt2880_pmx_group rt3883_pinmux_data[] = { + GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), +diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c +index 718dd197909f..de73beb36910 100644 +--- a/arch/nios2/kernel/prom.c ++++ b/arch/nios2/kernel/prom.c +@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) + return alloc_bootmem_align(size, align); + } + ++int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, ++ bool nomap) ++{ ++ reserve_bootmem(base, size, BOOTMEM_DEFAULT); ++ return 0; ++} ++ + void __init early_init_devtree(void *params) + { + __be32 *dtb = (u32 *)__dtb_start; +diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c +index a4ff86d58d5c..6c4e351a7930 100644 +--- a/arch/nios2/kernel/setup.c ++++ b/arch/nios2/kernel/setup.c +@@ -195,6 +195,9 @@ void __init setup_arch(char **cmdline_p) + } + #endif /* CONFIG_BLK_DEV_INITRD */ + ++ early_init_fdt_reserve_self(); ++ early_init_fdt_scan_reserved_mem(); ++ + unflatten_and_copy_device_tree(); + + setup_cpuinfo(); +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c +index 86150fbb42c3..91e5c1758b5c 100644 +--- a/arch/powerpc/kernel/align.c ++++ b/arch/powerpc/kernel/align.c +@@ -808,14 +808,25 @@ int fix_alignment(struct pt_regs *regs) + nb = aligninfo[instr].len; + flags = aligninfo[instr].flags; + +- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ +- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { +- nb = 8; +- flags = LD+SW; +- } else if (IS_XFORM(instruction) && +- ((instruction >> 1) & 0x3ff) == 660) { +- nb = 8; +- flags = ST+SW; ++ /* ++ * Handle some cases which give overlaps in the DSISR values. 
++ */ ++ if (IS_XFORM(instruction)) { ++ switch (get_xop(instruction)) { ++ case 532: /* ldbrx */ ++ nb = 8; ++ flags = LD+SW; ++ break; ++ case 660: /* stdbrx */ ++ nb = 8; ++ flags = ST+SW; ++ break; ++ case 20: /* lwarx */ ++ case 84: /* ldarx */ ++ case 116: /* lharx */ ++ case 276: /* lqarx */ ++ return 0; /* not emulated ever */ ++ } + } + + /* Byteswap little endian loads and stores */ +diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c +index c8822af10a58..19d9b2d2d212 100644 +--- a/arch/powerpc/mm/hash_native_64.c ++++ b/arch/powerpc/mm/hash_native_64.c +@@ -645,6 +645,10 @@ static void native_flush_hash_range(unsigned long number, int local) + unsigned long psize = batch->psize; + int ssize = batch->ssize; + int i; ++ unsigned int use_local; ++ ++ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && ++ mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use(); + + local_irq_save(flags); + +@@ -671,8 +675,7 @@ static void native_flush_hash_range(unsigned long number, int local) + } pte_iterate_hashed_end(); + } + +- if (mmu_has_feature(MMU_FTR_TLBIEL) && +- mmu_psize_defs[psize].tlbiel && local) { ++ if (use_local) { + asm volatile("ptesync":::"memory"); + for (i = 0; i < number; i++) { + vpn = batch->vpn[i]; +diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c +index 4da604ebf6fd..ca15613eaaa4 100644 +--- a/arch/s390/boot/compressed/misc.c ++++ b/arch/s390/boot/compressed/misc.c +@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size) + + unsigned long decompress_kernel(void) + { +- unsigned long output_addr; +- unsigned char *output; ++ void *output, *kernel_end; + +- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; +- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); +- memset(&_bss, 0, &_ebss - &_bss); +- free_mem_ptr = (unsigned long)&_end; +- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; +- output = (unsigned char *) output_addr; ++ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE); ++ kernel_end = output + SZ__bss_start; ++ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end); + + #ifdef CONFIG_BLK_DEV_INITRD + /* + * Move the initrd right behind the end of the decompressed +- * kernel image. ++ * kernel image. This also prevents initrd corruption caused by ++ * bss clearing since kernel_end will always be located behind the ++ * current bss section.. + */ +- if (INITRD_START && INITRD_SIZE && +- INITRD_START < (unsigned long) output + SZ__bss_start) { +- check_ipl_parmblock(output + SZ__bss_start, +- INITRD_START + INITRD_SIZE); +- memmove(output + SZ__bss_start, +- (void *) INITRD_START, INITRD_SIZE); +- INITRD_START = (unsigned long) output + SZ__bss_start; ++ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) { ++ check_ipl_parmblock(kernel_end, INITRD_SIZE); ++ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE); ++ INITRD_START = (unsigned long) kernel_end; + } + #endif + ++ /* ++ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be ++ * initialized afterwards since they reside in bss. ++ */ ++ memset(&_bss, 0, &_ebss - &_bss); ++ free_mem_ptr = (unsigned long) &_end; ++ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; ++ + puts("Uncompressing Linux... 
"); + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); + puts("Ok, booting the kernel.\n"); +diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h +index 5c7381c5ad7f..c8d837f0fbbc 100644 +--- a/arch/s390/include/asm/uaccess.h ++++ b/arch/s390/include/asm/uaccess.h +@@ -150,7 +150,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from, + " jg 2b\n" \ + ".popsection\n" \ + EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ +- : "=d" (__rc), "=Q" (*(to)) \ ++ : "=d" (__rc), "+Q" (*(to)) \ + : "d" (size), "Q" (*(from)), \ + "d" (__reg0), "K" (-EFAULT) \ + : "cc"); \ +diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c +index 4f5fa8d65fe9..144367c0c28f 100644 +--- a/drivers/gpu/drm/ttm/ttm_object.c ++++ b/drivers/gpu/drm/ttm/ttm_object.c +@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, + if (unlikely(ret != 0)) + goto out_err0; + +- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); ++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); + if (unlikely(ret != 0)) + goto out_err1; + +@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists); + + int ttm_ref_object_add(struct ttm_object_file *tfile, + struct ttm_base_object *base, +- enum ttm_ref_type ref_type, bool *existed) ++ enum ttm_ref_type ref_type, bool *existed, ++ bool require_existed) + { + struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct ttm_ref_object *ref; +@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, + } + + rcu_read_unlock(); ++ if (require_existed) ++ return -EPERM; ++ + ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), + false, false); + if (unlikely(ret != 0)) +@@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, + prime = (struct ttm_prime_object *) dma_buf->priv; + base = &prime->base; + *handle = base->hash.key; +- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); ++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); + + dma_buf_put(dma_buf); + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +index 8e689b439890..6c649f7b5929 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +@@ -539,7 +539,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman, + struct vmw_fence_obj **p_fence) + { + struct vmw_fence_obj *fence; +- int ret; ++ int ret; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (unlikely(fence == NULL)) +@@ -702,6 +702,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman) + } + + ++/** ++ * vmw_fence_obj_lookup - Look up a user-space fence object ++ * ++ * @tfile: A struct ttm_object_file identifying the caller. ++ * @handle: A handle identifying the fence object. ++ * @return: A struct vmw_user_fence base ttm object on success or ++ * an error pointer on failure. ++ * ++ * The fence object is looked up and type-checked. The caller needs ++ * to have opened the fence object first, but since that happens on ++ * creation and fence objects aren't shareable, that's not an ++ * issue currently. 
++ */ ++static struct ttm_base_object * ++vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle) ++{ ++ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle); ++ ++ if (!base) { ++ pr_err("Invalid fence object handle 0x%08lx.\n", ++ (unsigned long)handle); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ if (base->refcount_release != vmw_user_fence_base_release) { ++ pr_err("Invalid fence object handle 0x%08lx.\n", ++ (unsigned long)handle); ++ ttm_base_object_unref(&base); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ return base; ++} ++ ++ + int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { +@@ -727,13 +762,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, + arg->kernel_cookie = jiffies + wait_timeout; + } + +- base = ttm_base_object_lookup(tfile, arg->handle); +- if (unlikely(base == NULL)) { +- printk(KERN_ERR "Wait invalid fence object handle " +- "0x%08lx.\n", +- (unsigned long)arg->handle); +- return -EINVAL; +- } ++ base = vmw_fence_obj_lookup(tfile, arg->handle); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); + + fence = &(container_of(base, struct vmw_user_fence, base)->fence); + +@@ -772,13 +803,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_private *dev_priv = vmw_priv(dev); + +- base = ttm_base_object_lookup(tfile, arg->handle); +- if (unlikely(base == NULL)) { +- printk(KERN_ERR "Fence signaled invalid fence object handle " +- "0x%08lx.\n", +- (unsigned long)arg->handle); +- return -EINVAL; +- } ++ base = vmw_fence_obj_lookup(tfile, arg->handle); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); + + fence = &(container_of(base, struct vmw_user_fence, base)->fence); + fman = fman_from_fence(fence); +@@ -1093,6 +1120,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, + (struct drm_vmw_fence_event_arg *) data; + struct vmw_fence_obj *fence = NULL; + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); ++ struct ttm_object_file *tfile = vmw_fp->tfile; + struct drm_vmw_fence_rep __user *user_fence_rep = + (struct drm_vmw_fence_rep __user *)(unsigned long) + arg->fence_rep; +@@ -1106,24 +1134,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, + */ + if (arg->handle) { + struct ttm_base_object *base = +- ttm_base_object_lookup_for_ref(dev_priv->tdev, +- arg->handle); +- +- if (unlikely(base == NULL)) { +- DRM_ERROR("Fence event invalid fence object handle " +- "0x%08lx.\n", +- (unsigned long)arg->handle); +- return -EINVAL; +- } ++ vmw_fence_obj_lookup(tfile, arg->handle); ++ ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ + fence = &(container_of(base, struct vmw_user_fence, + base)->fence); + (void) vmw_fence_obj_reference(fence); + + if (user_fence_rep != NULL) { +- bool existed; +- + ret = ttm_ref_object_add(vmw_fp->tfile, base, +- TTM_REF_USAGE, &existed); ++ TTM_REF_USAGE, NULL, false); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to reference a fence " + "object.\n"); +@@ -1166,8 +1188,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, + return 0; + out_no_create: + if (user_fence_rep != NULL) +- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, +- handle, TTM_REF_USAGE); ++ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); + out_no_ref_obj: + vmw_fence_obj_unreference(&fence); + return ret; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +index b8c6a03c8c54..5ec24fd801cd 100644 +--- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, + param->value = dev_priv->has_dx; + break; + default: +- DRM_ERROR("Illegal vmwgfx get param request: %d\n", +- param->param); + return -EINVAL; + } + +@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, + bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); + +- if (unlikely(arg->pad64 != 0)) { ++ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) { + DRM_ERROR("Illegal GET_3D_CAP argument.\n"); + return -EINVAL; + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +index e57667ca7557..dbca128a9aa6 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +@@ -591,7 +591,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, + return ret; + + ret = ttm_ref_object_add(tfile, &user_bo->prime.base, +- TTM_REF_SYNCCPU_WRITE, &existed); ++ TTM_REF_SYNCCPU_WRITE, &existed, false); + if (ret != 0 || existed) + ttm_bo_synccpu_write_release(&user_bo->dma.base); + +@@ -775,7 +775,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, + + *handle = user_bo->prime.base.hash.key; + return ttm_ref_object_add(tfile, &user_bo->prime.base, +- TTM_REF_USAGE, NULL); ++ TTM_REF_USAGE, NULL, false); + } + + /* +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +index 7d620e82e000..c9c04ccccdd9 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +@@ -715,11 +715,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, + 128; + + num_sizes = 0; +- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) ++ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { ++ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) ++ return -EINVAL; + num_sizes += req->mip_levels[i]; ++ } + +- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * +- DRM_VMW_MAX_MIP_LEVELS) ++ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS || ++ num_sizes == 0) + return -EINVAL; + + size = vmw_user_surface_size + 128 + +@@ -904,17 +907,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, + uint32_t handle; + struct ttm_base_object *base; + int ret; ++ bool require_exist = false; + + if (handle_type == DRM_VMW_HANDLE_PRIME) { + ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); + if (unlikely(ret != 0)) + return ret; + } else { +- if (unlikely(drm_is_render_client(file_priv))) { +- DRM_ERROR("Render client refused legacy " +- "surface reference.\n"); +- return -EACCES; +- } ++ if (unlikely(drm_is_render_client(file_priv))) ++ require_exist = true; ++ + if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { + DRM_ERROR("Locked master refused legacy " + "surface reference.\n"); +@@ -942,17 +944,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, + + /* + * Make sure the surface creator has the same +- * authenticating master. ++ * authenticating master, or is already registered with us. 
+ */ + if (drm_is_primary_client(file_priv) && +- user_srf->master != file_priv->master) { +- DRM_ERROR("Trying to reference surface outside of" +- " master domain.\n"); +- ret = -EACCES; +- goto out_bad_resource; +- } ++ user_srf->master != file_priv->master) ++ require_exist = true; + +- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); ++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, ++ require_exist); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not add a reference to a surface.\n"); + goto out_bad_resource; +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c +index acb3b303d800..90841abd3ce4 100644 +--- a/drivers/iio/gyro/bmg160_core.c ++++ b/drivers/iio/gyro/bmg160_core.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include "bmg160.h" + + #define BMG160_IRQ_NAME "bmg160_event" +@@ -53,6 +54,9 @@ + #define BMG160_NO_FILTER 0 + #define BMG160_DEF_BW 100 + ++#define BMG160_GYRO_REG_RESET 0x14 ++#define BMG160_GYRO_RESET_VAL 0xb6 ++ + #define BMG160_REG_INT_MAP_0 0x17 + #define BMG160_INT_MAP_0_BIT_ANY BIT(1) + +@@ -186,6 +190,14 @@ static int bmg160_chip_init(struct bmg160_data *data) + int ret; + unsigned int val; + ++ /* ++ * Reset chip to get it in a known good state. A delay of 30ms after ++ * reset is required according to the datasheet. ++ */ ++ regmap_write(data->regmap, BMG160_GYRO_REG_RESET, ++ BMG160_GYRO_RESET_VAL); ++ usleep_range(30000, 30700); ++ + ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val); + if (ret < 0) { + dev_err(data->dev, "Error reading reg_chip_id\n"); +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +index 3f2a3d611e4b..9c6357c03905 100644 +--- a/drivers/staging/android/ashmem.c ++++ b/drivers/staging/android/ashmem.c +@@ -392,6 +392,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) + ret = PTR_ERR(vmfile); + goto out; + } ++ vmfile->f_mode |= FMODE_LSEEK; + asma->file = vmfile; + } + get_file(asma->file); +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 2fa754c5fd62..6cb5c4b30e78 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -952,6 +952,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, + return -EINVAL; + } + ++ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ ++ if (tcon) ++ tcon->tid = 0; ++ + rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); + if (rc) { + kfree(unc_path); +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c +index b803213d1307..39c75a86c67f 100644 +--- a/fs/sysfs/file.c ++++ b/fs/sysfs/file.c +@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf, + { + const struct sysfs_ops *ops = sysfs_file_ops(of->kn); + struct kobject *kobj = of->kn->parent->priv; +- size_t len; ++ ssize_t len; + + /* + * If buf != of->prealloc_buf, we don't know how +@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf, + if (WARN_ON_ONCE(buf != of->prealloc_buf)) + return 0; + len = ops->show(kobj, of->kn->priv, buf); ++ if (len < 0) ++ return len; + if (pos) { + if (len <= pos) + return 0; + len -= pos; + memmove(buf, buf + pos, len); + } +- return min(count, len); ++ return min_t(ssize_t, count, len); + } + + /* kernfs write callback for regular sysfs files */ +diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h +index ed953f98f0e1..1487011fe057 100644 +--- a/include/drm/ttm/ttm_object.h ++++ b/include/drm/ttm/ttm_object.h +@@ -229,6 +229,8 @@ extern void 
ttm_base_object_unref(struct ttm_base_object **p_base); + * @ref_type: The type of reference. + * @existed: Upon completion, indicates that an identical reference object + * already existed, and the refcount was upped on that object instead. ++ * @require_existed: Fail with -EPERM if an identical ref object didn't ++ * already exist. + * + * Checks that the base object is shareable and adds a ref object to it. + * +@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); + */ + extern int ttm_ref_object_add(struct ttm_object_file *tfile, + struct ttm_base_object *base, +- enum ttm_ref_type ref_type, bool *existed); ++ enum ttm_ref_type ref_type, bool *existed, ++ bool require_existed); + + extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, + struct ttm_base_object *base); +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index a46c40bfb5f6..c7e8ed99c953 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -151,11 +151,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task) + + WARN_ON(!task->ptrace || task->parent != current); + ++ /* ++ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely. ++ * Recheck state under the lock to close this race. ++ */ + spin_lock_irq(&task->sighand->siglock); +- if (__fatal_signal_pending(task)) +- wake_up_state(task, __TASK_TRACED); +- else +- task->state = TASK_TRACED; ++ if (task->state == __TASK_TRACED) { ++ if (__fatal_signal_pending(task)) ++ wake_up_state(task, __TASK_TRACED); ++ else ++ task->state = TASK_TRACED; ++ } + spin_unlock_irq(&task->sighand->siglock); + } + +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index acbb0e73d3a2..7d7f99b0db47 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -4875,9 +4875,9 @@ static __init int test_ringbuffer(void) + rb_data[cpu].cnt = cpu; + rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], + "rbtester/%d", cpu); +- if (WARN_ON(!rb_threads[cpu])) { ++ if (WARN_ON(IS_ERR(rb_threads[cpu]))) { + pr_cont("FAILED\n"); +- ret = -1; ++ ret = PTR_ERR(rb_threads[cpu]); + goto out_free; + } + +@@ -4887,9 +4887,9 @@ static __init int test_ringbuffer(void) + + /* Now create the rb hammer! 
*/ + rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); +- if (WARN_ON(!rb_hammer)) { ++ if (WARN_ON(IS_ERR(rb_hammer))) { + pr_cont("FAILED\n"); +- ret = -1; ++ ret = PTR_ERR(rb_hammer); + goto out_free; + } + +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index a4217fe60dff..e09b1a0e2cfe 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -1492,7 +1492,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, + COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, + compat_ulong_t, maxnode) + { +- long err = 0; + unsigned long __user *nm = NULL; + unsigned long nr_bits, alloc_size; + DECLARE_BITMAP(bm, MAX_NUMNODES); +@@ -1501,14 +1500,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; + + if (nmask) { +- err = compat_get_bitmap(bm, nmask, nr_bits); ++ if (compat_get_bitmap(bm, nmask, nr_bits)) ++ return -EFAULT; + nm = compat_alloc_user_space(alloc_size); +- err |= copy_to_user(nm, bm, alloc_size); ++ if (copy_to_user(nm, bm, alloc_size)) ++ return -EFAULT; + } + +- if (err) +- return -EFAULT; +- + return sys_set_mempolicy(mode, nm, nr_bits+1); + } + +@@ -1516,7 +1514,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, + compat_ulong_t, mode, compat_ulong_t __user *, nmask, + compat_ulong_t, maxnode, compat_ulong_t, flags) + { +- long err = 0; + unsigned long __user *nm = NULL; + unsigned long nr_bits, alloc_size; + nodemask_t bm; +@@ -1525,14 +1522,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; + + if (nmask) { +- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); ++ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) ++ return -EFAULT; + nm = compat_alloc_user_space(alloc_size); +- err |= copy_to_user(nm, nodes_addr(bm), alloc_size); ++ if (copy_to_user(nm, nodes_addr(bm), alloc_size)) ++ return -EFAULT; + } + +- if (err) +- return -EFAULT; +- + return sys_mbind(start, len, mode, nm, nr_bits+1, flags); + } + diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.61-62.patch b/patch/kernel/mvebu64-default/03-patch-4.4.61-62.patch new file mode 100644 index 000000000..ee4be8e1f --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.61-62.patch @@ -0,0 +1,839 @@ +diff --git a/Makefile b/Makefile +index ef5045b8201d..0309acc34472 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 61 ++SUBLEVEL = 62 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig +index 75bfca69e418..d5cfa937d622 100644 +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -9,6 +9,7 @@ config MIPS + select HAVE_CONTEXT_TRACKING + select HAVE_GENERIC_DMA_COHERENT + select HAVE_IDE ++ select HAVE_IRQ_EXIT_ON_IRQ_STACK + select HAVE_OPROFILE + select HAVE_PERF_EVENTS + select PERF_USE_VMALLOC +diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h +index 15e0fecbc300..ebb9efb02502 100644 +--- a/arch/mips/include/asm/irq.h ++++ b/arch/mips/include/asm/irq.h +@@ -17,6 +17,18 @@ + + #include + ++#define IRQ_STACK_SIZE THREAD_SIZE ++ ++extern void *irq_stack[NR_CPUS]; ++ ++static inline bool on_irq_stack(int cpu, unsigned long sp) ++{ ++ unsigned long low = (unsigned long)irq_stack[cpu]; ++ unsigned long high = low + IRQ_STACK_SIZE; ++ ++ return (low <= sp && sp <= high); ++} ++ + #ifdef CONFIG_I8259 + static inline int irq_canonicalize(int irq) + { +diff --git 
a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h +index a71da576883c..5347f130f536 100644 +--- a/arch/mips/include/asm/stackframe.h ++++ b/arch/mips/include/asm/stackframe.h +@@ -216,12 +216,19 @@ + LONG_S $25, PT_R25(sp) + LONG_S $28, PT_R28(sp) + LONG_S $31, PT_R31(sp) ++ ++ /* Set thread_info if we're coming from user mode */ ++ mfc0 k0, CP0_STATUS ++ sll k0, 3 /* extract cu0 bit */ ++ bltz k0, 9f ++ + ori $28, sp, _THREAD_MASK + xori $28, _THREAD_MASK + #ifdef CONFIG_CPU_CAVIUM_OCTEON + .set mips64 + pref 0, 0($28) /* Prefetch the current pointer */ + #endif ++9: + .set pop + .endm + +diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c +index 154e2039ea5e..ec053ce7bb38 100644 +--- a/arch/mips/kernel/asm-offsets.c ++++ b/arch/mips/kernel/asm-offsets.c +@@ -101,6 +101,7 @@ void output_thread_info_defines(void) + OFFSET(TI_REGS, thread_info, regs); + DEFINE(_THREAD_SIZE, THREAD_SIZE); + DEFINE(_THREAD_MASK, THREAD_MASK); ++ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); + BLANK(); + } + +diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S +index baa7b6fc0a60..619e30e2c4f0 100644 +--- a/arch/mips/kernel/genex.S ++++ b/arch/mips/kernel/genex.S +@@ -188,9 +188,44 @@ NESTED(handle_int, PT_SIZE, sp) + + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) +- PTR_LA ra, ret_from_irq +- PTR_LA v0, plat_irq_dispatch +- jr v0 ++ ++ /* ++ * SAVE_ALL ensures we are using a valid kernel stack for the thread. ++ * Check if we are already using the IRQ stack. ++ */ ++ move s1, sp # Preserve the sp ++ ++ /* Get IRQ stack for this CPU */ ++ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG ++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) ++ lui k1, %hi(irq_stack) ++#else ++ lui k1, %highest(irq_stack) ++ daddiu k1, %higher(irq_stack) ++ dsll k1, 16 ++ daddiu k1, %hi(irq_stack) ++ dsll k1, 16 ++#endif ++ LONG_SRL k0, SMP_CPUID_PTRSHIFT ++ LONG_ADDU k1, k0 ++ LONG_L t0, %lo(irq_stack)(k1) ++ ++ # Check if already on IRQ stack ++ PTR_LI t1, ~(_THREAD_SIZE-1) ++ and t1, t1, sp ++ beq t0, t1, 2f ++ ++ /* Switch to IRQ stack */ ++ li t1, _IRQ_STACK_SIZE ++ PTR_ADD sp, t0, t1 ++ ++2: ++ jal plat_irq_dispatch ++ ++ /* Restore sp */ ++ move sp, s1 ++ ++ j ret_from_irq + #ifdef CONFIG_CPU_MICROMIPS + nop + #endif +@@ -263,8 +298,44 @@ NESTED(except_vec_vi_handler, 0, sp) + + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) +- PTR_LA ra, ret_from_irq +- jr v0 ++ ++ /* ++ * SAVE_ALL ensures we are using a valid kernel stack for the thread. ++ * Check if we are already using the IRQ stack. 
++ */ ++ move s1, sp # Preserve the sp ++ ++ /* Get IRQ stack for this CPU */ ++ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG ++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) ++ lui k1, %hi(irq_stack) ++#else ++ lui k1, %highest(irq_stack) ++ daddiu k1, %higher(irq_stack) ++ dsll k1, 16 ++ daddiu k1, %hi(irq_stack) ++ dsll k1, 16 ++#endif ++ LONG_SRL k0, SMP_CPUID_PTRSHIFT ++ LONG_ADDU k1, k0 ++ LONG_L t0, %lo(irq_stack)(k1) ++ ++ # Check if already on IRQ stack ++ PTR_LI t1, ~(_THREAD_SIZE-1) ++ and t1, t1, sp ++ beq t0, t1, 2f ++ ++ /* Switch to IRQ stack */ ++ li t1, _IRQ_STACK_SIZE ++ PTR_ADD sp, t0, t1 ++ ++2: ++ jalr v0 ++ ++ /* Restore sp */ ++ move sp, s1 ++ ++ j ret_from_irq + END(except_vec_vi_handler) + + /* +diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c +index 8eb5af805964..dc1180a8bfa1 100644 +--- a/arch/mips/kernel/irq.c ++++ b/arch/mips/kernel/irq.c +@@ -25,6 +25,8 @@ + #include + #include + ++void *irq_stack[NR_CPUS]; ++ + /* + * 'what should we do if we get a hw irq event on an illegal vector'. + * each architecture has to answer this themselves. +@@ -55,6 +57,15 @@ void __init init_IRQ(void) + irq_set_noprobe(i); + + arch_init_irq(); ++ ++ for_each_possible_cpu(i) { ++ int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE; ++ void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages); ++ ++ irq_stack[i] = s; ++ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i, ++ irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE); ++ } + } + + #ifdef CONFIG_DEBUG_STACKOVERFLOW +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c +index fc537d1b649d..8c26ecac930d 100644 +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -32,6 +32,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -552,7 +553,19 @@ EXPORT_SYMBOL(unwind_stack_by_address); + unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, + unsigned long pc, unsigned long *ra) + { +- unsigned long stack_page = (unsigned long)task_stack_page(task); ++ unsigned long stack_page = 0; ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ if (on_irq_stack(cpu, *sp)) { ++ stack_page = (unsigned long)irq_stack[cpu]; ++ break; ++ } ++ } ++ ++ if (!stack_page) ++ stack_page = (unsigned long)task_stack_page(task); ++ + return unwind_stack_by_address(stack_page, sp, pc, ra); + } + #endif +diff --git a/block/blk-mq.c b/block/blk-mq.c +index d8d63c38bf29..0d1af3e44efb 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -1470,7 +1470,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, + INIT_LIST_HEAD(&tags->page_list); + + tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *), +- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, ++ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, + set->numa_node); + if (!tags->rqs) { + blk_mq_free_tags(tags); +@@ -1496,7 +1496,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, + + do { + page = alloc_pages_node(set->numa_node, +- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, ++ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, + this_order); + if (page) + break; +@@ -1517,7 +1517,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, + * Allow kmemleak to scan these pages as they contain pointers + * to additional allocations like via ops->init_request(). 
+ */ +- kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL); ++ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); + entries_per_page = order_to_size(this_order) / rq_size; + to_do = min(entries_per_page, set->queue_depth - i); + left -= to_do * rq_size; +diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c +index 69d4a1326fee..53e61459c69f 100644 +--- a/drivers/crypto/caam/ctrl.c ++++ b/drivers/crypto/caam/ctrl.c +@@ -278,7 +278,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) + /* Try to run it through DECO0 */ + ret = run_descriptor_deco0(ctrldev, desc, &status); + +- if (ret || status) { ++ if (ret || ++ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) { + dev_err(ctrldev, + "Failed to deinstantiate RNG4 SH%d\n", + sh_idx); +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index fb9f647bb5cd..5044f2257e89 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -1159,7 +1159,7 @@ struct intel_gen6_power_mgmt { + struct intel_rps_client semaphores, mmioflips; + + /* manual wa residency calculations */ +- struct intel_rps_ei up_ei, down_ei; ++ struct intel_rps_ei ei; + + /* + * Protects RPS/RC6 register access and PCU communication. +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 0f42a2782afc..b7b0a38acd67 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -994,68 +994,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv, + ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); + } + +-static bool vlv_c0_above(struct drm_i915_private *dev_priv, +- const struct intel_rps_ei *old, +- const struct intel_rps_ei *now, +- int threshold) +-{ +- u64 time, c0; +- unsigned int mul = 100; +- +- if (old->cz_clock == 0) +- return false; +- +- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) +- mul <<= 8; +- +- time = now->cz_clock - old->cz_clock; +- time *= threshold * dev_priv->czclk_freq; +- +- /* Workload can be split between render + media, e.g. SwapBuffers +- * being blitted in X after being rendered in mesa. To account for +- * this we need to combine both engines into our activity counter. 
+- */ +- c0 = now->render_c0 - old->render_c0; +- c0 += now->media_c0 - old->media_c0; +- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; +- +- return c0 >= time; +-} +- + void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) + { +- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); +- dev_priv->rps.up_ei = dev_priv->rps.down_ei; ++ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); + } + + static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) + { ++ const struct intel_rps_ei *prev = &dev_priv->rps.ei; + struct intel_rps_ei now; + u32 events = 0; + +- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) ++ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) + return 0; + + vlv_c0_read(dev_priv, &now); + if (now.cz_clock == 0) + return 0; + +- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { +- if (!vlv_c0_above(dev_priv, +- &dev_priv->rps.down_ei, &now, +- dev_priv->rps.down_threshold)) +- events |= GEN6_PM_RP_DOWN_THRESHOLD; +- dev_priv->rps.down_ei = now; +- } ++ if (prev->cz_clock) { ++ u64 time, c0; ++ unsigned int mul; + +- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { +- if (vlv_c0_above(dev_priv, +- &dev_priv->rps.up_ei, &now, +- dev_priv->rps.up_threshold)) +- events |= GEN6_PM_RP_UP_THRESHOLD; +- dev_priv->rps.up_ei = now; ++ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */ ++ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) ++ mul <<= 8; ++ ++ time = now.cz_clock - prev->cz_clock; ++ time *= dev_priv->czclk_freq; ++ ++ /* Workload can be split between render + media, ++ * e.g. SwapBuffers being blitted in X after being rendered in ++ * mesa. To account for this we need to combine both engines ++ * into our activity counter. ++ */ ++ c0 = now.render_c0 - prev->render_c0; ++ c0 += now.media_c0 - prev->media_c0; ++ c0 *= mul; ++ ++ if (c0 > time * dev_priv->rps.up_threshold) ++ events = GEN6_PM_RP_UP_THRESHOLD; ++ else if (c0 < time * dev_priv->rps.down_threshold) ++ events = GEN6_PM_RP_DOWN_THRESHOLD; + } + ++ dev_priv->rps.ei = now; + return events; + } + +@@ -4390,7 +4373,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) + /* Let's track the enabled rps events */ + if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) + /* WaGsvRC0ResidencyMethod:vlv */ +- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; ++ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; + else + dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index e7c18519274a..fd4690ed93c0 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -4376,6 +4376,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) + break; + } + ++ /* When byt can survive without system hang with dynamic ++ * sw freq adjustments, this restriction can be lifted. 
++ */ ++ if (IS_VALLEYVIEW(dev_priv)) ++ goto skip_hw_write; ++ + I915_WRITE(GEN6_RP_UP_EI, + GT_INTERVAL_FROM_US(dev_priv, ei_up)); + I915_WRITE(GEN6_RP_UP_THRESHOLD, +@@ -4394,6 +4400,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + ++skip_hw_write: + dev_priv->rps.power = new_power; + dev_priv->rps.up_threshold = threshold_up; + dev_priv->rps.down_threshold = threshold_down; +@@ -4404,8 +4411,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) + { + u32 mask = 0; + ++ /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */ + if (val > dev_priv->rps.min_freq_softlimit) +- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; ++ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; + if (val < dev_priv->rps.max_freq_softlimit) + mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; + +@@ -4509,7 +4517,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) + { + mutex_lock(&dev_priv->rps.hw_lock); + if (dev_priv->rps.enabled) { +- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) ++ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) + gen6_rps_reset_ei(dev_priv); + I915_WRITE(GEN6_PMINTRMSK, + gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); +diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c +index c0720c1ee4c9..5abab8800891 100644 +--- a/drivers/mtd/bcm47xxpart.c ++++ b/drivers/mtd/bcm47xxpart.c +@@ -225,12 +225,10 @@ static int bcm47xxpart_parse(struct mtd_info *master, + + last_trx_part = curr_part - 1; + +- /* +- * We have whole TRX scanned, skip to the next part. Use +- * roundown (not roundup), as the loop will increase +- * offset in next step. 
+- */ +- offset = rounddown(offset + trx->length, blocksize); ++ /* Jump to the end of TRX */ ++ offset = roundup(offset + trx->length, blocksize); ++ /* Next loop iteration will increase the offset */ ++ offset -= blocksize; + continue; + } + +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c +index 7af870a3c549..855c43d8f7e0 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool; + + static const char ibmveth_driver_name[] = "ibmveth"; + static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver"; +-#define ibmveth_driver_version "1.05" ++#define ibmveth_driver_version "1.06" + + MODULE_AUTHOR("Santiago Leon "); + MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver"); +@@ -137,6 +137,11 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) + return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK; + } + ++static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter) ++{ ++ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT; ++} ++ + static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) + { + return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); +@@ -1172,6 +1177,45 @@ map_failed: + goto retry_bounce; + } + ++static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt) ++{ ++ int offset = 0; ++ ++ /* only TCP packets will be aggregated */ ++ if (skb->protocol == htons(ETH_P_IP)) { ++ struct iphdr *iph = (struct iphdr *)skb->data; ++ ++ if (iph->protocol == IPPROTO_TCP) { ++ offset = iph->ihl * 4; ++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; ++ } else { ++ return; ++ } ++ } else if (skb->protocol == htons(ETH_P_IPV6)) { ++ struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data; ++ ++ if (iph6->nexthdr == IPPROTO_TCP) { ++ offset = sizeof(struct ipv6hdr); ++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; ++ } else { ++ return; ++ } ++ } else { ++ return; ++ } ++ /* if mss is not set through Large Packet bit/mss in rx buffer, ++ * expect that the mss will be written to the tcp header checksum. 
++ */ ++ if (lrg_pkt) { ++ skb_shinfo(skb)->gso_size = mss; ++ } else if (offset) { ++ struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset); ++ ++ skb_shinfo(skb)->gso_size = ntohs(tcph->check); ++ tcph->check = 0; ++ } ++} ++ + static int ibmveth_poll(struct napi_struct *napi, int budget) + { + struct ibmveth_adapter *adapter = +@@ -1180,6 +1224,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) + int frames_processed = 0; + unsigned long lpar_rc; + struct iphdr *iph; ++ u16 mss = 0; + + restart_poll: + while (frames_processed < budget) { +@@ -1197,9 +1242,21 @@ restart_poll: + int length = ibmveth_rxq_frame_length(adapter); + int offset = ibmveth_rxq_frame_offset(adapter); + int csum_good = ibmveth_rxq_csum_good(adapter); ++ int lrg_pkt = ibmveth_rxq_large_packet(adapter); + + skb = ibmveth_rxq_get_buffer(adapter); + ++ /* if the large packet bit is set in the rx queue ++ * descriptor, the mss will be written by PHYP eight ++ * bytes from the start of the rx buffer, which is ++ * skb->data at this stage ++ */ ++ if (lrg_pkt) { ++ __be64 *rxmss = (__be64 *)(skb->data + 8); ++ ++ mss = (u16)be64_to_cpu(*rxmss); ++ } ++ + new_skb = NULL; + if (length < rx_copybreak) + new_skb = netdev_alloc_skb(netdev, length); +@@ -1233,11 +1290,15 @@ restart_poll: + if (iph->check == 0xffff) { + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); +- adapter->rx_large_packets++; + } + } + } + ++ if (length > netdev->mtu + ETH_HLEN) { ++ ibmveth_rx_mss_helper(skb, mss, lrg_pkt); ++ adapter->rx_large_packets++; ++ } ++ + napi_gro_receive(napi, skb); /* send it up */ + + netdev->stats.rx_packets++; +diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h +index 4eade67fe30c..7acda04d034e 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.h ++++ b/drivers/net/ethernet/ibm/ibmveth.h +@@ -209,6 +209,7 @@ struct ibmveth_rx_q_entry { + #define IBMVETH_RXQ_TOGGLE 0x80000000 + #define IBMVETH_RXQ_TOGGLE_SHIFT 31 + #define IBMVETH_RXQ_VALID 0x40000000 ++#define IBMVETH_RXQ_LRG_PKT 0x04000000 + #define IBMVETH_RXQ_NO_CSUM 0x02000000 + #define IBMVETH_RXQ_CSUM_GOOD 0x01000000 + #define IBMVETH_RXQ_OFF_MASK 0x0000FFFF +diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c +index 3348e646db70..6eba58044456 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/cq.c ++++ b/drivers/net/ethernet/mellanox/mlx4/cq.c +@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) + { + struct mlx4_cq *cq; + ++ rcu_read_lock(); + cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, + cqn & (dev->caps.num_cqs - 1)); ++ rcu_read_unlock(); ++ + if (!cq) { + mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); + return; + } + ++ /* Acessing the CQ outside of rcu_read_lock is safe, because ++ * the CQ is freed only after interrupt handling is completed. 
++ */ + ++cq->arm_sn; + + cq->comp(cq); +@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; + struct mlx4_cq *cq; + +- spin_lock(&cq_table->lock); +- ++ rcu_read_lock(); + cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); +- if (cq) +- atomic_inc(&cq->refcount); +- +- spin_unlock(&cq_table->lock); ++ rcu_read_unlock(); + + if (!cq) { +- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); ++ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn); + return; + } + ++ /* Acessing the CQ outside of rcu_read_lock is safe, because ++ * the CQ is freed only after interrupt handling is completed. ++ */ + cq->event(cq, event_type); +- +- if (atomic_dec_and_test(&cq->refcount)) +- complete(&cq->free); + } + + static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, +@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, + if (err) + return err; + +- spin_lock_irq(&cq_table->lock); ++ spin_lock(&cq_table->lock); + err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); +- spin_unlock_irq(&cq_table->lock); ++ spin_unlock(&cq_table->lock); + if (err) + goto err_icm; + +@@ -347,9 +349,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, + return 0; + + err_radix: +- spin_lock_irq(&cq_table->lock); ++ spin_lock(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); +- spin_unlock_irq(&cq_table->lock); ++ spin_unlock(&cq_table->lock); + + err_icm: + mlx4_cq_free_icm(dev, cq->cqn); +@@ -368,15 +370,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) + if (err) + mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); + ++ spin_lock(&cq_table->lock); ++ radix_tree_delete(&cq_table->tree, cq->cqn); ++ spin_unlock(&cq_table->lock); ++ + synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); + if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != + priv->eq_table.eq[MLX4_EQ_ASYNC].irq) + synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); + +- spin_lock_irq(&cq_table->lock); +- radix_tree_delete(&cq_table->tree, cq->cqn); +- spin_unlock_irq(&cq_table->lock); +- + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +index 28a4b34310b2..82bf1b539d87 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +@@ -439,8 +439,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) + ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn; + + ring->stride = stride; +- if (ring->stride <= TXBB_SIZE) ++ if (ring->stride <= TXBB_SIZE) { ++ /* Stamp first unused send wqe */ ++ __be32 *ptr = (__be32 *)ring->buf; ++ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT); ++ *ptr = stamp; ++ /* Move pointer to start of rx section */ + ring->buf += TXBB_SIZE; ++ } + + ring->log_stride = ffs(ring->stride) - 1; + ring->buf_size = ring->size * ring->stride; +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +index d314d96dcb1c..d1fc7fa87b05 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +@@ -2955,6 +2955,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + put_res(dev, slave, srqn, RES_SRQ); + qp->srq = srq; + } ++ ++ /* Save param3 for dynamic changes 
from VST back to VGT */ ++ qp->param3 = qpc->param3; + put_res(dev, slave, rcqn, RES_CQ); + put_res(dev, slave, mtt_base, RES_MTT); + res_end_move(dev, slave, RES_QP, qpn); +@@ -3747,7 +3750,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + u8 orig_sched_queue; +- __be32 orig_param3 = qpc->param3; + u8 orig_vlan_control = qpc->pri_path.vlan_control; + u8 orig_fvl_rx = qpc->pri_path.fvl_rx; + u8 orig_pri_path_fl = qpc->pri_path.fl; +@@ -3789,7 +3791,6 @@ out: + */ + if (!err) { + qp->sched_queue = orig_sched_queue; +- qp->param3 = orig_param3; + qp->vlan_control = orig_vlan_control; + qp->fvl_rx = orig_fvl_rx; + qp->pri_path_fl = orig_pri_path_fl; +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 9e62c93af96e..7c2d87befb51 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -2602,8 +2602,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, + if (ret < 0) + return ret; + +- /* The port state is unknown until the reset completes. */ +- if (!(portstatus & USB_PORT_STAT_RESET)) ++ /* ++ * The port state is unknown until the reset completes. ++ * ++ * On top of that, some chips may require additional time ++ * to re-establish a connection after the reset is complete, ++ * so also wait for the connection to be re-established. ++ */ ++ if (!(portstatus & USB_PORT_STAT_RESET) && ++ (portstatus & USB_PORT_STAT_CONNECTION)) + break; + + /* switch to the long delay after two short delay failures */ +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 210ff64857e1..ec7a50f98f57 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -235,6 +235,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + int status) + { + struct dwc3 *dwc = dep->dwc; ++ unsigned int unmap_after_complete = false; + int i; + + if (req->queued) { +@@ -259,11 +260,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + if (req->request.status == -EINPROGRESS) + req->request.status = status; + +- if (dwc->ep0_bounced && dep->number <= 1) ++ /* ++ * NOTICE we don't want to unmap before calling ->complete() if we're ++ * dealing with a bounced ep0 request. If we unmap it here, we would end ++ * up overwritting the contents of req->buf and this could confuse the ++ * gadget driver. 
++ */ ++ if (dwc->ep0_bounced && dep->number <= 1) { + dwc->ep0_bounced = false; +- +- usb_gadget_unmap_request(&dwc->gadget, &req->request, +- req->direction); ++ unmap_after_complete = true; ++ } else { ++ usb_gadget_unmap_request(&dwc->gadget, ++ &req->request, req->direction); ++ } + + dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", + req, dep->name, req->request.actual, +@@ -282,6 +282,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + spin_unlock(&dwc->lock); + usb_gadget_giveback_request(&dep->endpoint, &req->request); + spin_lock(&dwc->lock); ++ ++ if (unmap_after_complete) ++ usb_gadget_unmap_request(&dwc->gadget, ++ &req->request, req->direction); + } + + int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 3975ac809934..d76800108ddb 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -4138,8 +4138,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) + goto out; + if (po->tp_version >= TPACKET_V3 && +- (int)(req->tp_block_size - +- BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) ++ req->tp_block_size <= ++ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) + goto out; + if (unlikely(req->tp_frame_size < po->tp_hdrlen + + po->tp_reserve)) diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.62-63.patch b/patch/kernel/mvebu64-default/03-patch-4.4.62-63.patch new file mode 100644 index 000000000..74a615c79 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.62-63.patch @@ -0,0 +1,1801 @@ +diff --git a/Makefile b/Makefile +index 0309acc34472..ec52973043f6 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 62 ++SUBLEVEL = 63 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig +index d5cfa937d622..8b0424abc84c 100644 +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -1413,7 +1413,7 @@ config CPU_MIPS32_R6 + select CPU_SUPPORTS_MSA + select GENERIC_CSUM + select HAVE_KVM +- select MIPS_O32_FP64_SUPPORT if 32BIT ++ select MIPS_O32_FP64_SUPPORT + help + Choose this option to build a kernel for release 6 or later of the + MIPS32 architecture. New MIPS processors, starting with the Warrior +@@ -1464,7 +1464,7 @@ config CPU_MIPS64_R6 + select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_MSA + select GENERIC_CSUM +- select MIPS_O32_FP64_SUPPORT if MIPS32_O32 ++ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32 + help + Choose this option to build a kernel for release 6 or later of the + MIPS64 architecture. 
New MIPS processors, starting with the Warrior +diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c +index 51cdc46a87e2..2e7f60c9fc5d 100644 +--- a/arch/mips/lantiq/irq.c ++++ b/arch/mips/lantiq/irq.c +@@ -269,11 +269,6 @@ static void ltq_hw5_irqdispatch(void) + DEFINE_HWx_IRQDISPATCH(5) + #endif + +-static void ltq_hw_irq_handler(struct irq_desc *desc) +-{ +- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); +-} +- + #ifdef CONFIG_MIPS_MT_SMP + void __init arch_init_ipiirq(int irq, struct irqaction *action) + { +@@ -318,19 +313,23 @@ static struct irqaction irq_call = { + asmlinkage void plat_irq_dispatch(void) + { + unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; +- int irq; +- +- if (!pending) { +- spurious_interrupt(); +- return; ++ unsigned int i; ++ ++ if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) { ++ do_IRQ(MIPS_CPU_TIMER_IRQ); ++ goto out; ++ } else { ++ for (i = 0; i < MAX_IM; i++) { ++ if (pending & (CAUSEF_IP2 << i)) { ++ ltq_hw_irqdispatch(i); ++ goto out; ++ } ++ } + } ++ pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status()); + +- pending >>= CAUSEB_IP; +- while (pending) { +- irq = fls(pending) - 1; +- do_IRQ(MIPS_CPU_IRQ_BASE + irq); +- pending &= ~BIT(irq); +- } ++out: ++ return; + } + + static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) +@@ -355,6 +354,11 @@ static const struct irq_domain_ops irq_domain_ops = { + .map = icu_map, + }; + ++static struct irqaction cascade = { ++ .handler = no_action, ++ .name = "cascade", ++}; ++ + int __init icu_of_init(struct device_node *node, struct device_node *parent) + { + struct device_node *eiu_node; +@@ -386,7 +390,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) + mips_cpu_irq_init(); + + for (i = 0; i < MAX_IM; i++) +- irq_set_chained_handler(i + 2, ltq_hw_irq_handler); ++ setup_irq(i + 2, &cascade); + + if (cpu_has_vint) { + pr_info("Setting up vectored interrupts\n"); +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index 5c03a6a9b054..a20823210ac0 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -220,6 +220,15 @@ static void cpu_ready_for_interrupts(void) + unsigned long lpcr = mfspr(SPRN_LPCR); + mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); + } ++ ++ /* ++ * Fixup HFSCR:TM based on CPU features. The bit is set by our ++ * early asm init because at that point we haven't updated our ++ * CPU features from firmware and device-tree. Here we have, ++ * so let's do it. 
++ */ ++ if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP)) ++ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + } + + /* +diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c +index 08a317a9ae4b..a7508d7e20b7 100644 +--- a/arch/x86/entry/vdso/vdso32-setup.c ++++ b/arch/x86/entry/vdso/vdso32-setup.c +@@ -31,8 +31,10 @@ static int __init vdso32_setup(char *s) + { + vdso32_enabled = simple_strtoul(s, NULL, 0); + +- if (vdso32_enabled > 1) ++ if (vdso32_enabled > 1) { + pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n"); ++ vdso32_enabled = 0; ++ } + + return 1; + } +@@ -63,13 +65,18 @@ subsys_initcall(sysenter_setup); + /* Register vsyscall32 into the ABI table */ + #include + ++static const int zero; ++static const int one = 1; ++ + static struct ctl_table abi_table2[] = { + { + .procname = "vsyscall32", + .data = &vdso32_enabled, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = proc_dointvec ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = (int *)&zero, ++ .extra2 = (int *)&one, + }, + {} + }; +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h +index 1514753fd435..d262f985bbc8 100644 +--- a/arch/x86/include/asm/elf.h ++++ b/arch/x86/include/asm/elf.h +@@ -278,7 +278,7 @@ struct task_struct; + + #define ARCH_DLINFO_IA32 \ + do { \ +- if (vdso32_enabled) { \ ++ if (VDSO_CURRENT_BASE) { \ + NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ + } \ +diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c +index 659f01e165d5..8900400230c6 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c +@@ -410,6 +410,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) + cpuc->lbr_entries[i].to = msr_lastbranch.to; + cpuc->lbr_entries[i].mispred = 0; + cpuc->lbr_entries[i].predicted = 0; ++ cpuc->lbr_entries[i].in_tx = 0; ++ cpuc->lbr_entries[i].abort = 0; ++ cpuc->lbr_entries[i].cycles = 0; + cpuc->lbr_entries[i].reserved = 0; + } + cpuc->lbr_stack.nr = i; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 3a7ae80dc49d..0a472e9865c5 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -6678,14 +6678,20 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, + } + + page = nested_get_page(vcpu, vmptr); +- if (page == NULL || +- *(u32 *)kmap(page) != VMCS12_REVISION) { ++ if (page == NULL) { + nested_vmx_failInvalid(vcpu); ++ skip_emulated_instruction(vcpu); ++ return 1; ++ } ++ if (*(u32 *)kmap(page) != VMCS12_REVISION) { + kunmap(page); ++ nested_release_page_clean(page); ++ nested_vmx_failInvalid(vcpu); + skip_emulated_instruction(vcpu); + return 1; + } + kunmap(page); ++ nested_release_page_clean(page); + vmx->nested.vmxon_ptr = vmptr; + break; + case EXIT_REASON_VMCLEAR: +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 493f54172b4a..3aebbd6c6f5f 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -628,21 +628,40 @@ void __init init_mem_mapping(void) + * devmem_is_allowed() checks to see if /dev/mem access to a certain address + * is valid. The argument is a physical page number. + * +- * +- * On x86, access has to be given to the first megabyte of ram because that area +- * contains BIOS code and data regions used by X and dosemu and similar apps. 
+- * Access has to be given to non-kernel-ram areas as well, these contain the PCI +- * mmio resources as well as potential bios/acpi data regions. ++ * On x86, access has to be given to the first megabyte of RAM because that ++ * area traditionally contains BIOS code and data regions used by X, dosemu, ++ * and similar apps. Since they map the entire memory range, the whole range ++ * must be allowed (for mapping), but any areas that would otherwise be ++ * disallowed are flagged as being "zero filled" instead of rejected. ++ * Access has to be given to non-kernel-ram areas as well, these contain the ++ * PCI mmio resources as well as potential bios/acpi data regions. + */ + int devmem_is_allowed(unsigned long pagenr) + { +- if (pagenr < 256) +- return 1; +- if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) ++ if (page_is_ram(pagenr)) { ++ /* ++ * For disallowed memory regions in the low 1MB range, ++ * request that the page be shown as all zeros. ++ */ ++ if (pagenr < 256) ++ return 2; ++ ++ return 0; ++ } ++ ++ /* ++ * This must follow RAM test, since System RAM is considered a ++ * restricted resource under CONFIG_STRICT_IOMEM. ++ */ ++ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) { ++ /* Low 1MB bypasses iomem restrictions. */ ++ if (pagenr < 256) ++ return 1; ++ + return 0; +- if (!page_is_ram(pagenr)) +- return 1; +- return 0; ++ } ++ ++ return 1; + } + + void free_init_pages(char *what, unsigned long begin, unsigned long end) +diff --git a/crypto/ahash.c b/crypto/ahash.c +index dac1c24e9c3e..f9caf0f74199 100644 +--- a/crypto/ahash.c ++++ b/crypto/ahash.c +@@ -31,6 +31,7 @@ struct ahash_request_priv { + crypto_completion_t complete; + void *data; + u8 *result; ++ u32 flags; + void *ubuf[] CRYPTO_MINALIGN_ATTR; + }; + +@@ -270,6 +271,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) + priv->result = req->result; + priv->complete = req->base.complete; + priv->data = req->base.data; ++ priv->flags = req->base.flags; ++ + /* + * WARNING: We do not backup req->priv here! The req->priv + * is for internal use of the Crypto API and the +@@ -284,38 +287,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) + return 0; + } + +-static void ahash_restore_req(struct ahash_request *req) ++static void ahash_restore_req(struct ahash_request *req, int err) + { + struct ahash_request_priv *priv = req->priv; + ++ if (!err) ++ memcpy(priv->result, req->result, ++ crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); ++ + /* Restore the original crypto request. */ + req->result = priv->result; +- req->base.complete = priv->complete; +- req->base.data = priv->data; ++ ++ ahash_request_set_callback(req, priv->flags, ++ priv->complete, priv->data); + req->priv = NULL; + + /* Free the req->priv.priv from the ADJUSTED request. 
*/ + kzfree(priv); + } + +-static void ahash_op_unaligned_finish(struct ahash_request *req, int err) ++static void ahash_notify_einprogress(struct ahash_request *req) + { + struct ahash_request_priv *priv = req->priv; ++ struct crypto_async_request oreq; + +- if (err == -EINPROGRESS) +- return; +- +- if (!err) +- memcpy(priv->result, req->result, +- crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); ++ oreq.data = priv->data; + +- ahash_restore_req(req); ++ priv->complete(&oreq, -EINPROGRESS); + } + + static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) + { + struct ahash_request *areq = req->data; + ++ if (err == -EINPROGRESS) { ++ ahash_notify_einprogress(areq); ++ return; ++ } ++ + /* + * Restore the original request, see ahash_op_unaligned() for what + * goes where. +@@ -326,7 +335,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) + */ + + /* First copy req->result into req->priv.result */ +- ahash_op_unaligned_finish(areq, err); ++ ahash_restore_req(areq, err); + + /* Complete the ORIGINAL request. */ + areq->base.complete(&areq->base, err); +@@ -342,7 +351,12 @@ static int ahash_op_unaligned(struct ahash_request *req, + return err; + + err = op(req); +- ahash_op_unaligned_finish(req, err); ++ if (err == -EINPROGRESS || ++ (err == -EBUSY && (ahash_request_flags(req) & ++ CRYPTO_TFM_REQ_MAY_BACKLOG))) ++ return err; ++ ++ ahash_restore_req(req, err); + + return err; + } +@@ -377,25 +391,14 @@ int crypto_ahash_digest(struct ahash_request *req) + } + EXPORT_SYMBOL_GPL(crypto_ahash_digest); + +-static void ahash_def_finup_finish2(struct ahash_request *req, int err) ++static void ahash_def_finup_done2(struct crypto_async_request *req, int err) + { +- struct ahash_request_priv *priv = req->priv; ++ struct ahash_request *areq = req->data; + + if (err == -EINPROGRESS) + return; + +- if (!err) +- memcpy(priv->result, req->result, +- crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); +- +- ahash_restore_req(req); +-} +- +-static void ahash_def_finup_done2(struct crypto_async_request *req, int err) +-{ +- struct ahash_request *areq = req->data; +- +- ahash_def_finup_finish2(areq, err); ++ ahash_restore_req(areq, err); + + areq->base.complete(&areq->base, err); + } +@@ -406,11 +409,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err) + goto out; + + req->base.complete = ahash_def_finup_done2; +- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ++ + err = crypto_ahash_reqtfm(req)->final(req); ++ if (err == -EINPROGRESS || ++ (err == -EBUSY && (ahash_request_flags(req) & ++ CRYPTO_TFM_REQ_MAY_BACKLOG))) ++ return err; + + out: +- ahash_def_finup_finish2(req, err); ++ ahash_restore_req(req, err); + return err; + } + +@@ -418,7 +425,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err) + { + struct ahash_request *areq = req->data; + ++ if (err == -EINPROGRESS) { ++ ahash_notify_einprogress(areq); ++ return; ++ } ++ ++ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ++ + err = ahash_def_finup_finish1(areq, err); ++ if (areq->priv) ++ return; + + areq->base.complete(&areq->base, err); + } +@@ -433,6 +449,11 @@ static int ahash_def_finup(struct ahash_request *req) + return err; + + err = tfm->update(req); ++ if (err == -EINPROGRESS || ++ (err == -EBUSY && (ahash_request_flags(req) & ++ CRYPTO_TFM_REQ_MAY_BACKLOG))) ++ return err; ++ + return ahash_def_finup_finish1(req, err); + } + +diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c +index 14c2a07c9f3f..67d7489ced01 100644 +--- 
a/drivers/acpi/nfit.c ++++ b/drivers/acpi/nfit.c +@@ -979,7 +979,11 @@ static int cmp_map(const void *m0, const void *m1) + const struct nfit_set_info_map *map0 = m0; + const struct nfit_set_info_map *map1 = m1; + +- return map0->region_offset - map1->region_offset; ++ if (map0->region_offset < map1->region_offset) ++ return -1; ++ else if (map0->region_offset > map1->region_offset) ++ return 1; ++ return 0; + } + + /* Retrieve the nth entry referencing this spa */ +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c +index 1648de80e230..62a93b685c54 100644 +--- a/drivers/block/zram/zram_drv.c ++++ b/drivers/block/zram/zram_drv.c +@@ -574,13 +574,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) + + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); +- clear_page(mem); ++ memset(mem, 0, PAGE_SIZE); + return 0; + } + + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); + if (size == PAGE_SIZE) +- copy_page(mem, cmem); ++ memcpy(mem, cmem, PAGE_SIZE); + else + ret = zcomp_decompress(zram->comp, cmem, size, mem); + zs_unmap_object(meta->mem_pool, handle); +@@ -738,7 +738,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, + + if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { + src = kmap_atomic(page); +- copy_page(cmem, src); ++ memcpy(cmem, src, PAGE_SIZE); + kunmap_atomic(src); + } else { + memcpy(cmem, src, clen); +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index a043107da2af..3143db57ce44 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -583,10 +583,12 @@ config TELCLOCK + controlling the behavior of this hardware. + + config DEVPORT +- bool +- depends on !M68K ++ bool "/dev/port character device" + depends on ISA || PCI + default y ++ help ++ Say Y here if you want to support the /dev/port device. The /dev/port ++ device is similar to /dev/mem, but for I/O ports. + + source "drivers/s390/char/Kconfig" + +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index 6b1721f978c2..e901463d4972 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -59,6 +59,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) + #endif + + #ifdef CONFIG_STRICT_DEVMEM ++static inline int page_is_allowed(unsigned long pfn) ++{ ++ return devmem_is_allowed(pfn); ++} + static inline int range_is_allowed(unsigned long pfn, unsigned long size) + { + u64 from = ((u64)pfn) << PAGE_SHIFT; +@@ -78,6 +82,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + return 1; + } + #else ++static inline int page_is_allowed(unsigned long pfn) ++{ ++ return 1; ++} + static inline int range_is_allowed(unsigned long pfn, unsigned long size) + { + return 1; +@@ -125,23 +133,31 @@ static ssize_t read_mem(struct file *file, char __user *buf, + + while (count > 0) { + unsigned long remaining; ++ int allowed; + + sz = size_inside_page(p, count); + +- if (!range_is_allowed(p >> PAGE_SHIFT, count)) ++ allowed = page_is_allowed(p >> PAGE_SHIFT); ++ if (!allowed) + return -EPERM; ++ if (allowed == 2) { ++ /* Show zeros for restricted memory. */ ++ remaining = clear_user(buf, sz); ++ } else { ++ /* ++ * On ia64 if a page has been mapped somewhere as ++ * uncached, then it must also be accessed uncached ++ * by the kernel or data corruption may occur. 
++ */ ++ ptr = xlate_dev_mem_ptr(p); ++ if (!ptr) ++ return -EFAULT; + +- /* +- * On ia64 if a page has been mapped somewhere as uncached, then +- * it must also be accessed uncached by the kernel or data +- * corruption may occur. +- */ +- ptr = xlate_dev_mem_ptr(p); +- if (!ptr) +- return -EFAULT; ++ remaining = copy_to_user(buf, ptr, sz); ++ ++ unxlate_dev_mem_ptr(p, ptr); ++ } + +- remaining = copy_to_user(buf, ptr, sz); +- unxlate_dev_mem_ptr(p, ptr); + if (remaining) + return -EFAULT; + +@@ -184,30 +200,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf, + #endif + + while (count > 0) { ++ int allowed; ++ + sz = size_inside_page(p, count); + +- if (!range_is_allowed(p >> PAGE_SHIFT, sz)) ++ allowed = page_is_allowed(p >> PAGE_SHIFT); ++ if (!allowed) + return -EPERM; + +- /* +- * On ia64 if a page has been mapped somewhere as uncached, then +- * it must also be accessed uncached by the kernel or data +- * corruption may occur. +- */ +- ptr = xlate_dev_mem_ptr(p); +- if (!ptr) { +- if (written) +- break; +- return -EFAULT; +- } ++ /* Skip actual writing when a page is marked as restricted. */ ++ if (allowed == 1) { ++ /* ++ * On ia64 if a page has been mapped somewhere as ++ * uncached, then it must also be accessed uncached ++ * by the kernel or data corruption may occur. ++ */ ++ ptr = xlate_dev_mem_ptr(p); ++ if (!ptr) { ++ if (written) ++ break; ++ return -EFAULT; ++ } + +- copied = copy_from_user(ptr, buf, sz); +- unxlate_dev_mem_ptr(p, ptr); +- if (copied) { +- written += sz - copied; +- if (written) +- break; +- return -EFAULT; ++ copied = copy_from_user(ptr, buf, sz); ++ unxlate_dev_mem_ptr(p, ptr); ++ if (copied) { ++ written += sz - copied; ++ if (written) ++ break; ++ return -EFAULT; ++ } + } + + buf += sz; +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 090183f812be..31e8ae916ba0 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -1130,6 +1130,8 @@ static int put_chars(u32 vtermno, const char *buf, int count) + { + struct port *port; + struct scatterlist sg[1]; ++ void *data; ++ int ret; + + if (unlikely(early_put_chars)) + return early_put_chars(vtermno, buf, count); +@@ -1138,8 +1140,14 @@ static int put_chars(u32 vtermno, const char *buf, int count) + if (!port) + return -EPIPE; + +- sg_init_one(sg, buf, count); +- return __send_to_port(port, sg, 1, count, (void *)buf, false); ++ data = kmemdup(buf, count, GFP_ATOMIC); ++ if (!data) ++ return -ENOMEM; ++ ++ sg_init_one(sg, data, count); ++ ret = __send_to_port(port, sg, 1, count, data, false); ++ kfree(data); ++ return ret; + } + + /* +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +index ece9f4102c0e..7f8acb3ebfcd 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +@@ -714,7 +714,7 @@ nv4a_chipset = { + .i2c = nv04_i2c_new, + .imem = nv40_instmem_new, + .mc = nv44_mc_new, +- .mmu = nv44_mmu_new, ++ .mmu = nv04_mmu_new, + .pci = nv40_pci_new, + .therm = nv40_therm_new, + .timer = nv41_timer_new, +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c +index d4d8942b1347..e55f8302d08a 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c +@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine) + } + + if (type == 0x00000010) { +- if (!nv31_mpeg_mthd(mpeg, mthd, data)) ++ 
if (nv31_mpeg_mthd(mpeg, mthd, data)) + show &= ~0x01000000; + } + } +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +index d433cfa4a8ab..36af0a8927fc 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine) + } + + if (type == 0x00000010) { +- if (!nv44_mpeg_mthd(subdev->device, mthd, data)) ++ if (nv44_mpeg_mthd(subdev->device, mthd, data)) + show &= ~0x01000000; + } + } +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 16f000a76de5..3258baf3282e 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -189,6 +189,7 @@ static const struct xpad_device { + { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, + { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, ++ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, + { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, + { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, + { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, +@@ -310,6 +311,7 @@ static struct usb_device_id xpad_table[] = { + XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ + XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ ++ XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */ + XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ + { } +diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c +index 15af9a9753e5..2d203b422129 100644 +--- a/drivers/irqchip/irq-imx-gpcv2.c ++++ b/drivers/irqchip/irq-imx-gpcv2.c +@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, + return -ENOMEM; + } + ++ raw_spin_lock_init(&cd->rlock); ++ + cd->gpc_base = of_iomap(node, 0); + if (!cd->gpc_base) { + pr_err("fsl-gpcv2: unable to map gpc registers\n"); +diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c +index f5df9eaba04f..9757f35cd5f5 100644 +--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c ++++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c +@@ -1010,8 +1010,8 @@ EXPORT_SYMBOL(dvb_usbv2_probe); + void dvb_usbv2_disconnect(struct usb_interface *intf) + { + struct dvb_usb_device *d = usb_get_intfdata(intf); +- const char *name = d->name; +- struct device dev = d->udev->dev; ++ const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL); ++ const char *drvname = d->name; + + dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__, + intf->cur_altsetting->desc.bInterfaceNumber); +@@ -1021,8 +1021,9 @@ void dvb_usbv2_disconnect(struct usb_interface *intf) + + dvb_usbv2_exit(d); + +- dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n", +- KBUILD_MODNAME, name); ++ pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n", ++ KBUILD_MODNAME, drvname, devname); ++ kfree(devname); + } + EXPORT_SYMBOL(dvb_usbv2_disconnect); + +diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +index 733a7ff7b207..caad3b5c01ad 100644 +--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c ++++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +@@ -35,42 +35,51 @@ static int 
usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le + + int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) + { +- struct hexline hx; +- u8 reset; +- int ret,pos=0; ++ struct hexline *hx; ++ u8 *buf; ++ int ret, pos = 0; ++ u16 cpu_cs_register = cypress[type].cpu_cs_register; ++ ++ buf = kmalloc(sizeof(*hx), GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ hx = (struct hexline *)buf; + + /* stop the CPU */ +- reset = 1; +- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) ++ buf[0] = 1; ++ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) + err("could not stop the USB controller CPU."); + +- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) { +- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk); +- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len); ++ while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { ++ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk); ++ ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len); + +- if (ret != hx.len) { ++ if (ret != hx->len) { + err("error while transferring firmware " + "(transferred size: %d, block size: %d)", +- ret,hx.len); ++ ret, hx->len); + ret = -EINVAL; + break; + } + } + if (ret < 0) { + err("firmware download failed at %d with %d",pos,ret); ++ kfree(buf); + return ret; + } + + if (ret == 0) { + /* restart the CPU */ +- reset = 0; +- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { ++ buf[0] = 0; ++ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) { + err("could not restart the USB controller CPU."); + ret = -EINVAL; + } + } else + ret = -EIO; + ++ kfree(buf); ++ + return ret; + } + EXPORT_SYMBOL(usb_cypress_load_firmware); +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c +index 855c43d8f7e0..f9e4988ea30e 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -1179,7 +1179,9 @@ map_failed: + + static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt) + { ++ struct tcphdr *tcph; + int offset = 0; ++ int hdr_len; + + /* only TCP packets will be aggregated */ + if (skb->protocol == htons(ETH_P_IP)) { +@@ -1206,14 +1208,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt) + /* if mss is not set through Large Packet bit/mss in rx buffer, + * expect that the mss will be written to the tcp header checksum. 
+ */ ++ tcph = (struct tcphdr *)(skb->data + offset); + if (lrg_pkt) { + skb_shinfo(skb)->gso_size = mss; + } else if (offset) { +- struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset); +- + skb_shinfo(skb)->gso_size = ntohs(tcph->check); + tcph->check = 0; + } ++ ++ if (skb_shinfo(skb)->gso_size) { ++ hdr_len = offset + tcph->doff * 4; ++ skb_shinfo(skb)->gso_segs = ++ DIV_ROUND_UP(skb->len - hdr_len, ++ skb_shinfo(skb)->gso_size); ++ } + } + + static int ibmveth_poll(struct napi_struct *napi, int budget) +diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c +index 4e2b26a88b15..2aa1a1d29cb4 100644 +--- a/drivers/net/usb/catc.c ++++ b/drivers/net/usb/catc.c +@@ -777,7 +777,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + struct net_device *netdev; + struct catc *catc; + u8 broadcast[ETH_ALEN]; +- int i, pktsz; ++ int pktsz, ret; + + if (usb_set_interface(usbdev, + intf->altsetting->desc.bInterfaceNumber, 1)) { +@@ -812,12 +812,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + if ((!catc->ctrl_urb) || (!catc->tx_urb) || + (!catc->rx_urb) || (!catc->irq_urb)) { + dev_err(&intf->dev, "No free urbs available.\n"); +- usb_free_urb(catc->ctrl_urb); +- usb_free_urb(catc->tx_urb); +- usb_free_urb(catc->rx_urb); +- usb_free_urb(catc->irq_urb); +- free_netdev(netdev); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto fail_free; + } + + /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ +@@ -845,15 +841,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + catc->irq_buf, 2, catc_irq_done, catc, 1); + + if (!catc->is_f5u011) { ++ u32 *buf; ++ int i; ++ + dev_dbg(dev, "Checking memory size\n"); + +- i = 0x12345678; +- catc_write_mem(catc, 0x7a80, &i, 4); +- i = 0x87654321; +- catc_write_mem(catc, 0xfa80, &i, 4); +- catc_read_mem(catc, 0x7a80, &i, 4); ++ buf = kmalloc(4, GFP_KERNEL); ++ if (!buf) { ++ ret = -ENOMEM; ++ goto fail_free; ++ } ++ ++ *buf = 0x12345678; ++ catc_write_mem(catc, 0x7a80, buf, 4); ++ *buf = 0x87654321; ++ catc_write_mem(catc, 0xfa80, buf, 4); ++ catc_read_mem(catc, 0x7a80, buf, 4); + +- switch (i) { ++ switch (*buf) { + case 0x12345678: + catc_set_reg(catc, TxBufCount, 8); + catc_set_reg(catc, RxBufCount, 32); +@@ -868,6 +873,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + dev_dbg(dev, "32k Memory\n"); + break; + } ++ ++ kfree(buf); + + dev_dbg(dev, "Getting MAC from SEEROM.\n"); + +@@ -914,16 +921,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + usb_set_intfdata(intf, catc); + + SET_NETDEV_DEV(netdev, &intf->dev); +- if (register_netdev(netdev) != 0) { +- usb_set_intfdata(intf, NULL); +- usb_free_urb(catc->ctrl_urb); +- usb_free_urb(catc->tx_urb); +- usb_free_urb(catc->rx_urb); +- usb_free_urb(catc->irq_urb); +- free_netdev(netdev); +- return -EIO; +- } ++ ret = register_netdev(netdev); ++ if (ret) ++ goto fail_clear_intfdata; ++ + return 0; ++ ++fail_clear_intfdata: ++ usb_set_intfdata(intf, NULL); ++fail_free: ++ usb_free_urb(catc->ctrl_urb); ++ usb_free_urb(catc->tx_urb); ++ usb_free_urb(catc->rx_urb); ++ usb_free_urb(catc->irq_urb); ++ free_netdev(netdev); ++ return ret; + } + + static void catc_disconnect(struct usb_interface *intf) +diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c +index f84080215915..17fac0121e56 100644 +--- a/drivers/net/usb/pegasus.c ++++ b/drivers/net/usb/pegasus.c +@@ -126,40 +126,61 @@ static 
void async_ctrl_callback(struct urb *urb) + + static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) + { ++ u8 *buf; + int ret; + ++ buf = kmalloc(size, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ + ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), + PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, +- indx, data, size, 1000); ++ indx, buf, size, 1000); + if (ret < 0) + netif_dbg(pegasus, drv, pegasus->net, + "%s returned %d\n", __func__, ret); ++ else if (ret <= size) ++ memcpy(data, buf, ret); ++ kfree(buf); + return ret; + } + +-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) ++static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, ++ const void *data) + { ++ u8 *buf; + int ret; + ++ buf = kmemdup(data, size, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ + ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), + PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, +- indx, data, size, 100); ++ indx, buf, size, 100); + if (ret < 0) + netif_dbg(pegasus, drv, pegasus->net, + "%s returned %d\n", __func__, ret); ++ kfree(buf); + return ret; + } + + static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) + { ++ u8 *buf; + int ret; + ++ buf = kmemdup(&data, 1, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ + ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), + PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, +- indx, &data, 1, 1000); ++ indx, buf, 1, 1000); + if (ret < 0) + netif_dbg(pegasus, drv, pegasus->net, + "%s returned %d\n", __func__, ret); ++ kfree(buf); + return ret; + } + +diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c +index d37b7dce2d40..39672984dde1 100644 +--- a/drivers/net/usb/rtl8150.c ++++ b/drivers/net/usb/rtl8150.c +@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150"; + */ + static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) + { +- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), +- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, +- indx, 0, data, size, 500); ++ void *buf; ++ int ret; ++ ++ buf = kmalloc(size, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), ++ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, ++ indx, 0, buf, size, 500); ++ if (ret > 0 && ret <= size) ++ memcpy(data, buf, ret); ++ kfree(buf); ++ return ret; + } + +-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) ++static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) + { +- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), +- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, +- indx, 0, data, size, 500); ++ void *buf; ++ int ret; ++ ++ buf = kmemdup(data, size, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), ++ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, ++ indx, 0, buf, size, 500); ++ kfree(buf); ++ return ret; + } + + static void async_set_reg_cb(struct urb *urb) +diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c +index a8762711ad74..03945731eb65 100644 +--- a/drivers/net/wireless/ath/ath9k/common-spectral.c ++++ b/drivers/net/wireless/ath/ath9k/common-spectral.c +@@ -528,6 +528,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h + if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) + return 0; + ++ if (!spec_priv->rfs_chan_spec_scan) ++ 
return 1; ++ + /* Output buffers are full, no need to process anything + * since there is no space to put the result anyway + */ +@@ -1072,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = { + + void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) + { +- if (config_enabled(CONFIG_ATH9K_DEBUGFS)) { ++ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) { + relay_close(spec_priv->rfs_chan_spec_scan); + spec_priv->rfs_chan_spec_scan = NULL; + } +@@ -1086,6 +1089,9 @@ void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, + debugfs_phy, + 1024, 256, &rfs_spec_scan_cb, + NULL); ++ if (!spec_priv->rfs_chan_spec_scan) ++ return; ++ + debugfs_create_file("spectral_scan_ctl", + S_IRUSR | S_IWUSR, + debugfs_phy, spec_priv, +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c +index 5f47356d6942..254b0ee37039 100644 +--- a/drivers/nvdimm/bus.c ++++ b/drivers/nvdimm/bus.c +@@ -590,8 +590,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, + rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len); + if (rc < 0) + goto out_unlock; ++ nvdimm_bus_unlock(&nvdimm_bus->dev); ++ + if (copy_to_user(p, buf, buf_len)) + rc = -EFAULT; ++ ++ vfree(buf); ++ return rc; ++ + out_unlock: + nvdimm_bus_unlock(&nvdimm_bus->dev); + out: +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c +index 1062fa42ff26..b2cdc1a1ad4f 100644 +--- a/drivers/platform/x86/acer-wmi.c ++++ b/drivers/platform/x86/acer-wmi.c +@@ -1816,11 +1816,24 @@ static int __init acer_wmi_enable_lm(void) + return status; + } + ++#define ACER_WMID_ACCEL_HID "BST0001" ++ + static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level, + void *ctx, void **retval) + { ++ struct acpi_device *dev; ++ ++ if (!strcmp(ctx, "SENR")) { ++ if (acpi_bus_get_device(ah, &dev)) ++ return AE_OK; ++ if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev))) ++ return AE_OK; ++ } else ++ return AE_OK; ++ + *(acpi_handle *)retval = ah; +- return AE_OK; ++ ++ return AE_CTRL_TERMINATE; + } + + static int __init acer_wmi_get_handle(const char *name, const char *prop, +@@ -1847,7 +1860,7 @@ static int __init acer_wmi_accel_setup(void) + { + int err; + +- err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle); ++ err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle); + if (err) + return err; + +@@ -2185,10 +2198,11 @@ static int __init acer_wmi_init(void) + err = acer_wmi_input_setup(); + if (err) + return err; ++ err = acer_wmi_accel_setup(); ++ if (err) ++ return err; + } + +- acer_wmi_accel_setup(); +- + err = platform_driver_register(&acer_platform_driver); + if (err) { + pr_err("Unable to register platform driver\n"); +diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c +index 60232bd366ef..71216aa68905 100644 +--- a/drivers/rtc/rtc-tegra.c ++++ b/drivers/rtc/rtc-tegra.c +@@ -18,6 +18,7 @@ + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + #include ++#include + #include + #include + #include +@@ -59,6 +60,7 @@ struct tegra_rtc_info { + struct platform_device *pdev; + struct rtc_device *rtc_dev; + void __iomem *rtc_base; /* NULL if not initialized. 
*/ ++ struct clk *clk; + int tegra_rtc_irq; /* alarm and periodic irq */ + spinlock_t tegra_rtc_lock; + }; +@@ -332,6 +334,14 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) + if (info->tegra_rtc_irq <= 0) + return -EBUSY; + ++ info->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(info->clk)) ++ return PTR_ERR(info->clk); ++ ++ ret = clk_prepare_enable(info->clk); ++ if (ret < 0) ++ return ret; ++ + /* set context info. */ + info->pdev = pdev; + spin_lock_init(&info->tegra_rtc_lock); +@@ -352,7 +362,7 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) + ret = PTR_ERR(info->rtc_dev); + dev_err(&pdev->dev, "Unable to register device (err=%d).\n", + ret); +- return ret; ++ goto disable_clk; + } + + ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq, +@@ -362,12 +372,25 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) + dev_err(&pdev->dev, + "Unable to request interrupt for device (err=%d).\n", + ret); +- return ret; ++ goto disable_clk; + } + + dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n"); + + return 0; ++ ++disable_clk: ++ clk_disable_unprepare(info->clk); ++ return ret; ++} ++ ++static int tegra_rtc_remove(struct platform_device *pdev) ++{ ++ struct tegra_rtc_info *info = platform_get_drvdata(pdev); ++ ++ clk_disable_unprepare(info->clk); ++ ++ return 0; + } + + #ifdef CONFIG_PM_SLEEP +@@ -419,6 +442,7 @@ static void tegra_rtc_shutdown(struct platform_device *pdev) + + MODULE_ALIAS("platform:tegra_rtc"); + static struct platform_driver tegra_rtc_driver = { ++ .remove = tegra_rtc_remove, + .shutdown = tegra_rtc_shutdown, + .driver = { + .name = "tegra_rtc", +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 78430ef28ea4..4d5207dff960 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -2051,6 +2051,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, + + #define READ_CAPACITY_RETRIES_ON_RESET 10 + ++/* ++ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set ++ * and the reported logical block size is bigger than 512 bytes. Note ++ * that last_sector is a u64 and therefore logical_to_sectors() is not ++ * applicable. ++ */ ++static bool sd_addressable_capacity(u64 lba, unsigned int sector_size) ++{ ++ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9); ++ ++ if (sizeof(sector_t) == 4 && last_sector > U32_MAX) ++ return false; ++ ++ return true; ++} ++ + static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, + unsigned char *buffer) + { +@@ -2116,7 +2132,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, + return -ENODEV; + } + +- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { ++ if (!sd_addressable_capacity(lba, sector_size)) { + sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " + "kernel compiled with support for large block " + "devices.\n"); +@@ -2202,7 +2218,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, + return sector_size; + } + +- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { ++ if (!sd_addressable_capacity(lba, sector_size)) { + sd_printk(KERN_ERR, sdkp, "Too big for this kernel. 
Use a " + "kernel compiled with support for large block " + "devices.\n"); +@@ -2888,7 +2904,8 @@ static int sd_revalidate_disk(struct gendisk *disk) + q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); + rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); + } else +- rw_max = BLK_DEF_MAX_SECTORS; ++ rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), ++ (sector_t)BLK_DEF_MAX_SECTORS); + + /* Combine with controller limits */ + q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index 64c867405ad4..804586aeaffe 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -834,6 +834,7 @@ static void get_capabilities(struct scsi_cd *cd) + unsigned char *buffer; + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; ++ unsigned int ms_len = 128; + int rc, n; + + static const char *loadmech[] = +@@ -860,10 +861,11 @@ static void get_capabilities(struct scsi_cd *cd) + scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); + + /* ask for mode page 0x2a */ +- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, ++ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len, + SR_TIMEOUT, 3, &data, NULL); + +- if (!scsi_status_is_good(rc)) { ++ if (!scsi_status_is_good(rc) || data.length > ms_len || ++ data.header_length + data.block_descriptor_length > data.length) { + /* failed, drive doesn't have capabilities mode page */ + cd->cdi.speed = 1; + cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | +diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c +index 2cbea2af7cd0..6d1b0acbc5b3 100644 +--- a/drivers/target/iscsi/iscsi_target_parameters.c ++++ b/drivers/target/iscsi/iscsi_target_parameters.c +@@ -781,22 +781,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) + if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) + SET_PSTATE_REPLY_OPTIONAL(param); + /* +- * The GlobalSAN iSCSI Initiator for MacOSX does +- * not respond to MaxBurstLength, FirstBurstLength, +- * DefaultTime2Wait or DefaultTime2Retain parameter keys. +- * So, we set them to 'reply optional' here, and assume the +- * the defaults from iscsi_parameters.h if the initiator +- * is not RFC compliant and the keys are not negotiated. +- */ +- if (!strcmp(param->name, MAXBURSTLENGTH)) +- SET_PSTATE_REPLY_OPTIONAL(param); +- if (!strcmp(param->name, FIRSTBURSTLENGTH)) +- SET_PSTATE_REPLY_OPTIONAL(param); +- if (!strcmp(param->name, DEFAULTTIME2WAIT)) +- SET_PSTATE_REPLY_OPTIONAL(param); +- if (!strcmp(param->name, DEFAULTTIME2RETAIN)) +- SET_PSTATE_REPLY_OPTIONAL(param); +- /* + * Required for gPXE iSCSI boot client + */ + if (!strcmp(param->name, MAXCONNECTIONS)) +diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c +index 428b0d9e3dba..93590521ae33 100644 +--- a/drivers/target/iscsi/iscsi_target_util.c ++++ b/drivers/target/iscsi/iscsi_target_util.c +@@ -731,21 +731,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) + { + struct se_cmd *se_cmd = NULL; + int rc; ++ bool op_scsi = false; + /* + * Determine if a struct se_cmd is associated with + * this struct iscsi_cmd. 
+ */ + switch (cmd->iscsi_opcode) { + case ISCSI_OP_SCSI_CMD: +- se_cmd = &cmd->se_cmd; +- __iscsit_free_cmd(cmd, true, shutdown); ++ op_scsi = true; + /* + * Fallthrough + */ + case ISCSI_OP_SCSI_TMFUNC: +- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); +- if (!rc && shutdown && se_cmd && se_cmd->se_sess) { +- __iscsit_free_cmd(cmd, true, shutdown); ++ se_cmd = &cmd->se_cmd; ++ __iscsit_free_cmd(cmd, op_scsi, shutdown); ++ rc = transport_generic_free_cmd(se_cmd, shutdown); ++ if (!rc && shutdown && se_cmd->se_sess) { ++ __iscsit_free_cmd(cmd, op_scsi, shutdown); + target_put_sess_cmd(se_cmd); + } + break; +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index a15070a7fcd6..53e4d5056db7 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -810,6 +810,11 @@ static void atmel_complete_tx_dma(void *arg) + */ + if (!uart_circ_empty(xmit)) + tasklet_schedule(&atmel_port->tasklet); ++ else if ((port->rs485.flags & SER_RS485_ENABLED) && ++ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) { ++ /* DMA done, stop TX, start RX for RS485 */ ++ atmel_start_rx(port); ++ } + + spin_unlock_irqrestore(&port->lock, flags); + } +@@ -912,12 +917,6 @@ static void atmel_tx_dma(struct uart_port *port) + desc->callback = atmel_complete_tx_dma; + desc->callback_param = atmel_port; + atmel_port->cookie_tx = dmaengine_submit(desc); +- +- } else { +- if (port->rs485.flags & SER_RS485_ENABLED) { +- /* DMA done, stop TX, start RX for RS485 */ +- atmel_start_rx(port); +- } + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) +diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c +index 0567d517eed3..ea2f19f5fbde 100644 +--- a/drivers/video/fbdev/xen-fbfront.c ++++ b/drivers/video/fbdev/xen-fbfront.c +@@ -644,7 +644,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev, + break; + + case XenbusStateInitWait: +-InitWait: + xenbus_switch_state(dev, XenbusStateConnected); + break; + +@@ -655,7 +654,8 @@ InitWait: + * get Connected twice here. 
+ */ + if (dev->state != XenbusStateConnected) +- goto InitWait; /* no InitWait seen yet, fudge it */ ++ /* no InitWait seen yet, fudge it */ ++ xenbus_switch_state(dev, XenbusStateConnected); + + if (xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "request-update", "%d", &val) < 0) +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 72f270d4bd17..a0c0a49b6620 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -2545,7 +2545,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, + wdata->credits = credits; + + if (!wdata->cfile->invalidHandle || +- !cifs_reopen_file(wdata->cfile, false)) ++ !(rc = cifs_reopen_file(wdata->cfile, false))) + rc = server->ops->async_writev(wdata, + cifs_uncached_writedata_release); + if (rc) { +@@ -2958,7 +2958,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, + rdata->credits = credits; + + if (!rdata->cfile->invalidHandle || +- !cifs_reopen_file(rdata->cfile, true)) ++ !(rc = cifs_reopen_file(rdata->cfile, true))) + rc = server->ops->async_readv(rdata); + error: + if (rc) { +@@ -3544,7 +3544,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, + } + + if (!rdata->cfile->invalidHandle || +- !cifs_reopen_file(rdata->cfile, true)) ++ !(rc = cifs_reopen_file(rdata->cfile, true))) + rc = server->ops->async_readv(rdata); + if (rc) { + add_credits_and_wake_if(server, rdata->credits, 0); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 7dcc97eadb12..817a937de733 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -71,10 +71,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, + csum_size); + offset += csum_size; +- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, +- EXT4_INODE_SIZE(inode->i_sb) - +- offset); + } ++ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, ++ EXT4_INODE_SIZE(inode->i_sb) - offset); + } + + return csum; +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index d598b9c809c1..db1a1427c27a 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -803,7 +803,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, + static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) + { +- pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); ++ pmd_t pmd = *pmdp; ++ ++ /* See comment in change_huge_pmd() */ ++ pmdp_invalidate(vma, addr, pmdp); ++ if (pmd_dirty(*pmdp)) ++ pmd = pmd_mkdirty(pmd); ++ if (pmd_young(*pmdp)) ++ pmd = pmd_mkyoung(pmd); + + pmd = pmd_wrprotect(pmd); + pmd = pmd_clear_soft_dirty(pmd); +diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h +index 3b4af1d7c7e9..a25414ce2898 100644 +--- a/include/crypto/internal/hash.h ++++ b/include/crypto/internal/hash.h +@@ -173,6 +173,16 @@ static inline struct ahash_instance *ahash_alloc_instance( + return crypto_alloc_instance2(name, alg, ahash_instance_headroom()); + } + ++static inline void ahash_request_complete(struct ahash_request *req, int err) ++{ ++ req->base.complete(&req->base, err); ++} ++ ++static inline u32 ahash_request_flags(struct ahash_request *req) ++{ ++ return req->base.flags; ++} ++ + static inline struct crypto_ahash *crypto_spawn_ahash( + struct crypto_ahash_spawn *spawn) + { +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h +index cb91b44f5f78..ad2bcf647b9a 100644 +--- a/include/linux/cgroup.h ++++ b/include/linux/cgroup.h +@@ -528,6 +528,25 @@ static inline void 
pr_cont_cgroup_path(struct cgroup *cgrp) + pr_cont_kernfs_path(cgrp->kn); + } + ++static inline void cgroup_init_kthreadd(void) ++{ ++ /* ++ * kthreadd is inherited by all kthreads, keep it in the root so ++ * that the new kthreads are guaranteed to stay in the root until ++ * initialization is finished. ++ */ ++ current->no_cgroup_migration = 1; ++} ++ ++static inline void cgroup_kthread_ready(void) ++{ ++ /* ++ * This kthread finished initialization. The creator should have ++ * set PF_NO_SETAFFINITY if this kthread should stay in the root. ++ */ ++ current->no_cgroup_migration = 0; ++} ++ + #else /* !CONFIG_CGROUPS */ + + struct cgroup_subsys_state; +@@ -551,6 +570,8 @@ static inline void cgroup_free(struct task_struct *p) {} + + static inline int cgroup_init_early(void) { return 0; } + static inline int cgroup_init(void) { return 0; } ++static inline void cgroup_init_kthreadd(void) {} ++static inline void cgroup_kthread_ready(void) {} + + #endif /* !CONFIG_CGROUPS */ + +diff --git a/include/linux/sched.h b/include/linux/sched.h +index ce0f61dcd887..352213b360d7 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1475,6 +1475,10 @@ struct task_struct { + #ifdef CONFIG_COMPAT_BRK + unsigned brk_randomized:1; + #endif ++#ifdef CONFIG_CGROUPS ++ /* disallow userland-initiated cgroup migration */ ++ unsigned no_cgroup_migration:1; ++#endif + + unsigned long atomic_flags; /* Flags needing atomic access. */ + +diff --git a/kernel/cgroup.c b/kernel/cgroup.c +index 127c63e02d52..4cb94b678e9f 100644 +--- a/kernel/cgroup.c ++++ b/kernel/cgroup.c +@@ -2752,11 +2752,12 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, + tsk = tsk->group_leader; + + /* +- * Workqueue threads may acquire PF_NO_SETAFFINITY and become +- * trapped in a cpuset, or RT worker may be born in a cgroup +- * with no rt_runtime allocated. Just say no. ++ * kthreads may acquire PF_NO_SETAFFINITY during initialization. ++ * If userland migrates such a kthread to a non-root cgroup, it can ++ * become trapped in a cpuset, or RT kthread may be born in a ++ * cgroup with no rt_runtime allocated. Just say no. 
+ */ +- if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { ++ if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) { + ret = -EINVAL; + goto out_unlock_rcu; + } +diff --git a/kernel/kthread.c b/kernel/kthread.c +index 9ff173dca1ae..850b255649a2 100644 +--- a/kernel/kthread.c ++++ b/kernel/kthread.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include + + static DEFINE_SPINLOCK(kthread_create_lock); +@@ -205,6 +206,7 @@ static int kthread(void *_create) + ret = -EINTR; + + if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) { ++ cgroup_kthread_ready(); + __kthread_parkme(&self); + ret = threadfn(data); + } +@@ -510,6 +512,7 @@ int kthreadd(void *unused) + set_mems_allowed(node_states[N_MEMORY]); + + current->flags |= PF_NOFREEZE; ++ cgroup_init_kthreadd(); + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 3f743b147247..34b2a0d5cf1a 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -3677,23 +3677,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) + ftrace_probe_registered = 1; + } + +-static void __disable_ftrace_function_probe(void) ++static bool __disable_ftrace_function_probe(void) + { + int i; + + if (!ftrace_probe_registered) +- return; ++ return false; + + for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { + struct hlist_head *hhd = &ftrace_func_hash[i]; + if (hhd->first) +- return; ++ return false; + } + + /* no more funcs left */ + ftrace_shutdown(&trace_probe_ops, 0); + + ftrace_probe_registered = 0; ++ return true; + } + + +@@ -3820,6 +3821,7 @@ static void + __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data, int flags) + { ++ struct ftrace_ops_hash old_hash_ops; + struct ftrace_func_entry *rec_entry; + struct ftrace_func_probe *entry; + struct ftrace_func_probe *p; +@@ -3831,6 +3833,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + struct hlist_node *tmp; + char str[KSYM_SYMBOL_LEN]; + int i, ret; ++ bool disabled; + + if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) + func_g.search = NULL; +@@ -3849,6 +3852,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + + mutex_lock(&trace_probe_ops.func_hash->regex_lock); + ++ old_hash_ops.filter_hash = old_hash; ++ /* Probes only have filters */ ++ old_hash_ops.notrace_hash = NULL; ++ + hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); + if (!hash) + /* Hmm, should report this somehow */ +@@ -3886,12 +3893,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + } + } + mutex_lock(&ftrace_lock); +- __disable_ftrace_function_probe(); ++ disabled = __disable_ftrace_function_probe(); + /* + * Remove after the disable is called. Otherwise, if the last + * probe is removed, a null hash means *all enabled*. 
+ */ + ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); ++ ++ /* still need to update the function call sites */ ++ if (ftrace_enabled && !disabled) ++ ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, ++ &old_hash_ops); + synchronize_sched(); + if (!ret) + free_ftrace_hash_rcu(old_hash); +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 36bf4c3fe4f5..9f0aa255e288 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2084,6 +2084,8 @@ static int ip6_route_del(struct fib6_config *cfg) + continue; + if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) + continue; ++ if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol) ++ continue; + dst_hold(&rt->dst); + read_unlock_bh(&table->tb6_lock); + +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 138f2d667212..5758818435f3 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -4422,6 +4422,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) + if (!asoc) + return -EINVAL; + ++ /* If there is a thread waiting on more sndbuf space for ++ * sending on this asoc, it cannot be peeled. ++ */ ++ if (waitqueue_active(&asoc->wait)) ++ return -EBUSY; ++ + /* An association cannot be branched off from an already peeled-off + * socket, nor is this supported for tcp style sockets. + */ +@@ -6960,8 +6966,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, + */ + release_sock(sk); + current_timeo = schedule_timeout(current_timeo); +- if (sk != asoc->base.sk) +- goto do_error; + lock_sock(sk); + + *timeo_p = current_timeo; +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index 06095cc8815e..1f0687d8e3d7 100644 +--- a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -541,9 +541,13 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred) + return gss_new; + gss_msg = gss_add_msg(gss_new); + if (gss_msg == gss_new) { +- int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg); ++ int res; ++ atomic_inc(&gss_msg->count); ++ res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg); + if (res) { + gss_unhash_msg(gss_new); ++ atomic_dec(&gss_msg->count); ++ gss_release_msg(gss_new); + gss_msg = ERR_PTR(res); + } + } else +@@ -836,6 +840,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) + warn_gssd(); + gss_release_msg(gss_msg); + } ++ gss_release_msg(gss_msg); + } + + static void gss_pipe_dentry_destroy(struct dentry *dir, diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.63-64.patch b/patch/kernel/mvebu64-default/03-patch-4.4.63-64.patch new file mode 100644 index 000000000..9d503a345 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.63-64.patch @@ -0,0 +1,1016 @@ +diff --git a/Makefile b/Makefile +index ec52973043f6..17708f5dc169 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 63 ++SUBLEVEL = 64 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index f91ee2f27b41..01cf10556081 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -300,6 +300,14 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, + next = kvm_pgd_addr_end(addr, end); + if (!pgd_none(*pgd)) + unmap_puds(kvm, pgd, addr, next); ++ /* ++ * If we are dealing with a large range in ++ * stage2 table, release the kvm->mmu_lock ++ * to prevent starvation and lockup detector ++ * warnings. 
++ */ ++ if (kvm && (next != end)) ++ cond_resched_lock(&kvm->mmu_lock); + } while (pgd++, addr = next, addr != end); + } + +@@ -738,6 +746,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) + */ + static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) + { ++ assert_spin_locked(&kvm->mmu_lock); + unmap_range(kvm, kvm->arch.pgd, start, size); + } + +@@ -824,7 +833,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) + if (kvm->arch.pgd == NULL) + return; + ++ spin_lock(&kvm->mmu_lock); + unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ spin_unlock(&kvm->mmu_lock); ++ + kvm_free_hwpgd(kvm_get_hwpgd(kvm)); + if (KVM_PREALLOC_LEVEL > 0) + kfree(kvm->arch.pgd); +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S +index edba294620db..f6fd0332c3a2 100644 +--- a/arch/powerpc/kernel/entry_64.S ++++ b/arch/powerpc/kernel/entry_64.S +@@ -716,7 +716,7 @@ resume_kernel: + + addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ + +- lwz r3,GPR1(r1) ++ ld r3,GPR1(r1) + subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */ + mr r4,r1 /* src: current exception frame */ + mr r1,r3 /* Reroute the trampoline frame to r1 */ +@@ -730,8 +730,8 @@ resume_kernel: + addi r6,r6,8 + bdnz 2b + +- /* Do real store operation to complete stwu */ +- lwz r5,GPR1(r1) ++ /* Do real store operation to complete stdu */ ++ ld r5,GPR1(r1) + std r8,0(r5) + + /* Clear _TIF_EMULATE_STACK_STORE flag */ +diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h +index 024f85f947ae..e2c0e4eab037 100644 +--- a/arch/s390/include/asm/pgtable.h ++++ b/arch/s390/include/asm/pgtable.h +@@ -829,6 +829,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + { + pgste_t pgste; + ++ if (pte_present(entry)) ++ pte_val(entry) &= ~_PAGE_UNUSED; + if (mm_has_pgste(mm)) { + pgste = pgste_get_lock(ptep); + pgste_val(pgste) &= ~_PGSTE_GPS_ZERO; +diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h +index d8ce3ec816ab..bd8ce6bcdfc9 100644 +--- a/arch/x86/include/asm/pmem.h ++++ b/arch/x86/include/asm/pmem.h +@@ -72,8 +72,8 @@ static inline void arch_wmb_pmem(void) + * @size: number of bytes to write back + * + * Write back a cache range using the CLWB (cache line write back) +- * instruction. This function requires explicit ordering with an +- * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation. ++ * instruction. Note that @size is internally rounded up to be cache ++ * line size aligned. + */ + static inline void __arch_wb_cache_pmem(void *vaddr, size_t size) + { +@@ -87,15 +87,6 @@ static inline void __arch_wb_cache_pmem(void *vaddr, size_t size) + clwb(p); + } + +-/* +- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec +- * iterators, so for other types (bvec & kvec) we must do a cache write-back. 
+- */ +-static inline bool __iter_needs_pmem_wb(struct iov_iter *i) +-{ +- return iter_is_iovec(i) == false; +-} +- + /** + * arch_copy_from_iter_pmem - copy data from an iterator to PMEM + * @addr: PMEM destination address +@@ -114,8 +105,36 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, + /* TODO: skip the write-back by always using non-temporal stores */ + len = copy_from_iter_nocache(vaddr, bytes, i); + +- if (__iter_needs_pmem_wb(i)) +- __arch_wb_cache_pmem(vaddr, bytes); ++ /* ++ * In the iovec case on x86_64 copy_from_iter_nocache() uses ++ * non-temporal stores for the bulk of the transfer, but we need ++ * to manually flush if the transfer is unaligned. A cached ++ * memory copy is used when destination or size is not naturally ++ * aligned. That is: ++ * - Require 8-byte alignment when size is 8 bytes or larger. ++ * - Require 4-byte alignment when size is 4 bytes. ++ * ++ * In the non-iovec case the entire destination needs to be ++ * flushed. ++ */ ++ if (iter_is_iovec(i)) { ++ unsigned long flushed, dest = (unsigned long) addr; ++ ++ if (bytes < 8) { ++ if (!IS_ALIGNED(dest, 4) || (bytes != 4)) ++ __arch_wb_cache_pmem(addr, 1); ++ } else { ++ if (!IS_ALIGNED(dest, 8)) { ++ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); ++ __arch_wb_cache_pmem(addr, 1); ++ } ++ ++ flushed = dest - (unsigned long) addr; ++ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8)) ++ __arch_wb_cache_pmem(addr + bytes - 1, 1); ++ } ++ } else ++ __arch_wb_cache_pmem(addr, bytes); + + return len; + } +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c +index e99b15077e94..62aca448726a 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c +@@ -53,7 +53,7 @@ static const char * const th_names[] = { + "load_store", + "insn_fetch", + "combined_unit", +- "", ++ "decode_unit", + "northbridge", + "execution_unit", + }; +diff --git a/block/genhd.c b/block/genhd.c +index a5bed6bc869d..3032453a89e6 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -664,7 +664,6 @@ void del_gendisk(struct gendisk *disk) + + kobject_put(disk->part0.holder_dir); + kobject_put(disk->slave_dir); +- disk->driverfs_dev = NULL; + if (!sysfs_deprecated) + sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); + pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); +diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c +index fcd4ce6f78d5..1c2b846c5776 100644 +--- a/drivers/acpi/power.c ++++ b/drivers/acpi/power.c +@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state) + return -EINVAL; + + /* The state of the list is 'on' IFF all resources are 'on'. 
*/ ++ cur_state = 0; + list_for_each_entry(entry, list, node) { + struct acpi_power_resource *resource = entry->resource; + acpi_handle handle = resource->device.handle; +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index 1ef37c727572..d037454fe7b8 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -73,7 +73,6 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + void *in, *out; + unsigned long flags; + int ret, err = 0; +- unsigned long t; + struct page *page; + + spin_lock_irqsave(&newchannel->lock, flags); +@@ -183,11 +182,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + goto error1; + } + +- t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ); +- if (t == 0) { +- err = -ETIMEDOUT; +- goto error1; +- } ++ wait_for_completion(&open_info->waitevent); + + spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); + list_del(&open_info->msglistentry); +@@ -375,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, + struct vmbus_channel_gpadl_header *gpadlmsg; + struct vmbus_channel_gpadl_body *gpadl_body; + struct vmbus_channel_msginfo *msginfo = NULL; +- struct vmbus_channel_msginfo *submsginfo; ++ struct vmbus_channel_msginfo *submsginfo, *tmp; + u32 msgcount; + struct list_head *curr; + u32 next_gpadl_handle; +@@ -437,6 +432,13 @@ cleanup: + list_del(&msginfo->msglistentry); + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + ++ if (msgcount > 1) { ++ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist, ++ msglistentry) { ++ kfree(submsginfo); ++ } ++ } ++ + kfree(msginfo); + return ret; + } +diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c +index 4fc2e8836e60..2bbc53025549 100644 +--- a/drivers/hv/connection.c ++++ b/drivers/hv/connection.c +@@ -429,7 +429,7 @@ int vmbus_post_msg(void *buffer, size_t buflen) + union hv_connection_id conn_id; + int ret = 0; + int retries = 0; +- u32 msec = 1; ++ u32 usec = 1; + + conn_id.asu32 = 0; + conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID; +@@ -462,9 +462,9 @@ int vmbus_post_msg(void *buffer, size_t buflen) + } + + retries++; +- msleep(msec); +- if (msec < 2048) +- msec *= 2; ++ udelay(usec); ++ if (usec < 2048) ++ usec *= 2; + } + return ret; + } +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index ddbf7e7e0d98..8ce1f2e22912 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -305,9 +305,10 @@ void hv_cleanup(bool crash) + + hypercall_msr.as_uint64 = 0; + wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); +- if (!crash) ++ if (!crash) { + vfree(hv_context.tsc_page); +- hv_context.tsc_page = NULL; ++ hv_context.tsc_page = NULL; ++ } + } + #endif + } +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +index 43af91362be5..354da7f207b7 100644 +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -430,16 +430,27 @@ struct dm_info_msg { + * currently hot added. We hot add in multiples of 128M + * chunks; it is possible that we may not be able to bring + * online all the pages in the region. The range +- * covered_end_pfn defines the pages that can ++ * covered_start_pfn:covered_end_pfn defines the pages that can + * be brough online. + */ + + struct hv_hotadd_state { + struct list_head list; + unsigned long start_pfn; ++ unsigned long covered_start_pfn; + unsigned long covered_end_pfn; + unsigned long ha_end_pfn; + unsigned long end_pfn; ++ /* ++ * A list of gaps. 
++ */ ++ struct list_head gap_list; ++}; ++ ++struct hv_hotadd_gap { ++ struct list_head list; ++ unsigned long start_pfn; ++ unsigned long end_pfn; + }; + + struct balloon_state { +@@ -595,18 +606,46 @@ static struct notifier_block hv_memory_nb = { + .priority = 0 + }; + ++/* Check if the particular page is backed and can be onlined and online it. */ ++static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg) ++{ ++ unsigned long cur_start_pgp; ++ unsigned long cur_end_pgp; ++ struct hv_hotadd_gap *gap; ++ ++ cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn); ++ cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn); ++ ++ /* The page is not backed. */ ++ if (((unsigned long)pg < cur_start_pgp) || ++ ((unsigned long)pg >= cur_end_pgp)) ++ return; ++ ++ /* Check for gaps. */ ++ list_for_each_entry(gap, &has->gap_list, list) { ++ cur_start_pgp = (unsigned long) ++ pfn_to_page(gap->start_pfn); ++ cur_end_pgp = (unsigned long) ++ pfn_to_page(gap->end_pfn); ++ if (((unsigned long)pg >= cur_start_pgp) && ++ ((unsigned long)pg < cur_end_pgp)) { ++ return; ++ } ++ } + +-static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size) ++ /* This frame is currently backed; online the page. */ ++ __online_page_set_limits(pg); ++ __online_page_increment_counters(pg); ++ __online_page_free(pg); ++} ++ ++static void hv_bring_pgs_online(struct hv_hotadd_state *has, ++ unsigned long start_pfn, unsigned long size) + { + int i; + +- for (i = 0; i < size; i++) { +- struct page *pg; +- pg = pfn_to_page(start_pfn + i); +- __online_page_set_limits(pg); +- __online_page_increment_counters(pg); +- __online_page_free(pg); +- } ++ for (i = 0; i < size; i++) ++ hv_page_online_one(has, pfn_to_page(start_pfn + i)); + } + + static void hv_mem_hot_add(unsigned long start, unsigned long size, +@@ -682,26 +721,25 @@ static void hv_online_page(struct page *pg) + + list_for_each(cur, &dm_device.ha_region_list) { + has = list_entry(cur, struct hv_hotadd_state, list); +- cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn); +- cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn); ++ cur_start_pgp = (unsigned long) ++ pfn_to_page(has->start_pfn); ++ cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn); + +- if (((unsigned long)pg >= cur_start_pgp) && +- ((unsigned long)pg < cur_end_pgp)) { +- /* +- * This frame is currently backed; online the +- * page. +- */ +- __online_page_set_limits(pg); +- __online_page_increment_counters(pg); +- __online_page_free(pg); +- } ++ /* The page belongs to a different HAS. */ ++ if (((unsigned long)pg < cur_start_pgp) || ++ ((unsigned long)pg >= cur_end_pgp)) ++ continue; ++ ++ hv_page_online_one(has, pg); ++ break; + } + } + +-static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) ++static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) + { + struct list_head *cur; + struct hv_hotadd_state *has; ++ struct hv_hotadd_gap *gap; + unsigned long residual, new_inc; + + if (list_empty(&dm_device.ha_region_list)) +@@ -716,6 +754,24 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) + */ + if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) + continue; ++ ++ /* ++ * If the current start pfn is not where the covered_end ++ * is, create a gap and update covered_end_pfn. 
++ */ ++ if (has->covered_end_pfn != start_pfn) { ++ gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC); ++ if (!gap) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&gap->list); ++ gap->start_pfn = has->covered_end_pfn; ++ gap->end_pfn = start_pfn; ++ list_add_tail(&gap->list, &has->gap_list); ++ ++ has->covered_end_pfn = start_pfn; ++ } ++ + /* + * If the current hot add-request extends beyond + * our current limit; extend it. +@@ -732,19 +788,10 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) + has->end_pfn += new_inc; + } + +- /* +- * If the current start pfn is not where the covered_end +- * is, update it. +- */ +- +- if (has->covered_end_pfn != start_pfn) +- has->covered_end_pfn = start_pfn; +- +- return true; +- ++ return 1; + } + +- return false; ++ return 0; + } + + static unsigned long handle_pg_range(unsigned long pg_start, +@@ -783,6 +830,8 @@ static unsigned long handle_pg_range(unsigned long pg_start, + if (pgs_ol > pfn_cnt) + pgs_ol = pfn_cnt; + ++ has->covered_end_pfn += pgs_ol; ++ pfn_cnt -= pgs_ol; + /* + * Check if the corresponding memory block is already + * online by checking its last previously backed page. +@@ -791,10 +840,8 @@ static unsigned long handle_pg_range(unsigned long pg_start, + */ + if (start_pfn > has->start_pfn && + !PageReserved(pfn_to_page(start_pfn - 1))) +- hv_bring_pgs_online(start_pfn, pgs_ol); ++ hv_bring_pgs_online(has, start_pfn, pgs_ol); + +- has->covered_end_pfn += pgs_ol; +- pfn_cnt -= pgs_ol; + } + + if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) { +@@ -832,13 +879,19 @@ static unsigned long process_hot_add(unsigned long pg_start, + unsigned long rg_size) + { + struct hv_hotadd_state *ha_region = NULL; ++ int covered; + + if (pfn_cnt == 0) + return 0; + +- if (!dm_device.host_specified_ha_region) +- if (pfn_covered(pg_start, pfn_cnt)) ++ if (!dm_device.host_specified_ha_region) { ++ covered = pfn_covered(pg_start, pfn_cnt); ++ if (covered < 0) ++ return 0; ++ ++ if (covered) + goto do_pg_range; ++ } + + /* + * If the host has specified a hot-add range; deal with it first. 
+@@ -850,10 +903,12 @@ static unsigned long process_hot_add(unsigned long pg_start, + return 0; + + INIT_LIST_HEAD(&ha_region->list); ++ INIT_LIST_HEAD(&ha_region->gap_list); + + list_add_tail(&ha_region->list, &dm_device.ha_region_list); + ha_region->start_pfn = rg_start; + ha_region->ha_end_pfn = rg_start; ++ ha_region->covered_start_pfn = pg_start; + ha_region->covered_end_pfn = pg_start; + ha_region->end_pfn = rg_start + rg_size; + } +@@ -1581,6 +1636,7 @@ static int balloon_remove(struct hv_device *dev) + struct hv_dynmem_device *dm = hv_get_drvdata(dev); + struct list_head *cur, *tmp; + struct hv_hotadd_state *has; ++ struct hv_hotadd_gap *gap, *tmp_gap; + + if (dm->num_pages_ballooned != 0) + pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned); +@@ -1597,6 +1653,10 @@ static int balloon_remove(struct hv_device *dev) + #endif + list_for_each_safe(cur, tmp, &dm->ha_region_list) { + has = list_entry(cur, struct hv_hotadd_state, list); ++ list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) { ++ list_del(&gap->list); ++ kfree(gap); ++ } + list_del(&has->list); + kfree(has); + } +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 43482ae1e049..1a2b2620421e 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -1122,6 +1122,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, + * Asus UX32VD 0x361f02 00, 15, 0e clickpad + * Avatar AVIU-145A2 0x361f00 ? clickpad + * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons ++ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons + * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons + * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons + * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) +@@ -1528,6 +1529,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { + }, + }, + { ++ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"), ++ }, ++ }, ++ { + /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c +index 1f1582f6cccb..8d838779fd1b 100644 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c +@@ -804,6 +804,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host, + + switch (uhs) { + case MMC_TIMING_UHS_SDR50: ++ case MMC_TIMING_UHS_DDR50: + pinctrl = imx_data->pins_100mhz; + break; + case MMC_TIMING_UHS_SDR104: +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 0134ba32a057..39712560b4c1 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, + return err; + } + +- if (bytes == 0) { +- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); +- if (err) +- return err; ++ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); ++ if (err) ++ return err; + ++ if (bytes == 0) { + err = clear_update_marker(ubi, vol, 0); + if (err) + return err; +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index b76883606e4b..94906aaa9b7c 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -906,7 +906,6 @@ struct cifs_tcon { + bool use_persistent:1; /* use persistent instead of durable handles */ + #ifdef CONFIG_CIFS_SMB2 + bool print:1; /* set if connection to printer share */ +- bool bad_network_name:1; /* set if ret status 
STATUS_BAD_NETWORK_NAME */ + __le32 capabilities; + __u32 share_flags; + __u32 maximal_access; +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c +index fc537c29044e..87b87e091e8e 100644 +--- a/fs/cifs/smb1ops.c ++++ b/fs/cifs/smb1ops.c +@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile) + return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle; + } + ++static bool ++cifs_can_echo(struct TCP_Server_Info *server) ++{ ++ if (server->tcpStatus == CifsGood) ++ return true; ++ ++ return false; ++} ++ + struct smb_version_operations smb1_operations = { + .send_cancel = send_nt_cancel, + .compare_fids = cifs_compare_fids, +@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = { + .get_dfs_refer = CIFSGetDFSRefer, + .qfs_tcon = cifs_qfs_tcon, + .is_path_accessible = cifs_is_path_accessible, ++ .can_echo = cifs_can_echo, + .query_path_info = cifs_query_path_info, + .query_file_info = cifs_query_file_info, + .get_srv_inum = cifs_get_srv_inum, +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 6cb5c4b30e78..6cb2603f8a5c 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -932,9 +932,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, + else + return -EIO; + +- if (tcon && tcon->bad_network_name) +- return -ENOENT; +- + if ((tcon && tcon->seal) && + ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { + cifs_dbg(VFS, "encryption requested but no server support"); +@@ -1036,8 +1033,6 @@ tcon_exit: + tcon_error_exit: + if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) { + cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); +- if (tcon) +- tcon->bad_network_name = true; + } + goto tcon_exit; + } +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 7d7f99b0db47..1275175b0946 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -3440,11 +3440,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); + int ring_buffer_iter_empty(struct ring_buffer_iter *iter) + { + struct ring_buffer_per_cpu *cpu_buffer; ++ struct buffer_page *reader; ++ struct buffer_page *head_page; ++ struct buffer_page *commit_page; ++ unsigned commit; + + cpu_buffer = iter->cpu_buffer; + +- return iter->head_page == cpu_buffer->commit_page && +- iter->head == rb_commit_index(cpu_buffer); ++ /* Remember, trace recording is off when iterator is in use */ ++ reader = cpu_buffer->reader_page; ++ head_page = cpu_buffer->head_page; ++ commit_page = cpu_buffer->commit_page; ++ commit = rb_page_commit(commit_page); ++ ++ return ((iter->head_page == commit_page && iter->head == commit) || ++ (iter->head_page == reader && commit_page == head_page && ++ head_page->read == commit && ++ iter->head == rb_page_commit(cpu_buffer->reader_page))); + } + EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 059233abcfcf..4c21c0b7dc91 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -6060,11 +6060,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash, + return ret; + + out_reg: +- ret = register_ftrace_function_probe(glob, ops, count); ++ ret = alloc_snapshot(&global_trace); ++ if (ret < 0) ++ goto out; + +- if (ret >= 0) +- alloc_snapshot(&global_trace); ++ ret = register_ftrace_function_probe(glob, ops, count); + ++ out: + return ret < 0 ? 
ret : 0; + } + +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 2b528389409f..9f0915f72702 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -3396,6 +3396,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) + !ether_addr_equal(bssid, hdr->addr1)) + return false; + } ++ ++ /* ++ * 802.11-2016 Table 9-26 says that for data frames, A1 must be ++ * the BSSID - we've checked that already but may have accepted ++ * the wildcard (ff:ff:ff:ff:ff:ff). ++ * ++ * It also says: ++ * The BSSID of the Data frame is determined as follows: ++ * a) If the STA is contained within an AP or is associated ++ * with an AP, the BSSID is the address currently in use ++ * by the STA contained in the AP. ++ * ++ * So we should not accept data frames with an address that's ++ * multicast. ++ * ++ * Accepting it also opens a security problem because stations ++ * could encrypt it with the GTK and inject traffic that way. ++ */ ++ if (ieee80211_is_data(hdr->frame_control) && multicast) ++ return false; ++ + return true; + case NL80211_IFTYPE_WDS: + if (bssid || !ieee80211_is_data(hdr->frame_control)) +diff --git a/net/tipc/node.c b/net/tipc/node.c +index 3926b561f873..d468aad6163e 100644 +--- a/net/tipc/node.c ++++ b/net/tipc/node.c +@@ -102,9 +102,10 @@ static unsigned int tipc_hashfn(u32 addr) + + static void tipc_node_kref_release(struct kref *kref) + { +- struct tipc_node *node = container_of(kref, struct tipc_node, kref); ++ struct tipc_node *n = container_of(kref, struct tipc_node, kref); + +- tipc_node_delete(node); ++ kfree(n->bc_entry.link); ++ kfree_rcu(n, rcu); + } + + void tipc_node_put(struct tipc_node *node) +@@ -216,21 +217,20 @@ static void tipc_node_delete(struct tipc_node *node) + { + list_del_rcu(&node->list); + hlist_del_rcu(&node->hash); +- kfree(node->bc_entry.link); +- kfree_rcu(node, rcu); ++ tipc_node_put(node); ++ ++ del_timer_sync(&node->timer); ++ tipc_node_put(node); + } + + void tipc_node_stop(struct net *net) + { +- struct tipc_net *tn = net_generic(net, tipc_net_id); ++ struct tipc_net *tn = tipc_net(net); + struct tipc_node *node, *t_node; + + spin_lock_bh(&tn->node_list_lock); +- list_for_each_entry_safe(node, t_node, &tn->node_list, list) { +- if (del_timer(&node->timer)) +- tipc_node_put(node); +- tipc_node_put(node); +- } ++ list_for_each_entry_safe(node, t_node, &tn->node_list, list) ++ tipc_node_delete(node); + spin_unlock_bh(&tn->node_list_lock); + } + +@@ -313,9 +313,7 @@ static void tipc_node_timeout(unsigned long data) + if (rc & TIPC_LINK_DOWN_EVT) + tipc_node_link_down(n, bearer_id, false); + } +- if (!mod_timer(&n->timer, jiffies + n->keepalive_intv)) +- tipc_node_get(n); +- tipc_node_put(n); ++ mod_timer(&n->timer, jiffies + n->keepalive_intv); + } + + /** +diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c +index 0a369bb440e7..662bdd20a748 100644 +--- a/net/vmw_vsock/vmci_transport.c ++++ b/net/vmw_vsock/vmci_transport.c +@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id, + * qp_handle. 
+ */ + if (vmci_handle_is_invalid(e_payload->handle) || +- vmci_handle_is_equal(trans->qp_handle, e_payload->handle)) ++ !vmci_handle_is_equal(trans->qp_handle, e_payload->handle)) + return; + + /* We don't ask for delayed CBs when we subscribe to this event (we +@@ -2154,7 +2154,7 @@ module_exit(vmci_transport_exit); + + MODULE_AUTHOR("VMware, Inc."); + MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); +-MODULE_VERSION("1.0.2.0-k"); ++MODULE_VERSION("1.0.3.0-k"); + MODULE_LICENSE("GPL v2"); + MODULE_ALIAS("vmware_vsock"); + MODULE_ALIAS_NETPROTO(PF_VSOCK); +diff --git a/security/keys/gc.c b/security/keys/gc.c +index addf060399e0..9cb4fe4478a1 100644 +--- a/security/keys/gc.c ++++ b/security/keys/gc.c +@@ -46,7 +46,7 @@ static unsigned long key_gc_flags; + * immediately unlinked. + */ + struct key_type key_type_dead = { +- .name = "dead", ++ .name = ".dead", + }; + + /* +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index 1c3872aeed14..442e350c209d 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -271,7 +271,8 @@ error: + * Create and join an anonymous session keyring or join a named session + * keyring, creating it if necessary. A named session keyring must have Search + * permission for it to be joined. Session keyrings without this permit will +- * be skipped over. ++ * be skipped over. It is not permitted for userspace to create or join ++ * keyrings whose name begin with a dot. + * + * If successful, the ID of the joined session keyring will be returned. + */ +@@ -288,12 +289,16 @@ long keyctl_join_session_keyring(const char __user *_name) + ret = PTR_ERR(name); + goto error; + } ++ ++ ret = -EPERM; ++ if (name[0] == '.') ++ goto error_name; + } + + /* join the session */ + ret = join_session_keyring(name); ++error_name: + kfree(name); +- + error: + return ret; + } +@@ -1223,8 +1228,8 @@ error: + * Read or set the default keyring in which request_key() will cache keys and + * return the old setting. + * +- * If a process keyring is specified then this will be created if it doesn't +- * yet exist. The old setting will be returned if successful. ++ * If a thread or process keyring is specified then it will be created if it ++ * doesn't yet exist. The old setting will be returned if successful. + */ + long keyctl_set_reqkey_keyring(int reqkey_defl) + { +@@ -1249,11 +1254,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl) + + case KEY_REQKEY_DEFL_PROCESS_KEYRING: + ret = install_process_keyring_to_cred(new); +- if (ret < 0) { +- if (ret != -EEXIST) +- goto error; +- ret = 0; +- } ++ if (ret < 0) ++ goto error; + goto set; + + case KEY_REQKEY_DEFL_DEFAULT: +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c +index e6d50172872f..4ed909142956 100644 +--- a/security/keys/process_keys.c ++++ b/security/keys/process_keys.c +@@ -125,13 +125,18 @@ error: + } + + /* +- * Install a fresh thread keyring directly to new credentials. This keyring is +- * allowed to overrun the quota. ++ * Install a thread keyring to the given credentials struct if it didn't have ++ * one already. This is allowed to overrun the quota. ++ * ++ * Return: 0 if a thread keyring is now present; -errno on failure. 
+ */ + int install_thread_keyring_to_cred(struct cred *new) + { + struct key *keyring; + ++ if (new->thread_keyring) ++ return 0; ++ + keyring = keyring_alloc("_tid", new->uid, new->gid, new, + KEY_POS_ALL | KEY_USR_VIEW, + KEY_ALLOC_QUOTA_OVERRUN, NULL); +@@ -143,7 +148,9 @@ int install_thread_keyring_to_cred(struct cred *new) + } + + /* +- * Install a fresh thread keyring, discarding the old one. ++ * Install a thread keyring to the current task if it didn't have one already. ++ * ++ * Return: 0 if a thread keyring is now present; -errno on failure. + */ + static int install_thread_keyring(void) + { +@@ -154,8 +161,6 @@ static int install_thread_keyring(void) + if (!new) + return -ENOMEM; + +- BUG_ON(new->thread_keyring); +- + ret = install_thread_keyring_to_cred(new); + if (ret < 0) { + abort_creds(new); +@@ -166,17 +171,17 @@ static int install_thread_keyring(void) + } + + /* +- * Install a process keyring directly to a credentials struct. ++ * Install a process keyring to the given credentials struct if it didn't have ++ * one already. This is allowed to overrun the quota. + * +- * Returns -EEXIST if there was already a process keyring, 0 if one installed, +- * and other value on any other error ++ * Return: 0 if a process keyring is now present; -errno on failure. + */ + int install_process_keyring_to_cred(struct cred *new) + { + struct key *keyring; + + if (new->process_keyring) +- return -EEXIST; ++ return 0; + + keyring = keyring_alloc("_pid", new->uid, new->gid, new, + KEY_POS_ALL | KEY_USR_VIEW, +@@ -189,11 +194,9 @@ int install_process_keyring_to_cred(struct cred *new) + } + + /* +- * Make sure a process keyring is installed for the current process. The +- * existing process keyring is not replaced. ++ * Install a process keyring to the current task if it didn't have one already. + * +- * Returns 0 if there is a process keyring by the end of this function, some +- * error otherwise. ++ * Return: 0 if a process keyring is now present; -errno on failure. + */ + static int install_process_keyring(void) + { +@@ -207,14 +210,18 @@ static int install_process_keyring(void) + ret = install_process_keyring_to_cred(new); + if (ret < 0) { + abort_creds(new); +- return ret != -EEXIST ? ret : 0; ++ return ret; + } + + return commit_creds(new); + } + + /* +- * Install a session keyring directly to a credentials struct. ++ * Install the given keyring as the session keyring of the given credentials ++ * struct, replacing the existing one if any. If the given keyring is NULL, ++ * then install a new anonymous session keyring. ++ * ++ * Return: 0 on success; -errno on failure. + */ + int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) + { +@@ -249,8 +256,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) + } + + /* +- * Install a session keyring, discarding the old one. If a keyring is not +- * supplied, an empty one is invented. ++ * Install the given keyring as the session keyring of the current task, ++ * replacing the existing one if any. If the given keyring is NULL, then ++ * install a new anonymous session keyring. ++ * ++ * Return: 0 on success; -errno on failure. 
+ */ + static int install_session_keyring(struct key *keyring) + { +diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c +index 0d9f48ec42bb..bc7adb84e679 100644 +--- a/tools/hv/hv_kvp_daemon.c ++++ b/tools/hv/hv_kvp_daemon.c +@@ -1433,7 +1433,7 @@ int main(int argc, char *argv[]) + openlog("KVP", 0, LOG_USER); + syslog(LOG_INFO, "KVP starting; pid is:%d", getpid()); + +- kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR); ++ kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC); + + if (kvp_fd < 0) { + syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s", diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.64-65.patch b/patch/kernel/mvebu64-default/03-patch-4.4.64-65.patch new file mode 100644 index 000000000..c8da77fdc --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.64-65.patch @@ -0,0 +1,920 @@ +diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt +index 302b5ed616a6..35e17f748ca7 100644 +--- a/Documentation/sysctl/fs.txt ++++ b/Documentation/sysctl/fs.txt +@@ -265,6 +265,13 @@ aio-nr can grow to. + + ============================================================== + ++mount-max: ++ ++This denotes the maximum number of mounts that may exist ++in a mount namespace. ++ ++============================================================== ++ + + 2. /proc/sys/fs/binfmt_misc + ---------------------------------------------------------- +diff --git a/Makefile b/Makefile +index 17708f5dc169..ddaef04f528a 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 64 ++SUBLEVEL = 65 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c +index 4e941f00b600..082ff5608455 100644 +--- a/drivers/media/tuners/tuner-xc2028.c ++++ b/drivers/media/tuners/tuner-xc2028.c +@@ -1403,11 +1403,12 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + * in order to avoid troubles during device release. 
+ */ + kfree(priv->ctrl.fname); ++ priv->ctrl.fname = NULL; + memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); + if (p->fname) { + priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); + if (priv->ctrl.fname == NULL) +- rc = -ENOMEM; ++ return -ENOMEM; + } + + /* +diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c +index 6df3ee561d52..515aa3f993f3 100644 +--- a/drivers/net/wireless/hostap/hostap_hw.c ++++ b/drivers/net/wireless/hostap/hostap_hw.c +@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, + spin_lock_bh(&local->baplock); + + res = hfa384x_setup_bap(dev, BAP0, rid, 0); +- if (!res) +- res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); ++ if (res) ++ goto unlock; ++ ++ res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); ++ if (res) ++ goto unlock; + + if (le16_to_cpu(rec.len) == 0) { + /* RID not available */ + res = -ENODATA; ++ goto unlock; + } + + rlen = (le16_to_cpu(rec.len) - 1) * 2; +- if (!res && exact_len && rlen != len) { ++ if (exact_len && rlen != len) { + printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: " + "rid=0x%04x, len=%d (expected %d)\n", + dev->name, rid, rlen, len); + res = -ENODATA; + } + +- if (!res) +- res = hfa384x_from_bap(dev, BAP0, buf, len); ++ res = hfa384x_from_bap(dev, BAP0, buf, len); + ++unlock: + spin_unlock_bh(&local->baplock); + mutex_unlock(&local->rid_bap_mtx); + +diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c +index 80f9de907563..5cc80b80c82b 100644 +--- a/drivers/tty/nozomi.c ++++ b/drivers/tty/nozomi.c +@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc) + struct tty_struct *tty = tty_port_tty_get(&port->port); + int i, ret; + +- read_mem32((u32 *) &size, addr, 4); ++ size = __le32_to_cpu(readl(addr)); + /* DBG1( "%d bytes port: %d", size, index); */ + + if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index 9982cb176ce8..830e2fd47642 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -562,8 +562,9 @@ static long vfio_pci_ioctl(void *device_data, + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; ++ size_t size; + u8 *data = NULL; +- int ret = 0; ++ int max, ret = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + +@@ -571,23 +572,31 @@ static long vfio_pci_ioctl(void *device_data, + return -EFAULT; + + if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || ++ hdr.count >= (U32_MAX - hdr.start) || + hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | + VFIO_IRQ_SET_ACTION_TYPE_MASK)) + return -EINVAL; + +- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { +- size_t size; +- int max = vfio_pci_get_irq_count(vdev, hdr.index); ++ max = vfio_pci_get_irq_count(vdev, hdr.index); ++ if (hdr.start >= max || hdr.start + hdr.count > max) ++ return -EINVAL; + +- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) +- size = sizeof(uint8_t); +- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) +- size = sizeof(int32_t); +- else +- return -EINVAL; ++ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { ++ case VFIO_IRQ_SET_DATA_NONE: ++ size = 0; ++ break; ++ case VFIO_IRQ_SET_DATA_BOOL: ++ size = sizeof(uint8_t); ++ break; ++ case VFIO_IRQ_SET_DATA_EVENTFD: ++ size = sizeof(int32_t); ++ break; ++ default: ++ return -EINVAL; ++ } + +- if (hdr.argsz - minsz < hdr.count * size || +- hdr.start >= max || hdr.start + hdr.count > max) ++ if (size) { ++ if (hdr.argsz - minsz < hdr.count * size) + return -EINVAL; + 
+ data = memdup_user((void __user *)(arg + minsz), +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 20e9a86d2dcf..5c8f767b6368 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -255,7 +255,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) + if (!is_irq_none(vdev)) + return -EINVAL; + +- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); ++ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + if (!vdev->ctx) + return -ENOMEM; + +diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c +index ad8a5b757cc7..a443c6e54412 100644 +--- a/fs/gfs2/dir.c ++++ b/fs/gfs2/dir.c +@@ -760,7 +760,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index, + int error; + + error = get_leaf_nr(dip, index, &leaf_no); +- if (!error) ++ if (!IS_ERR_VALUE(error)) + error = get_leaf(dip, leaf_no, bh_out); + + return error; +@@ -976,7 +976,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) + + index = name->hash >> (32 - dip->i_depth); + error = get_leaf_nr(dip, index, &leaf_no); +- if (error) ++ if (IS_ERR_VALUE(error)) + return error; + + /* Get the old leaf block */ +diff --git a/fs/mount.h b/fs/mount.h +index 3dc7dea5a357..13a4ebbbaa74 100644 +--- a/fs/mount.h ++++ b/fs/mount.h +@@ -13,6 +13,8 @@ struct mnt_namespace { + u64 seq; /* Sequence number to prevent loops */ + wait_queue_head_t poll; + u64 event; ++ unsigned int mounts; /* # of mounts in the namespace */ ++ unsigned int pending_mounts; + }; + + struct mnt_pcp { +diff --git a/fs/namespace.c b/fs/namespace.c +index 7df3d406d3e0..f26d18d69712 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -27,6 +27,9 @@ + #include "pnode.h" + #include "internal.h" + ++/* Maximum number of mounts in a mount namespace */ ++unsigned int sysctl_mount_max __read_mostly = 100000; ++ + static unsigned int m_hash_mask __read_mostly; + static unsigned int m_hash_shift __read_mostly; + static unsigned int mp_hash_mask __read_mostly; +@@ -925,6 +928,9 @@ static void commit_tree(struct mount *mnt) + + list_splice(&head, n->list.prev); + ++ n->mounts += n->pending_mounts; ++ n->pending_mounts = 0; ++ + __attach_mnt(mnt, parent); + touch_mnt_namespace(n); + } +@@ -1445,11 +1451,16 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) + propagate_umount(&tmp_list); + + while (!list_empty(&tmp_list)) { ++ struct mnt_namespace *ns; + bool disconnect; + p = list_first_entry(&tmp_list, struct mount, mnt_list); + list_del_init(&p->mnt_expire); + list_del_init(&p->mnt_list); +- __touch_mnt_namespace(p->mnt_ns); ++ ns = p->mnt_ns; ++ if (ns) { ++ ns->mounts--; ++ __touch_mnt_namespace(ns); ++ } + p->mnt_ns = NULL; + if (how & UMOUNT_SYNC) + p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; +@@ -1850,6 +1861,28 @@ static int invent_group_ids(struct mount *mnt, bool recurse) + return 0; + } + ++int count_mounts(struct mnt_namespace *ns, struct mount *mnt) ++{ ++ unsigned int max = READ_ONCE(sysctl_mount_max); ++ unsigned int mounts = 0, old, pending, sum; ++ struct mount *p; ++ ++ for (p = mnt; p; p = next_mnt(p, mnt)) ++ mounts++; ++ ++ old = ns->mounts; ++ pending = ns->pending_mounts; ++ sum = old + pending; ++ if ((old > sum) || ++ (pending > sum) || ++ (max < sum) || ++ (mounts > (max - sum))) ++ return -ENOSPC; ++ ++ ns->pending_mounts = pending + mounts; ++ return 0; ++} ++ + /* + * @source_mnt : mount tree to be attached + * @nd : place the mount tree @source_mnt is attached +@@ -1919,6 +1952,7 
@@ static int attach_recursive_mnt(struct mount *source_mnt, + struct path *parent_path) + { + HLIST_HEAD(tree_list); ++ struct mnt_namespace *ns = dest_mnt->mnt_ns; + struct mountpoint *smp; + struct mount *child, *p; + struct hlist_node *n; +@@ -1931,6 +1965,13 @@ static int attach_recursive_mnt(struct mount *source_mnt, + if (IS_ERR(smp)) + return PTR_ERR(smp); + ++ /* Is there space to add these mounts to the mount namespace? */ ++ if (!parent_path) { ++ err = count_mounts(ns, source_mnt); ++ if (err) ++ goto out; ++ } ++ + if (IS_MNT_SHARED(dest_mnt)) { + err = invent_group_ids(source_mnt, true); + if (err) +@@ -1970,11 +2011,14 @@ static int attach_recursive_mnt(struct mount *source_mnt, + out_cleanup_ids: + while (!hlist_empty(&tree_list)) { + child = hlist_entry(tree_list.first, struct mount, mnt_hash); ++ child->mnt_parent->mnt_ns->pending_mounts = 0; + umount_tree(child, UMOUNT_SYNC); + } + unlock_mount_hash(); + cleanup_group_ids(source_mnt, NULL); + out: ++ ns->pending_mounts = 0; ++ + read_seqlock_excl(&mount_lock); + put_mountpoint(smp); + read_sequnlock_excl(&mount_lock); +@@ -2804,6 +2848,8 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) + init_waitqueue_head(&new_ns->poll); + new_ns->event = 0; + new_ns->user_ns = get_user_ns(user_ns); ++ new_ns->mounts = 0; ++ new_ns->pending_mounts = 0; + return new_ns; + } + +@@ -2853,6 +2899,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, + q = new; + while (p) { + q->mnt_ns = new_ns; ++ new_ns->mounts++; + if (new_fs) { + if (&p->mnt == new_fs->root.mnt) { + new_fs->root.mnt = mntget(&q->mnt); +@@ -2891,6 +2938,7 @@ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m) + struct mount *mnt = real_mount(m); + mnt->mnt_ns = new_ns; + new_ns->root = mnt; ++ new_ns->mounts++; + list_add(&mnt->mnt_list, &new_ns->list); + } else { + mntput(m); +diff --git a/fs/pnode.c b/fs/pnode.c +index b9f2af59b9a6..b394ca5307ec 100644 +--- a/fs/pnode.c ++++ b/fs/pnode.c +@@ -259,7 +259,7 @@ static int propagate_one(struct mount *m) + read_sequnlock_excl(&mount_lock); + } + hlist_add_head(&child->mnt_hash, list); +- return 0; ++ return count_mounts(m->mnt_ns, child); + } + + /* +diff --git a/fs/pnode.h b/fs/pnode.h +index 623f01772bec..dc87e65becd2 100644 +--- a/fs/pnode.h ++++ b/fs/pnode.h +@@ -54,4 +54,5 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, + struct mount *copy_tree(struct mount *, struct dentry *, int); + bool is_path_reachable(struct mount *, struct dentry *, + const struct path *root); ++int count_mounts(struct mnt_namespace *ns, struct mount *mnt); + #endif /* _LINUX_PNODE_H */ +diff --git a/include/linux/mount.h b/include/linux/mount.h +index f822c3c11377..dc6cd800cd5d 100644 +--- a/include/linux/mount.h ++++ b/include/linux/mount.h +@@ -95,4 +95,6 @@ extern void mark_mounts_for_expiry(struct list_head *mounts); + + extern dev_t name_to_dev_t(const char *name); + ++extern unsigned int sysctl_mount_max; ++ + #endif /* _LINUX_MOUNT_H */ +diff --git a/kernel/events/core.c b/kernel/events/core.c +index e4b5494f05f8..784ab8fe8714 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -8250,6 +8250,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) + return 0; + } + ++/* ++ * Variation on perf_event_ctx_lock_nested(), except we take two context ++ * mutexes. 
++ */ ++static struct perf_event_context * ++__perf_event_ctx_lock_double(struct perf_event *group_leader, ++ struct perf_event_context *ctx) ++{ ++ struct perf_event_context *gctx; ++ ++again: ++ rcu_read_lock(); ++ gctx = READ_ONCE(group_leader->ctx); ++ if (!atomic_inc_not_zero(&gctx->refcount)) { ++ rcu_read_unlock(); ++ goto again; ++ } ++ rcu_read_unlock(); ++ ++ mutex_lock_double(&gctx->mutex, &ctx->mutex); ++ ++ if (group_leader->ctx != gctx) { ++ mutex_unlock(&ctx->mutex); ++ mutex_unlock(&gctx->mutex); ++ put_ctx(gctx); ++ goto again; ++ } ++ ++ return gctx; ++} ++ + /** + * sys_perf_event_open - open a performance event, associate it to a task/cpu + * +@@ -8486,8 +8517,26 @@ SYSCALL_DEFINE5(perf_event_open, + } + + if (move_group) { +- gctx = group_leader->ctx; +- mutex_lock_double(&gctx->mutex, &ctx->mutex); ++ gctx = __perf_event_ctx_lock_double(group_leader, ctx); ++ ++ /* ++ * Check if we raced against another sys_perf_event_open() call ++ * moving the software group underneath us. ++ */ ++ if (!(group_leader->group_flags & PERF_GROUP_SOFTWARE)) { ++ /* ++ * If someone moved the group out from under us, check ++ * if this new event wound up on the same ctx, if so ++ * its the regular !move_group case, otherwise fail. ++ */ ++ if (gctx != ctx) { ++ err = -EINVAL; ++ goto err_locked; ++ } else { ++ perf_event_ctx_unlock(group_leader, gctx); ++ move_group = 0; ++ } ++ } + } else { + mutex_lock(&ctx->mutex); + } +@@ -8582,7 +8631,7 @@ SYSCALL_DEFINE5(perf_event_open, + perf_unpin_context(ctx); + + if (move_group) +- mutex_unlock(&gctx->mutex); ++ perf_event_ctx_unlock(group_leader, gctx); + mutex_unlock(&ctx->mutex); + + if (task) { +@@ -8610,7 +8659,7 @@ SYSCALL_DEFINE5(perf_event_open, + + err_locked: + if (move_group) +- mutex_unlock(&gctx->mutex); ++ perf_event_ctx_unlock(group_leader, gctx); + mutex_unlock(&ctx->mutex); + /* err_file: */ + fput(event_file); +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 2f0d157258a2..300d64162aff 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -65,6 +65,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1749,6 +1750,14 @@ static struct ctl_table fs_table[] = { + .mode = 0644, + .proc_handler = proc_doulongvec_minmax, + }, ++ { ++ .procname = "mount-max", ++ .data = &sysctl_mount_max, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = &one, ++ }, + { } + }; + +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index 3a00512addbc..37a3b05d175c 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -154,17 +154,18 @@ void ping_hash(struct sock *sk) + void ping_unhash(struct sock *sk) + { + struct inet_sock *isk = inet_sk(sk); ++ + pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); ++ write_lock_bh(&ping_table.lock); + if (sk_hashed(sk)) { +- write_lock_bh(&ping_table.lock); + hlist_nulls_del(&sk->sk_nulls_node); + sk_nulls_node_init(&sk->sk_nulls_node); + sock_put(sk); + isk->inet_num = 0; + isk->inet_sport = 0; + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); +- write_unlock_bh(&ping_table.lock); + } ++ write_unlock_bh(&ping_table.lock); + } + EXPORT_SYMBOL_GPL(ping_unhash); + +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c +index 77afe913d03d..9adedba78eea 100644 +--- a/net/netfilter/nfnetlink.c ++++ b/net/netfilter/nfnetlink.c +@@ -326,10 +326,12 @@ replay: + nlh = nlmsg_hdr(skb); + err = 0; + +- if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || +- skb->len < nlh->nlmsg_len) { +- err = -EINVAL; +- goto 
ack; ++ if (nlh->nlmsg_len < NLMSG_HDRLEN || ++ skb->len < nlh->nlmsg_len || ++ nlmsg_len(nlh) < sizeof(struct nfgenmsg)) { ++ nfnl_err_reset(&err_list); ++ status |= NFNL_BATCH_FAILURE; ++ goto done; + } + + /* Only requests are handled by the kernel */ +diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c +index 648f2a67f314..cb1381513c82 100644 +--- a/net/tipc/bearer.c ++++ b/net/tipc/bearer.c +@@ -381,6 +381,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, + dev = dev_get_by_name(net, driver_name); + if (!dev) + return -ENODEV; ++ if (tipc_mtu_bad(dev, 0)) { ++ dev_put(dev); ++ return -EINVAL; ++ } + + /* Associate TIPC bearer with L2 bearer */ + rcu_assign_pointer(b->media_ptr, dev); +@@ -570,14 +574,19 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, + if (!b_ptr) + return NOTIFY_DONE; + +- b_ptr->mtu = dev->mtu; +- + switch (evt) { + case NETDEV_CHANGE: + if (netif_carrier_ok(dev)) + break; + case NETDEV_GOING_DOWN: ++ tipc_reset_bearer(net, b_ptr); ++ break; + case NETDEV_CHANGEMTU: ++ if (tipc_mtu_bad(dev, 0)) { ++ bearer_disable(net, b_ptr); ++ break; ++ } ++ b_ptr->mtu = dev->mtu; + tipc_reset_bearer(net, b_ptr); + break; + case NETDEV_CHANGEADDR: +diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h +index 552185bc4773..5f11e18b1fa1 100644 +--- a/net/tipc/bearer.h ++++ b/net/tipc/bearer.h +@@ -39,6 +39,7 @@ + + #include "netlink.h" + #include "core.h" ++#include "msg.h" + #include + + #define MAX_MEDIA 3 +@@ -61,6 +62,9 @@ + #define TIPC_MEDIA_TYPE_IB 2 + #define TIPC_MEDIA_TYPE_UDP 3 + ++/* minimum bearer MTU */ ++#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE) ++ + /** + * struct tipc_node_map - set of node identifiers + * @count: # of nodes in set +@@ -226,4 +230,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id, + void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, + struct sk_buff_head *xmitq); + ++/* check if device MTU is too low for tipc headers */ ++static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve) ++{ ++ if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve) ++ return false; ++ netdev_warn(dev, "MTU too low for tipc bearer\n"); ++ return true; ++} ++ + #endif /* _TIPC_BEARER_H */ +diff --git a/net/tipc/core.c b/net/tipc/core.c +index 03a842870c52..e2bdb07a49a2 100644 +--- a/net/tipc/core.c ++++ b/net/tipc/core.c +@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net) + if (err) + goto out_nametbl; + ++ INIT_LIST_HEAD(&tn->dist_queue); + err = tipc_topsrv_start(net); + if (err) + goto out_subscr; +diff --git a/net/tipc/core.h b/net/tipc/core.h +index 18e95a8020cd..fe3b89e9cde4 100644 +--- a/net/tipc/core.h ++++ b/net/tipc/core.h +@@ -103,6 +103,9 @@ struct tipc_net { + spinlock_t nametbl_lock; + struct name_table *nametbl; + ++ /* Name dist queue */ ++ struct list_head dist_queue; ++ + /* Topology subscription server */ + struct tipc_server *topsrv; + atomic_t subscription_count; +diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c +index f51c8bdbea1c..c4c151bc000c 100644 +--- a/net/tipc/name_distr.c ++++ b/net/tipc/name_distr.c +@@ -40,11 +40,6 @@ + + int sysctl_tipc_named_timeout __read_mostly = 2000; + +-/** +- * struct tipc_dist_queue - queue holding deferred name table updates +- */ +-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue); +- + struct distr_queue_item { + struct distr_item i; + u32 dtype; +@@ -67,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p) + + /** + * named_prepare_buf - allocate & 
initialize a publication message ++ * ++ * The buffer returned is of size INT_H_SIZE + payload size + */ + static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size, + u32 dest) +@@ -171,9 +168,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, + struct publication *publ; + struct sk_buff *skb = NULL; + struct distr_item *item = NULL; +- uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) * +- ITEM_SIZE; +- uint msg_rem = msg_dsz; ++ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) / ++ ITEM_SIZE) * ITEM_SIZE; ++ u32 msg_rem = msg_dsz; + + list_for_each_entry(publ, pls, local_list) { + /* Prepare next buffer: */ +@@ -340,9 +337,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, + * tipc_named_add_backlog - add a failed name table update to the backlog + * + */ +-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) ++static void tipc_named_add_backlog(struct net *net, struct distr_item *i, ++ u32 type, u32 node) + { + struct distr_queue_item *e; ++ struct tipc_net *tn = net_generic(net, tipc_net_id); + unsigned long now = get_jiffies_64(); + + e = kzalloc(sizeof(*e), GFP_ATOMIC); +@@ -352,7 +351,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) + e->node = node; + e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); + memcpy(e, i, sizeof(*i)); +- list_add_tail(&e->next, &tipc_dist_queue); ++ list_add_tail(&e->next, &tn->dist_queue); + } + + /** +@@ -362,10 +361,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) + void tipc_named_process_backlog(struct net *net) + { + struct distr_queue_item *e, *tmp; ++ struct tipc_net *tn = net_generic(net, tipc_net_id); + char addr[16]; + unsigned long now = get_jiffies_64(); + +- list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { ++ list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) { + if (time_after(e->expires, now)) { + if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) + continue; +@@ -405,7 +405,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) + node = msg_orignode(msg); + while (count--) { + if (!tipc_update_nametbl(net, item, node, mtype)) +- tipc_named_add_backlog(item, mtype, node); ++ tipc_named_add_backlog(net, item, mtype, node); + item++; + } + kfree_skb(skb); +diff --git a/net/tipc/node.c b/net/tipc/node.c +index d468aad6163e..2df0b98d4a32 100644 +--- a/net/tipc/node.c ++++ b/net/tipc/node.c +@@ -728,7 +728,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt) + state = SELF_UP_PEER_UP; + break; + case SELF_LOST_CONTACT_EVT: +- state = SELF_DOWN_PEER_LEAVING; ++ state = SELF_DOWN_PEER_DOWN; + break; + case SELF_ESTABL_CONTACT_EVT: + case PEER_LOST_CONTACT_EVT: +@@ -747,7 +747,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt) + state = SELF_UP_PEER_UP; + break; + case PEER_LOST_CONTACT_EVT: +- state = SELF_LEAVING_PEER_DOWN; ++ state = SELF_DOWN_PEER_DOWN; + break; + case SELF_LOST_CONTACT_EVT: + case PEER_ESTABL_CONTACT_EVT: +diff --git a/net/tipc/socket.c b/net/tipc/socket.c +index b26b7a127773..65171f8e8c45 100644 +--- a/net/tipc/socket.c ++++ b/net/tipc/socket.c +@@ -777,9 +777,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, + * @tsk: receiving socket + * @skb: pointer to message buffer. 
+ */ +-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) ++static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, ++ struct sk_buff_head *xmitq) + { + struct sock *sk = &tsk->sk; ++ u32 onode = tsk_own_node(tsk); + struct tipc_msg *hdr = buf_msg(skb); + int mtyp = msg_type(hdr); + int conn_cong; +@@ -792,7 +794,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) + + if (mtyp == CONN_PROBE) { + msg_set_type(hdr, CONN_PROBE_REPLY); +- tipc_sk_respond(sk, skb, TIPC_OK); ++ if (tipc_msg_reverse(onode, &skb, TIPC_OK)) ++ __skb_queue_tail(xmitq, skb); + return; + } else if (mtyp == CONN_ACK) { + conn_cong = tsk_conn_cong(tsk); +@@ -1647,7 +1650,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf) + * + * Returns true if message was added to socket receive queue, otherwise false + */ +-static bool filter_rcv(struct sock *sk, struct sk_buff *skb) ++static bool filter_rcv(struct sock *sk, struct sk_buff *skb, ++ struct sk_buff_head *xmitq) + { + struct socket *sock = sk->sk_socket; + struct tipc_sock *tsk = tipc_sk(sk); +@@ -1657,7 +1661,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb) + int usr = msg_user(hdr); + + if (unlikely(msg_user(hdr) == CONN_MANAGER)) { +- tipc_sk_proto_rcv(tsk, skb); ++ tipc_sk_proto_rcv(tsk, skb, xmitq); + return false; + } + +@@ -1700,7 +1704,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb) + return true; + + reject: +- tipc_sk_respond(sk, skb, err); ++ if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err)) ++ __skb_queue_tail(xmitq, skb); + return false; + } + +@@ -1716,9 +1721,24 @@ reject: + static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) + { + unsigned int truesize = skb->truesize; ++ struct sk_buff_head xmitq; ++ u32 dnode, selector; + +- if (likely(filter_rcv(sk, skb))) ++ __skb_queue_head_init(&xmitq); ++ ++ if (likely(filter_rcv(sk, skb, &xmitq))) { + atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt); ++ return 0; ++ } ++ ++ if (skb_queue_empty(&xmitq)) ++ return 0; ++ ++ /* Send response/rejected message */ ++ skb = __skb_dequeue(&xmitq); ++ dnode = msg_destnode(buf_msg(skb)); ++ selector = msg_origport(buf_msg(skb)); ++ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); + return 0; + } + +@@ -1732,12 +1752,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) + * Caller must hold socket lock + */ + static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, +- u32 dport) ++ u32 dport, struct sk_buff_head *xmitq) + { ++ unsigned long time_limit = jiffies + 2; ++ struct sk_buff *skb; + unsigned int lim; + atomic_t *dcnt; +- struct sk_buff *skb; +- unsigned long time_limit = jiffies + 2; ++ u32 onode; + + while (skb_queue_len(inputq)) { + if (unlikely(time_after_eq(jiffies, time_limit))) +@@ -1749,20 +1770,22 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, + + /* Add message directly to receive queue if possible */ + if (!sock_owned_by_user(sk)) { +- filter_rcv(sk, skb); ++ filter_rcv(sk, skb, xmitq); + continue; + } + + /* Try backlog, compensating for double-counted bytes */ + dcnt = &tipc_sk(sk)->dupl_rcvcnt; +- if (sk->sk_backlog.len) ++ if (!sk->sk_backlog.len) + atomic_set(dcnt, 0); + lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); + if (likely(!sk_add_backlog(sk, skb, lim))) + continue; + + /* Overload => reject message back to sender */ +- tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD); ++ onode = tipc_own_addr(sock_net(sk)); ++ if 
(tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) ++ __skb_queue_tail(xmitq, skb); + break; + } + } +@@ -1775,12 +1798,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, + */ + void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) + { ++ struct sk_buff_head xmitq; + u32 dnode, dport = 0; + int err; + struct tipc_sock *tsk; + struct sock *sk; + struct sk_buff *skb; + ++ __skb_queue_head_init(&xmitq); + while (skb_queue_len(inputq)) { + dport = tipc_skb_peek_port(inputq, dport); + tsk = tipc_sk_lookup(net, dport); +@@ -1788,9 +1813,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) + if (likely(tsk)) { + sk = &tsk->sk; + if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { +- tipc_sk_enqueue(inputq, sk, dport); ++ tipc_sk_enqueue(inputq, sk, dport, &xmitq); + spin_unlock_bh(&sk->sk_lock.slock); + } ++ /* Send pending response/rejected messages, if any */ ++ while ((skb = __skb_dequeue(&xmitq))) { ++ dnode = msg_destnode(buf_msg(skb)); ++ tipc_node_xmit_skb(net, skb, dnode, dport); ++ } + sock_put(sk); + continue; + } +diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c +index 6af78c6276b4..78d6b78de29d 100644 +--- a/net/tipc/udp_media.c ++++ b/net/tipc/udp_media.c +@@ -52,7 +52,7 @@ + /* IANA assigned UDP port */ + #define UDP_PORT_DEFAULT 6118 + +-#define UDP_MIN_HEADROOM 28 ++#define UDP_MIN_HEADROOM 48 + + static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = { + [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC}, +@@ -376,6 +376,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, + udp_conf.local_ip.s_addr = htonl(INADDR_ANY); + udp_conf.use_udp_checksums = false; + ub->ifindex = dev->ifindex; ++ if (tipc_mtu_bad(dev, sizeof(struct iphdr) + ++ sizeof(struct udphdr))) { ++ err = -EINVAL; ++ goto err; ++ } + b->mtu = dev->mtu - sizeof(struct iphdr) + - sizeof(struct udphdr); + #if IS_ENABLED(CONFIG_IPV6) diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.65-66.patch b/patch/kernel/mvebu64-default/03-patch-4.4.65-66.patch new file mode 100644 index 000000000..1fe8b7a72 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.65-66.patch @@ -0,0 +1,1309 @@ +diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt +index 8a47b77abfca..e8c74a6e738b 100644 +--- a/Documentation/devicetree/bindings/clock/sunxi.txt ++++ b/Documentation/devicetree/bindings/clock/sunxi.txt +@@ -18,6 +18,7 @@ Required properties: + "allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock + "allwinner,sun4i-a10-axi-clk" - for the AXI clock + "allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23 ++ "allwinner,sun4i-a10-gates-clk" - for generic gates on all compatible SoCs + "allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates + "allwinner,sun4i-a10-ahb-clk" - for the AHB clock + "allwinner,sun5i-a13-ahb-clk" - for the AHB clock on A13 +@@ -43,6 +44,7 @@ Required properties: + "allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31 + "allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20 + "allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23 ++ "allwinner,sun8i-h3-apb0-gates-clk" - for the APB0 gates on H3 + "allwinner,sun9i-a80-apb0-gates-clk" - for the APB0 gates on A80 + "allwinner,sun4i-a10-apb1-clk" - for the APB1 clock + "allwinner,sun9i-a80-apb1-clk" - for the APB1 bus clock on A80 +diff --git a/Makefile b/Makefile +index ddaef04f528a..1cd052823c03 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION 
= 4 + PATCHLEVEL = 4 +-SUBLEVEL = 65 ++SUBLEVEL = 66 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h +index b5ff87e6f4b7..aee1a77934cf 100644 +--- a/arch/arc/include/asm/entry-arcv2.h ++++ b/arch/arc/include/asm/entry-arcv2.h +@@ -16,6 +16,7 @@ + ; + ; Now manually save: r12, sp, fp, gp, r25 + ++ PUSH r30 + PUSH r12 + + ; Saving pt_regs->sp correctly requires some extra work due to the way +@@ -72,6 +73,7 @@ + POPAX AUX_USER_SP + 1: + POP r12 ++ POP r30 + + .endm + +diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h +index 69095da1fcfd..47111d565a95 100644 +--- a/arch/arc/include/asm/ptrace.h ++++ b/arch/arc/include/asm/ptrace.h +@@ -84,7 +84,7 @@ struct pt_regs { + unsigned long fp; + unsigned long sp; /* user/kernel sp depending on where we came from */ + +- unsigned long r12; ++ unsigned long r12, r30; + + /*------- Below list auto saved by h/w -----------*/ + unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; +diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c +index f86692dbcfd5..83fc403aec3c 100644 +--- a/arch/arm/mach-omap2/timer.c ++++ b/arch/arm/mach-omap2/timer.c +@@ -496,8 +496,7 @@ void __init omap_init_time(void) + __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon", + 2, "timer_sys_ck", NULL, false); + +- if (of_have_populated_dt()) +- clocksource_probe(); ++ clocksource_probe(); + } + + #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX) +@@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void) + { + __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure", + 2, "timer_sys_ck", NULL, false); ++ ++ clocksource_probe(); + } + #endif /* CONFIG_ARCH_OMAP3 */ + +@@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void) + { + __omap_sync32k_timer_init(2, "timer_sys_ck", NULL, + 1, "timer_sys_ck", "ti,timer-alwon", true); ++ ++ clocksource_probe(); + } + #endif + +diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c +index d434d5d5ae6e..610f0f3bdb34 100644 +--- a/arch/mips/kernel/crash.c ++++ b/arch/mips/kernel/crash.c +@@ -14,12 +14,22 @@ static int crashing_cpu = -1; + static cpumask_t cpus_in_crash = CPU_MASK_NONE; + + #ifdef CONFIG_SMP +-static void crash_shutdown_secondary(void *ignore) ++static void crash_shutdown_secondary(void *passed_regs) + { +- struct pt_regs *regs; ++ struct pt_regs *regs = passed_regs; + int cpu = smp_processor_id(); + +- regs = task_pt_regs(current); ++ /* ++ * If we are passed registers, use those. Otherwise get the ++ * regs from the last interrupt, which should be correct, as ++ * we are in an interrupt. But if the regs are not there, ++ * pull them from the top of the stack. They are probably ++ * wrong, but we need something to keep from crashing again. ++ */ ++ if (!regs) ++ regs = get_irq_regs(); ++ if (!regs) ++ regs = task_pt_regs(current); + + if (!cpu_online(cpu)) + return; +diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c +index 4a4d9e067c89..3afffc30ee12 100644 +--- a/arch/mips/kernel/elf.c ++++ b/arch/mips/kernel/elf.c +@@ -206,7 +206,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, + else if ((prog_req.fr1 && prog_req.frdefault) || + (prog_req.single && !prog_req.frdefault)) + /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */ +- state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) && ++ state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) && + cpu_has_mips_r2_r6) ? 
+ FP_FR1 : FP_FR0; + else if (prog_req.fr1) +diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c +index de63d36af895..732d6171ac6a 100644 +--- a/arch/mips/kernel/kgdb.c ++++ b/arch/mips/kernel/kgdb.c +@@ -244,9 +244,6 @@ static int compute_signal(int tt) + void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) + { + int reg; +- struct thread_info *ti = task_thread_info(p); +- unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32; +- struct pt_regs *regs = (struct pt_regs *)ksp - 1; + #if (KGDB_GDB_REG_SIZE == 32) + u32 *ptr = (u32 *)gdb_regs; + #else +@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) + #endif + + for (reg = 0; reg < 16; reg++) +- *(ptr++) = regs->regs[reg]; ++ *(ptr++) = 0; + + /* S0 - S7 */ +- for (reg = 16; reg < 24; reg++) +- *(ptr++) = regs->regs[reg]; ++ *(ptr++) = p->thread.reg16; ++ *(ptr++) = p->thread.reg17; ++ *(ptr++) = p->thread.reg18; ++ *(ptr++) = p->thread.reg19; ++ *(ptr++) = p->thread.reg20; ++ *(ptr++) = p->thread.reg21; ++ *(ptr++) = p->thread.reg22; ++ *(ptr++) = p->thread.reg23; + + for (reg = 24; reg < 28; reg++) + *(ptr++) = 0; + + /* GP, SP, FP, RA */ +- for (reg = 28; reg < 32; reg++) +- *(ptr++) = regs->regs[reg]; +- +- *(ptr++) = regs->cp0_status; +- *(ptr++) = regs->lo; +- *(ptr++) = regs->hi; +- *(ptr++) = regs->cp0_badvaddr; +- *(ptr++) = regs->cp0_cause; +- *(ptr++) = regs->cp0_epc; ++ *(ptr++) = (long)p; ++ *(ptr++) = p->thread.reg29; ++ *(ptr++) = p->thread.reg30; ++ *(ptr++) = p->thread.reg31; ++ ++ *(ptr++) = p->thread.cp0_status; ++ ++ /* lo, hi */ ++ *(ptr++) = 0; ++ *(ptr++) = 0; ++ ++ /* ++ * BadVAddr, Cause ++ * Ideally these would come from the last exception frame up the stack ++ * but that requires unwinding, otherwise we can't know much for sure. ++ */ ++ *(ptr++) = 0; ++ *(ptr++) = 0; ++ ++ /* ++ * PC ++ * use return address (RA), i.e. 
the moment after return from resume() ++ */ ++ *(ptr++) = p->thread.reg31; + } + + void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h +index 408b715c95a5..9d81579f3d54 100644 +--- a/arch/sparc/include/asm/pgtable_64.h ++++ b/arch/sparc/include/asm/pgtable_64.h +@@ -668,26 +668,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd) + return pte_pfn(pte); + } + +-#ifdef CONFIG_TRANSPARENT_HUGEPAGE +-static inline unsigned long pmd_dirty(pmd_t pmd) ++#define __HAVE_ARCH_PMD_WRITE ++static inline unsigned long pmd_write(pmd_t pmd) + { + pte_t pte = __pte(pmd_val(pmd)); + +- return pte_dirty(pte); ++ return pte_write(pte); + } + +-static inline unsigned long pmd_young(pmd_t pmd) ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++static inline unsigned long pmd_dirty(pmd_t pmd) + { + pte_t pte = __pte(pmd_val(pmd)); + +- return pte_young(pte); ++ return pte_dirty(pte); + } + +-static inline unsigned long pmd_write(pmd_t pmd) ++static inline unsigned long pmd_young(pmd_t pmd) + { + pte_t pte = __pte(pmd_val(pmd)); + +- return pte_write(pte); ++ return pte_young(pte); + } + + static inline unsigned long pmd_trans_huge(pmd_t pmd) +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c +index 3d3414c14792..965655afdbb6 100644 +--- a/arch/sparc/mm/init_64.c ++++ b/arch/sparc/mm/init_64.c +@@ -1493,7 +1493,7 @@ bool kern_addr_valid(unsigned long addr) + if ((long)addr < 0L) { + unsigned long pa = __pa(addr); + +- if ((addr >> max_phys_bits) != 0UL) ++ if ((pa >> max_phys_bits) != 0UL) + return false; + + return pfn_valid(pa >> PAGE_SHIFT); +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c +index 311bcf338f07..bfc587579dc3 100644 +--- a/arch/x86/kernel/ftrace.c ++++ b/arch/x86/kernel/ftrace.c +@@ -977,6 +977,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, + unsigned long return_hooker = (unsigned long) + &return_to_handler; + ++ /* ++ * When resuming from suspend-to-ram, this function can be indirectly ++ * called from early CPU startup code while the CPU is in real mode, ++ * which would fail miserably. Make sure the stack pointer is a ++ * virtual address. ++ * ++ * This check isn't as accurate as virt_addr_valid(), but it should be ++ * good enough for this purpose, and it's fast. 
++ */ ++ if (unlikely((long)__builtin_frame_address(0) >= 0)) ++ return; ++ + if (unlikely(ftrace_graph_is_dead())) + return; + +diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c +index f1ba6a092854..8846257d8792 100644 +--- a/arch/x86/xen/time.c ++++ b/arch/x86/xen/time.c +@@ -343,11 +343,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta, + WARN_ON(!clockevent_state_oneshot(evt)); + + single.timeout_abs_ns = get_abs_timeout(delta); +- single.flags = VCPU_SSHOTTMR_future; ++ /* Get an event anyway, even if the timeout is already expired */ ++ single.flags = 0; + + ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single); +- +- BUG_ON(ret != 0 && ret != -ETIME); ++ BUG_ON(ret != 0); + + return ret; + } +diff --git a/crypto/testmgr.c b/crypto/testmgr.c +index d4944318ca1f..5f15f45fcc9f 100644 +--- a/crypto/testmgr.c ++++ b/crypto/testmgr.c +@@ -488,6 +488,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &result); + ++ iv_len = crypto_aead_ivsize(tfm); ++ + for (i = 0, j = 0; i < tcount; i++) { + if (template[i].np) + continue; +@@ -508,7 +510,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, + + memcpy(input, template[i].input, template[i].ilen); + memcpy(assoc, template[i].assoc, template[i].alen); +- iv_len = crypto_aead_ivsize(tfm); + if (template[i].iv) + memcpy(iv, template[i].iv, iv_len); + else +@@ -617,7 +618,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, + j++; + + if (template[i].iv) +- memcpy(iv, template[i].iv, MAX_IVLEN); ++ memcpy(iv, template[i].iv, iv_len); + else + memset(iv, 0, MAX_IVLEN); + +diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c +index 0214c6548afd..97cb4221de25 100644 +--- a/drivers/clk/sunxi/clk-simple-gates.c ++++ b/drivers/clk/sunxi/clk-simple-gates.c +@@ -98,6 +98,8 @@ static void __init sunxi_simple_gates_init(struct device_node *node) + sunxi_simple_gates_setup(node, NULL, 0); + } + ++CLK_OF_DECLARE(sun4i_a10_gates, "allwinner,sun4i-a10-gates-clk", ++ sunxi_simple_gates_init); + CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk", + sunxi_simple_gates_init); + CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk", +diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +index 7e9154c7f1db..d1c9525d81eb 100644 +--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c ++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +@@ -2258,7 +2258,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, + if (pi->caps_stable_p_state) { + stable_p_state_sclk = (max_limits->sclk * 75) / 100; + +- for (i = table->count - 1; i >= 0; i++) { ++ for (i = table->count - 1; i >= 0; i--) { + if (stable_p_state_sclk >= table->entries[i].clk) { + stable_p_state_sclk = table->entries[i].clk; + break; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 25eab453f2b2..e7b96f1ac2c5 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -685,6 +685,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "20046"), + }, + }, ++ { ++ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 9be39988bf06..d81be5e471d0 100644 
+--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect + if (best_dist_disk < 0) { + if (is_badblock(rdev, this_sector, sectors, + &first_bad, &bad_sectors)) { +- if (first_bad < this_sector) ++ if (first_bad <= this_sector) + /* Cannot use this */ + continue; + best_good_sectors = first_bad - this_sector; +diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c +index 082ff5608455..317ef63ee789 100644 +--- a/drivers/media/tuners/tuner-xc2028.c ++++ b/drivers/media/tuners/tuner-xc2028.c +@@ -1407,8 +1407,10 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); + if (p->fname) { + priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); +- if (priv->ctrl.fname == NULL) +- return -ENOMEM; ++ if (priv->ctrl.fname == NULL) { ++ rc = -ENOMEM; ++ goto unlock; ++ } + } + + /* +@@ -1440,6 +1442,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + } else + priv->state = XC2028_WAITING_FIRMWARE; + } ++unlock: + mutex_unlock(&priv->lock); + + return rc; +diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c +index 7445da218bd9..cc1725616f9d 100644 +--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c ++++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c +@@ -2823,7 +2823,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) + if (!g) { + netif_info(lio, tx_err, lio->netdev, + "Transmit scatter gather: glist null!\n"); +- goto lio_xmit_failed; ++ goto lio_xmit_dma_failed; + } + + cmdsetup.s.gather = 1; +@@ -2894,7 +2894,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) + else + status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); + if (status == IQ_SEND_FAILED) +- goto lio_xmit_failed; ++ goto lio_xmit_dma_failed; + + netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); + +@@ -2908,12 +2908,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) + + return NETDEV_TX_OK; + ++lio_xmit_dma_failed: ++ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr, ++ ndata.datasize, DMA_TO_DEVICE); + lio_xmit_failed: + stats->tx_dropped++; + netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", + iq_no, stats->tx_dropped); +- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr, +- ndata.datasize, DMA_TO_DEVICE); + recv_buffer_free(skb); + return NETDEV_TX_OK; + } +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 06c8bfeaccd6..40cd86614677 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -1110,6 +1110,7 @@ static int macvlan_port_create(struct net_device *dev) + static void macvlan_port_destroy(struct net_device *dev) + { + struct macvlan_port *port = macvlan_port_get_rtnl(dev); ++ struct sk_buff *skb; + + dev->priv_flags &= ~IFF_MACVLAN_PORT; + netdev_rx_handler_unregister(dev); +@@ -1118,7 +1119,15 @@ static void macvlan_port_destroy(struct net_device *dev) + * but we need to cancel it and purge left skbs if any. 
+ */ + cancel_work_sync(&port->bc_work); +- __skb_queue_purge(&port->bc_queue); ++ ++ while ((skb = __skb_dequeue(&port->bc_queue))) { ++ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src; ++ ++ if (src) ++ dev_put(src->dev); ++ ++ kfree_skb(skb); ++ } + + kfree_rcu(port, rcu); + } +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c +index e6cefd0e3262..84b9cca152eb 100644 +--- a/drivers/net/phy/dp83640.c ++++ b/drivers/net/phy/dp83640.c +@@ -1436,8 +1436,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, + skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; + skb_queue_tail(&dp83640->rx_queue, skb); + schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT); +- } else { +- netif_rx_ni(skb); + } + + return true; +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index bba0ca786aaa..851c0e121807 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -538,7 +538,7 @@ void phy_stop_machine(struct phy_device *phydev) + cancel_delayed_work_sync(&phydev->state_queue); + + mutex_lock(&phydev->lock); +- if (phydev->state > PHY_UP) ++ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) + phydev->state = PHY_UP; + mutex_unlock(&phydev->lock); + } +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 88dbbeb8569b..f9b8c44677eb 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -1519,6 +1519,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + ret = regulator_enable(rdev->supply); + if (ret < 0) { + _regulator_put(rdev->supply); ++ rdev->supply = NULL; + return ret; + } + } +diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c +index 898a570319f1..af60cc3714c1 100644 +--- a/drivers/usb/gadget/function/f_midi.c ++++ b/drivers/usb/gadget/function/f_midi.c +@@ -361,7 +361,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt) + /* allocate a bunch of read buffers and queue them all at once. 
*/ + for (i = 0; i < midi->qlen && err == 0; i++) { + struct usb_request *req = +- midi_alloc_ep_req(midi->out_ep, midi->buflen); ++ midi_alloc_ep_req(midi->out_ep, ++ max_t(unsigned, midi->buflen, ++ bulk_out_desc.wMaxPacketSize)); + if (req == NULL) + return -ENOMEM; + +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 263002f0389d..7c23363ecf19 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -233,6 +233,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh) + return error; + } + ++static int ++__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header, ++ void *end, const char *function, unsigned int line) ++{ ++ struct ext4_xattr_entry *entry = IFIRST(header); ++ int error = -EFSCORRUPTED; ++ ++ if (((void *) header >= end) || ++ (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC))) ++ goto errout; ++ error = ext4_xattr_check_names(entry, end, entry); ++errout: ++ if (error) ++ __ext4_error_inode(inode, function, line, 0, ++ "corrupted in-inode xattr"); ++ return error; ++} ++ ++#define xattr_check_inode(inode, header, end) \ ++ __xattr_check_inode((inode), (header), (end), __func__, __LINE__) ++ + static inline int + ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size) + { +@@ -344,7 +365,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, + header = IHDR(inode, raw_inode); + entry = IFIRST(header); + end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; +- error = ext4_xattr_check_names(entry, end, entry); ++ error = xattr_check_inode(inode, header, end); + if (error) + goto cleanup; + error = ext4_xattr_find_entry(&entry, name_index, name, +@@ -475,7 +496,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) + raw_inode = ext4_raw_inode(&iloc); + header = IHDR(inode, raw_inode); + end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; +- error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header)); ++ error = xattr_check_inode(inode, header, end); + if (error) + goto cleanup; + error = ext4_xattr_list_entries(dentry, IFIRST(header), +@@ -991,8 +1012,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, + is->s.here = is->s.first; + is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; + if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { +- error = ext4_xattr_check_names(IFIRST(header), is->s.end, +- IFIRST(header)); ++ error = xattr_check_inode(inode, header, is->s.end); + if (error) + return error; + /* Find the named attribute. 
*/ +@@ -1293,6 +1313,10 @@ retry: + last = entry; + total_ino = sizeof(struct ext4_xattr_ibody_header); + ++ error = xattr_check_inode(inode, header, end); ++ if (error) ++ goto cleanup; ++ + free = ext4_xattr_free_space(last, &min_offs, base, &total_ino); + if (free >= isize_diff) { + entry = IFIRST(header); +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 3a65e0132352..16462e702f96 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -918,6 +918,79 @@ static loff_t max_file_size(unsigned bits) + return result; + } + ++static inline bool sanity_check_area_boundary(struct super_block *sb, ++ struct f2fs_super_block *raw_super) ++{ ++ u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); ++ u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr); ++ u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr); ++ u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr); ++ u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); ++ u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); ++ u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt); ++ u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit); ++ u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat); ++ u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa); ++ u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main); ++ u32 segment_count = le32_to_cpu(raw_super->segment_count); ++ u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); ++ ++ if (segment0_blkaddr != cp_blkaddr) { ++ f2fs_msg(sb, KERN_INFO, ++ "Mismatch start address, segment0(%u) cp_blkaddr(%u)", ++ segment0_blkaddr, cp_blkaddr); ++ return true; ++ } ++ ++ if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) != ++ sit_blkaddr) { ++ f2fs_msg(sb, KERN_INFO, ++ "Wrong CP boundary, start(%u) end(%u) blocks(%u)", ++ cp_blkaddr, sit_blkaddr, ++ segment_count_ckpt << log_blocks_per_seg); ++ return true; ++ } ++ ++ if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) != ++ nat_blkaddr) { ++ f2fs_msg(sb, KERN_INFO, ++ "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", ++ sit_blkaddr, nat_blkaddr, ++ segment_count_sit << log_blocks_per_seg); ++ return true; ++ } ++ ++ if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) != ++ ssa_blkaddr) { ++ f2fs_msg(sb, KERN_INFO, ++ "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", ++ nat_blkaddr, ssa_blkaddr, ++ segment_count_nat << log_blocks_per_seg); ++ return true; ++ } ++ ++ if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) != ++ main_blkaddr) { ++ f2fs_msg(sb, KERN_INFO, ++ "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", ++ ssa_blkaddr, main_blkaddr, ++ segment_count_ssa << log_blocks_per_seg); ++ return true; ++ } ++ ++ if (main_blkaddr + (segment_count_main << log_blocks_per_seg) != ++ segment0_blkaddr + (segment_count << log_blocks_per_seg)) { ++ f2fs_msg(sb, KERN_INFO, ++ "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)", ++ main_blkaddr, ++ segment0_blkaddr + (segment_count << log_blocks_per_seg), ++ segment_count_main << log_blocks_per_seg); ++ return true; ++ } ++ ++ return false; ++} ++ + static int sanity_check_raw_super(struct super_block *sb, + struct f2fs_super_block *raw_super) + { +@@ -947,6 +1020,14 @@ static int sanity_check_raw_super(struct super_block *sb, + return 1; + } + ++ /* check log blocks per segment */ ++ if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { ++ f2fs_msg(sb, KERN_INFO, ++ "Invalid log blocks per segment (%u)\n", ++ le32_to_cpu(raw_super->log_blocks_per_seg)); ++ 
return 1; ++ } ++ + /* Currently, support 512/1024/2048/4096 bytes sector size */ + if (le32_to_cpu(raw_super->log_sectorsize) > + F2FS_MAX_LOG_SECTOR_SIZE || +@@ -965,6 +1046,23 @@ static int sanity_check_raw_super(struct super_block *sb, + le32_to_cpu(raw_super->log_sectorsize)); + return 1; + } ++ ++ /* check reserved ino info */ ++ if (le32_to_cpu(raw_super->node_ino) != 1 || ++ le32_to_cpu(raw_super->meta_ino) != 2 || ++ le32_to_cpu(raw_super->root_ino) != 3) { ++ f2fs_msg(sb, KERN_INFO, ++ "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", ++ le32_to_cpu(raw_super->node_ino), ++ le32_to_cpu(raw_super->meta_ino), ++ le32_to_cpu(raw_super->root_ino)); ++ return 1; ++ } ++ ++ /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ ++ if (sanity_check_area_boundary(sb, raw_super)) ++ return 1; ++ + return 0; + } + +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c +index ad4e2377dd63..5be1fa6b676d 100644 +--- a/fs/nfsd/nfssvc.c ++++ b/fs/nfsd/nfssvc.c +@@ -656,6 +656,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr) + return nfserr; + } + ++/* ++ * A write procedure can have a large argument, and a read procedure can ++ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and ++ * reply that can both be larger than a page. The xdr code has taken ++ * advantage of this assumption to be a sloppy about bounds checking in ++ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that ++ * problem, we enforce these assumptions here: ++ */ ++static bool nfs_request_too_big(struct svc_rqst *rqstp, ++ struct svc_procedure *proc) ++{ ++ /* ++ * The ACL code has more careful bounds-checking and is not ++ * susceptible to this problem: ++ */ ++ if (rqstp->rq_prog != NFS_PROGRAM) ++ return false; ++ /* ++ * Ditto NFSv4 (which can in theory have argument and reply both ++ * more than a page): ++ */ ++ if (rqstp->rq_vers >= 4) ++ return false; ++ /* The reply will be small, we're OK: */ ++ if (proc->pc_xdrressize > 0 && ++ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE)) ++ return false; ++ ++ return rqstp->rq_arg.len > PAGE_SIZE; ++} ++ + int + nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) + { +@@ -668,6 +699,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) + rqstp->rq_vers, rqstp->rq_proc); + proc = rqstp->rq_procinfo; + ++ if (nfs_request_too_big(rqstp, proc)) { ++ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers); ++ *statp = rpc_garbage_args; ++ return 1; ++ } + /* + * Give the xdr decoder a chance to change this if it wants + * (necessary in the NFSv4.0 compound case) +diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h +index f6598d1c886e..316e838b7470 100644 +--- a/include/uapi/linux/ipv6_route.h ++++ b/include/uapi/linux/ipv6_route.h +@@ -34,7 +34,7 @@ + #define RTF_PREF(pref) ((pref) << 27) + #define RTF_PREF_MASK 0x18000000 + +-#define RTF_PCPU 0x40000000 ++#define RTF_PCPU 0x40000000 /* read-only: can not be set by user */ + #define RTF_LOCAL 0x80000000 + + +diff --git a/net/9p/client.c b/net/9p/client.c +index ea79ee9a7348..f5feac4ff4ec 100644 +--- a/net/9p/client.c ++++ b/net/9p/client.c +@@ -2101,6 +2101,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } ++ if (rsize < count) { ++ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize); ++ count = rsize; ++ } + + p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count); + +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 
769cece9b00b..ae92131c4f89 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -859,7 +859,8 @@ static void neigh_probe(struct neighbour *neigh) + if (skb) + skb = skb_clone(skb, GFP_ATOMIC); + write_unlock(&neigh->lock); +- neigh->ops->solicit(neigh, skb); ++ if (neigh->ops->solicit) ++ neigh->ops->solicit(neigh, skb); + atomic_inc(&neigh->probes); + kfree_skb(skb); + } +diff --git a/net/core/netpoll.c b/net/core/netpoll.c +index 94acfc89ad97..440aa9f6e0a8 100644 +--- a/net/core/netpoll.c ++++ b/net/core/netpoll.c +@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work) + while ((skb = skb_dequeue(&npinfo->txq))) { + struct net_device *dev = skb->dev; + struct netdev_queue *txq; ++ unsigned int q_index; + + if (!netif_device_present(dev) || !netif_running(dev)) { + kfree_skb(skb); + continue; + } + +- txq = skb_get_tx_queue(dev, skb); +- + local_irq_save(flags); ++ /* check if skb->queue_mapping is still valid */ ++ q_index = skb_get_queue_mapping(skb); ++ if (unlikely(q_index >= dev->real_num_tx_queues)) { ++ q_index = q_index % dev->real_num_tx_queues; ++ skb_set_queue_mapping(skb, q_index); ++ } ++ txq = netdev_get_tx_queue(dev, q_index); + HARD_TX_LOCK(dev, txq, smp_processor_id()); + if (netif_xmit_frozen_or_stopped(txq) || + netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index da4d68d78590..375248b900ba 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -2559,7 +2559,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) + skb_reset_network_header(skb); + + /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ +- ip_hdr(skb)->protocol = IPPROTO_ICMP; ++ ip_hdr(skb)->protocol = IPPROTO_UDP; + skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); + + src = tb[RTA_SRC] ? 
nla_get_in_addr(tb[RTA_SRC]) : 0; +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 600dcda840d1..e1d51370977b 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -2260,6 +2260,7 @@ int tcp_disconnect(struct sock *sk, int flags) + tcp_init_send_head(sk); + memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); + __sk_dst_reset(sk); ++ tcp_saved_syn_free(tp); + + WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); + +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 6c6161763c2f..97cb02dc5f02 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -1049,7 +1049,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, + struct ip6_tnl *t = netdev_priv(dev); + struct net *net = t->net; + struct net_device_stats *stats = &t->dev->stats; +- struct ipv6hdr *ipv6h = ipv6_hdr(skb); ++ struct ipv6hdr *ipv6h; + struct ipv6_tel_txoption opt; + struct dst_entry *dst = NULL, *ndst = NULL; + struct net_device *tdev; +@@ -1061,26 +1061,28 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, + + /* NBMA tunnel */ + if (ipv6_addr_any(&t->parms.raddr)) { +- struct in6_addr *addr6; +- struct neighbour *neigh; +- int addr_type; ++ if (skb->protocol == htons(ETH_P_IPV6)) { ++ struct in6_addr *addr6; ++ struct neighbour *neigh; ++ int addr_type; + +- if (!skb_dst(skb)) +- goto tx_err_link_failure; ++ if (!skb_dst(skb)) ++ goto tx_err_link_failure; + +- neigh = dst_neigh_lookup(skb_dst(skb), +- &ipv6_hdr(skb)->daddr); +- if (!neigh) +- goto tx_err_link_failure; ++ neigh = dst_neigh_lookup(skb_dst(skb), ++ &ipv6_hdr(skb)->daddr); ++ if (!neigh) ++ goto tx_err_link_failure; + +- addr6 = (struct in6_addr *)&neigh->primary_key; +- addr_type = ipv6_addr_type(addr6); ++ addr6 = (struct in6_addr *)&neigh->primary_key; ++ addr_type = ipv6_addr_type(addr6); + +- if (addr_type == IPV6_ADDR_ANY) +- addr6 = &ipv6_hdr(skb)->daddr; ++ if (addr_type == IPV6_ADDR_ANY) ++ addr6 = &ipv6_hdr(skb)->daddr; + +- memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); +- neigh_release(neigh); ++ memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); ++ neigh_release(neigh); ++ } + } else if (!(t->parms.flags & + (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { + /* enable the cache only only if the routing decision does +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index d9843e5a667f..8361d73ab653 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -774,7 +774,8 @@ failure: + * Delete a VIF entry + */ + +-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) ++static int mif6_delete(struct mr6_table *mrt, int vifi, int notify, ++ struct list_head *head) + { + struct mif_device *v; + struct net_device *dev; +@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) + dev->ifindex, &in6_dev->cnf); + } + +- if (v->flags & MIFF_REGISTER) ++ if ((v->flags & MIFF_REGISTER) && !notify) + unregister_netdevice_queue(dev, head); + + dev_put(dev); +@@ -1330,7 +1331,6 @@ static int ip6mr_device_event(struct notifier_block *this, + struct mr6_table *mrt; + struct mif_device *v; + int ct; +- LIST_HEAD(list); + + if (event != NETDEV_UNREGISTER) + return NOTIFY_DONE; +@@ -1339,10 +1339,9 @@ static int ip6mr_device_event(struct notifier_block *this, + v = &mrt->vif6_table[0]; + for (ct = 0; ct < mrt->maxvif; ct++, v++) { + if (v->dev == dev) +- mif6_delete(mrt, ct, &list); ++ mif6_delete(mrt, ct, 1, NULL); + } + } +- unregister_netdevice_many(&list); + + return NOTIFY_DONE; + } +@@ -1551,7 +1550,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all) 
+ for (i = 0; i < mrt->maxvif; i++) { + if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) + continue; +- mif6_delete(mrt, i, &list); ++ mif6_delete(mrt, i, 0, &list); + } + unregister_netdevice_many(&list); + +@@ -1704,7 +1703,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns + if (copy_from_user(&mifi, optval, sizeof(mifi_t))) + return -EFAULT; + rtnl_lock(); +- ret = mif6_delete(mrt, mifi, NULL); ++ ret = mif6_delete(mrt, mifi, 0, NULL); + rtnl_unlock(); + return ret; + +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index 8bca90d6d915..a625f69a28dd 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -1144,8 +1144,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) + spin_lock_bh(&sk->sk_receive_queue.lock); + skb = skb_peek(&sk->sk_receive_queue); + if (skb) +- amount = skb_tail_pointer(skb) - +- skb_transport_header(skb); ++ amount = skb->len; + spin_unlock_bh(&sk->sk_receive_queue.lock); + return put_user(amount, (int __user *)arg); + } +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 9f0aa255e288..6c91d5c4a92c 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -1758,6 +1758,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) + int addr_type; + int err = -EINVAL; + ++ /* RTF_PCPU is an internal flag; can not be set by userspace */ ++ if (cfg->fc_flags & RTF_PCPU) ++ goto out; ++ + if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) + goto out; + #ifndef CONFIG_IPV6_SUBTREES +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index ec17cbe8a02b..d3dec414fd44 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -278,7 +278,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn + } + EXPORT_SYMBOL_GPL(l2tp_session_find); + +-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) ++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, ++ bool do_ref) + { + int hash; + struct l2tp_session *session; +@@ -288,6 +289,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) + for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { + hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { + if (++count > nth) { ++ l2tp_session_inc_refcount(session); ++ if (do_ref && session->ref) ++ session->ref(session); + read_unlock_bh(&tunnel->hlist_lock); + return session; + } +@@ -298,7 +302,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) + + return NULL; + } +-EXPORT_SYMBOL_GPL(l2tp_session_find_nth); ++EXPORT_SYMBOL_GPL(l2tp_session_get_nth); + + /* Lookup a session by interface name. + * This is very inefficient but is only used by management interfaces. 
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h +index 763e8e241ce3..555d962a62d2 100644 +--- a/net/l2tp/l2tp_core.h ++++ b/net/l2tp/l2tp_core.h +@@ -243,7 +243,8 @@ out: + struct l2tp_session *l2tp_session_find(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id); +-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); ++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, ++ bool do_ref); + struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); + struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); + struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); +diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c +index 2d6760a2ae34..d100aed3d06f 100644 +--- a/net/l2tp/l2tp_debugfs.c ++++ b/net/l2tp/l2tp_debugfs.c +@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) + + static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) + { +- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); ++ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); + pd->session_idx++; + + if (pd->session == NULL) { +@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v) + } + + /* Show the tunnel or session context */ +- if (pd->session == NULL) ++ if (!pd->session) { + l2tp_dfs_seq_tunnel_show(m, pd->tunnel); +- else ++ } else { + l2tp_dfs_seq_session_show(m, pd->session); ++ if (pd->session->deref) ++ pd->session->deref(pd->session); ++ l2tp_session_dec_refcount(pd->session); ++ } + + out: + return 0; +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c +index 2caaa84ce92d..665cc74df5c5 100644 +--- a/net/l2tp/l2tp_netlink.c ++++ b/net/l2tp/l2tp_netlink.c +@@ -827,7 +827,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback + goto out; + } + +- session = l2tp_session_find_nth(tunnel, si); ++ session = l2tp_session_get_nth(tunnel, si, false); + if (session == NULL) { + ti++; + tunnel = NULL; +@@ -837,8 +837,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback + + if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, +- session, L2TP_CMD_SESSION_GET) < 0) ++ session, L2TP_CMD_SESSION_GET) < 0) { ++ l2tp_session_dec_refcount(session); + break; ++ } ++ l2tp_session_dec_refcount(session); + + si++; + } +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c +index 1ad18c55064c..8ab9c5d74416 100644 +--- a/net/l2tp/l2tp_ppp.c ++++ b/net/l2tp/l2tp_ppp.c +@@ -467,6 +467,10 @@ static void pppol2tp_session_close(struct l2tp_session *session) + static void pppol2tp_session_destruct(struct sock *sk) + { + struct l2tp_session *session = sk->sk_user_data; ++ ++ skb_queue_purge(&sk->sk_receive_queue); ++ skb_queue_purge(&sk->sk_write_queue); ++ + if (session) { + sk->sk_user_data = NULL; + BUG_ON(session->magic != L2TP_SESSION_MAGIC); +@@ -505,9 +509,6 @@ static int pppol2tp_release(struct socket *sock) + l2tp_session_queue_purge(session); + sock_put(sk); + } +- skb_queue_purge(&sk->sk_receive_queue); +- skb_queue_purge(&sk->sk_write_queue); +- + release_sock(sk); + + /* This will delete the session context via +@@ -1574,7 +1575,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) + + static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) + { +- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); ++ pd->session = 
l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); + pd->session_idx++; + + if (pd->session == NULL) { +@@ -1701,10 +1702,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v) + + /* Show the tunnel or session context. + */ +- if (pd->session == NULL) ++ if (!pd->session) { + pppol2tp_seq_tunnel_show(m, pd->tunnel); +- else ++ } else { + pppol2tp_seq_session_show(m, pd->session); ++ if (pd->session->deref) ++ pd->session->deref(pd->session); ++ l2tp_session_dec_refcount(pd->session); ++ } + + out: + return 0; +@@ -1863,4 +1868,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(PPPOL2TP_DRV_VERSION); + MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP)); +-MODULE_ALIAS_L2TP_PWTYPE(11); ++MODULE_ALIAS_L2TP_PWTYPE(7); +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index d76800108ddb..f8d6a0ca9c03 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -3626,6 +3626,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv + return -EBUSY; + if (copy_from_user(&val, optval, sizeof(val))) + return -EFAULT; ++ if (val > INT_MAX) ++ return -EINVAL; + po->tp_reserve = val; + return 0; + } +@@ -4150,6 +4152,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + rb->frames_per_block = req->tp_block_size / req->tp_frame_size; + if (unlikely(rb->frames_per_block == 0)) + goto out; ++ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) ++ goto out; + if (unlikely((rb->frames_per_block * req->tp_block_nr) != + req->tp_frame_nr)) + goto out; +diff --git a/net/rds/cong.c b/net/rds/cong.c +index e6144b8246fd..6641bcf7c185 100644 +--- a/net/rds/cong.c ++++ b/net/rds/cong.c +@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port) + i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; + off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; + +- __set_bit_le(off, (void *)map->m_page_addrs[i]); ++ set_bit_le(off, (void *)map->m_page_addrs[i]); + } + + void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) +@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) + i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; + off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; + +- __clear_bit_le(off, (void *)map->m_page_addrs[i]); ++ clear_bit_le(off, (void *)map->m_page_addrs[i]); + } + + static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) +diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c +index e384d6aefa3a..1090a52c03cd 100644 +--- a/net/sched/act_mirred.c ++++ b/net/sched/act_mirred.c +@@ -36,14 +36,15 @@ static DEFINE_SPINLOCK(mirred_list_lock); + static void tcf_mirred_release(struct tc_action *a, int bind) + { + struct tcf_mirred *m = to_mirred(a); +- struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1); ++ struct net_device *dev; + + /* We could be called either in a RCU callback or with RTNL lock held. 
*/ + spin_lock_bh(&mirred_list_lock); + list_del(&m->tcfm_list); +- spin_unlock_bh(&mirred_list_lock); ++ dev = rcu_dereference_protected(m->tcfm_dev, 1); + if (dev) + dev_put(dev); ++ spin_unlock_bh(&mirred_list_lock); + } + + static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = { +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 5758818435f3..c96d666cef29 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -6394,6 +6394,9 @@ int sctp_inet_listen(struct socket *sock, int backlog) + if (sock->state != SS_UNCONNECTED) + goto out; + ++ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED)) ++ goto out; ++ + /* If backlog is zero, disable listening. */ + if (!backlog) { + if (sctp_sstate(sk, CLOSED)) +diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c +index 3b693e924db7..12ba83367b1b 100644 +--- a/sound/core/seq/seq_lock.c ++++ b/sound/core/seq/seq_lock.c +@@ -28,19 +28,16 @@ + /* wait until all locks are released */ + void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line) + { +- int max_count = 5 * HZ; ++ int warn_count = 5 * HZ; + + if (atomic_read(lockp) < 0) { + pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line); + return; + } + while (atomic_read(lockp) > 0) { +- if (max_count == 0) { +- pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line); +- break; +- } ++ if (warn_count-- == 0) ++ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line); + schedule_timeout_uninterruptible(1); +- max_count--; + } + } + +diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h +index f3f6f84c48d6..bb5f8cdea3e2 100644 +--- a/sound/firewire/lib.h ++++ b/sound/firewire/lib.h +@@ -42,7 +42,7 @@ struct snd_fw_async_midi_port { + + struct snd_rawmidi_substream *substream; + snd_fw_async_midi_port_fill fill; +- unsigned int consume_bytes; ++ int consume_bytes; + }; + + int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port, diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.66-67.patch b/patch/kernel/mvebu64-default/03-patch-4.4.66-67.patch new file mode 100644 index 000000000..aafd894f6 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.66-67.patch @@ -0,0 +1,948 @@ +diff --git a/Makefile b/Makefile +index 1cd052823c03..c987902ae1ee 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 66 ++SUBLEVEL = 67 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c +index 9462d2752850..8bdc34dbaedf 100644 +--- a/drivers/block/drbd/drbd_bitmap.c ++++ b/drivers/block/drbd/drbd_bitmap.c +@@ -479,8 +479,14 @@ void drbd_bm_cleanup(struct drbd_device *device) + * this masks out the remaining bits. + * Returns the number of bits cleared. 
+ */ ++#ifndef BITS_PER_PAGE + #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) + #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) ++#else ++# if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3)) ++# error "ambiguous BITS_PER_PAGE" ++# endif ++#endif + #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) + static int bm_clear_surplus(struct drbd_bitmap *b) + { +diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c +index 3eff35c2d453..2684605fe67f 100644 +--- a/drivers/infiniband/hw/qib/qib_qp.c ++++ b/drivers/infiniband/hw/qib/qib_qp.c +@@ -41,13 +41,13 @@ + + #include "qib.h" + +-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) +-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) ++#define RVT_BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) ++#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE-1) + + static inline unsigned mk_qpn(struct qib_qpn_table *qpt, + struct qpn_map *map, unsigned off) + { +- return (map - qpt->map) * BITS_PER_PAGE + off; ++ return (map - qpt->map) * RVT_BITS_PER_PAGE + off; + } + + static inline unsigned find_next_offset(struct qib_qpn_table *qpt, +@@ -59,7 +59,7 @@ static inline unsigned find_next_offset(struct qib_qpn_table *qpt, + if (((off & qpt->mask) >> 1) >= n) + off = (off | qpt->mask) + 2; + } else +- off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); ++ off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off); + return off; + } + +@@ -147,8 +147,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, + qpn = 2; + if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues) + qpn = (qpn | qpt->mask) + 2; +- offset = qpn & BITS_PER_PAGE_MASK; +- map = &qpt->map[qpn / BITS_PER_PAGE]; ++ offset = qpn & RVT_BITS_PER_PAGE_MASK; ++ map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; + max_scan = qpt->nmaps - !offset; + for (i = 0;;) { + if (unlikely(!map->page)) { +@@ -173,7 +173,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, + * We just need to be sure we don't loop + * forever. 
+ */ +- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); ++ } while (offset < RVT_BITS_PER_PAGE && qpn < QPN_MAX); + /* + * In order to keep the number of pages allocated to a + * minimum, we scan the all existing pages before increasing +@@ -204,9 +204,9 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) + { + struct qpn_map *map; + +- map = qpt->map + qpn / BITS_PER_PAGE; ++ map = qpt->map + qpn / RVT_BITS_PER_PAGE; + if (map->page) +- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); ++ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); + } + + static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn) +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index 80a439543259..e503279c34fc 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -1843,7 +1843,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user) + if (r) + goto out; + +- param->data_size = sizeof(*param); ++ param->data_size = offsetof(struct dm_ioctl, data); + r = fn(param, input_param_size); + + if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) && +diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig +index 54479c481a7a..8a25adced79f 100644 +--- a/drivers/mtd/chips/Kconfig ++++ b/drivers/mtd/chips/Kconfig +@@ -111,6 +111,7 @@ config MTD_MAP_BANK_WIDTH_16 + + config MTD_MAP_BANK_WIDTH_32 + bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY ++ select MTD_COMPLEX_MAPPINGS if HAS_IOMEM + default n + help + If you wish to support CFI devices on a physical bus which is +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index 49056c33be74..21e5b9ed1ead 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -12031,7 +12031,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + int ret; + u32 offset, len, b_offset, odd_len; + u8 *buf; +- __be32 start, end; ++ __be32 start = 0, end; + + if (tg3_flag(tp, NO_NVRAM) || + eeprom->magic != TG3_EEPROM_MAGIC) +diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c +index 2882bcac918a..0b096730c72a 100644 +--- a/drivers/scsi/cxlflash/main.c ++++ b/drivers/scsi/cxlflash/main.c +@@ -996,6 +996,8 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) + do { + msleep(delay_us / 1000); + status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); ++ if (status == U64_MAX) ++ nretry /= 2; + } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE && + nretry--); + +@@ -1027,6 +1029,8 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) + do { + msleep(delay_us / 1000); + status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); ++ if (status == U64_MAX) ++ nretry /= 2; + } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE && + nretry--); + +@@ -1137,7 +1141,7 @@ static const struct asyc_intr_info ainfo[] = { + {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR}, + {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST}, + {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0}, +- {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST}, ++ {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0}, + {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET}, + {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0}, + {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET}, +@@ -1145,7 +1149,7 @@ static const struct asyc_intr_info ainfo[] = { + {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR}, + {SISL_ASTATUS_FC1_LOGI_S, "login 
succeeded", 1, SCAN_HOST}, + {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0}, +- {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST}, ++ {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0}, + {0x0, "", 0, 0} /* terminator */ + }; + +@@ -1962,6 +1966,11 @@ retry: + * cxlflash_eh_host_reset_handler() - reset the host adapter + * @scp: SCSI command from stack identifying host. + * ++ * Following a reset, the state is evaluated again in case an EEH occurred ++ * during the reset. In such a scenario, the host reset will either yield ++ * until the EEH recovery is complete or return success or failure based ++ * upon the current device state. ++ * + * Return: + * SUCCESS as defined in scsi/scsi.h + * FAILED as defined in scsi/scsi.h +@@ -1993,7 +2002,8 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) + } else + cfg->state = STATE_NORMAL; + wake_up_all(&cfg->reset_waitq); +- break; ++ ssleep(1); ++ /* fall through */ + case STATE_RESET: + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); + if (cfg->state == STATE_NORMAL) +@@ -2534,6 +2544,9 @@ static void drain_ioctls(struct cxlflash_cfg *cfg) + * @pdev: PCI device struct. + * @state: PCI channel state. + * ++ * When an EEH occurs during an active reset, wait until the reset is ++ * complete and then take action based upon the device state. ++ * + * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT + */ + static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, +@@ -2547,6 +2560,10 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, + + switch (state) { + case pci_channel_io_frozen: ++ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); ++ if (cfg->state == STATE_FAILTERM) ++ return PCI_ERS_RESULT_DISCONNECT; ++ + cfg->state = STATE_RESET; + scsi_block_requests(cfg->host); + drain_ioctls(cfg); +diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c +index f914b30999f8..4d52ca42644a 100644 +--- a/drivers/staging/rdma/ehca/ehca_mrmw.c ++++ b/drivers/staging/rdma/ehca/ehca_mrmw.c +@@ -1921,7 +1921,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo, + u64 *kpage) + { + int ret = 0; +- u64 pgaddr, prev_pgaddr; ++ u64 pgaddr, prev_pgaddr = 0; + u32 j = 0; + int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE; + int nr_kpages = kpages_per_hwpage; +@@ -2417,6 +2417,7 @@ static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca, + ehca_err(&shca->ib_device, "kpage alloc failed"); + return -ENOMEM; + } ++ hret = H_SUCCESS; + for (top = 0; top < EHCA_MAP_ENTRIES; top++) { + if (!ehca_bmap_valid(ehca_bmap->top[top])) + continue; +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 83ff1724ec79..cf3da51a3536 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -5850,17 +5850,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) + static void serial8250_io_resume(struct pci_dev *dev) + { + struct serial_private *priv = pci_get_drvdata(dev); +- const struct pciserial_board *board; ++ struct serial_private *new; + + if (!priv) + return; + +- board = priv->board; +- kfree(priv); +- priv = pciserial_init_ports(dev, board); +- +- if (!IS_ERR(priv)) { +- pci_set_drvdata(dev, priv); ++ new = pciserial_init_ports(dev, priv->board); ++ if (!IS_ERR(new)) { ++ pci_set_drvdata(dev, new); ++ kfree(priv); + } + } + +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index 94906aaa9b7c..e2f6a79e9b01 100644 +--- a/fs/cifs/cifsglob.h ++++ 
b/fs/cifs/cifsglob.h +@@ -227,6 +227,7 @@ struct smb_version_operations { + /* verify the message */ + int (*check_message)(char *, unsigned int); + bool (*is_oplock_break)(char *, struct TCP_Server_Info *); ++ int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); + void (*downgrade_oplock)(struct TCP_Server_Info *, + struct cifsInodeInfo *, bool); + /* process transaction2 response */ +@@ -1289,12 +1290,19 @@ struct mid_q_entry { + void *callback_data; /* general purpose pointer for callback */ + void *resp_buf; /* pointer to received SMB header */ + int mid_state; /* wish this were enum but can not pass to wait_event */ ++ unsigned int mid_flags; + __le16 command; /* smb command code */ + bool large_buf:1; /* if valid response, is pointer to large buf */ + bool multiRsp:1; /* multiple trans2 responses for one request */ + bool multiEnd:1; /* both received */ + }; + ++struct close_cancelled_open { ++ struct cifs_fid fid; ++ struct cifs_tcon *tcon; ++ struct work_struct work; ++}; ++ + /* Make code in transport.c a little cleaner by moving + update of optional stats into function below */ + #ifdef CONFIG_CIFS_STATS2 +@@ -1426,6 +1434,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, + #define MID_RESPONSE_MALFORMED 0x10 + #define MID_SHUTDOWN 0x20 + ++/* Flags */ ++#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */ ++ + /* Types of response buffer returned from SendReceive2 */ + #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ + #define CIFS_SMALL_BUFFER 1 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index b1104ed8f54c..5e2f8b8ca08a 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1424,6 +1424,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) + + length = discard_remaining_data(server); + dequeue_mid(mid, rdata->result); ++ mid->resp_buf = server->smallbuf; ++ server->smallbuf = NULL; + return length; + } + +@@ -1538,6 +1540,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return cifs_readv_discard(server, mid); + + dequeue_mid(mid, false); ++ mid->resp_buf = server->smallbuf; ++ server->smallbuf = NULL; + return length; + } + +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 5d59f25521ce..156bc18eac69 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -924,10 +924,19 @@ cifs_demultiplex_thread(void *p) + + server->lstrp = jiffies; + if (mid_entry != NULL) { ++ if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) && ++ mid_entry->mid_state == MID_RESPONSE_RECEIVED && ++ server->ops->handle_cancelled_mid) ++ server->ops->handle_cancelled_mid( ++ mid_entry->resp_buf, ++ server); ++ + if (!mid_entry->multiRsp || mid_entry->multiEnd) + mid_entry->callback(mid_entry); +- } else if (!server->ops->is_oplock_break || +- !server->ops->is_oplock_break(buf, server)) { ++ } else if (server->ops->is_oplock_break && ++ server->ops->is_oplock_break(buf, server)) { ++ cifs_dbg(FYI, "Received oplock break\n"); ++ } else { + cifs_dbg(VFS, "No task to wake, unknown frame received! 
NumMids %d\n", + atomic_read(&midCount)); + cifs_dump_mem("Received Data is: ", buf, +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c +index e5bc85e49be7..76ccf20fbfb7 100644 +--- a/fs/cifs/smb2misc.c ++++ b/fs/cifs/smb2misc.c +@@ -630,3 +630,47 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) + cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); + return false; + } ++ ++void ++smb2_cancelled_close_fid(struct work_struct *work) ++{ ++ struct close_cancelled_open *cancelled = container_of(work, ++ struct close_cancelled_open, work); ++ ++ cifs_dbg(VFS, "Close unmatched open\n"); ++ ++ SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid, ++ cancelled->fid.volatile_fid); ++ cifs_put_tcon(cancelled->tcon); ++ kfree(cancelled); ++} ++ ++int ++smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server) ++{ ++ struct smb2_hdr *hdr = (struct smb2_hdr *)buffer; ++ struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer; ++ struct cifs_tcon *tcon; ++ struct close_cancelled_open *cancelled; ++ ++ if (hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS) ++ return 0; ++ ++ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL); ++ if (!cancelled) ++ return -ENOMEM; ++ ++ tcon = smb2_find_smb_tcon(server, hdr->SessionId, hdr->TreeId); ++ if (!tcon) { ++ kfree(cancelled); ++ return -ENOENT; ++ } ++ ++ cancelled->fid.persistent_fid = rsp->PersistentFileId; ++ cancelled->fid.volatile_fid = rsp->VolatileFileId; ++ cancelled->tcon = tcon; ++ INIT_WORK(&cancelled->work, smb2_cancelled_close_fid); ++ queue_work(cifsiod_wq, &cancelled->work); ++ ++ return 0; ++} +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index be34b4860675..087918c4612a 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -1511,6 +1511,7 @@ struct smb_version_operations smb20_operations = { + .clear_stats = smb2_clear_stats, + .print_stats = smb2_print_stats, + .is_oplock_break = smb2_is_valid_oplock_break, ++ .handle_cancelled_mid = smb2_handle_cancelled_mid, + .downgrade_oplock = smb2_downgrade_oplock, + .need_neg = smb2_need_neg, + .negotiate = smb2_negotiate, +@@ -1589,6 +1590,7 @@ struct smb_version_operations smb21_operations = { + .clear_stats = smb2_clear_stats, + .print_stats = smb2_print_stats, + .is_oplock_break = smb2_is_valid_oplock_break, ++ .handle_cancelled_mid = smb2_handle_cancelled_mid, + .downgrade_oplock = smb2_downgrade_oplock, + .need_neg = smb2_need_neg, + .negotiate = smb2_negotiate, +@@ -1670,6 +1672,7 @@ struct smb_version_operations smb30_operations = { + .print_stats = smb2_print_stats, + .dump_share_caps = smb2_dump_share_caps, + .is_oplock_break = smb2_is_valid_oplock_break, ++ .handle_cancelled_mid = smb2_handle_cancelled_mid, + .downgrade_oplock = smb2_downgrade_oplock, + .need_neg = smb2_need_neg, + .negotiate = smb2_negotiate, +@@ -1757,6 +1760,7 @@ struct smb_version_operations smb311_operations = { + .print_stats = smb2_print_stats, + .dump_share_caps = smb2_dump_share_caps, + .is_oplock_break = smb2_is_valid_oplock_break, ++ .handle_cancelled_mid = smb2_handle_cancelled_mid, + .downgrade_oplock = smb2_downgrade_oplock, + .need_neg = smb2_need_neg, + .negotiate = smb2_negotiate, +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h +index 0a406ae78129..adc5234486c3 100644 +--- a/fs/cifs/smb2proto.h ++++ b/fs/cifs/smb2proto.h +@@ -47,6 +47,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses, + struct smb_rqst *rqst); + extern struct mid_q_entry *smb2_setup_async_request( + struct 
TCP_Server_Info *server, struct smb_rqst *rqst); ++extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server, ++ __u64 ses_id); ++extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server, ++ __u64 ses_id, __u32 tid); + extern int smb2_calc_signature(struct smb_rqst *rqst, + struct TCP_Server_Info *server); + extern int smb3_calc_signature(struct smb_rqst *rqst, +@@ -157,6 +161,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, + extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, + const u64 persistent_fid, const u64 volatile_fid, + const __u8 oplock_level); ++extern int smb2_handle_cancelled_mid(char *buffer, ++ struct TCP_Server_Info *server); ++void smb2_cancelled_close_fid(struct work_struct *work); + extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, + u64 persistent_file_id, u64 volatile_file_id, + struct kstatfs *FSData); +diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c +index d4c5b6f109a7..69e3b322bbfe 100644 +--- a/fs/cifs/smb2transport.c ++++ b/fs/cifs/smb2transport.c +@@ -115,22 +115,68 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server) + } + + static struct cifs_ses * +-smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server) ++smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id) + { + struct cifs_ses *ses; + +- spin_lock(&cifs_tcp_ses_lock); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { +- if (ses->Suid != smb2hdr->SessionId) ++ if (ses->Suid != ses_id) + continue; +- spin_unlock(&cifs_tcp_ses_lock); + return ses; + } ++ ++ return NULL; ++} ++ ++struct cifs_ses * ++smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) ++{ ++ struct cifs_ses *ses; ++ ++ spin_lock(&cifs_tcp_ses_lock); ++ ses = smb2_find_smb_ses_unlocked(server, ses_id); + spin_unlock(&cifs_tcp_ses_lock); + ++ return ses; ++} ++ ++static struct cifs_tcon * ++smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid) ++{ ++ struct cifs_tcon *tcon; ++ ++ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { ++ if (tcon->tid != tid) ++ continue; ++ ++tcon->tc_count; ++ return tcon; ++ } ++ + return NULL; + } + ++/* ++ * Obtain tcon corresponding to the tid in the given ++ * cifs_ses ++ */ ++ ++struct cifs_tcon * ++smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid) ++{ ++ struct cifs_ses *ses; ++ struct cifs_tcon *tcon; ++ ++ spin_lock(&cifs_tcp_ses_lock); ++ ses = smb2_find_smb_ses_unlocked(server, ses_id); ++ if (!ses) { ++ spin_unlock(&cifs_tcp_ses_lock); ++ return NULL; ++ } ++ tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid); ++ spin_unlock(&cifs_tcp_ses_lock); ++ ++ return tcon; ++} + + int + smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) +@@ -143,7 +189,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) + struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base; + struct cifs_ses *ses; + +- ses = smb2_find_smb_ses(smb2_pdu, server); ++ ses = smb2_find_smb_ses(server, smb2_pdu->SessionId); + if (!ses) { + cifs_dbg(VFS, "%s: Could not find session\n", __func__); + return 0; +@@ -314,7 +360,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) + struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base; + struct cifs_ses *ses; + +- ses = smb2_find_smb_ses(smb2_pdu, server); ++ ses = smb2_find_smb_ses(server, smb2_pdu->SessionId); + if (!ses) { + cifs_dbg(VFS, "%s: Could not find 
session\n", __func__); + return 0; +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index 87abe8ed074c..54af10204e83 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -786,9 +786,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, + + rc = wait_for_response(ses->server, midQ); + if (rc != 0) { ++ cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid); + send_cancel(ses->server, buf, midQ); + spin_lock(&GlobalMid_Lock); + if (midQ->mid_state == MID_REQUEST_SUBMITTED) { ++ midQ->mid_flags |= MID_WAIT_CANCELLED; + midQ->callback = DeleteMidQEntry; + spin_unlock(&GlobalMid_Lock); + cifs_small_buf_release(buf); +diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c +index 1a0835073663..9d6c2dcf1bd0 100644 +--- a/fs/ext4/crypto.c ++++ b/fs/ext4/crypto.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #include "ext4_extents.h" + #include "xattr.h" +@@ -469,3 +470,61 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size) + return size; + return 0; + } ++ ++/* ++ * Validate dentries for encrypted directories to make sure we aren't ++ * potentially caching stale data after a key has been added or ++ * removed. ++ */ ++static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags) ++{ ++ struct dentry *dir; ++ struct ext4_crypt_info *ci; ++ int dir_has_key, cached_with_key; ++ ++ if (flags & LOOKUP_RCU) ++ return -ECHILD; ++ ++ dir = dget_parent(dentry); ++ if (!ext4_encrypted_inode(d_inode(dir))) { ++ dput(dir); ++ return 0; ++ } ++ ci = EXT4_I(d_inode(dir))->i_crypt_info; ++ ++ /* this should eventually be an flag in d_flags */ ++ cached_with_key = dentry->d_fsdata != NULL; ++ dir_has_key = (ci != NULL); ++ dput(dir); ++ ++ /* ++ * If the dentry was cached without the key, and it is a ++ * negative dentry, it might be a valid name. We can't check ++ * if the key has since been made available due to locking ++ * reasons, so we fail the validation so ext4_lookup() can do ++ * this check. ++ * ++ * We also fail the validation if the dentry was created with ++ * the key present, but we no longer have the key, or vice versa. 
++ */ ++ if ((!cached_with_key && d_is_negative(dentry)) || ++ (!cached_with_key && dir_has_key) || ++ (cached_with_key && !dir_has_key)) { ++#if 0 /* Revalidation debug */ ++ char buf[80]; ++ char *cp = simple_dname(dentry, buf, sizeof(buf)); ++ ++ if (IS_ERR(cp)) ++ cp = (char *) "???"; ++ pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata, ++ cached_with_key, d_is_negative(dentry), ++ dir_has_key); ++#endif ++ return 0; ++ } ++ return 1; ++} ++ ++const struct dentry_operations ext4_encrypted_d_ops = { ++ .d_revalidate = ext4_d_revalidate, ++}; +diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c +index 1d1bca74f844..6d17f31a31d7 100644 +--- a/fs/ext4/dir.c ++++ b/fs/ext4/dir.c +@@ -111,6 +111,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) + int dir_has_error = 0; + struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; + ++ if (ext4_encrypted_inode(inode)) { ++ err = ext4_get_encryption_info(inode); ++ if (err && err != -ENOKEY) ++ return err; ++ } ++ + if (is_dx_dir(inode)) { + err = ext4_dx_readdir(file, ctx); + if (err != ERR_BAD_DX_DIR) { +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 362d59b24f1d..3de9bb357b4f 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -2268,6 +2268,7 @@ struct page *ext4_encrypt(struct inode *inode, + struct page *plaintext_page); + int ext4_decrypt(struct page *page); + int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex); ++extern const struct dentry_operations ext4_encrypted_d_ops; + + #ifdef CONFIG_EXT4_FS_ENCRYPTION + int ext4_init_crypto(void); +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c +index 789e2d6724a9..bcd7c4788903 100644 +--- a/fs/ext4/ioctl.c ++++ b/fs/ext4/ioctl.c +@@ -622,6 +622,9 @@ resizefs_out: + struct ext4_encryption_policy policy; + int err = 0; + ++ if (!ext4_has_feature_encrypt(sb)) ++ return -EOPNOTSUPP; ++ + if (copy_from_user(&policy, + (struct ext4_encryption_policy __user *)arg, + sizeof(policy))) { +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 573b4cbb0cb9..fafa903ab3c0 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -1557,6 +1557,24 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi + struct ext4_dir_entry_2 *de; + struct buffer_head *bh; + ++ if (ext4_encrypted_inode(dir)) { ++ int res = ext4_get_encryption_info(dir); ++ ++ /* ++ * This should be a properly defined flag for ++ * dentry->d_flags when we uplift this to the VFS. ++ * d_fsdata is set to (void *) 1 if if the dentry is ++ * created while the directory was encrypted and we ++ * don't have access to the key. 
++ */ ++ dentry->d_fsdata = NULL; ++ if (ext4_encryption_info(dir)) ++ dentry->d_fsdata = (void *) 1; ++ d_set_d_op(dentry, &ext4_encrypted_d_ops); ++ if (res && res != -ENOKEY) ++ return ERR_PTR(res); ++ } ++ + if (dentry->d_name.len > EXT4_NAME_LEN) + return ERR_PTR(-ENAMETOOLONG); + +diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c +index 00575d776d91..7162ab7bc093 100644 +--- a/fs/nfsd/nfs3xdr.c ++++ b/fs/nfsd/nfs3xdr.c +@@ -358,6 +358,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, + { + unsigned int len, v, hdr, dlen; + u32 max_blocksize = svc_max_payload(rqstp); ++ struct kvec *head = rqstp->rq_arg.head; + + p = decode_fh(p, &args->fh); + if (!p) +@@ -367,6 +368,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, + args->count = ntohl(*p++); + args->stable = ntohl(*p++); + len = args->len = ntohl(*p++); ++ if ((void *)p > head->iov_base + head->iov_len) ++ return 0; + /* + * The count must equal the amount of data passed. + */ +@@ -377,9 +380,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, + * Check to make sure that we got the right number of + * bytes. + */ +- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base; +- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len +- - hdr; ++ hdr = (void*)p - head->iov_base; ++ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr; + /* + * Round the length of the data which was specified up to + * the next multiple of XDR units and then compare that +@@ -396,7 +398,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, + len = args->len = max_blocksize; + } + rqstp->rq_vec[0].iov_base = (void*)p; +- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr; ++ rqstp->rq_vec[0].iov_len = head->iov_len - hdr; + v = 0; + while (len > rqstp->rq_vec[v].iov_len) { + len -= rqstp->rq_vec[v].iov_len; +@@ -471,6 +473,8 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, + /* first copy and check from the first page */ + old = (char*)p; + vec = &rqstp->rq_arg.head[0]; ++ if ((void *)old > vec->iov_base + vec->iov_len) ++ return 0; + avail = vec->iov_len - (old - (char*)vec->iov_base); + while (len && avail && *old) { + *new++ = *old++; +diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c +index 79d964aa8079..bf913201a6ad 100644 +--- a/fs/nfsd/nfsxdr.c ++++ b/fs/nfsd/nfsxdr.c +@@ -280,6 +280,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, + struct nfsd_writeargs *args) + { + unsigned int len, hdr, dlen; ++ struct kvec *head = rqstp->rq_arg.head; + int v; + + p = decode_fh(p, &args->fh); +@@ -300,9 +301,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, + * Check to make sure that we got the right number of + * bytes. 
+ */ +- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base; +- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len +- - hdr; ++ hdr = (void*)p - head->iov_base; ++ if (hdr > head->iov_len) ++ return 0; ++ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr; + + /* + * Round the length of the data which was specified up to +@@ -316,7 +318,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, + return 0; + + rqstp->rq_vec[0].iov_base = (void*)p; +- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr; ++ rqstp->rq_vec[0].iov_len = head->iov_len - hdr; + v = 0; + while (len > rqstp->rq_vec[v].iov_len) { + len -= rqstp->rq_vec[v].iov_len; +diff --git a/fs/timerfd.c b/fs/timerfd.c +index 053818dd6c18..1327a02ec778 100644 +--- a/fs/timerfd.c ++++ b/fs/timerfd.c +@@ -40,6 +40,7 @@ struct timerfd_ctx { + short unsigned settime_flags; /* to show in fdinfo */ + struct rcu_head rcu; + struct list_head clist; ++ spinlock_t cancel_lock; + bool might_cancel; + }; + +@@ -112,7 +113,7 @@ void timerfd_clock_was_set(void) + rcu_read_unlock(); + } + +-static void timerfd_remove_cancel(struct timerfd_ctx *ctx) ++static void __timerfd_remove_cancel(struct timerfd_ctx *ctx) + { + if (ctx->might_cancel) { + ctx->might_cancel = false; +@@ -122,6 +123,13 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx) + } + } + ++static void timerfd_remove_cancel(struct timerfd_ctx *ctx) ++{ ++ spin_lock(&ctx->cancel_lock); ++ __timerfd_remove_cancel(ctx); ++ spin_unlock(&ctx->cancel_lock); ++} ++ + static bool timerfd_canceled(struct timerfd_ctx *ctx) + { + if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX) +@@ -132,6 +140,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx) + + static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) + { ++ spin_lock(&ctx->cancel_lock); + if ((ctx->clockid == CLOCK_REALTIME || + ctx->clockid == CLOCK_REALTIME_ALARM) && + (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) { +@@ -141,9 +150,10 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) + list_add_rcu(&ctx->clist, &cancel_list); + spin_unlock(&cancel_lock); + } +- } else if (ctx->might_cancel) { +- timerfd_remove_cancel(ctx); ++ } else { ++ __timerfd_remove_cancel(ctx); + } ++ spin_unlock(&ctx->cancel_lock); + } + + static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) +@@ -395,6 +405,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) + return -ENOMEM; + + init_waitqueue_head(&ctx->wqh); ++ spin_lock_init(&ctx->cancel_lock); + ctx->clockid = clockid; + + if (isalarm(ctx)) +diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h +index 366cf77953b5..806d0ab845e0 100644 +--- a/include/linux/mtd/map.h ++++ b/include/linux/mtd/map.h +@@ -122,18 +122,13 @@ + #endif + + #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 +-# ifdef map_bankwidth +-# undef map_bankwidth +-# define map_bankwidth(map) ((map)->bankwidth) +-# undef map_bankwidth_is_large +-# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +-# undef map_words +-# define map_words(map) map_calc_words(map) +-# else +-# define map_bankwidth(map) 32 +-# define map_bankwidth_is_large(map) (1) +-# define map_words(map) map_calc_words(map) +-# endif ++/* always use indirect access for 256-bit to preserve kernel stack */ ++# undef map_bankwidth ++# define map_bankwidth(map) ((map)->bankwidth) ++# undef map_bankwidth_is_large ++# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) ++# undef map_words ++# define map_words(map) 
map_calc_words(map) + #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) + #undef MAX_MAP_BANKWIDTH + #define MAX_MAP_BANKWIDTH 32 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 8e33019d8e7b..acfb16fdcd55 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -2107,7 +2107,7 @@ static int netlink_dump(struct sock *sk) + if (!skb) { + alloc_size = alloc_min_size; + skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, +- (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM)); ++ GFP_KERNEL); + } + if (!skb) + goto errout_skb; +diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c +index 09da7b52bc2e..1468e4b7bf93 100644 +--- a/sound/ppc/awacs.c ++++ b/sound/ppc/awacs.c +@@ -991,6 +991,7 @@ snd_pmac_awacs_init(struct snd_pmac *chip) + if (err < 0) + return err; + } ++ master_vol = NULL; + if (pm7500) + err = build_mixers(chip, + ARRAY_SIZE(snd_pmac_awacs_mixers_pmac7500), +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c +index 7a5c9a36c1db..daba8c56b43b 100644 +--- a/sound/soc/intel/boards/bytcr_rt5640.c ++++ b/sound/soc/intel/boards/bytcr_rt5640.c +@@ -139,7 +139,7 @@ static struct snd_soc_dai_link byt_dailink[] = { + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + .platform_name = "sst-mfld-platform", +- .ignore_suspend = 1, ++ .nonatomic = true, + .dynamic = 1, + .dpcm_playback = 1, + .dpcm_capture = 1, +@@ -166,6 +166,7 @@ static struct snd_soc_dai_link byt_dailink[] = { + | SND_SOC_DAIFMT_CBS_CFS, + .be_hw_params_fixup = byt_codec_fixup, + .ignore_suspend = 1, ++ .nonatomic = true, + .dpcm_playback = 1, + .dpcm_capture = 1, + .ops = &byt_be_ssp2_ops, diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.67-68.patch b/patch/kernel/mvebu64-default/03-patch-4.4.67-68.patch new file mode 100644 index 000000000..29d3db30a --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.67-68.patch @@ -0,0 +1,2093 @@ +diff --git a/Makefile b/Makefile +index c987902ae1ee..e6c7990497e7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 67 ++SUBLEVEL = 68 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile +index af9e59bf3831..3c789496297f 100644 +--- a/arch/arm/kernel/Makefile ++++ b/arch/arm/kernel/Makefile +@@ -73,7 +73,6 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o + obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o + obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \ + perf_event_v7.o +-CFLAGS_pj4-cp0.o := -marm + AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt + obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o + obj-$(CONFIG_VDSO) += vdso.o +diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c +index 8153e36b2491..7c9248b74d3f 100644 +--- a/arch/arm/kernel/pj4-cp0.c ++++ b/arch/arm/kernel/pj4-cp0.c +@@ -66,9 +66,13 @@ static void __init pj4_cp_access_write(u32 value) + + __asm__ __volatile__ ( + "mcr p15, 0, %1, c1, c0, 2\n\t" ++#ifdef CONFIG_THUMB2_KERNEL ++ "isb\n\t" ++#else + "mrc p15, 0, %0, c1, c0, 2\n\t" + "mov %0, %0\n\t" + "sub pc, pc, #4\n\t" ++#endif + : "=r" (temp) : "r" (value)); + } + +diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S +index 6d1dffca6c7b..748dde9fa4a5 100644 +--- a/arch/arm/mach-omap2/omap-headsmp.S ++++ b/arch/arm/mach-omap2/omap-headsmp.S +@@ -17,6 +17,7 @@ + + #include + #include ++#include + + #include "omap44xx.h" + +@@ -56,7 +57,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0 + cmp 
r0, r4 + bne wait_2 + ldr r12, =API_HYP_ENTRY +- adr r0, hyp_boot ++ badr r0, hyp_boot + smc #0 + hyp_boot: + b secondary_startup +diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c +index b162ad70effc..6297140dd84f 100644 +--- a/arch/arm64/net/bpf_jit_comp.c ++++ b/arch/arm64/net/bpf_jit_comp.c +@@ -728,14 +728,14 @@ static int build_body(struct jit_ctx *ctx) + int ret; + + ret = build_insn(insn, ctx); +- +- if (ctx->image == NULL) +- ctx->offset[i] = ctx->idx; +- + if (ret > 0) { + i++; ++ if (ctx->image == NULL) ++ ctx->offset[i] = ctx->idx; + continue; + } ++ if (ctx->image == NULL) ++ ctx->offset[i] = ctx->idx; + if (ret) + return ret; + } +diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c +index af27334d6809..e3384065f5e7 100644 +--- a/arch/mips/kernel/mips-r2-to-r6-emul.c ++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c +@@ -434,8 +434,8 @@ static int multu_func(struct pt_regs *regs, u32 ir) + rs = regs->regs[MIPSInst_RS(ir)]; + res = (u64)rt * (u64)rs; + rt = res; +- regs->lo = (s64)rt; +- regs->hi = (s64)(res >> 32); ++ regs->lo = (s64)(s32)rt; ++ regs->hi = (s64)(s32)(res >> 32); + + MIPS_R2_STATS(muls); + +@@ -671,9 +671,9 @@ static int maddu_func(struct pt_regs *regs, u32 ir) + res += ((((s64)rt) << 32) | (u32)rs); + + rt = res; +- regs->lo = (s64)rt; ++ regs->lo = (s64)(s32)rt; + rs = res >> 32; +- regs->hi = (s64)rs; ++ regs->hi = (s64)(s32)rs; + + MIPS_R2_STATS(dsps); + +@@ -729,9 +729,9 @@ static int msubu_func(struct pt_regs *regs, u32 ir) + res = ((((s64)rt) << 32) | (u32)rs) - res; + + rt = res; +- regs->lo = (s64)rt; ++ regs->lo = (s64)(s32)rt; + rs = res >> 32; +- regs->hi = (s64)rs; ++ regs->hi = (s64)(s32)rs; + + MIPS_R2_STATS(dsps); + +diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S +index e45b88a5d7e0..ae877c7b3905 100644 +--- a/arch/powerpc/platforms/powernv/opal-wrappers.S ++++ b/arch/powerpc/platforms/powernv/opal-wrappers.S +@@ -148,7 +148,7 @@ opal_tracepoint_entry: + opal_tracepoint_return: + std r3,STK_REG(R31)(r1) + mr r4,r3 +- ld r0,STK_REG(R23)(r1) ++ ld r3,STK_REG(R23)(r1) + bl __trace_opal_exit + ld r3,STK_REG(R31)(r1) + addi r1,r1,STACKFRAMESIZE +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 8ca533b8c606..1e5d2f07416b 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = { + .irq_ack = irq_chip_ack_parent, + .irq_eoi = ioapic_ack_level, + .irq_set_affinity = ioapic_set_affinity, ++ .irq_retrigger = irq_chip_retrigger_hierarchy, + .flags = IRQCHIP_SKIP_SET_WAKE, + }; + +@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { + .irq_ack = irq_chip_ack_parent, + .irq_eoi = ioapic_ir_ack_level, + .irq_set_affinity = ioapic_set_affinity, ++ .irq_retrigger = irq_chip_retrigger_hierarchy, + .flags = IRQCHIP_SKIP_SET_WAKE, + }; + +diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h +index c6ee63f927ab..d688826e5736 100644 +--- a/arch/x86/kernel/kprobes/common.h ++++ b/arch/x86/kernel/kprobes/common.h +@@ -67,7 +67,7 @@ + #endif + + /* Ensure if the instruction can be boostable */ +-extern int can_boost(kprobe_opcode_t *instruction); ++extern int can_boost(kprobe_opcode_t *instruction, void *addr); + /* Recover instruction if given address is probed */ + extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, + unsigned long addr); +diff 
--git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 023c442c33bb..99d293ea2b49 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -163,12 +163,12 @@ NOKPROBE_SYMBOL(skip_prefixes); + * Returns non-zero if opcode is boostable. + * RIP relative instructions are adjusted at copying time in 64 bits mode + */ +-int can_boost(kprobe_opcode_t *opcodes) ++int can_boost(kprobe_opcode_t *opcodes, void *addr) + { + kprobe_opcode_t opcode; + kprobe_opcode_t *orig_opcodes = opcodes; + +- if (search_exception_tables((unsigned long)opcodes)) ++ if (search_exception_tables((unsigned long)addr)) + return 0; /* Page fault may occur on this address. */ + + retry: +@@ -413,7 +413,7 @@ static int arch_copy_kprobe(struct kprobe *p) + * __copy_instruction can modify the displacement of the instruction, + * but it doesn't affect boostable check. + */ +- if (can_boost(p->ainsn.insn)) ++ if (can_boost(p->ainsn.insn, p->addr)) + p->ainsn.boostable = 0; + else + p->ainsn.boostable = -1; +diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c +index 7b3b9d15c47a..c9d488f3e4cd 100644 +--- a/arch/x86/kernel/kprobes/opt.c ++++ b/arch/x86/kernel/kprobes/opt.c +@@ -177,7 +177,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src) + + while (len < RELATIVEJUMP_SIZE) { + ret = __copy_instruction(dest + len, src + len); +- if (!ret || !can_boost(dest + len)) ++ if (!ret || !can_boost(dest + len, src + len)) + return -EINVAL; + len += ret; + } +diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c +index 0497f719977d..c055e9a4e547 100644 +--- a/arch/x86/kernel/pci-calgary_64.c ++++ b/arch/x86/kernel/pci-calgary_64.c +@@ -296,7 +296,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, + + /* were we called with bad_dma_address? */ + badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE); +- if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) { ++ if (unlikely(dma_addr < badend)) { + WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " + "address 0x%Lx\n", dma_addr); + return; +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index 2e1fd586b895..642e9c93a097 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -818,12 +818,6 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) + if (!best) + best = check_cpuid_limit(vcpu, function, index); + +- /* +- * Perfmon not yet supported for L2 guest. +- */ +- if (is_guest_mode(vcpu) && function == 0xa) +- best = NULL; +- + if (best) { + *eax = best->eax; + *ebx = best->ebx; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 0a472e9865c5..50ca8f409a7c 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -7754,8 +7754,6 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) + case EXIT_REASON_TASK_SWITCH: + return true; + case EXIT_REASON_CPUID: +- if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) +- return false; + return true; + case EXIT_REASON_HLT: + return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); +@@ -7840,6 +7838,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); + case EXIT_REASON_PCOMMIT: + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT); ++ case EXIT_REASON_PML_FULL: ++ /* We don't expose PML support to L1. 
*/ ++ return false; + default: + return true; + } +@@ -9759,6 +9760,18 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) + + } + ++ if (enable_pml) { ++ /* ++ * Conceptually we want to copy the PML address and index from ++ * vmcs01 here, and then back to vmcs01 on nested vmexit. But, ++ * since we always flush the log on each vmexit, this happens ++ * to be equivalent to simply resetting the fields in vmcs02. ++ */ ++ ASSERT(vmx->pml_pg); ++ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); ++ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); ++ } ++ + if (nested_cpu_has_ept(vmcs12)) { + kvm_mmu_unload(vcpu); + nested_ept_init_mmu_context(vcpu); +diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c +index de734134bc8d..40c616495da7 100644 +--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c ++++ b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c +@@ -17,7 +17,7 @@ + #include + #include + +-#define TANGIER_EXT_TIMER0_MSI 15 ++#define TANGIER_EXT_TIMER0_MSI 12 + + static struct platform_device wdt_dev = { + .name = "intel_mid_wdt", +diff --git a/block/blk-integrity.c b/block/blk-integrity.c +index d69c5c79f98e..319f2e4f4a8b 100644 +--- a/block/blk-integrity.c ++++ b/block/blk-integrity.c +@@ -417,7 +417,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template + bi->tuple_size = template->tuple_size; + bi->tag_size = template->tag_size; + +- blk_integrity_revalidate(disk); ++ disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES; + } + EXPORT_SYMBOL(blk_integrity_register); + +@@ -430,26 +430,11 @@ EXPORT_SYMBOL(blk_integrity_register); + */ + void blk_integrity_unregister(struct gendisk *disk) + { +- blk_integrity_revalidate(disk); ++ disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES; + memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity)); + } + EXPORT_SYMBOL(blk_integrity_unregister); + +-void blk_integrity_revalidate(struct gendisk *disk) +-{ +- struct blk_integrity *bi = &disk->queue->integrity; +- +- if (!(disk->flags & GENHD_FL_UP)) +- return; +- +- if (bi->profile) +- disk->queue->backing_dev_info.capabilities |= +- BDI_CAP_STABLE_WRITES; +- else +- disk->queue->backing_dev_info.capabilities &= +- ~BDI_CAP_STABLE_WRITES; +-} +- + void blk_integrity_add(struct gendisk *disk) + { + if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype, +diff --git a/block/partition-generic.c b/block/partition-generic.c +index a241e3900bc9..3c062699f28b 100644 +--- a/block/partition-generic.c ++++ b/block/partition-generic.c +@@ -435,7 +435,6 @@ rescan: + + if (disk->fops->revalidate_disk) + disk->fops->revalidate_disk(disk); +- blk_integrity_revalidate(disk); + check_disk_size_change(disk, bdev); + bdev->bd_invalidated = 0; + if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) +diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile +index 820714c72d36..af9e9aff49e5 100644 +--- a/drivers/clk/Makefile ++++ b/drivers/clk/Makefile +@@ -78,7 +78,9 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/ + obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/ + obj-$(CONFIG_ARCH_U8500) += ux500/ + obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/ ++ifeq ($(CONFIG_COMMON_CLK), y) + obj-$(CONFIG_X86) += x86/ ++endif + obj-$(CONFIG_ARCH_ZX) += zte/ + obj-$(CONFIG_ARCH_ZYNQ) += zynq/ + obj-$(CONFIG_H8300) += h8300/ +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c +index 8fb7213277cc..b75391495778 100644 
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c +@@ -66,8 +66,11 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, + if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) + goto out_unlock; + ++ ttm_bo_reference(bo); + up_read(&vma->vm_mm->mmap_sem); + (void) ttm_bo_wait(bo, false, true, false); ++ ttm_bo_unreserve(bo); ++ ttm_bo_unref(&bo); + goto out_unlock; + } + +@@ -114,8 +117,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + + if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { + if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { ++ ttm_bo_reference(bo); + up_read(&vma->vm_mm->mmap_sem); + (void) ttm_bo_wait_unreserved(bo); ++ ttm_bo_unref(&bo); + } + + return VM_FAULT_RETRY; +@@ -160,6 +165,13 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + ret = ttm_bo_vm_fault_idle(bo, vma, vmf); + if (unlikely(ret != 0)) { + retval = ret; ++ ++ if (retval == VM_FAULT_RETRY && ++ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { ++ /* The BO has already been unreserved. */ ++ return retval; ++ } ++ + goto out_unlock; + } + +diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c +index feca07be85f5..1eb9fb33db38 100644 +--- a/drivers/leds/leds-ktd2692.c ++++ b/drivers/leds/leds-ktd2692.c +@@ -296,15 +296,15 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, + return -ENXIO; + + led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS); +- if (IS_ERR(led->ctrl_gpio)) { +- ret = PTR_ERR(led->ctrl_gpio); ++ ret = PTR_ERR_OR_ZERO(led->ctrl_gpio); ++ if (ret) { + dev_err(dev, "cannot get ctrl-gpios %d\n", ret); + return ret; + } + + led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS); +- if (IS_ERR(led->aux_gpio)) { +- ret = PTR_ERR(led->aux_gpio); ++ ret = PTR_ERR_OR_ZERO(led->aux_gpio); ++ if (ret) { + dev_err(dev, "cannot get aux-gpios %d\n", ret); + return ret; + } +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 07f5f239cb65..4744919440e0 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -2473,7 +2473,8 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) + INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); + + bp->ntp_fltr_count = 0; +- bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), ++ bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), ++ sizeof(long), + GFP_KERNEL); + + if (!bp->ntp_fltr_bmap) +diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c +index b5ab98ee1445..82753e7c7e7c 100644 +--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c ++++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c +@@ -211,7 +211,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, + int ret; + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; +- struct ethhdr *eh = (struct ethhdr *)(skb->data); ++ struct ethhdr *eh; + + brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx); + +@@ -232,22 +232,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, + goto done; + } + +- /* Make sure there's enough room for any header */ +- if (skb_headroom(skb) < drvr->hdrlen) { +- struct sk_buff *skb2; +- +- brcmf_dbg(INFO, "%s: insufficient headroom\n", ++ /* Make sure there's enough writable headroom*/ ++ ret = skb_cow_head(skb, drvr->hdrlen); ++ if (ret < 0) { ++ brcmf_err("%s: skb_cow_head failed\n", + brcmf_ifname(drvr, ifp->bssidx)); +- 
drvr->bus_if->tx_realloc++; +- skb2 = skb_realloc_headroom(skb, drvr->hdrlen); + dev_kfree_skb(skb); +- skb = skb2; +- if (skb == NULL) { +- brcmf_err("%s: skb_realloc_headroom failed\n", +- brcmf_ifname(drvr, ifp->bssidx)); +- ret = -ENOMEM; +- goto done; +- } ++ goto done; + } + + /* validate length for ether packet */ +@@ -257,6 +248,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, + goto done; + } + ++ eh = (struct ethhdr *)(skb->data); ++ + if (eh->h_proto == htons(ETH_P_PAE)) + atomic_inc(&ifp->pend_8021x_cnt); + +diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c +index aa498e0d2204..49f3e17c28ea 100644 +--- a/drivers/net/wireless/mwifiex/11n_aggr.c ++++ b/drivers/net/wireless/mwifiex/11n_aggr.c +@@ -101,13 +101,6 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, + { + struct txpd *local_tx_pd; + struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); +- unsigned int pad; +- int headroom = (priv->adapter->iface_type == +- MWIFIEX_USB) ? 0 : INTF_HEADER_LEN; +- +- pad = ((void *)skb->data - sizeof(*local_tx_pd) - +- headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1); +- skb_push(skb, pad); + + skb_push(skb, sizeof(*local_tx_pd)); + +@@ -121,12 +114,10 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, + local_tx_pd->bss_num = priv->bss_num; + local_tx_pd->bss_type = priv->bss_type; + /* Always zero as the data is followed by struct txpd */ +- local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + +- pad); ++ local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd)); + local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU); + local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len - +- sizeof(*local_tx_pd) - +- pad); ++ sizeof(*local_tx_pd)); + + if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT) + local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET; +@@ -190,7 +181,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, + ra_list_flags); + return -1; + } +- skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN); ++ ++ /* skb_aggr->data already 64 byte align, just reserve bus interface ++ * header and txpd. 
++ */ ++ skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); + tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); + + memset(tx_info_aggr, 0, sizeof(*tx_info_aggr)); +diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c +index 9824d8dd2b44..45d97b64ef84 100644 +--- a/drivers/net/wireless/mwifiex/debugfs.c ++++ b/drivers/net/wireless/mwifiex/debugfs.c +@@ -115,7 +115,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf, + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) { + p += sprintf(p, "multicast_count=\"%d\"\n", + netdev_mc_count(netdev)); +- p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid); ++ p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len, ++ info.ssid.ssid); + p += sprintf(p, "bssid=\"%pM\"\n", info.bssid); + p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan); + p += sprintf(p, "country_code = \"%s\"\n", info.country_code); +diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c +index d6c4f0f60839..6cfa2969b123 100644 +--- a/drivers/net/wireless/mwifiex/sta_ioctl.c ++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c +@@ -1098,8 +1098,6 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, + encrypt_key.is_rx_seq_valid = true; + } + } else { +- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) +- return 0; + encrypt_key.key_disable = true; + if (mac_addr) + memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); +diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig +index 03cb3ea2d2c0..b5679fb67591 100644 +--- a/drivers/phy/Kconfig ++++ b/drivers/phy/Kconfig +@@ -368,6 +368,7 @@ config PHY_QCOM_UFS + config PHY_TUSB1210 + tristate "TI TUSB1210 ULPI PHY module" + depends on USB_ULPI_BUS ++ depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in + select GENERIC_PHY + help + Support for TI TUSB1210 USB ULPI PHY. +diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c +index f5746b9f4e83..f05d2773fe00 100644 +--- a/drivers/power/bq24190_charger.c ++++ b/drivers/power/bq24190_charger.c +@@ -144,10 +144,7 @@ + * so the first read after a fault returns the latched value and subsequent + * reads return the current value. In order to return the fault status + * to the user, have the interrupt handler save the reg's value and retrieve +- * it in the appropriate health/status routine. Each routine has its own +- * flag indicating whether it should use the value stored by the last run +- * of the interrupt handler or do an actual reg read. That way each routine +- * can report back whatever fault may have occured. ++ * it in the appropriate health/status routine. 
+ */ + struct bq24190_dev_info { + struct i2c_client *client; +@@ -159,10 +156,6 @@ struct bq24190_dev_info { + unsigned int gpio_int; + unsigned int irq; + struct mutex f_reg_lock; +- bool first_time; +- bool charger_health_valid; +- bool battery_health_valid; +- bool battery_status_valid; + u8 f_reg; + u8 ss_reg; + u8 watchdog; +@@ -636,21 +629,11 @@ static int bq24190_charger_get_health(struct bq24190_dev_info *bdi, + union power_supply_propval *val) + { + u8 v; +- int health, ret; ++ int health; + + mutex_lock(&bdi->f_reg_lock); +- +- if (bdi->charger_health_valid) { +- v = bdi->f_reg; +- bdi->charger_health_valid = false; +- mutex_unlock(&bdi->f_reg_lock); +- } else { +- mutex_unlock(&bdi->f_reg_lock); +- +- ret = bq24190_read(bdi, BQ24190_REG_F, &v); +- if (ret < 0) +- return ret; +- } ++ v = bdi->f_reg; ++ mutex_unlock(&bdi->f_reg_lock); + + if (v & BQ24190_REG_F_BOOST_FAULT_MASK) { + /* +@@ -937,18 +920,8 @@ static int bq24190_battery_get_status(struct bq24190_dev_info *bdi, + int status, ret; + + mutex_lock(&bdi->f_reg_lock); +- +- if (bdi->battery_status_valid) { +- chrg_fault = bdi->f_reg; +- bdi->battery_status_valid = false; +- mutex_unlock(&bdi->f_reg_lock); +- } else { +- mutex_unlock(&bdi->f_reg_lock); +- +- ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault); +- if (ret < 0) +- return ret; +- } ++ chrg_fault = bdi->f_reg; ++ mutex_unlock(&bdi->f_reg_lock); + + chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK; + chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT; +@@ -996,21 +969,11 @@ static int bq24190_battery_get_health(struct bq24190_dev_info *bdi, + union power_supply_propval *val) + { + u8 v; +- int health, ret; ++ int health; + + mutex_lock(&bdi->f_reg_lock); +- +- if (bdi->battery_health_valid) { +- v = bdi->f_reg; +- bdi->battery_health_valid = false; +- mutex_unlock(&bdi->f_reg_lock); +- } else { +- mutex_unlock(&bdi->f_reg_lock); +- +- ret = bq24190_read(bdi, BQ24190_REG_F, &v); +- if (ret < 0) +- return ret; +- } ++ v = bdi->f_reg; ++ mutex_unlock(&bdi->f_reg_lock); + + if (v & BQ24190_REG_F_BAT_FAULT_MASK) { + health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; +@@ -1197,9 +1160,12 @@ static const struct power_supply_desc bq24190_battery_desc = { + static irqreturn_t bq24190_irq_handler_thread(int irq, void *data) + { + struct bq24190_dev_info *bdi = data; +- bool alert_userspace = false; ++ const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK; ++ const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK ++ | BQ24190_REG_F_NTC_FAULT_MASK; ++ bool alert_charger = false, alert_battery = false; + u8 ss_reg = 0, f_reg = 0; +- int ret; ++ int i, ret; + + pm_runtime_get_sync(bdi->dev); + +@@ -1209,6 +1175,32 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data) + goto out; + } + ++ i = 0; ++ do { ++ ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg); ++ if (ret < 0) { ++ dev_err(bdi->dev, "Can't read F reg: %d\n", ret); ++ goto out; ++ } ++ } while (f_reg && ++i < 2); ++ ++ if (f_reg != bdi->f_reg) { ++ dev_info(bdi->dev, ++ "Fault: boost %d, charge %d, battery %d, ntc %d\n", ++ !!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK), ++ !!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK), ++ !!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK), ++ !!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK)); ++ ++ mutex_lock(&bdi->f_reg_lock); ++ if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f)) ++ alert_battery = true; ++ if ((bdi->f_reg & ~battery_mask_f) != (f_reg & ~battery_mask_f)) ++ alert_charger = true; ++ bdi->f_reg = f_reg; ++ mutex_unlock(&bdi->f_reg_lock); ++ } ++ + if (ss_reg != bdi->ss_reg) { + /* + 
* The device is in host mode so when PG_STAT goes from 1->0 +@@ -1225,47 +1217,17 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data) + ret); + } + ++ if ((bdi->ss_reg & battery_mask_ss) != (ss_reg & battery_mask_ss)) ++ alert_battery = true; ++ if ((bdi->ss_reg & ~battery_mask_ss) != (ss_reg & ~battery_mask_ss)) ++ alert_charger = true; + bdi->ss_reg = ss_reg; +- alert_userspace = true; +- } +- +- mutex_lock(&bdi->f_reg_lock); +- +- ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg); +- if (ret < 0) { +- mutex_unlock(&bdi->f_reg_lock); +- dev_err(bdi->dev, "Can't read F reg: %d\n", ret); +- goto out; + } + +- if (f_reg != bdi->f_reg) { +- bdi->f_reg = f_reg; +- bdi->charger_health_valid = true; +- bdi->battery_health_valid = true; +- bdi->battery_status_valid = true; +- +- alert_userspace = true; +- } +- +- mutex_unlock(&bdi->f_reg_lock); +- +- /* +- * Sometimes bq24190 gives a steady trickle of interrupts even +- * though the watchdog timer is turned off and neither the STATUS +- * nor FAULT registers have changed. Weed out these sprurious +- * interrupts so userspace isn't alerted for no reason. +- * In addition, the chip always generates an interrupt after +- * register reset so we should ignore that one (the very first +- * interrupt received). +- */ +- if (alert_userspace) { +- if (!bdi->first_time) { +- power_supply_changed(bdi->charger); +- power_supply_changed(bdi->battery); +- } else { +- bdi->first_time = false; +- } +- } ++ if (alert_charger) ++ power_supply_changed(bdi->charger); ++ if (alert_battery) ++ power_supply_changed(bdi->battery); + + out: + pm_runtime_put_sync(bdi->dev); +@@ -1300,6 +1262,10 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi) + goto out; + + ret = bq24190_set_mode_host(bdi); ++ if (ret < 0) ++ goto out; ++ ++ ret = bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg); + out: + pm_runtime_put_sync(bdi->dev); + return ret; +@@ -1375,10 +1341,8 @@ static int bq24190_probe(struct i2c_client *client, + bdi->model = id->driver_data; + strncpy(bdi->model_name, id->name, I2C_NAME_SIZE); + mutex_init(&bdi->f_reg_lock); +- bdi->first_time = true; +- bdi->charger_health_valid = false; +- bdi->battery_health_valid = false; +- bdi->battery_status_valid = false; ++ bdi->f_reg = 0; ++ bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */ + + i2c_set_clientdata(client, bdi); + +@@ -1392,22 +1356,13 @@ static int bq24190_probe(struct i2c_client *client, + return -EINVAL; + } + +- ret = devm_request_threaded_irq(dev, bdi->irq, NULL, +- bq24190_irq_handler_thread, +- IRQF_TRIGGER_RISING | IRQF_ONESHOT, +- "bq24190-charger", bdi); +- if (ret < 0) { +- dev_err(dev, "Can't set up irq handler\n"); +- goto out1; +- } +- + pm_runtime_enable(dev); + pm_runtime_resume(dev); + + ret = bq24190_hw_init(bdi); + if (ret < 0) { + dev_err(dev, "Hardware init failed\n"); +- goto out2; ++ goto out1; + } + + charger_cfg.drv_data = bdi; +@@ -1418,7 +1373,7 @@ static int bq24190_probe(struct i2c_client *client, + if (IS_ERR(bdi->charger)) { + dev_err(dev, "Can't register charger\n"); + ret = PTR_ERR(bdi->charger); +- goto out2; ++ goto out1; + } + + battery_cfg.drv_data = bdi; +@@ -1427,24 +1382,34 @@ static int bq24190_probe(struct i2c_client *client, + if (IS_ERR(bdi->battery)) { + dev_err(dev, "Can't register battery\n"); + ret = PTR_ERR(bdi->battery); +- goto out3; ++ goto out2; + } + + ret = bq24190_sysfs_create_group(bdi); + if (ret) { + dev_err(dev, "Can't create sysfs entries\n"); ++ goto out3; ++ } ++ ++ ret = devm_request_threaded_irq(dev, 
bdi->irq, NULL, ++ bq24190_irq_handler_thread, ++ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, ++ "bq24190-charger", bdi); ++ if (ret < 0) { ++ dev_err(dev, "Can't set up irq handler\n"); + goto out4; + } + + return 0; + + out4: +- power_supply_unregister(bdi->battery); ++ bq24190_sysfs_remove_group(bdi); + out3: +- power_supply_unregister(bdi->charger); ++ power_supply_unregister(bdi->battery); + out2: +- pm_runtime_disable(dev); ++ power_supply_unregister(bdi->charger); + out1: ++ pm_runtime_disable(dev); + if (bdi->gpio_int) + gpio_free(bdi->gpio_int); + +@@ -1488,12 +1453,13 @@ static int bq24190_pm_resume(struct device *dev) + struct i2c_client *client = to_i2c_client(dev); + struct bq24190_dev_info *bdi = i2c_get_clientdata(client); + +- bdi->charger_health_valid = false; +- bdi->battery_health_valid = false; +- bdi->battery_status_valid = false; ++ bdi->f_reg = 0; ++ bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */ + + pm_runtime_get_sync(bdi->dev); + bq24190_register_reset(bdi); ++ bq24190_set_mode_host(bdi); ++ bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg); + pm_runtime_put_sync(bdi->dev); + + /* Things may have changed while suspended so alert upper layer */ +diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig +index 64eed87d34a8..433c5e3d5733 100644 +--- a/drivers/scsi/Kconfig ++++ b/drivers/scsi/Kconfig +@@ -1637,7 +1637,7 @@ config ATARI_SCSI_RESET_BOOT + + config MAC_SCSI + tristate "Macintosh NCR5380 SCSI" +- depends on MAC && SCSI=y ++ depends on MAC && SCSI + select SCSI_SPI_ATTRS + help + This is the NCR 5380 SCSI controller included on most of the 68030 +diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c +index 4e6c16af40fc..91ff8fb0cc3a 100644 +--- a/drivers/staging/emxx_udc/emxx_udc.c ++++ b/drivers/staging/emxx_udc/emxx_udc.c +@@ -3181,7 +3181,7 @@ static const struct { + }; + + /*-------------------------------------------------------------------------*/ +-static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc) ++static void nbu2ss_drv_ep_init(struct nbu2ss_udc *udc) + { + int i; + +@@ -3211,7 +3211,7 @@ static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc) + + /*-------------------------------------------------------------------------*/ + /* platform_driver */ +-static int __init nbu2ss_drv_contest_init( ++static int nbu2ss_drv_contest_init( + struct platform_device *pdev, + struct nbu2ss_udc *udc) + { +diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c +index a9c1e0bafa62..e35fbece3d2f 100644 +--- a/drivers/staging/wlan-ng/p80211netdev.c ++++ b/drivers/staging/wlan-ng/p80211netdev.c +@@ -232,7 +232,7 @@ static int p80211_convert_to_ether(wlandevice_t *wlandev, struct sk_buff *skb) + struct p80211_hdr_a3 *hdr; + + hdr = (struct p80211_hdr_a3 *) skb->data; +- if (p80211_rx_typedrop(wlandev, hdr->fc)) ++ if (p80211_rx_typedrop(wlandev, le16_to_cpu(hdr->fc))) + return CONV_TO_ETHER_SKIPPED; + + /* perform mcast filtering: allow my local address through but reject +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c +index a2c0734c76e2..e8dd296fb25b 100644 +--- a/drivers/tty/serial/8250/8250_omap.c ++++ b/drivers/tty/serial/8250/8250_omap.c +@@ -1235,7 +1235,8 @@ static int omap8250_probe(struct platform_device *pdev) + pm_runtime_put_autosuspend(&pdev->dev); + return 0; + err: +- pm_runtime_put(&pdev->dev); ++ pm_runtime_dont_use_autosuspend(&pdev->dev); ++ pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + 
return ret; + } +@@ -1244,6 +1245,7 @@ static int omap8250_remove(struct platform_device *pdev) + { + struct omap8250_priv *priv = platform_get_drvdata(pdev); + ++ pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + serial8250_unregister_port(priv->line); +@@ -1343,6 +1345,10 @@ static int omap8250_runtime_suspend(struct device *dev) + struct omap8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up; + ++ /* In case runtime-pm tries this before we are setup */ ++ if (!priv) ++ return 0; ++ + up = serial8250_get_port(priv->line); + /* + * When using 'no_console_suspend', the console UART must not be +diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h +index 41d7cf6d63ba..858c30814497 100644 +--- a/drivers/usb/chipidea/ci.h ++++ b/drivers/usb/chipidea/ci.h +@@ -428,9 +428,6 @@ int hw_port_test_set(struct ci_hdrc *ci, u8 mode); + + u8 hw_port_test_get(struct ci_hdrc *ci); + +-int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask, +- u32 value, unsigned int timeout_ms); +- + void ci_platform_configure(struct ci_hdrc *ci); + + #endif /* __DRIVERS_USB_CHIPIDEA_CI_H */ +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c +index ba4a2a1eb3ff..939c6ad71068 100644 +--- a/drivers/usb/chipidea/core.c ++++ b/drivers/usb/chipidea/core.c +@@ -518,38 +518,6 @@ int hw_device_reset(struct ci_hdrc *ci) + return 0; + } + +-/** +- * hw_wait_reg: wait the register value +- * +- * Sometimes, it needs to wait register value before going on. +- * Eg, when switch to device mode, the vbus value should be lower +- * than OTGSC_BSV before connects to host. +- * +- * @ci: the controller +- * @reg: register index +- * @mask: mast bit +- * @value: the bit value to wait +- * @timeout_ms: timeout in millisecond +- * +- * This function returns an error code if timeout +- */ +-int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask, +- u32 value, unsigned int timeout_ms) +-{ +- unsigned long elapse = jiffies + msecs_to_jiffies(timeout_ms); +- +- while (hw_read(ci, reg, mask) != value) { +- if (time_after(jiffies, elapse)) { +- dev_err(ci->dev, "timeout waiting for %08x in %d\n", +- mask, reg); +- return -ETIMEDOUT; +- } +- msleep(20); +- } +- +- return 0; +-} +- + static irqreturn_t ci_irq(int irq, void *data) + { + struct ci_hdrc *ci = data; +diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c +index 03b6743461d1..0cf149edddd8 100644 +--- a/drivers/usb/chipidea/otg.c ++++ b/drivers/usb/chipidea/otg.c +@@ -44,12 +44,15 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask) + else + val &= ~OTGSC_BSVIS; + +- cable->changed = false; +- + if (cable->state) + val |= OTGSC_BSV; + else + val &= ~OTGSC_BSV; ++ ++ if (cable->enabled) ++ val |= OTGSC_BSVIE; ++ else ++ val &= ~OTGSC_BSVIE; + } + + cable = &ci->platdata->id_extcon; +@@ -59,15 +62,18 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask) + else + val &= ~OTGSC_IDIS; + +- cable->changed = false; +- + if (cable->state) + val |= OTGSC_ID; + else + val &= ~OTGSC_ID; ++ ++ if (cable->enabled) ++ val |= OTGSC_IDIE; ++ else ++ val &= ~OTGSC_IDIE; + } + +- return val; ++ return val & mask; + } + + /** +@@ -77,6 +83,36 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask) + */ + void hw_write_otgsc(struct ci_hdrc *ci, u32 mask, u32 data) + { ++ struct ci_hdrc_cable *cable; ++ ++ cable = &ci->platdata->vbus_extcon; ++ if (!IS_ERR(cable->edev)) { ++ if (data & mask & OTGSC_BSVIS) ++ cable->changed = false; ++ ++ /* Don't enable vbus interrupt 
if using external notifier */ ++ if (data & mask & OTGSC_BSVIE) { ++ cable->enabled = true; ++ data &= ~OTGSC_BSVIE; ++ } else if (mask & OTGSC_BSVIE) { ++ cable->enabled = false; ++ } ++ } ++ ++ cable = &ci->platdata->id_extcon; ++ if (!IS_ERR(cable->edev)) { ++ if (data & mask & OTGSC_IDIS) ++ cable->changed = false; ++ ++ /* Don't enable id interrupt if using external notifier */ ++ if (data & mask & OTGSC_IDIE) { ++ cable->enabled = true; ++ data &= ~OTGSC_IDIE; ++ } else if (mask & OTGSC_IDIE) { ++ cable->enabled = false; ++ } ++ } ++ + hw_write(ci, OP_OTGSC, mask | OTGSC_INT_STATUS_BITS, data); + } + +@@ -104,7 +140,31 @@ void ci_handle_vbus_change(struct ci_hdrc *ci) + usb_gadget_vbus_disconnect(&ci->gadget); + } + +-#define CI_VBUS_STABLE_TIMEOUT_MS 5000 ++/** ++ * When we switch to device mode, the vbus value should be lower ++ * than OTGSC_BSV before connecting to host. ++ * ++ * @ci: the controller ++ * ++ * This function returns an error code if timeout ++ */ ++static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci) ++{ ++ unsigned long elapse = jiffies + msecs_to_jiffies(5000); ++ u32 mask = OTGSC_BSV; ++ ++ while (hw_read_otgsc(ci, mask)) { ++ if (time_after(jiffies, elapse)) { ++ dev_err(ci->dev, "timeout waiting for %08x in OTGSC\n", ++ mask); ++ return -ETIMEDOUT; ++ } ++ msleep(20); ++ } ++ ++ return 0; ++} ++ + static void ci_handle_id_switch(struct ci_hdrc *ci) + { + enum ci_role role = ci_otg_role(ci); +@@ -116,9 +176,11 @@ static void ci_handle_id_switch(struct ci_hdrc *ci) + ci_role_stop(ci); + + if (role == CI_ROLE_GADGET) +- /* wait vbus lower than OTGSC_BSV */ +- hw_wait_reg(ci, OP_OTGSC, OTGSC_BSV, 0, +- CI_VBUS_STABLE_TIMEOUT_MS); ++ /* ++ * wait vbus lower than OTGSC_BSV before connecting ++ * to host ++ */ ++ hw_wait_vbus_lower_bsv(ci); + + ci_role_start(ci, role); + } +diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c +index df538fd10aa4..46f5354c534d 100644 +--- a/drivers/usb/host/ehci-exynos.c ++++ b/drivers/usb/host/ehci-exynos.c +@@ -77,10 +77,12 @@ static int exynos_ehci_get_phy(struct device *dev, + if (IS_ERR(phy)) { + ret = PTR_ERR(phy); + if (ret == -EPROBE_DEFER) { ++ of_node_put(child); + return ret; + } else if (ret != -ENOSYS && ret != -ENODEV) { + dev_err(dev, + "Error retrieving usb2 phy: %d\n", ret); ++ of_node_put(child); + return ret; + } + } +diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c +index 2cd105be7319..6865b919403f 100644 +--- a/drivers/usb/host/ohci-exynos.c ++++ b/drivers/usb/host/ohci-exynos.c +@@ -66,10 +66,12 @@ static int exynos_ohci_get_phy(struct device *dev, + if (IS_ERR(phy)) { + ret = PTR_ERR(phy); + if (ret == -EPROBE_DEFER) { ++ of_node_put(child); + return ret; + } else if (ret != -ENOSYS && ret != -ENODEV) { + dev_err(dev, + "Error retrieving usb2 phy: %d\n", ret); ++ of_node_put(child); + return ret; + } + } +diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c +index 7812052dc700..754fc3e41005 100644 +--- a/drivers/usb/serial/ark3116.c ++++ b/drivers/usb/serial/ark3116.c +@@ -373,23 +373,29 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) + dev_dbg(&port->dev, + "%s - usb_serial_generic_open failed: %d\n", + __func__, result); +- goto err_out; ++ goto err_free; + } + + /* remove any data still left: also clears error state */ + ark3116_read_reg(serial, UART_RX, buf); + + /* read modem status */ +- priv->msr = ark3116_read_reg(serial, UART_MSR, buf); ++ result = ark3116_read_reg(serial, UART_MSR, buf); ++ if 
(result < 0) ++ goto err_close; ++ priv->msr = *buf; ++ + /* read line status */ +- priv->lsr = ark3116_read_reg(serial, UART_LSR, buf); ++ result = ark3116_read_reg(serial, UART_LSR, buf); ++ if (result < 0) ++ goto err_close; ++ priv->lsr = *buf; + + result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); + if (result) { + dev_err(&port->dev, "submit irq_in urb failed %d\n", + result); +- ark3116_close(port); +- goto err_out; ++ goto err_close; + } + + /* activate interrupts */ +@@ -402,8 +408,15 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) + if (tty) + ark3116_set_termios(tty, port, NULL); + +-err_out: + kfree(buf); ++ ++ return 0; ++ ++err_close: ++ usb_serial_generic_close(port); ++err_free: ++ kfree(buf); ++ + return result; + } + +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c +index e0b1fe2f60e1..be93b9ff2d98 100644 +--- a/drivers/usb/serial/digi_acceleport.c ++++ b/drivers/usb/serial/digi_acceleport.c +@@ -1399,25 +1399,30 @@ static int digi_read_inb_callback(struct urb *urb) + { + struct usb_serial_port *port = urb->context; + struct digi_port *priv = usb_get_serial_port_data(port); +- int opcode = ((unsigned char *)urb->transfer_buffer)[0]; +- int len = ((unsigned char *)urb->transfer_buffer)[1]; +- int port_status = ((unsigned char *)urb->transfer_buffer)[2]; +- unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3; ++ unsigned char *buf = urb->transfer_buffer; ++ int opcode; ++ int len; ++ int port_status; ++ unsigned char *data; + int flag, throttled; +- int status = urb->status; +- +- /* do not process callbacks on closed ports */ +- /* but do continue the read chain */ +- if (urb->status == -ENOENT) +- return 0; + + /* short/multiple packet check */ ++ if (urb->actual_length < 2) { ++ dev_warn(&port->dev, "short packet received\n"); ++ return -1; ++ } ++ ++ opcode = buf[0]; ++ len = buf[1]; ++ + if (urb->actual_length != len + 2) { +- dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, " +- "status=%d, port=%d, opcode=%d, len=%d, " +- "actual_length=%d, status=%d\n", __func__, status, +- priv->dp_port_num, opcode, len, urb->actual_length, +- port_status); ++ dev_err(&port->dev, "malformed packet received: port=%d, opcode=%d, len=%d, actual_length=%u\n", ++ priv->dp_port_num, opcode, len, urb->actual_length); ++ return -1; ++ } ++ ++ if (opcode == DIGI_CMD_RECEIVE_DATA && len < 1) { ++ dev_err(&port->dev, "malformed data packet received\n"); + return -1; + } + +@@ -1431,6 +1436,9 @@ static int digi_read_inb_callback(struct urb *urb) + + /* receive data */ + if (opcode == DIGI_CMD_RECEIVE_DATA) { ++ port_status = buf[2]; ++ data = &buf[3]; ++ + /* get flag from port_status */ + flag = 0; + +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 19a98116c2ab..b3a21fcbbaf9 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1439,10 +1439,13 @@ static int read_latency_timer(struct usb_serial_port *port) + FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, + 0, priv->interface, + buf, 1, WDR_TIMEOUT); +- if (rv < 0) ++ if (rv < 1) { + dev_err(&port->dev, "Unable to read latency timer: %i\n", rv); +- else ++ if (rv >= 0) ++ rv = -EIO; ++ } else { + priv->latency = buf[0]; ++ } + + kfree(buf); + +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c +index b63a6c3899c5..749e1b674145 100644 +--- a/drivers/usb/serial/io_edgeport.c ++++ b/drivers/usb/serial/io_edgeport.c +@@ -492,20 +492,24 @@ static int 
get_epic_descriptor(struct edgeport_serial *ep) + int result; + struct usb_serial *serial = ep->serial; + struct edgeport_product_info *product_info = &ep->product_info; +- struct edge_compatibility_descriptor *epic = &ep->epic_descriptor; ++ struct edge_compatibility_descriptor *epic; + struct edge_compatibility_bits *bits; + struct device *dev = &serial->dev->dev; + + ep->is_epic = 0; ++ ++ epic = kmalloc(sizeof(*epic), GFP_KERNEL); ++ if (!epic) ++ return -ENOMEM; ++ + result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + USB_REQUEST_ION_GET_EPIC_DESC, + 0xC0, 0x00, 0x00, +- &ep->epic_descriptor, +- sizeof(struct edge_compatibility_descriptor), ++ epic, sizeof(*epic), + 300); +- +- if (result > 0) { ++ if (result == sizeof(*epic)) { + ep->is_epic = 1; ++ memcpy(&ep->epic_descriptor, epic, sizeof(*epic)); + memset(product_info, 0, sizeof(struct edgeport_product_info)); + + product_info->NumPorts = epic->NumPorts; +@@ -534,8 +538,16 @@ static int get_epic_descriptor(struct edgeport_serial *ep) + dev_dbg(dev, " IOSPWriteLCR : %s\n", bits->IOSPWriteLCR ? "TRUE": "FALSE"); + dev_dbg(dev, " IOSPSetBaudRate : %s\n", bits->IOSPSetBaudRate ? "TRUE": "FALSE"); + dev_dbg(dev, " TrueEdgeport : %s\n", bits->TrueEdgeport ? "TRUE": "FALSE"); ++ ++ result = 0; ++ } else if (result >= 0) { ++ dev_warn(&serial->interface->dev, "short epic descriptor received: %d\n", ++ result); ++ result = -EIO; + } + ++ kfree(epic); ++ + return result; + } + +@@ -2097,8 +2109,7 @@ static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr, + * rom_read + * reads a number of bytes from the Edgeport device starting at the given + * address. +- * If successful returns the number of bytes read, otherwise it returns +- * a negative error number of the problem. ++ * Returns zero on success or a negative error number. 
+ ****************************************************************************/ + static int rom_read(struct usb_serial *serial, __u16 extAddr, + __u16 addr, __u16 length, __u8 *data) +@@ -2123,12 +2134,17 @@ static int rom_read(struct usb_serial *serial, __u16 extAddr, + USB_REQUEST_ION_READ_ROM, + 0xC0, addr, extAddr, transfer_buffer, + current_length, 300); +- if (result < 0) ++ if (result < current_length) { ++ if (result >= 0) ++ result = -EIO; + break; ++ } + memcpy(data, transfer_buffer, current_length); + length -= current_length; + addr += current_length; + data += current_length; ++ ++ result = 0; + } + + kfree(transfer_buffer); +@@ -2585,9 +2601,10 @@ static void get_manufacturing_desc(struct edgeport_serial *edge_serial) + EDGE_MANUF_DESC_LEN, + (__u8 *)(&edge_serial->manuf_descriptor)); + +- if (response < 1) +- dev_err(dev, "error in getting manufacturer descriptor\n"); +- else { ++ if (response < 0) { ++ dev_err(dev, "error in getting manufacturer descriptor: %d\n", ++ response); ++ } else { + char string[30]; + dev_dbg(dev, "**Manufacturer Descriptor\n"); + dev_dbg(dev, " RomSize: %dK\n", +@@ -2644,9 +2661,10 @@ static void get_boot_desc(struct edgeport_serial *edge_serial) + EDGE_BOOT_DESC_LEN, + (__u8 *)(&edge_serial->boot_descriptor)); + +- if (response < 1) +- dev_err(dev, "error in getting boot descriptor\n"); +- else { ++ if (response < 0) { ++ dev_err(dev, "error in getting boot descriptor: %d\n", ++ response); ++ } else { + dev_dbg(dev, "**Boot Descriptor:\n"); + dev_dbg(dev, " BootCodeLength: %d\n", + le16_to_cpu(edge_serial->boot_descriptor.BootCodeLength)); +@@ -2789,7 +2807,7 @@ static int edge_startup(struct usb_serial *serial) + dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name); + + /* Read the epic descriptor */ +- if (get_epic_descriptor(edge_serial) <= 0) { ++ if (get_epic_descriptor(edge_serial) < 0) { + /* memcpy descriptor to Supports structures */ + memcpy(&edge_serial->epic_descriptor.Supports, descriptor, + sizeof(struct edge_compatibility_bits)); +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c +index 930be98d59b3..6b0942428917 100644 +--- a/drivers/usb/serial/keyspan_pda.c ++++ b/drivers/usb/serial/keyspan_pda.c +@@ -139,6 +139,7 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) + { + struct usb_serial_port *port = urb->context; + unsigned char *data = urb->transfer_buffer; ++ unsigned int len = urb->actual_length; + int retval; + int status = urb->status; + struct keyspan_pda_private *priv; +@@ -159,18 +160,26 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) + goto exit; + } + ++ if (len < 1) { ++ dev_warn(&port->dev, "short message received\n"); ++ goto exit; ++ } ++ + /* see if the message is data or a status interrupt */ + switch (data[0]) { + case 0: + /* rest of message is rx data */ +- if (urb->actual_length) { +- tty_insert_flip_string(&port->port, data + 1, +- urb->actual_length - 1); +- tty_flip_buffer_push(&port->port); +- } ++ if (len < 2) ++ break; ++ tty_insert_flip_string(&port->port, data + 1, len - 1); ++ tty_flip_buffer_push(&port->port); + break; + case 1: + /* status interrupt */ ++ if (len < 3) { ++ dev_warn(&port->dev, "short interrupt message received\n"); ++ break; ++ } + dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]); + switch (data[1]) { + case 1: /* modemline change */ +diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c +index 89726f702202..9bf82c262c5b 100644 +--- a/drivers/usb/serial/mct_u232.c ++++ 
b/drivers/usb/serial/mct_u232.c +@@ -322,8 +322,12 @@ static int mct_u232_get_modem_stat(struct usb_serial_port *port, + MCT_U232_GET_REQUEST_TYPE, + 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE, + WDR_TIMEOUT); +- if (rc < 0) { ++ if (rc < MCT_U232_GET_MODEM_STAT_SIZE) { + dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc); ++ ++ if (rc >= 0) ++ rc = -EIO; ++ + *msr = 0; + } else { + *msr = buf[0]; +diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c +index a3ed07c58754..af0c87276299 100644 +--- a/drivers/usb/serial/quatech2.c ++++ b/drivers/usb/serial/quatech2.c +@@ -188,22 +188,22 @@ static inline int qt2_setdevice(struct usb_device *dev, u8 *data) + } + + +-static inline int qt2_getdevice(struct usb_device *dev, u8 *data) +-{ +- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), +- QT_SET_GET_DEVICE, 0xc0, 0, 0, +- data, 3, QT2_USB_TIMEOUT); +-} +- + static inline int qt2_getregister(struct usb_device *dev, + u8 uart, + u8 reg, + u8 *data) + { +- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), +- QT_SET_GET_REGISTER, 0xc0, reg, +- uart, data, sizeof(*data), QT2_USB_TIMEOUT); ++ int ret; ++ ++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ++ QT_SET_GET_REGISTER, 0xc0, reg, ++ uart, data, sizeof(*data), QT2_USB_TIMEOUT); ++ if (ret < sizeof(*data)) { ++ if (ret >= 0) ++ ret = -EIO; ++ } + ++ return ret; + } + + static inline int qt2_setregister(struct usb_device *dev, +@@ -372,9 +372,11 @@ static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port) + 0xc0, 0, + device_port, data, 2, QT2_USB_TIMEOUT); + +- if (status < 0) { ++ if (status < 2) { + dev_err(&port->dev, "%s - open port failed %i\n", __func__, + status); ++ if (status >= 0) ++ status = -EIO; + kfree(data); + return status; + } +diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c +index 70a098de429f..886e1294b120 100644 +--- a/drivers/usb/serial/ssu100.c ++++ b/drivers/usb/serial/ssu100.c +@@ -80,9 +80,17 @@ static inline int ssu100_setdevice(struct usb_device *dev, u8 *data) + + static inline int ssu100_getdevice(struct usb_device *dev, u8 *data) + { +- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), +- QT_SET_GET_DEVICE, 0xc0, 0, 0, +- data, 3, 300); ++ int ret; ++ ++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ++ QT_SET_GET_DEVICE, 0xc0, 0, 0, ++ data, 3, 300); ++ if (ret < 3) { ++ if (ret >= 0) ++ ret = -EIO; ++ } ++ ++ return ret; + } + + static inline int ssu100_getregister(struct usb_device *dev, +@@ -90,10 +98,17 @@ static inline int ssu100_getregister(struct usb_device *dev, + unsigned short reg, + u8 *data) + { +- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), +- QT_SET_GET_REGISTER, 0xc0, reg, +- uart, data, sizeof(*data), 300); ++ int ret; ++ ++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ++ QT_SET_GET_REGISTER, 0xc0, reg, ++ uart, data, sizeof(*data), 300); ++ if (ret < sizeof(*data)) { ++ if (ret >= 0) ++ ret = -EIO; ++ } + ++ return ret; + } + + +@@ -289,8 +304,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port) + QT_OPEN_CLOSE_CHANNEL, + QT_TRANSFER_IN, 0x01, + 0, data, 2, 300); +- if (result < 0) { ++ if (result < 2) { + dev_dbg(&port->dev, "%s - open failed %i\n", __func__, result); ++ if (result >= 0) ++ result = -EIO; + kfree(data); + return result; + } +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c +index 535fcfafc097..fe7f5ace6064 100644 +--- a/drivers/usb/serial/ti_usb_3410_5052.c ++++ b/drivers/usb/serial/ti_usb_3410_5052.c 
+@@ -1352,13 +1352,10 @@ static int ti_command_out_sync(struct ti_device *tdev, __u8 command, + (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT), + value, moduleid, data, size, 1000); + +- if (status == size) +- status = 0; +- +- if (status > 0) +- status = -ECOMM; ++ if (status < 0) ++ return status; + +- return status; ++ return 0; + } + + +@@ -1374,8 +1371,7 @@ static int ti_command_in_sync(struct ti_device *tdev, __u8 command, + + if (status == size) + status = 0; +- +- if (status > 0) ++ else if (status >= 0) + status = -ECOMM; + + return status; +diff --git a/fs/9p/acl.c b/fs/9p/acl.c +index 929b618da43b..c30c6ceac2c4 100644 +--- a/fs/9p/acl.c ++++ b/fs/9p/acl.c +@@ -283,6 +283,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler, + case ACL_TYPE_ACCESS: + if (acl) { + struct iattr iattr; ++ struct posix_acl *old_acl = acl; + + retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl); + if (retval) +@@ -293,6 +294,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler, + * by the mode bits. So don't + * update ACL. + */ ++ posix_acl_release(old_acl); + value = NULL; + size = 0; + } +diff --git a/fs/block_dev.c b/fs/block_dev.c +index 198aea66fe71..e5733bb537c9 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -1098,7 +1098,6 @@ int revalidate_disk(struct gendisk *disk) + + if (disk->fops->revalidate_disk) + ret = disk->fops->revalidate_disk(disk); +- blk_integrity_revalidate(disk); + bdev = bdget_disk(disk, 0); + if (!bdev) + return ret; +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 16462e702f96..86e1cb899957 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -1059,6 +1059,13 @@ static int sanity_check_raw_super(struct super_block *sb, + return 1; + } + ++ if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) { ++ f2fs_msg(sb, KERN_INFO, ++ "Invalid segment count (%u)", ++ le32_to_cpu(raw_super->segment_count)); ++ return 1; ++ } ++ + /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ + if (sanity_check_area_boundary(sb, raw_super)) + return 1; +diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h +index 25c6324a0dd0..3d6e6ce44c5c 100644 +--- a/include/linux/f2fs_fs.h ++++ b/include/linux/f2fs_fs.h +@@ -284,6 +284,12 @@ struct f2fs_nat_block { + #define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry)) + + /* ++ * F2FS uses 4 bytes to represent block address. As a result, supported size of ++ * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments. ++ */ ++#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2) ++ ++/* + * Note that f2fs_sit_entry->vblocks has the following bit-field information. 
+ * [15:10] : allocation type such as CURSEG_XXXX_TYPE + * [9:0] : valid block count +diff --git a/include/linux/genhd.h b/include/linux/genhd.h +index 847cc1d91634..5012fcdb4c9e 100644 +--- a/include/linux/genhd.h ++++ b/include/linux/genhd.h +@@ -742,11 +742,9 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) + #if defined(CONFIG_BLK_DEV_INTEGRITY) + extern void blk_integrity_add(struct gendisk *); + extern void blk_integrity_del(struct gendisk *); +-extern void blk_integrity_revalidate(struct gendisk *); + #else /* CONFIG_BLK_DEV_INTEGRITY */ + static inline void blk_integrity_add(struct gendisk *disk) { } + static inline void blk_integrity_del(struct gendisk *disk) { } +-static inline void blk_integrity_revalidate(struct gendisk *disk) { } + #endif /* CONFIG_BLK_DEV_INTEGRITY */ + + #else /* CONFIG_BLOCK */ +diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h +index 5dd75fa47dd8..f9be467d6695 100644 +--- a/include/linux/usb/chipidea.h ++++ b/include/linux/usb/chipidea.h +@@ -14,6 +14,7 @@ struct ci_hdrc; + * struct ci_hdrc_cable - structure for external connector cable state tracking + * @state: current state of the line + * @changed: set to true when extcon event happen ++ * @enabled: set to true if we've enabled the vbus or id interrupt + * @edev: device which generate events + * @ci: driver state of the chipidea device + * @nb: hold event notification callback +@@ -22,6 +23,7 @@ struct ci_hdrc; + struct ci_hdrc_cable { + bool state; + bool changed; ++ bool enabled; + struct extcon_dev *edev; + struct ci_hdrc *ci; + struct notifier_block nb; +diff --git a/include/net/addrconf.h b/include/net/addrconf.h +index 78003dfb8539..18dd7a3caf2f 100644 +--- a/include/net/addrconf.h ++++ b/include/net/addrconf.h +@@ -19,6 +19,8 @@ + #define ADDRCONF_TIMER_FUZZ (HZ / 4) + #define ADDRCONF_TIMER_FUZZ_MAX (HZ) + ++#define ADDRCONF_NOTIFY_PRIORITY 0 ++ + #include + #include + +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h +index 295d291269e2..6275d651f76e 100644 +--- a/include/net/ip6_route.h ++++ b/include/net/ip6_route.h +@@ -77,6 +77,7 @@ static inline struct dst_entry *ip6_route_output(struct net *net, + struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, + int flags); + ++void ip6_route_init_special_entries(void); + int ip6_route_init(void); + void ip6_route_cleanup(void); + +diff --git a/lib/test_bpf.c b/lib/test_bpf.c +index 10cd1860e5b0..7e26aea3e404 100644 +--- a/lib/test_bpf.c ++++ b/lib/test_bpf.c +@@ -4315,6 +4315,51 @@ static struct bpf_test tests[] = { + { }, + { { 0, 1 } }, + }, ++ { ++ /* Mainly testing JIT + imm64 here. 
*/ ++ "JMP_JGE_X: ldimm64 test 1", ++ .u.insns_int = { ++ BPF_ALU32_IMM(BPF_MOV, R0, 0), ++ BPF_LD_IMM64(R1, 3), ++ BPF_LD_IMM64(R2, 2), ++ BPF_JMP_REG(BPF_JGE, R1, R2, 2), ++ BPF_LD_IMM64(R0, 0xffffffffffffffffUL), ++ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL), ++ BPF_EXIT_INSN(), ++ }, ++ INTERNAL, ++ { }, ++ { { 0, 0xeeeeeeeeU } }, ++ }, ++ { ++ "JMP_JGE_X: ldimm64 test 2", ++ .u.insns_int = { ++ BPF_ALU32_IMM(BPF_MOV, R0, 0), ++ BPF_LD_IMM64(R1, 3), ++ BPF_LD_IMM64(R2, 2), ++ BPF_JMP_REG(BPF_JGE, R1, R2, 0), ++ BPF_LD_IMM64(R0, 0xffffffffffffffffUL), ++ BPF_EXIT_INSN(), ++ }, ++ INTERNAL, ++ { }, ++ { { 0, 0xffffffffU } }, ++ }, ++ { ++ "JMP_JGE_X: ldimm64 test 3", ++ .u.insns_int = { ++ BPF_ALU32_IMM(BPF_MOV, R0, 1), ++ BPF_LD_IMM64(R1, 3), ++ BPF_LD_IMM64(R2, 2), ++ BPF_JMP_REG(BPF_JGE, R1, R2, 4), ++ BPF_LD_IMM64(R0, 0xffffffffffffffffUL), ++ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL), ++ BPF_EXIT_INSN(), ++ }, ++ INTERNAL, ++ { }, ++ { { 0, 1 } }, ++ }, + /* BPF_JMP | BPF_JNE | BPF_X */ + { + "JMP_JNE_X: if (3 != 2) return 1", +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index b94e165a4f79..fe38ef58997c 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1018,7 +1018,7 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) + return err; + } + +- if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name)) ++ if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name)) + return -EMSGSIZE; + + return 0; +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c +index 7113bae4e6a0..8f2cd7d09720 100644 +--- a/net/ipv4/raw.c ++++ b/net/ipv4/raw.c +@@ -354,6 +354,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, + rt->dst.dev->mtu); + return -EMSGSIZE; + } ++ if (length < sizeof(struct iphdr)) ++ return -EINVAL; ++ + if (flags&MSG_PROBE) + goto out; + +diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c +index 1e70fa8fa793..3861dedd5365 100644 +--- a/net/ipv4/tcp_lp.c ++++ b/net/ipv4/tcp_lp.c +@@ -264,13 +264,15 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us) + { + struct tcp_sock *tp = tcp_sk(sk); + struct lp *lp = inet_csk_ca(sk); ++ u32 delta; + + if (rtt_us > 0) + tcp_lp_rtt_sample(sk, rtt_us); + + /* calc inference */ +- if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) +- lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr); ++ delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr; ++ if ((s32)delta > 0) ++ lp->inference = 3 * delta; + + /* test if within inference */ + if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference)) +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c +index 019db68bdb9f..4c1c94fa8f08 100644 +--- a/net/ipv4/tcp_minisocks.c ++++ b/net/ipv4/tcp_minisocks.c +@@ -547,6 +547,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, + newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; + newtp->rx_opt.mss_clamp = req->mss; + tcp_ecn_openreq_child(newtp, req); ++ newtp->fastopen_req = NULL; + newtp->fastopen_rsk = NULL; + newtp->syn_data_acked = 0; + newtp->rack.mstamp.v64 = 0; +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index de95714d021c..3fdcdc730f71 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1221,7 +1221,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, + * eventually). The difference is that pulled data not copied, but + * immediately discarded. 
+ */ +-static void __pskb_trim_head(struct sk_buff *skb, int len) ++static int __pskb_trim_head(struct sk_buff *skb, int len) + { + struct skb_shared_info *shinfo; + int i, k, eat; +@@ -1231,7 +1231,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) + __skb_pull(skb, eat); + len -= eat; + if (!len) +- return; ++ return 0; + } + eat = len; + k = 0; +@@ -1257,23 +1257,28 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) + skb_reset_tail_pointer(skb); + skb->data_len -= len; + skb->len = skb->data_len; ++ return len; + } + + /* Remove acked data from a packet in the transmit queue. */ + int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) + { ++ u32 delta_truesize; ++ + if (skb_unclone(skb, GFP_ATOMIC)) + return -ENOMEM; + +- __pskb_trim_head(skb, len); ++ delta_truesize = __pskb_trim_head(skb, len); + + TCP_SKB_CB(skb)->seq += len; + skb->ip_summed = CHECKSUM_PARTIAL; + +- skb->truesize -= len; +- sk->sk_wmem_queued -= len; +- sk_mem_uncharge(sk, len); +- sock_set_flag(sk, SOCK_QUEUE_SHRUNK); ++ if (delta_truesize) { ++ skb->truesize -= delta_truesize; ++ sk->sk_wmem_queued -= delta_truesize; ++ sk_mem_uncharge(sk, delta_truesize); ++ sock_set_flag(sk, SOCK_QUEUE_SHRUNK); ++ } + + /* Any change of skb->len requires recalculation of tso factor. */ + if (tcp_skb_pcount(skb) > 1) +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 253186a35567..7090fef372cc 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -3306,6 +3306,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, + */ + static struct notifier_block ipv6_dev_notf = { + .notifier_call = addrconf_notify, ++ .priority = ADDRCONF_NOTIFY_PRIORITY, + }; + + static void addrconf_type_change(struct net_device *dev, unsigned long event) +@@ -5940,6 +5941,8 @@ int __init addrconf_init(void) + goto errlo; + } + ++ ip6_route_init_special_entries(); ++ + for (i = 0; i < IN6_ADDR_HSIZE; i++) + INIT_HLIST_HEAD(&inet6_addr_lst[i]); + +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index a625f69a28dd..c93ede16795d 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -630,6 +630,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, + ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu); + return -EMSGSIZE; + } ++ if (length < sizeof(struct ipv6hdr)) ++ return -EINVAL; + if (flags&MSG_PROBE) + goto out; + +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 6c91d5c4a92c..8f4177a1d4f5 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -3363,7 +3363,10 @@ static int ip6_route_dev_notify(struct notifier_block *this, + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net *net = dev_net(dev); + +- if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { ++ if (!(dev->flags & IFF_LOOPBACK)) ++ return NOTIFY_OK; ++ ++ if (event == NETDEV_REGISTER) { + net->ipv6.ip6_null_entry->dst.dev = dev; + net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); + #ifdef CONFIG_IPV6_MULTIPLE_TABLES +@@ -3372,6 +3375,12 @@ static int ip6_route_dev_notify(struct notifier_block *this, + net->ipv6.ip6_blk_hole_entry->dst.dev = dev; + net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); + #endif ++ } else if (event == NETDEV_UNREGISTER) { ++ in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); ++#ifdef CONFIG_IPV6_MULTIPLE_TABLES ++ in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev); ++ in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev); ++#endif + } + + return NOTIFY_OK; +@@ -3678,9 +3687,24 @@ static struct 
pernet_operations ip6_route_net_late_ops = { + + static struct notifier_block ip6_route_dev_notifier = { + .notifier_call = ip6_route_dev_notify, +- .priority = 0, ++ .priority = ADDRCONF_NOTIFY_PRIORITY - 10, + }; + ++void __init ip6_route_init_special_entries(void) ++{ ++ /* Registering of the loopback is done before this portion of code, ++ * the loopback reference in rt6_info will not be taken, do it ++ * manually for init_net */ ++ init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; ++ init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); ++ #ifdef CONFIG_IPV6_MULTIPLE_TABLES ++ init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; ++ init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); ++ init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; ++ init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); ++ #endif ++} ++ + int __init ip6_route_init(void) + { + int ret; +@@ -3707,17 +3731,6 @@ int __init ip6_route_init(void) + + ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; + +- /* Registering of the loopback is done before this portion of code, +- * the loopback reference in rt6_info will not be taken, do it +- * manually for init_net */ +- init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; +- init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); +- #ifdef CONFIG_IPV6_MULTIPLE_TABLES +- init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; +- init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); +- init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; +- init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); +- #endif + ret = fib6_init(); + if (ret) + goto out_register_subsys; +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 8f3e5e9d8bdb..e6de496bffbe 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2166,7 +2166,20 @@ static void azx_remove(struct pci_dev *pci) + /* cancel the pending probing work */ + chip = card->private_data; + hda = container_of(chip, struct hda_intel, chip); ++ /* FIXME: below is an ugly workaround. ++ * Both device_release_driver() and driver_probe_device() ++ * take *both* the device's and its parent's lock before ++ * calling the remove() and probe() callbacks. The codec ++ * probe takes the locks of both the codec itself and its ++ * parent, i.e. the PCI controller dev. Meanwhile, when ++ * the PCI controller is unbound, it takes its lock, too ++ * ==> ouch, a deadlock! ++ * As a workaround, we unlock temporarily here the controller ++ * device during cancel_work_sync() call. 
++ */ ++ device_unlock(&pci->dev); + cancel_work_sync(&hda->probe_work); ++ device_lock(&pci->dev); + + snd_card_free(card); + } +diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c +index 93b0aa74ca03..39c2c7d067bb 100644 +--- a/tools/power/cpupower/utils/helpers/cpuid.c ++++ b/tools/power/cpupower/utils/helpers/cpuid.c +@@ -156,6 +156,7 @@ out: + */ + case 0x2C: /* Westmere EP - Gulftown */ + cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; ++ break; + case 0x2A: /* SNB */ + case 0x2D: /* SNB Xeon */ + case 0x3A: /* IVB */ diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.68-69.patch b/patch/kernel/mvebu64-default/03-patch-4.4.68-69.patch new file mode 100644 index 000000000..0fde812ae --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.68-69.patch @@ -0,0 +1,2955 @@ +diff --git a/Makefile b/Makefile +index e6c7990497e7..dc5df61ea4be 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 68 ++SUBLEVEL = 69 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c +index a9b3b905e661..443db0c43d7c 100644 +--- a/arch/arm/kvm/psci.c ++++ b/arch/arm/kvm/psci.c +@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu) + + static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) + { +- int ret = 1; ++ struct kvm *kvm = vcpu->kvm; + unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); + unsigned long val; ++ int ret = 1; + + switch (psci_fn) { + case PSCI_0_2_FN_PSCI_VERSION: +@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) + break; + case PSCI_0_2_FN_CPU_ON: + case PSCI_0_2_FN64_CPU_ON: ++ mutex_lock(&kvm->lock); + val = kvm_psci_vcpu_on(vcpu); ++ mutex_unlock(&kvm->lock); + break; + case PSCI_0_2_FN_AFFINITY_INFO: + case PSCI_0_2_FN64_AFFINITY_INFO: +@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) + + static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) + { ++ struct kvm *kvm = vcpu->kvm; + unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); + unsigned long val; + +@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) + val = PSCI_RET_SUCCESS; + break; + case KVM_PSCI_FN_CPU_ON: ++ mutex_lock(&kvm->lock); + val = kvm_psci_vcpu_on(vcpu); ++ mutex_unlock(&kvm->lock); + break; + default: + val = PSCI_RET_NOT_SUPPORTED; +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c +index d2650e84faf2..c2489f62c4fb 100644 +--- a/arch/arm64/kvm/sys_regs.c ++++ b/arch/arm64/kvm/sys_regs.c +@@ -1054,8 +1054,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, + { + struct sys_reg_params params; + u32 hsr = kvm_vcpu_get_hsr(vcpu); +- int Rt = (hsr >> 5) & 0xf; +- int Rt2 = (hsr >> 10) & 0xf; ++ int Rt = (hsr >> 5) & 0x1f; ++ int Rt2 = (hsr >> 10) & 0x1f; + + params.is_aarch32 = true; + params.is_32bit = false; +@@ -1106,7 +1106,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, + { + struct sys_reg_params params; + u32 hsr = kvm_vcpu_get_hsr(vcpu); +- int Rt = (hsr >> 5) & 0xf; ++ int Rt = (hsr >> 5) & 0x1f; + + params.is_aarch32 = true; + params.is_32bit = true; +diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h +index 9011a88353de..ed1e9206f830 100644 +--- a/arch/x86/boot/boot.h ++++ b/arch/x86/boot/boot.h +@@ -16,7 +16,7 @@ + #ifndef BOOT_BOOT_H + #define BOOT_BOOT_H + +-#define STACK_SIZE 512 /* Minimum number of bytes for stack */ ++#define STACK_SIZE 1024 /* Minimum number of bytes for stack */ + + #ifndef __ASSEMBLY__ + +diff --git 
a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h +index bd8ce6bcdfc9..6503526d7b24 100644 +--- a/arch/x86/include/asm/pmem.h ++++ b/arch/x86/include/asm/pmem.h +@@ -122,7 +122,7 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, + + if (bytes < 8) { + if (!IS_ALIGNED(dest, 4) || (bytes != 4)) +- __arch_wb_cache_pmem(addr, 1); ++ __arch_wb_cache_pmem(addr, bytes); + } else { + if (!IS_ALIGNED(dest, 8)) { + dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index e75095fa414e..281899da19d4 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -2960,6 +2960,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, + | KVM_VCPUEVENT_VALID_SMM)) + return -EINVAL; + ++ /* INITs are latched while in SMM */ ++ if (events->flags & KVM_VCPUEVENT_VALID_SMM && ++ (events->smi.smm || events->smi.pending) && ++ vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) ++ return -EINVAL; ++ + process_nmi(vcpu); + vcpu->arch.exception.pending = events->exception.injected; + vcpu->arch.exception.nr = events->exception.nr; +@@ -6993,6 +6999,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + mp_state->mp_state != KVM_MP_STATE_RUNNABLE) + return -EINVAL; + ++ /* INITs are latched while in SMM */ ++ if ((is_smm(vcpu) || vcpu->arch.smi_pending) && ++ (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || ++ mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) ++ return -EINVAL; ++ + if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { + vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; + set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); +diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c +index a629694ee750..e14c43a2d187 100644 +--- a/arch/x86/um/ptrace_64.c ++++ b/arch/x86/um/ptrace_64.c +@@ -121,7 +121,7 @@ int poke_user(struct task_struct *child, long addr, long data) + else if ((addr >= offsetof(struct user, u_debugreg[0])) && + (addr <= offsetof(struct user, u_debugreg[7]))) { + addr -= offsetof(struct user, u_debugreg[0]); +- addr = addr >> 2; ++ addr = addr >> 3; + if ((addr == 4) || (addr == 5)) + return -EIO; + child->thread.arch.debugregs[addr] = data; +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c +index 1e56ff583459..63146c378f1e 100644 +--- a/arch/x86/xen/mmu.c ++++ b/arch/x86/xen/mmu.c +@@ -2038,7 +2038,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr) + + /* + * Translate a virtual address to a physical one without relying on mapped +- * page tables. ++ * page tables. Don't rely on big pages being aligned in (guest) physical ++ * space! 
+ */ + static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) + { +@@ -2059,7 +2060,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) + sizeof(pud))); + if (!pud_present(pud)) + return 0; +- pa = pud_pfn(pud) << PAGE_SHIFT; ++ pa = pud_val(pud) & PTE_PFN_MASK; + if (pud_large(pud)) + return pa + (vaddr & ~PUD_MASK); + +@@ -2067,7 +2068,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) + sizeof(pmd))); + if (!pmd_present(pmd)) + return 0; +- pa = pmd_pfn(pmd) << PAGE_SHIFT; ++ pa = pmd_val(pmd) & PTE_PFN_MASK; + if (pmd_large(pmd)) + return pa + (vaddr & ~PMD_MASK); + +diff --git a/block/blk-integrity.c b/block/blk-integrity.c +index 319f2e4f4a8b..478f572cb1e7 100644 +--- a/block/blk-integrity.c ++++ b/block/blk-integrity.c +@@ -412,7 +412,8 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template + + bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE | + template->flags; +- bi->interval_exp = ilog2(queue_logical_block_size(disk->queue)); ++ bi->interval_exp = template->interval_exp ? : ++ ilog2(queue_logical_block_size(disk->queue)); + bi->profile = template->profile ? template->profile : &nop_profile; + bi->tuple_size = template->tuple_size; + bi->tag_size = template->tag_size; +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c +index 6d4d4569447e..faea9d728fd2 100644 +--- a/crypto/algif_aead.c ++++ b/crypto/algif_aead.c +@@ -29,6 +29,11 @@ struct aead_sg_list { + struct scatterlist sg[ALG_MAX_PAGES]; + }; + ++struct aead_tfm { ++ struct crypto_aead *aead; ++ bool has_key; ++}; ++ + struct aead_ctx { + struct aead_sg_list tsgl; + /* +@@ -513,24 +518,146 @@ static struct proto_ops algif_aead_ops = { + .poll = aead_poll, + }; + ++static int aead_check_key(struct socket *sock) ++{ ++ int err = 0; ++ struct sock *psk; ++ struct alg_sock *pask; ++ struct aead_tfm *tfm; ++ struct sock *sk = sock->sk; ++ struct alg_sock *ask = alg_sk(sk); ++ ++ lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock_child; ++ ++ psk = ask->parent; ++ pask = alg_sk(ask->parent); ++ tfm = pask->private; ++ ++ err = -ENOKEY; ++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING); ++ if (!tfm->has_key) ++ goto unlock; ++ ++ if (!pask->refcnt++) ++ sock_hold(psk); ++ ++ ask->refcnt = 1; ++ sock_put(psk); ++ ++ err = 0; ++ ++unlock: ++ release_sock(psk); ++unlock_child: ++ release_sock(sk); ++ ++ return err; ++} ++ ++static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg, ++ size_t size) ++{ ++ int err; ++ ++ err = aead_check_key(sock); ++ if (err) ++ return err; ++ ++ return aead_sendmsg(sock, msg, size); ++} ++ ++static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page, ++ int offset, size_t size, int flags) ++{ ++ int err; ++ ++ err = aead_check_key(sock); ++ if (err) ++ return err; ++ ++ return aead_sendpage(sock, page, offset, size, flags); ++} ++ ++static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg, ++ size_t ignored, int flags) ++{ ++ int err; ++ ++ err = aead_check_key(sock); ++ if (err) ++ return err; ++ ++ return aead_recvmsg(sock, msg, ignored, flags); ++} ++ ++static struct proto_ops algif_aead_ops_nokey = { ++ .family = PF_ALG, ++ ++ .connect = sock_no_connect, ++ .socketpair = sock_no_socketpair, ++ .getname = sock_no_getname, ++ .ioctl = sock_no_ioctl, ++ .listen = sock_no_listen, ++ .shutdown = sock_no_shutdown, ++ .getsockopt = sock_no_getsockopt, ++ .mmap = sock_no_mmap, ++ .bind = sock_no_bind, ++ .accept = sock_no_accept, ++ .setsockopt = sock_no_setsockopt, 
++ ++ .release = af_alg_release, ++ .sendmsg = aead_sendmsg_nokey, ++ .sendpage = aead_sendpage_nokey, ++ .recvmsg = aead_recvmsg_nokey, ++ .poll = aead_poll, ++}; ++ + static void *aead_bind(const char *name, u32 type, u32 mask) + { +- return crypto_alloc_aead(name, type, mask); ++ struct aead_tfm *tfm; ++ struct crypto_aead *aead; ++ ++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); ++ if (!tfm) ++ return ERR_PTR(-ENOMEM); ++ ++ aead = crypto_alloc_aead(name, type, mask); ++ if (IS_ERR(aead)) { ++ kfree(tfm); ++ return ERR_CAST(aead); ++ } ++ ++ tfm->aead = aead; ++ ++ return tfm; + } + + static void aead_release(void *private) + { +- crypto_free_aead(private); ++ struct aead_tfm *tfm = private; ++ ++ crypto_free_aead(tfm->aead); ++ kfree(tfm); + } + + static int aead_setauthsize(void *private, unsigned int authsize) + { +- return crypto_aead_setauthsize(private, authsize); ++ struct aead_tfm *tfm = private; ++ ++ return crypto_aead_setauthsize(tfm->aead, authsize); + } + + static int aead_setkey(void *private, const u8 *key, unsigned int keylen) + { +- return crypto_aead_setkey(private, key, keylen); ++ struct aead_tfm *tfm = private; ++ int err; ++ ++ err = crypto_aead_setkey(tfm->aead, key, keylen); ++ tfm->has_key = !err; ++ ++ return err; + } + + static void aead_sock_destruct(struct sock *sk) +@@ -546,12 +673,14 @@ static void aead_sock_destruct(struct sock *sk) + af_alg_release_parent(sk); + } + +-static int aead_accept_parent(void *private, struct sock *sk) ++static int aead_accept_parent_nokey(void *private, struct sock *sk) + { + struct aead_ctx *ctx; + struct alg_sock *ask = alg_sk(sk); +- unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private); +- unsigned int ivlen = crypto_aead_ivsize(private); ++ struct aead_tfm *tfm = private; ++ struct crypto_aead *aead = tfm->aead; ++ unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead); ++ unsigned int ivlen = crypto_aead_ivsize(aead); + + ctx = sock_kmalloc(sk, len, GFP_KERNEL); + if (!ctx) +@@ -577,7 +706,7 @@ static int aead_accept_parent(void *private, struct sock *sk) + + ask->private = ctx; + +- aead_request_set_tfm(&ctx->aead_req, private); ++ aead_request_set_tfm(&ctx->aead_req, aead); + aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, &ctx->completion); + +@@ -586,13 +715,25 @@ static int aead_accept_parent(void *private, struct sock *sk) + return 0; + } + ++static int aead_accept_parent(void *private, struct sock *sk) ++{ ++ struct aead_tfm *tfm = private; ++ ++ if (!tfm->has_key) ++ return -ENOKEY; ++ ++ return aead_accept_parent_nokey(private, sk); ++} ++ + static const struct af_alg_type algif_type_aead = { + .bind = aead_bind, + .release = aead_release, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .accept = aead_accept_parent, ++ .accept_nokey = aead_accept_parent_nokey, + .ops = &algif_aead_ops, ++ .ops_nokey = &algif_aead_ops_nokey, + .name = "aead", + .owner = THIS_MODULE + }; +diff --git a/drivers/Makefile b/drivers/Makefile +index 795d0ca714bf..098997f2cc3a 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -98,6 +98,7 @@ obj-$(CONFIG_USB_PHY) += usb/ + obj-$(CONFIG_USB) += usb/ + obj-$(CONFIG_PCI) += usb/ + obj-$(CONFIG_USB_GADGET) += usb/ ++obj-$(CONFIG_OF) += usb/ + obj-$(CONFIG_SERIO) += input/serio/ + obj-$(CONFIG_GAMEPORT) += input/gameport/ + obj-$(CONFIG_INPUT) += input/ +diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c +index cb852cc750b7..f9b569ef3dd7 100644 +--- a/drivers/bluetooth/hci_bcm.c ++++ 
b/drivers/bluetooth/hci_bcm.c +@@ -287,6 +287,9 @@ static int bcm_open(struct hci_uart *hu) + + hu->priv = bcm; + ++ if (!hu->tty->dev) ++ goto out; ++ + mutex_lock(&bcm_device_lock); + list_for_each(p, &bcm_device_list) { + struct bcm_device *dev = list_entry(p, struct bcm_device, list); +@@ -307,7 +310,7 @@ static int bcm_open(struct hci_uart *hu) + } + + mutex_unlock(&bcm_device_lock); +- ++out: + return 0; + } + +diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c +index b9065506a847..0c63fce0c1e0 100644 +--- a/drivers/bluetooth/hci_intel.c ++++ b/drivers/bluetooth/hci_intel.c +@@ -307,6 +307,9 @@ static int intel_set_power(struct hci_uart *hu, bool powered) + struct list_head *p; + int err = -ENODEV; + ++ if (!hu->tty->dev) ++ return err; ++ + mutex_lock(&intel_device_list_lock); + + list_for_each(p, &intel_device_list) { +@@ -379,6 +382,9 @@ static void intel_busy_work(struct work_struct *work) + struct intel_data *intel = container_of(work, struct intel_data, + busy_work); + ++ if (!intel->hu->tty->dev) ++ return; ++ + /* Link is busy, delay the suspend */ + mutex_lock(&intel_device_list_lock); + list_for_each(p, &intel_device_list) { +@@ -913,6 +919,8 @@ done: + list_for_each(p, &intel_device_list) { + struct intel_device *dev = list_entry(p, struct intel_device, + list); ++ if (!hu->tty->dev) ++ break; + if (hu->tty->dev->parent == dev->pdev->dev.parent) { + if (device_may_wakeup(&dev->pdev->dev)) + idev = dev; +@@ -1094,6 +1102,9 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb) + + BT_DBG("hu %p skb %p", hu, skb); + ++ if (!hu->tty->dev) ++ goto out_enqueue; ++ + /* Be sure our controller is resumed and potential LPM transaction + * completed before enqueuing any packet. + */ +@@ -1110,7 +1121,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb) + } + } + mutex_unlock(&intel_device_list_lock); +- ++out_enqueue: + skb_queue_tail(&intel->txq, skb); + + return 0; +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c +index 90e624662257..0d83cfb9708f 100644 +--- a/drivers/char/ipmi/ipmi_ssif.c ++++ b/drivers/char/ipmi/ipmi_ssif.c +@@ -888,6 +888,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, + * for details on the intricacies of this. + */ + int left; ++ unsigned char *data_to_send; + + ssif_inc_stat(ssif_info, sent_messages_parts); + +@@ -896,6 +897,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, + left = 32; + /* Length byte. */ + ssif_info->multi_data[ssif_info->multi_pos] = left; ++ data_to_send = ssif_info->multi_data + ssif_info->multi_pos; + ssif_info->multi_pos += left; + if (left < 32) + /* +@@ -909,7 +911,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, + rv = ssif_i2c_send(ssif_info, msg_written_handler, + I2C_SMBUS_WRITE, + SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE, +- ssif_info->multi_data + ssif_info->multi_pos, ++ data_to_send, + I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + /* request failed, just return the error. 
*/ +diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c +index b1f37d4095fa..e76d52a203a7 100644 +--- a/drivers/infiniband/core/sysfs.c ++++ b/drivers/infiniband/core/sysfs.c +@@ -863,7 +863,7 @@ err_put: + free_port_list_attributes(device); + + err_unregister: +- device_unregister(class_dev); ++ device_del(class_dev); + + err: + return ret; +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index 77ddf2fa8625..8763fb832b01 100644 +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -2491,6 +2491,7 @@ err_counter: + mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); + + err_map: ++ mlx4_ib_free_eqs(dev, ibdev); + iounmap(ibdev->uar_map); + + err_uar: +diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c +index 36ec8aa048aa..0b5bb0cee6f9 100644 +--- a/drivers/infiniband/hw/mlx4/mcg.c ++++ b/drivers/infiniband/hw/mlx4/mcg.c +@@ -1105,7 +1105,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy + while ((p = rb_first(&ctx->mcg_table)) != NULL) { + group = rb_entry(p, struct mcast_group, node); + if (atomic_read(&group->refcount)) +- mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group); ++ mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n", ++ atomic_read(&group->refcount), group); + + force_clean_group(group); + } +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c +index 6bd5740e2691..09396bd7b02d 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c +@@ -281,8 +281,11 @@ void ipoib_delete_debug_files(struct net_device *dev) + { + struct ipoib_dev_priv *priv = netdev_priv(dev); + ++ WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n"); ++ WARN_ONCE(!priv->path_dentry, "null path debug file\n"); + debugfs_remove(priv->mcg_dentry); + debugfs_remove(priv->path_dentry); ++ priv->mcg_dentry = priv->path_dentry = NULL; + } + + int ipoib_register_debugfs(void) +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index 8efcff1beb8f..6699ecd855f0 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -106,6 +106,33 @@ static struct ib_client ipoib_client = { + .get_net_dev_by_params = ipoib_get_net_dev_by_params, + }; + ++#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG ++static int ipoib_netdev_event(struct notifier_block *this, ++ unsigned long event, void *ptr) ++{ ++ struct netdev_notifier_info *ni = ptr; ++ struct net_device *dev = ni->dev; ++ ++ if (dev->netdev_ops->ndo_open != ipoib_open) ++ return NOTIFY_DONE; ++ ++ switch (event) { ++ case NETDEV_REGISTER: ++ ipoib_create_debug_files(dev); ++ break; ++ case NETDEV_CHANGENAME: ++ ipoib_delete_debug_files(dev); ++ ipoib_create_debug_files(dev); ++ break; ++ case NETDEV_UNREGISTER: ++ ipoib_delete_debug_files(dev); ++ break; ++ } ++ ++ return NOTIFY_DONE; ++} ++#endif ++ + int ipoib_open(struct net_device *dev) + { + struct ipoib_dev_priv *priv = netdev_priv(dev); +@@ -1595,8 +1622,6 @@ void ipoib_dev_cleanup(struct net_device *dev) + + ASSERT_RTNL(); + +- ipoib_delete_debug_files(dev); +- + /* Delete any child interfaces first */ + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { + /* Stop GC on child */ +@@ -1908,8 +1933,6 @@ static struct net_device *ipoib_add_port(const char *format, + goto register_failed; + } + +- 
ipoib_create_debug_files(priv->dev); +- + if (ipoib_cm_add_mode_attr(priv->dev)) + goto sysfs_failed; + if (ipoib_add_pkey_attr(priv->dev)) +@@ -1924,7 +1947,6 @@ static struct net_device *ipoib_add_port(const char *format, + return priv->dev; + + sysfs_failed: +- ipoib_delete_debug_files(priv->dev); + unregister_netdev(priv->dev); + + register_failed: +@@ -2006,6 +2028,12 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) + kfree(dev_list); + } + ++#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG ++static struct notifier_block ipoib_netdev_notifier = { ++ .notifier_call = ipoib_netdev_event, ++}; ++#endif ++ + static int __init ipoib_init_module(void) + { + int ret; +@@ -2057,6 +2085,9 @@ static int __init ipoib_init_module(void) + if (ret) + goto err_client; + ++#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG ++ register_netdevice_notifier(&ipoib_netdev_notifier); ++#endif + return 0; + + err_client: +@@ -2074,6 +2105,9 @@ err_fs: + + static void __exit ipoib_cleanup_module(void) + { ++#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG ++ unregister_netdevice_notifier(&ipoib_netdev_notifier); ++#endif + ipoib_netlink_fini(); + ib_unregister_client(&ipoib_client); + ib_sa_unregister_client(&ipoib_sa_client); +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +index fca1a882de27..57a34f87dedf 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +@@ -85,8 +85,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, + goto register_failed; + } + +- ipoib_create_debug_files(priv->dev); +- + /* RTNL childs don't need proprietary sysfs entries */ + if (type == IPOIB_LEGACY_CHILD) { + if (ipoib_cm_add_mode_attr(priv->dev)) +@@ -107,7 +105,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, + + sysfs_failed: + result = -ENOMEM; +- ipoib_delete_debug_files(priv->dev); + unregister_netdevice(priv->dev); + + register_failed: +diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c +index 665bf3285618..32e76c5ee741 100644 +--- a/drivers/md/dm-era-target.c ++++ b/drivers/md/dm-era-target.c +@@ -961,15 +961,15 @@ static int metadata_commit(struct era_metadata *md) + } + } + +- r = save_sm_root(md); ++ r = dm_tm_pre_commit(md->tm); + if (r) { +- DMERR("%s: save_sm_root failed", __func__); ++ DMERR("%s: pre commit failed", __func__); + return r; + } + +- r = dm_tm_pre_commit(md->tm); ++ r = save_sm_root(md); + if (r) { +- DMERR("%s: pre commit failed", __func__); ++ DMERR("%s: save_sm_root failed", __func__); + return r; + } + +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c +index 1e1bef349487..6decf4a95ce1 100644 +--- a/drivers/net/wireless/ath/ath10k/mac.c ++++ b/drivers/net/wireless/ath/ath10k/mac.c +@@ -6351,12 +6351,13 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + + static int ath10k_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", + arvif->vdev_id, sta->addr, tid, action); 
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c +index a680a970b7f7..e4281438c04f 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c +@@ -1657,13 +1657,14 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw, + + static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, +- u16 tid, u16 *ssn, u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct ath9k_htc_priv *priv = hw->priv; + struct ath9k_htc_sta *ista; + int ret = 0; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; + + mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index b114e57a823f..3abc64574116 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -1855,14 +1855,16 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + + static int ath9k_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, +- u16 tid, u16 *ssn, u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + bool flush = false; + int ret = 0; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; + + mutex_lock(&sc->mutex); + +diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c +index 19d3d64416bf..4d1527a2e292 100644 +--- a/drivers/net/wireless/ath/carl9170/main.c ++++ b/drivers/net/wireless/ath/carl9170/main.c +@@ -1413,10 +1413,12 @@ static void carl9170_ampdu_work(struct work_struct *work) + + static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, +- u16 tid, u16 *ssn, u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; + struct ar9170 *ar = hw->priv; + struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; + struct carl9170_sta_tid *tid_info; +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c +index 7c169abdbafe..a27279c2c695 100644 +--- a/drivers/net/wireless/ath/wcn36xx/main.c ++++ b/drivers/net/wireless/ath/wcn36xx/main.c +@@ -857,12 +857,14 @@ static int wcn36xx_resume(struct ieee80211_hw *hw) + + static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct wcn36xx *wcn = hw->priv; + struct wcn36xx_sta *sta_priv = NULL; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; + + wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n", + action, tid); +diff --git 
a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +index bec2dc1ca2e4..61ae2768132a 100644 +--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c ++++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +@@ -818,13 +818,15 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + static int + brcms_ops_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct brcms_info *wl = hw->priv; + struct scb *scb = &wl->wlc->pri_scb; + int status; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u8 buf_size = params->buf_size; + + if (WARN_ON(scb->magic != SCB_MAGIC)) + return -EIDRM; +diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c +index 95a7fdb3cc1c..c602a1e674ca 100644 +--- a/drivers/net/wireless/cw1200/sta.c ++++ b/drivers/net/wireless/cw1200/sta.c +@@ -2135,9 +2135,7 @@ void cw1200_mcast_timeout(unsigned long arg) + + int cw1200_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + /* Aggregation is implemented fully in firmware, + * including block ack negotiation. Do not allow +diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h +index bebb3379017f..a0bacaa39b31 100644 +--- a/drivers/net/wireless/cw1200/sta.h ++++ b/drivers/net/wireless/cw1200/sta.h +@@ -109,9 +109,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev, + u32 changed); + int cw1200_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu); ++ struct ieee80211_ampdu_params *params); + + void cw1200_suspend_resume(struct cw1200_common *priv, + struct wsm_suspend_resume *arg); +diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c +index 6656215a13a9..04b0349a6ad9 100644 +--- a/drivers/net/wireless/iwlegacy/4965-mac.c ++++ b/drivers/net/wireless/iwlegacy/4965-mac.c +@@ -5982,12 +5982,14 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + + int + il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 * ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct il_priv *il = hw->priv; + int ret = -EINVAL; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; + + D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid); + +diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h +index 8ab8706f9422..e432715e02d8 100644 +--- a/drivers/net/wireless/iwlegacy/4965.h ++++ b/drivers/net/wireless/iwlegacy/4965.h +@@ -182,9 +182,7 @@ void il4965_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u32 iv32, + u16 *phase1key); + int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct
ieee80211_sta *sta, u16 tid, u16 * ssn, +- u8 buf_size, bool amsdu); ++ struct ieee80211_ampdu_params *params); + int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + void +diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c +index b3ad34e8bf5a..1eb1a823a111 100644 +--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c ++++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c +@@ -729,12 +729,15 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) + + static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + int ret = -EINVAL; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; ++ u8 buf_size = params->buf_size; + struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", +diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c +index ce12717e656a..1a8ea775de08 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c +@@ -826,13 +826,16 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + + static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, +- u16 *ssn, u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + bool tx_agg_ref = false; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; ++ u8 buf_size = params->buf_size; + + IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", + sta->addr, tid, action); +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c +index 0cd95120bc78..d59769e858f4 100644 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -1817,10 +1817,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, + + static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ + switch (action) { + case IEEE80211_AMPDU_TX_START: + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); +diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c +index f715eee39851..e70dd9523911 100644 +--- a/drivers/net/wireless/mediatek/mt7601u/main.c ++++ b/drivers/net/wireless/mediatek/mt7601u/main.c +@@ -334,11 +334,13 @@ static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value) + + static int + mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size, +-
bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct mt7601u_dev *dev = hw->priv; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + + WARN_ON(msta->wcid.idx > GROUP_WCID(0)); +diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c +index 30e3aaae32e2..088429d0a634 100644 +--- a/drivers/net/wireless/mwl8k.c ++++ b/drivers/net/wireless/mwl8k.c +@@ -5421,11 +5421,13 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx, + + static int + mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { +- ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; ++ u8 buf_size = params->buf_size; + int i, rc = 0; + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_ampdu_stream *stream; +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +index 6aed923a709a..7d820c395375 100644 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +@@ -5375,13 +5375,13 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + + static int + rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size, +- bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; + u8 ampdu_factor, ampdu_density; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; + + switch (action) { + case IEEE80211_AMPDU_TX_START: +diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c +index e36d8c456275..8b537a5a4b01 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/core.c ++++ b/drivers/net/wireless/realtek/rtlwifi/core.c +@@ -1369,11 +1369,13 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw, + + static int rtl_op_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct rtl_priv *rtlpriv = rtl_priv(hw); ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; + + switch (action) { + case IEEE80211_AMPDU_TX_START: +diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c +index b5bcc933a2a6..4df992de7d07 100644 +--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c ++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c +@@ -659,29 +659,24 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw, + * informs the f/w regarding this. + * @hw: Pointer to the ieee80211_hw structure. + * @vif: Pointer to the ieee80211_vif structure. +- * @action: ieee80211_ampdu_mlme_action enum. +- * @sta: Pointer to the ieee80211_sta structure. +- * @tid: Traffic identifier. +- * @ssn: Pointer to ssn value.
+- * @buf_size: Buffer size (for kernel version > 2.6.38). +- * @amsdu: is AMSDU in AMPDU allowed ++ * @params: Pointer to A-MPDU action parameters + * + * Return: status: 0 on success, negative error code on failure. + */ + static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, +- unsigned short tid, +- unsigned short *ssn, +- unsigned char buf_size, +- bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + int status = -EOPNOTSUPP; + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + u16 seq_no = 0; + u8 ii = 0; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 *ssn = &params->ssn; ++ u8 buf_size = params->buf_size; + + for (ii = 0; ii < RSI_MAX_VIFS; ii++) { + if (vif == adapter->vifs[ii]) +diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c +index 9733b31a780d..69c1c09687a3 100644 +--- a/drivers/net/wireless/rt2x00/rt2800lib.c ++++ b/drivers/net/wireless/rt2x00/rt2800lib.c +@@ -7935,10 +7935,11 @@ u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + EXPORT_SYMBOL_GPL(rt2800_get_tsf); + + int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; + struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv; + int ret = 0; + +diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h +index 440790b92b19..83f1a44fb9b4 100644 +--- a/drivers/net/wireless/rt2x00/rt2800lib.h ++++ b/drivers/net/wireless/rt2x00/rt2800lib.h +@@ -218,9 +218,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, + const struct ieee80211_tx_queue_params *params); + u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu); ++ struct ieee80211_ampdu_params *params); + int rt2800_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey); + void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev); +diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c +index 09c7e098f460..085ef5c87262 100644 +--- a/drivers/net/wireless/ti/wl18xx/event.c ++++ b/drivers/net/wireless/ti/wl18xx/event.c +@@ -206,5 +206,33 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) + mbox->sc_pwd_len, + mbox->sc_pwd); + ++ if (vector & RX_BA_WIN_SIZE_CHANGE_EVENT_ID) { ++ struct wl12xx_vif *wlvif; ++ struct ieee80211_vif *vif; ++ struct ieee80211_sta *sta; ++ u8 link_id = mbox->rx_ba_link_id; ++ u8 win_size = mbox->rx_ba_win_size; ++ const u8 *addr; ++ ++ wlvif = wl->links[link_id].wlvif; ++ vif = wl12xx_wlvif_to_vif(wlvif); ++ ++ /* Update RX aggregation window size and call ++ * MAC routine to stop active RX aggregations for this link ++ */ ++ if (wlvif->bss_type != BSS_TYPE_AP_BSS) ++ addr = vif->bss_conf.bssid; ++ else ++ addr = wl->links[link_id].addr; ++ ++ sta = ieee80211_find_sta(vif, addr); ++ if (sta) { ++ sta->max_rx_aggregation_subframes
= win_size; ++ ieee80211_stop_rx_ba_session(vif, ++ wl->links[link_id].ba_bitmap, ++ addr); ++ } ++ } ++ + return 0; + } +diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h +index f3d4f13379cb..9495fadc8093 100644 +--- a/drivers/net/wireless/ti/wl18xx/event.h ++++ b/drivers/net/wireless/ti/wl18xx/event.h +@@ -38,6 +38,7 @@ enum { + REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18), + DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19), + PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20), ++ RX_BA_WIN_SIZE_CHANGE_EVENT_ID = BIT(21), + SMART_CONFIG_SYNC_EVENT_ID = BIT(22), + SMART_CONFIG_DECODE_EVENT_ID = BIT(23), + TIME_SYNC_EVENT_ID = BIT(24), +diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c +index 50cce42089a5..47f355e92193 100644 +--- a/drivers/net/wireless/ti/wl18xx/main.c ++++ b/drivers/net/wireless/ti/wl18xx/main.c +@@ -1029,7 +1029,8 @@ static int wl18xx_boot(struct wl1271 *wl) + DFS_CHANNELS_CONFIG_COMPLETE_EVENT | + SMART_CONFIG_SYNC_EVENT_ID | + SMART_CONFIG_DECODE_EVENT_ID | +- TIME_SYNC_EVENT_ID; ++ TIME_SYNC_EVENT_ID | ++ RX_BA_WIN_SIZE_CHANGE_EVENT_ID; + + wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID; + +diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c +index f28fa3b5029d..0646c9b6f8d7 100644 +--- a/drivers/net/wireless/ti/wlcore/acx.c ++++ b/drivers/net/wireless/ti/wlcore/acx.c +@@ -1419,7 +1419,8 @@ out: + + /* setup BA session receiver setting in the FW. */ + int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, +- u16 ssn, bool enable, u8 peer_hlid) ++ u16 ssn, bool enable, u8 peer_hlid, ++ u8 win_size) + { + struct wl1271_acx_ba_receiver_setup *acx; + int ret; +@@ -1435,7 +1436,7 @@ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, + acx->hlid = peer_hlid; + acx->tid = tid_index; + acx->enable = enable; +- acx->win_size = wl->conf.ht.rx_ba_win_size; ++ acx->win_size = win_size; + acx->ssn = ssn; + + ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx, +diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h +index 954d57ec98f4..524aea495dff 100644 +--- a/drivers/net/wireless/ti/wlcore/acx.h ++++ b/drivers/net/wireless/ti/wlcore/acx.h +@@ -1112,7 +1112,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl, + int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, + struct wl12xx_vif *wlvif); + int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, +- u16 ssn, bool enable, u8 peer_hlid); ++ u16 ssn, bool enable, u8 peer_hlid, ++ u8 win_size); + int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u64 *mactime); + int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, +diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c +index ec7f6af3fab2..7b27c7e23af2 100644 +--- a/drivers/net/wireless/ti/wlcore/main.c ++++ b/drivers/net/wireless/ti/wlcore/main.c +@@ -5261,14 +5261,16 @@ out: + + static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + u8 hlid, *ba_bitmap; ++ struct ieee80211_sta *sta = params->sta; ++ enum ieee80211_ampdu_mlme_action action = params->action; ++ u16 tid = params->tid; ++ u16 
*ssn = ¶ms->ssn; + + wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action, + tid); +@@ -5326,7 +5328,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, + } + + ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true, +- hlid); ++ hlid, ++ params->buf_size); ++ + if (!ret) { + *ba_bitmap |= BIT(tid); + wl->ba_rx_session_count++; +@@ -5347,7 +5351,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, + } + + ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false, +- hlid); ++ hlid, 0); + if (!ret) { + *ba_bitmap &= ~BIT(tid); + wl->ba_rx_session_count--; +diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c +index b87192e0f9aa..109becdabc24 100644 +--- a/drivers/staging/comedi/drivers/jr3_pci.c ++++ b/drivers/staging/comedi/drivers/jr3_pci.c +@@ -610,7 +610,7 @@ static void jr3_pci_poll_dev(unsigned long data) + s = &dev->subdevices[i]; + spriv = s->private; + +- if (now > spriv->next_time_min) { ++ if (time_after_eq(now, spriv->next_time_min)) { + struct jr3_pci_poll_delay sub_delay; + + sub_delay = jr3_pci_poll_subdevice(s); +@@ -726,11 +726,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev, + s->insn_read = jr3_pci_ai_insn_read; + + spriv = jr3_pci_alloc_spriv(dev, s); +- if (spriv) { +- /* Channel specific range and maxdata */ +- s->range_table_list = spriv->range_table_list; +- s->maxdata_list = spriv->maxdata_list; +- } ++ if (!spriv) ++ return -ENOMEM; ++ ++ /* Channel specific range and maxdata */ ++ s->range_table_list = spriv->range_table_list; ++ s->maxdata_list = spriv->maxdata_list; + } + + /* Reset DSP card */ +diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c +index 445f83615575..fb4f3fea6c66 100644 +--- a/drivers/staging/gdm724x/gdm_mux.c ++++ b/drivers/staging/gdm724x/gdm_mux.c +@@ -670,14 +670,14 @@ static int __init gdm_usb_mux_init(void) + + static void __exit gdm_usb_mux_exit(void) + { +- unregister_lte_tty_driver(); +- + if (mux_rx_wq) { + flush_workqueue(mux_rx_wq); + destroy_workqueue(mux_rx_wq); + } + + usb_deregister(&gdm_mux_driver); ++ unregister_lte_tty_driver(); ++ + } + + module_init(gdm_usb_mux_init); +diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c +index c975c3b87093..cfc3017fd64a 100644 +--- a/drivers/staging/vt6656/usbpipe.c ++++ b/drivers/staging/vt6656/usbpipe.c +@@ -50,15 +50,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, + u16 index, u16 length, u8 *buffer) + { + int status = 0; ++ u8 *usb_buffer; + + if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) + return STATUS_FAILURE; + + mutex_lock(&priv->usb_lock); + ++ usb_buffer = kmemdup(buffer, length, GFP_KERNEL); ++ if (!usb_buffer) { ++ mutex_unlock(&priv->usb_lock); ++ return -ENOMEM; ++ } ++ + status = usb_control_msg(priv->usb, +- usb_sndctrlpipe(priv->usb, 0), request, 0x40, value, +- index, buffer, length, USB_CTL_WAIT); ++ usb_sndctrlpipe(priv->usb, 0), ++ request, 0x40, value, ++ index, usb_buffer, length, USB_CTL_WAIT); ++ ++ kfree(usb_buffer); + + mutex_unlock(&priv->usb_lock); + +@@ -78,15 +88,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, + u16 index, u16 length, u8 *buffer) + { + int status; ++ u8 *usb_buffer; + + if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) + return STATUS_FAILURE; + + mutex_lock(&priv->usb_lock); + ++ usb_buffer = kmalloc(length, GFP_KERNEL); ++ if (!usb_buffer) { ++ mutex_unlock(&priv->usb_lock); ++ return -ENOMEM; ++ } ++ + status = 
usb_control_msg(priv->usb, +- usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value, +- index, buffer, length, USB_CTL_WAIT); ++ usb_rcvctrlpipe(priv->usb, 0), ++ request, 0xc0, value, ++ index, usb_buffer, length, USB_CTL_WAIT); ++ ++ if (status == length) ++ memcpy(buffer, usb_buffer, length); ++ ++ kfree(usb_buffer); + + mutex_unlock(&priv->usb_lock); + +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 6ed80b05d674..200d3de8bc1e 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4821,6 +4821,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) + continue; + } + atomic_set(&sess->session_reinstatement, 1); ++ atomic_set(&sess->session_fall_back_to_erl0, 1); + spin_unlock(&sess->conn_lock); + + list_move_tail(&se_sess->sess_list, &free_list); +diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index b4bfd706ac94..dc1bd1f1bdfe 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -725,11 +725,8 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item, + + if (iscsit_get_tpg(tpg) < 0) + return -EINVAL; +- /* +- * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1 +- */ +- ret = iscsit_tpg_set_initiator_node_queue_depth(tpg, +- config_item_name(acl_ci), cmdsn_depth, 1); ++ ++ ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth); + + pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for" + "InitiatorName: %s\n", config_item_name(wwn_ci), +@@ -1593,42 +1590,31 @@ static int lio_tpg_check_prot_fabric_only( + } + + /* +- * Called with spin_lock_irq(struct se_portal_group->session_lock) held +- * or not held. +- * +- * Also, this function calls iscsit_inc_session_usage_count() on the ++ * This function calls iscsit_inc_session_usage_count() on the + * struct iscsi_session in question. 
+ */ + static int lio_tpg_shutdown_session(struct se_session *se_sess) + { + struct iscsi_session *sess = se_sess->fabric_sess_ptr; +- struct se_portal_group *se_tpg = se_sess->se_tpg; +- bool local_lock = false; +- +- if (!spin_is_locked(&se_tpg->session_lock)) { +- spin_lock_irq(&se_tpg->session_lock); +- local_lock = true; +- } ++ struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg; + ++ spin_lock_bh(&se_tpg->session_lock); + spin_lock(&sess->conn_lock); + if (atomic_read(&sess->session_fall_back_to_erl0) || + atomic_read(&sess->session_logout) || + (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { + spin_unlock(&sess->conn_lock); +- if (local_lock) +- spin_unlock_irq(&sess->conn_lock); ++ spin_unlock_bh(&se_tpg->session_lock); + return 0; + } + atomic_set(&sess->session_reinstatement, 1); ++ atomic_set(&sess->session_fall_back_to_erl0, 1); + spin_unlock(&sess->conn_lock); + + iscsit_stop_time2retain_timer(sess); +- spin_unlock_irq(&se_tpg->session_lock); ++ spin_unlock_bh(&se_tpg->session_lock); + + iscsit_stop_session(sess, 1, 1); +- if (!local_lock) +- spin_lock_irq(&se_tpg->session_lock); +- + return 1; + } + +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index 316f66172335..4a137b0ae3dc 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -195,6 +195,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) + initiatorname_param->value) && + (sess_p->sess_ops->SessionType == sessiontype))) { + atomic_set(&sess_p->session_reinstatement, 1); ++ atomic_set(&sess_p->session_fall_back_to_erl0, 1); + spin_unlock(&sess_p->conn_lock); + iscsit_inc_session_usage_count(sess_p); + iscsit_stop_time2retain_timer(sess_p); +diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c +index 68261b7dcefe..205a509b0dfb 100644 +--- a/drivers/target/iscsi/iscsi_target_tpg.c ++++ b/drivers/target/iscsi/iscsi_target_tpg.c +@@ -589,16 +589,6 @@ int iscsit_tpg_del_network_portal( + return iscsit_tpg_release_np(tpg_np, tpg, np); + } + +-int iscsit_tpg_set_initiator_node_queue_depth( +- struct iscsi_portal_group *tpg, +- unsigned char *initiatorname, +- u32 queue_depth, +- int force) +-{ +- return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg, +- initiatorname, queue_depth, force); +-} +- + int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication) + { + unsigned char buf1[256], buf2[256], *none = NULL; +diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h +index 9db32bd24cd4..2da211920c18 100644 +--- a/drivers/target/iscsi/iscsi_target_tpg.h ++++ b/drivers/target/iscsi/iscsi_target_tpg.h +@@ -26,8 +26,6 @@ extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_gr + int); + extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *, + struct iscsi_tpg_np *); +-extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *, +- unsigned char *, u32, int); + extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32); + extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32); + extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32); +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c +index 79291869bce6..041a56987845 100644 +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -594,8 +594,7 @@ fd_execute_rw(struct se_cmd 
*cmd, struct scatterlist *sgl, u32 sgl_nents, + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + +- if (ret) +- target_complete_cmd(cmd, SAM_STAT_GOOD); ++ target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index 90c5dffc9fa4..608117819366 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -498,8 +498,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes + * been failed with a non-zero SCSI status. + */ + if (cmd->scsi_status) { +- pr_err("compare_and_write_callback: non zero scsi_status:" ++ pr_debug("compare_and_write_callback: non zero scsi_status:" + " 0x%02x\n", cmd->scsi_status); ++ *post_ret = 1; ++ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION) ++ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + +diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c +index 2794c6ec5c3c..899c33b3c734 100644 +--- a/drivers/target/target_core_tpg.c ++++ b/drivers/target/target_core_tpg.c +@@ -169,28 +169,25 @@ void core_tpg_add_node_to_devs( + mutex_unlock(&tpg->tpg_lun_mutex); + } + +-/* core_set_queue_depth_for_node(): +- * +- * +- */ +-static int core_set_queue_depth_for_node( +- struct se_portal_group *tpg, +- struct se_node_acl *acl) ++static void ++target_set_nacl_queue_depth(struct se_portal_group *tpg, ++ struct se_node_acl *acl, u32 queue_depth) + { ++ acl->queue_depth = queue_depth; ++ + if (!acl->queue_depth) { +- pr_err("Queue depth for %s Initiator Node: %s is 0," ++ pr_warn("Queue depth for %s Initiator Node: %s is 0," + "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), + acl->initiatorname); + acl->queue_depth = 1; + } +- +- return 0; + } + + static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg, + const unsigned char *initiatorname) + { + struct se_node_acl *acl; ++ u32 queue_depth; + + acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size), + GFP_KERNEL); +@@ -205,24 +202,20 @@ static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg, + spin_lock_init(&acl->nacl_sess_lock); + mutex_init(&acl->lun_entry_mutex); + atomic_set(&acl->acl_pr_ref_count, 0); ++ + if (tpg->se_tpg_tfo->tpg_get_default_depth) +- acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); ++ queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); + else +- acl->queue_depth = 1; ++ queue_depth = 1; ++ target_set_nacl_queue_depth(tpg, acl, queue_depth); ++ + snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); + acl->se_tpg = tpg; + acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); + + tpg->se_tpg_tfo->set_default_node_attributes(acl); + +- if (core_set_queue_depth_for_node(tpg, acl) < 0) +- goto out_free_acl; +- + return acl; +- +-out_free_acl: +- kfree(acl); +- return NULL; + } + + static void target_add_node_acl(struct se_node_acl *acl) +@@ -369,7 +362,8 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) + if (sess->sess_tearing_down != 0) + continue; + +- target_get_session(sess); ++ if (!target_get_session(sess)) ++ continue; + list_move(&sess->sess_acl_list, &sess_list); + } + spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); +@@ -406,108 +400,52 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) + * + */ + int core_tpg_set_initiator_node_queue_depth( +- struct se_portal_group *tpg, +- unsigned char *initiatorname, +- u32 queue_depth, +- int force) ++ struct 
se_node_acl *acl, ++ u32 queue_depth) + { +- struct se_session *sess, *init_sess = NULL; +- struct se_node_acl *acl; ++ LIST_HEAD(sess_list); ++ struct se_portal_group *tpg = acl->se_tpg; ++ struct se_session *sess, *sess_tmp; + unsigned long flags; +- int dynamic_acl = 0; +- +- mutex_lock(&tpg->acl_node_mutex); +- acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); +- if (!acl) { +- pr_err("Access Control List entry for %s Initiator" +- " Node %s does not exists for TPG %hu, ignoring" +- " request.\n", tpg->se_tpg_tfo->get_fabric_name(), +- initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); +- mutex_unlock(&tpg->acl_node_mutex); +- return -ENODEV; +- } +- if (acl->dynamic_node_acl) { +- acl->dynamic_node_acl = 0; +- dynamic_acl = 1; +- } +- mutex_unlock(&tpg->acl_node_mutex); +- +- spin_lock_irqsave(&tpg->session_lock, flags); +- list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { +- if (sess->se_node_acl != acl) +- continue; +- +- if (!force) { +- pr_err("Unable to change queue depth for %s" +- " Initiator Node: %s while session is" +- " operational. To forcefully change the queue" +- " depth and force session reinstatement" +- " use the \"force=1\" parameter.\n", +- tpg->se_tpg_tfo->get_fabric_name(), initiatorname); +- spin_unlock_irqrestore(&tpg->session_lock, flags); +- +- mutex_lock(&tpg->acl_node_mutex); +- if (dynamic_acl) +- acl->dynamic_node_acl = 1; +- mutex_unlock(&tpg->acl_node_mutex); +- return -EEXIST; +- } +- /* +- * Determine if the session needs to be closed by our context. +- */ +- if (!tpg->se_tpg_tfo->shutdown_session(sess)) +- continue; +- +- init_sess = sess; +- break; +- } ++ int rc; + + /* + * User has requested to change the queue depth for a Initiator Node. + * Change the value in the Node's struct se_node_acl, and call +- * core_set_queue_depth_for_node() to add the requested queue depth. +- * +- * Finally call tpg->se_tpg_tfo->close_session() to force session +- * reinstatement to occur if there is an active session for the +- * $FABRIC_MOD Initiator Node in question. ++ * target_set_nacl_queue_depth() to set the new queue depth. + */ +- acl->queue_depth = queue_depth; ++ target_set_nacl_queue_depth(tpg, acl, queue_depth); ++ ++ spin_lock_irqsave(&acl->nacl_sess_lock, flags); ++ list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list, ++ sess_acl_list) { ++ if (sess->sess_tearing_down != 0) ++ continue; ++ if (!target_get_session(sess)) ++ continue; ++ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); + +- if (core_set_queue_depth_for_node(tpg, acl) < 0) { +- spin_unlock_irqrestore(&tpg->session_lock, flags); + /* +- * Force session reinstatement if +- * core_set_queue_depth_for_node() failed, because we assume +- * the $FABRIC_MOD has already the set session reinstatement +- * bit from tpg->se_tpg_tfo->shutdown_session() called above. ++ * Finally call tpg->se_tpg_tfo->close_session() to force session ++ * reinstatement to occur if there is an active session for the ++ * $FABRIC_MOD Initiator Node in question. 
+ */ +- if (init_sess) +- tpg->se_tpg_tfo->close_session(init_sess); +- +- mutex_lock(&tpg->acl_node_mutex); +- if (dynamic_acl) +- acl->dynamic_node_acl = 1; +- mutex_unlock(&tpg->acl_node_mutex); +- return -EINVAL; ++ rc = tpg->se_tpg_tfo->shutdown_session(sess); ++ target_put_session(sess); ++ if (!rc) { ++ spin_lock_irqsave(&acl->nacl_sess_lock, flags); ++ continue; ++ } ++ target_put_session(sess); ++ spin_lock_irqsave(&acl->nacl_sess_lock, flags); + } +- spin_unlock_irqrestore(&tpg->session_lock, flags); +- /* +- * If the $FABRIC_MOD session for the Initiator Node ACL exists, +- * forcefully shutdown the $FABRIC_MOD session/nexus. +- */ +- if (init_sess) +- tpg->se_tpg_tfo->close_session(init_sess); ++ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); + + pr_debug("Successfully changed queue depth to: %d for Initiator" +- " Node: %s on %s Target Portal Group: %u\n", queue_depth, +- initiatorname, tpg->se_tpg_tfo->get_fabric_name(), ++ " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth, ++ acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg)); + +- mutex_lock(&tpg->acl_node_mutex); +- if (dynamic_acl) +- acl->dynamic_node_acl = 1; +- mutex_unlock(&tpg->acl_node_mutex); +- + return 0; + } + EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index df2059984e14..af301414a9f3 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -383,9 +383,9 @@ static void target_release_session(struct kref *kref) + se_tpg->se_tpg_tfo->close_session(se_sess); + } + +-void target_get_session(struct se_session *se_sess) ++int target_get_session(struct se_session *se_sess) + { +- kref_get(&se_sess->sess_kref); ++ return kref_get_unless_zero(&se_sess->sess_kref); + } + EXPORT_SYMBOL(target_get_session); + +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 807d80145686..96aa0ad32497 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -216,16 +216,11 @@ static int pty_signal(struct tty_struct *tty, int sig) + static void pty_flush_buffer(struct tty_struct *tty) + { + struct tty_struct *to = tty->link; +- struct tty_ldisc *ld; + + if (!to) + return; + +- ld = tty_ldisc_ref(to); +- tty_buffer_flush(to, ld); +- if (ld) +- tty_ldisc_deref(ld); +- ++ tty_buffer_flush(to, NULL); + if (to->packet) { + spin_lock_irq(&tty->ctrl_lock); + tty->ctrl_status |= TIOCPKT_FLUSHWRITE; +diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c +index 24280d9a05e9..de1c143b475f 100644 +--- a/drivers/tty/serial/omap-serial.c ++++ b/drivers/tty/serial/omap-serial.c +@@ -1712,7 +1712,8 @@ static int serial_omap_probe(struct platform_device *pdev) + return 0; + + err_add_port: +- pm_runtime_put(&pdev->dev); ++ pm_runtime_dont_use_autosuspend(&pdev->dev); ++ pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_qos_remove_request(&up->pm_qos_request); + device_init_wakeup(up->dev, false); +@@ -1725,9 +1726,13 @@ static int serial_omap_remove(struct platform_device *dev) + { + struct uart_omap_port *up = platform_get_drvdata(dev); + ++ pm_runtime_get_sync(up->dev); ++ ++ uart_remove_one_port(&serial_omap_reg, &up->port); ++ ++ pm_runtime_dont_use_autosuspend(up->dev); + pm_runtime_put_sync(up->dev); + pm_runtime_disable(up->dev); +- uart_remove_one_port(&serial_omap_reg, &up->port); + pm_qos_remove_request(&up->pm_qos_request); + device_init_wakeup(&dev->dev, false); + +diff --git 
a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c +index 6deb06147202..e6bc1a6be4a4 100644 +--- a/drivers/tty/serial/samsung.c ++++ b/drivers/tty/serial/samsung.c +@@ -900,14 +900,13 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p) + return -ENOMEM; + } + +- dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf, ++ dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf, + dma->rx_size, DMA_FROM_DEVICE); + + spin_lock_irqsave(&p->port.lock, flags); + + /* TX buffer */ +- dma->tx_addr = dma_map_single(dma->tx_chan->device->dev, +- p->port.state->xmit.buf, ++ dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf, + UART_XMIT_SIZE, DMA_TO_DEVICE); + + spin_unlock_irqrestore(&p->port.lock, flags); +@@ -921,7 +920,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p) + + if (dma->rx_chan) { + dmaengine_terminate_all(dma->rx_chan); +- dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr, ++ dma_unmap_single(p->port.dev, dma->rx_addr, + dma->rx_size, DMA_FROM_DEVICE); + kfree(dma->rx_buf); + dma_release_channel(dma->rx_chan); +@@ -930,7 +929,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p) + + if (dma->tx_chan) { + dmaengine_terminate_all(dma->tx_chan); +- dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr, ++ dma_unmap_single(p->port.dev, dma->tx_addr, + UART_XMIT_SIZE, DMA_TO_DEVICE); + dma_release_channel(dma->tx_chan); + dma->tx_chan = NULL; +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c +index dadd1e8dfe09..0bb380a9fcf7 100644 +--- a/drivers/usb/core/driver.c ++++ b/drivers/usb/core/driver.c +@@ -1328,6 +1328,24 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) + */ + if (udev->parent && !PMSG_IS_AUTO(msg)) + status = 0; ++ ++ /* ++ * If the device is inaccessible, don't try to resume ++ * suspended interfaces and just return the error. ++ */ ++ if (status && status != -EBUSY) { ++ int err; ++ u16 devstat; ++ ++ err = usb_get_status(udev, USB_RECIP_DEVICE, 0, ++ &devstat); ++ if (err) { ++ dev_err(&udev->dev, ++ "Failed to suspend device, error %d\n", ++ status); ++ goto done; ++ } ++ } + } + + /* If the suspend failed, resume interfaces that did get suspended */ +@@ -1760,6 +1778,9 @@ static int autosuspend_check(struct usb_device *udev) + int w, i; + struct usb_interface *intf; + ++ if (udev->state == USB_STATE_NOTATTACHED) ++ return -ENODEV; ++ + /* Fail if autosuspend is disabled, or any interfaces are in use, or + * any interface drivers require remote wakeup but it isn't available. 
+ */ +diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c +index ea337a718cc1..b3de806085f0 100644 +--- a/drivers/usb/core/file.c ++++ b/drivers/usb/core/file.c +@@ -26,6 +26,7 @@ + #define MAX_USB_MINORS 256 + static const struct file_operations *usb_minors[MAX_USB_MINORS]; + static DECLARE_RWSEM(minor_rwsem); ++static DEFINE_MUTEX(init_usb_class_mutex); + + static int usb_open(struct inode *inode, struct file *file) + { +@@ -108,8 +109,9 @@ static void release_usb_class(struct kref *kref) + + static void destroy_usb_class(void) + { +- if (usb_class) +- kref_put(&usb_class->kref, release_usb_class); ++ mutex_lock(&init_usb_class_mutex); ++ kref_put(&usb_class->kref, release_usb_class); ++ mutex_unlock(&init_usb_class_mutex); + } + + int usb_major_init(void) +@@ -171,7 +173,10 @@ int usb_register_dev(struct usb_interface *intf, + if (intf->minor >= 0) + return -EADDRINUSE; + ++ mutex_lock(&init_usb_class_mutex); + retval = init_usb_class(); ++ mutex_unlock(&init_usb_class_mutex); ++ + if (retval) + return retval; + +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 7c2d87befb51..67961231cbbd 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -1048,6 +1048,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + + portstatus = portchange = 0; + status = hub_port_status(hub, port1, &portstatus, &portchange); ++ if (status) ++ goto abort; ++ + if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) + dev_dbg(&port_dev->dev, "status %04x change %04x\n", + portstatus, portchange); +@@ -1180,7 +1183,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + + /* Scan all ports that need attention */ + kick_hub_wq(hub); +- ++ abort: + if (type == HUB_INIT2 || type == HUB_INIT3) { + /* Allow autosuspend if it was suppressed */ + disconnected: +@@ -2068,6 +2071,12 @@ void usb_disconnect(struct usb_device **pdev) + dev_info(&udev->dev, "USB disconnect, device number %d\n", + udev->devnum); + ++ /* ++ * Ensure that the pm runtime code knows that the USB device ++ * is in the process of being disconnected. 
++ */ ++ pm_runtime_barrier(&udev->dev); ++ + usb_lock_device(udev); + + hub_disconnect_children(udev); +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 998a738e6359..5d70d46239bb 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -2493,7 +2493,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) + (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | + xhci->cmd_ring->cycle_state; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, +- "// Setting command ring address to 0x%x", val); ++ "// Setting command ring address to 0x%016llx", val_64); + xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + xhci_dbg_cmd_ptrs(xhci); + +diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c +index 1624b09d9748..2e947dc94e32 100644 +--- a/drivers/usb/misc/usbtest.c ++++ b/drivers/usb/misc/usbtest.c +@@ -135,6 +135,7 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf) + case USB_ENDPOINT_XFER_INT: + if (dev->info->intr) + goto try_intr; ++ continue; + case USB_ENDPOINT_XFER_ISOC: + if (dev->info->iso) + goto try_iso; +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index b3a21fcbbaf9..dbd441c1c2ad 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -873,6 +873,7 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, 0x00) }, ++ { USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) }, + { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, + { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 48ee04c94a75..71fb9e59db71 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -873,6 +873,12 @@ + #define FIC_VID 0x1457 + #define FIC_NEO1973_DEBUG_PID 0x5118 + ++/* ++ * Actel / Microsemi ++ */ ++#define ACTEL_VID 0x1514 ++#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID 0x2008 ++ + /* Olimex */ + #define OLIMEX_VID 0x15BA + #define OLIMEX_ARM_USB_OCD_PID 0x0003 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index ecb826eefe02..2fa280671c1e 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -130,57 +130,34 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old) + rb_erase(&old->node, &iommu->dma_list); + } + +-struct vwork { +- struct mm_struct *mm; +- long npage; +- struct work_struct work; +-}; +- +-/* delayed decrement/increment for locked_vm */ +-static void vfio_lock_acct_bg(struct work_struct *work) ++static int vfio_lock_acct(long npage, bool *lock_cap) + { +- struct vwork *vwork = container_of(work, struct vwork, work); +- struct mm_struct *mm; +- +- mm = vwork->mm; +- down_write(&mm->mmap_sem); +- mm->locked_vm += vwork->npage; +- up_write(&mm->mmap_sem); +- mmput(mm); +- kfree(vwork); +-} ++ int ret = 0; + +-static void vfio_lock_acct(long npage) +-{ +- struct vwork *vwork; +- struct mm_struct *mm; ++ if (!npage) ++ return 0; + +- if (!current->mm || !npage) +- return; /* process exited or nothing to do */ ++ if (!current->mm) ++ return -ESRCH; /* process exited */ + +- if (down_write_trylock(¤t->mm->mmap_sem)) { +- current->mm->locked_vm += npage; +- up_write(¤t->mm->mmap_sem); +- return; +- } ++ down_write(¤t->mm->mmap_sem); ++ 
if (npage > 0) { ++ if (lock_cap ? !*lock_cap : !capable(CAP_IPC_LOCK)) { ++ unsigned long limit; + +- /* +- * Couldn't get mmap_sem lock, so must setup to update +- * mm->locked_vm later. If locked_vm were atomic, we +- * wouldn't need this silliness +- */ +- vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL); +- if (!vwork) +- return; +- mm = get_task_mm(current); +- if (!mm) { +- kfree(vwork); +- return; ++ limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; ++ ++ if (current->mm->locked_vm + npage > limit) ++ ret = -ENOMEM; ++ } + } +- INIT_WORK(&vwork->work, vfio_lock_acct_bg); +- vwork->mm = mm; +- vwork->npage = npage; +- schedule_work(&vwork->work); ++ ++ if (!ret) ++ current->mm->locked_vm += npage; ++ ++ up_write(¤t->mm->mmap_sem); ++ ++ return ret; + } + + /* +@@ -262,9 +239,9 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) + static long vfio_pin_pages(unsigned long vaddr, long npage, + int prot, unsigned long *pfn_base) + { +- unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; ++ unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + bool lock_cap = capable(CAP_IPC_LOCK); +- long ret, i; ++ long ret, i = 1; + bool rsvd; + + if (!current->mm) +@@ -283,16 +260,11 @@ static long vfio_pin_pages(unsigned long vaddr, long npage, + return -ENOMEM; + } + +- if (unlikely(disable_hugepages)) { +- if (!rsvd) +- vfio_lock_acct(1); +- return 1; +- } ++ if (unlikely(disable_hugepages)) ++ goto out; + + /* Lock all the consecutive pages from pfn_base */ +- for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) { +- unsigned long pfn = 0; +- ++ for (vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) { + ret = vaddr_get_pfn(vaddr, prot, &pfn); + if (ret) + break; +@@ -308,12 +280,24 @@ static long vfio_pin_pages(unsigned long vaddr, long npage, + put_pfn(pfn, prot); + pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", + __func__, limit << PAGE_SHIFT); +- break; ++ ret = -ENOMEM; ++ goto unpin_out; + } + } + ++out: + if (!rsvd) +- vfio_lock_acct(i); ++ ret = vfio_lock_acct(i, &lock_cap); ++ ++unpin_out: ++ if (ret) { ++ if (!rsvd) { ++ for (pfn = *pfn_base ; i ; pfn++, i--) ++ put_pfn(pfn, prot); ++ } ++ ++ return ret; ++ } + + return i; + } +@@ -328,7 +312,7 @@ static long vfio_unpin_pages(unsigned long pfn, long npage, + unlocked += put_pfn(pfn++, prot); + + if (do_accounting) +- vfio_lock_acct(-unlocked); ++ vfio_lock_acct(-unlocked, NULL); + + return unlocked; + } +@@ -390,7 +374,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma) + cond_resched(); + } + +- vfio_lock_acct(-unlocked); ++ vfio_lock_acct(-unlocked, NULL); + } + + static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) +diff --git a/fs/block_dev.c b/fs/block_dev.c +index e5733bb537c9..26bbaaefdff4 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -88,12 +88,11 @@ void invalidate_bdev(struct block_device *bdev) + { + struct address_space *mapping = bdev->bd_inode->i_mapping; + +- if (mapping->nrpages == 0) +- return; +- +- invalidate_bh_lrus(); +- lru_add_drain_all(); /* make sure all lru add caches are flushed */ +- invalidate_mapping_pages(mapping, 0, -1); ++ if (mapping->nrpages) { ++ invalidate_bh_lrus(); ++ lru_add_drain_all(); /* make sure all lru add caches are flushed */ ++ invalidate_mapping_pages(mapping, 0, -1); ++ } + /* 99% of the time, we don't need to flush the cleancache on the bdev. 
+ * But, for the strange corners, lets be cautious + */ +diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c +index 819163d8313b..b24275ef97f7 100644 +--- a/fs/ceph/xattr.c ++++ b/fs/ceph/xattr.c +@@ -369,6 +369,7 @@ static int __set_xattr(struct ceph_inode_info *ci, + + if (update_xattr) { + int err = 0; ++ + if (xattr && (flags & XATTR_CREATE)) + err = -EEXIST; + else if (!xattr && (flags & XATTR_REPLACE)) +@@ -376,12 +377,14 @@ static int __set_xattr(struct ceph_inode_info *ci, + if (err) { + kfree(name); + kfree(val); ++ kfree(*newxattr); + return err; + } + if (update_xattr < 0) { + if (xattr) + __remove_xattr(ci, xattr); + kfree(name); ++ kfree(*newxattr); + return 0; + } + } +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c +index 02b071bf3732..a0b3e7d1be48 100644 +--- a/fs/cifs/cifs_unicode.c ++++ b/fs/cifs/cifs_unicode.c +@@ -83,6 +83,9 @@ convert_sfm_char(const __u16 src_char, char *target) + case SFM_COLON: + *target = ':'; + break; ++ case SFM_DOUBLEQUOTE: ++ *target = '"'; ++ break; + case SFM_ASTERISK: + *target = '*'; + break; +@@ -418,6 +421,9 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string) + case ':': + dest_char = cpu_to_le16(SFM_COLON); + break; ++ case '"': ++ dest_char = cpu_to_le16(SFM_DOUBLEQUOTE); ++ break; + case '*': + dest_char = cpu_to_le16(SFM_ASTERISK); + break; +diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h +index 479bc0a941f3..07ade707fa60 100644 +--- a/fs/cifs/cifs_unicode.h ++++ b/fs/cifs/cifs_unicode.h +@@ -57,6 +57,7 @@ + * not conflict (although almost does) with the mapping above. + */ + ++#define SFM_DOUBLEQUOTE ((__u16) 0xF020) + #define SFM_ASTERISK ((__u16) 0xF021) + #define SFM_QUESTION ((__u16) 0xF025) + #define SFM_COLON ((__u16) 0xF022) +@@ -64,8 +65,8 @@ + #define SFM_LESSTHAN ((__u16) 0xF023) + #define SFM_PIPE ((__u16) 0xF027) + #define SFM_SLASH ((__u16) 0xF026) +-#define SFM_PERIOD ((__u16) 0xF028) +-#define SFM_SPACE ((__u16) 0xF029) ++#define SFM_SPACE ((__u16) 0xF028) ++#define SFM_PERIOD ((__u16) 0xF029) + + /* + * Mapping mechanism to use when one of the seven reserved characters is +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index 5e2f8b8ca08a..b60150e5b5ce 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -717,6 +717,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server) + if (rc) + return rc; + ++ if (server->capabilities & CAP_UNICODE) ++ smb->hdr.Flags2 |= SMBFLG2_UNICODE; ++ + /* set up echo request */ + smb->hdr.Tid = 0xffff; + smb->hdr.WordCount = 1; +diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c +index 35cf990f87d3..a8f5b31636dc 100644 +--- a/fs/cifs/ioctl.c ++++ b/fs/cifs/ioctl.c +@@ -272,6 +272,8 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) + rc = -EOPNOTSUPP; + break; + case CIFS_IOC_GET_MNT_INFO: ++ if (pSMBFile == NULL) ++ break; + tcon = tlink_tcon(pSMBFile->tlink); + rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg); + break; +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 6cb2603f8a5c..f4afa3b1cc56 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -564,8 +564,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) + } + + if (rsplen != sizeof(struct validate_negotiate_info_rsp)) { +- cifs_dbg(VFS, "invalid size of protocol negotiate response\n"); +- return -EIO; ++ cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n", ++ rsplen); ++ ++ /* relax check since Mac returns max bufsize allowed on ioctl */ ++ if (rsplen > CIFSMaxBufSize) ++ return -EIO; + } + + /* 
check validate negotiate info response matches what we got earlier */ +@@ -1518,8 +1522,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, + * than one credit. Windows typically sets this smaller, but for some + * ioctls it may be useful to allow server to send more. No point + * limiting what the server can send as long as fits in one credit ++ * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE ++ * (by default, note that it can be overridden to make max larger) ++ * in responses (except for read responses which can be bigger. ++ * We may want to bump this limit up + */ +- req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */ ++ req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize); + + if (is_fsctl) + req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 817a937de733..ccae64dad40c 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -5393,6 +5393,11 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) + file_update_time(vma->vm_file); + + down_read(&EXT4_I(inode)->i_mmap_sem); ++ ++ ret = ext4_convert_inline_data(inode); ++ if (ret) ++ goto out_ret; ++ + /* Delalloc case is easy... */ + if (test_opt(inode->i_sb, DELALLOC) && + !ext4_should_journal_data(inode) && +diff --git a/fs/xattr.c b/fs/xattr.c +index 9b932b95d74e..f0da9d24e9ca 100644 +--- a/fs/xattr.c ++++ b/fs/xattr.c +@@ -442,7 +442,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value, + size = XATTR_SIZE_MAX; + kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!kvalue) { +- vvalue = vmalloc(size); ++ vvalue = vzalloc(size); + if (!vvalue) + return -ENOMEM; + kvalue = vvalue; +diff --git a/include/net/mac80211.h b/include/net/mac80211.h +index 760bc4d5a2cf..4e51f9a5a177 100644 +--- a/include/net/mac80211.h ++++ b/include/net/mac80211.h +@@ -1662,6 +1662,9 @@ struct ieee80211_sta_rates { + * @supp_rates: Bitmap of supported rates (per band) + * @ht_cap: HT capabilities of this STA; restricted to our own capabilities + * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities ++ * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU ++ * that this station is allowed to transmit to us. ++ * Can be modified by driver. + * @wme: indicates whether the STA supports QoS/WME (if local devices does, + * otherwise always false) + * @drv_priv: data area for driver use, will always be aligned to +@@ -1688,6 +1691,7 @@ struct ieee80211_sta { + u16 aid; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; ++ u8 max_rx_aggregation_subframes; + bool wme; + u8 uapsd_queues; + u8 max_sp; +@@ -2674,6 +2678,33 @@ enum ieee80211_ampdu_mlme_action { + }; + + /** ++ * struct ieee80211_ampdu_params - AMPDU action parameters ++ * ++ * @action: the ampdu action, value from %ieee80211_ampdu_mlme_action. ++ * @sta: peer of this AMPDU session ++ * @tid: tid of the BA session ++ * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When ++ * action is set to %IEEE80211_AMPDU_RX_START the driver passes back the ++ * actual ssn value used to start the session and writes the value here. ++ * @buf_size: reorder buffer size (number of subframes). Valid only when the ++ * action is set to %IEEE80211_AMPDU_RX_START or ++ * %IEEE80211_AMPDU_TX_OPERATIONAL ++ * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU. 
++ * valid when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL ++ * @timeout: BA session timeout. Valid only when the action is set to ++ * %IEEE80211_AMPDU_RX_START ++ */ ++struct ieee80211_ampdu_params { ++ enum ieee80211_ampdu_mlme_action action; ++ struct ieee80211_sta *sta; ++ u16 tid; ++ u16 ssn; ++ u8 buf_size; ++ bool amsdu; ++ u16 timeout; ++}; ++ ++/** + * enum ieee80211_frame_release_type - frame release reason + * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll + * @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to +@@ -3017,13 +3048,9 @@ enum ieee80211_reconfig_type { + * @ampdu_action: Perform a certain A-MPDU action + * The RA/TID combination determines the destination and TID we want + * the ampdu action to be performed for. The action is defined through +- * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) +- * is the first frame we expect to perform the action on. Notice +- * that TX/RX_STOP can pass NULL for this parameter. +- * The @buf_size parameter is only valid when the action is set to +- * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder +- * buffer size (number of subframes) for this session -- the driver +- * may neither send aggregates containing more subframes than this ++ * ieee80211_ampdu_mlme_action. ++ * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver ++ * may neither send aggregates containing more subframes than @buf_size + * nor send aggregates in a way that lost frames would exceed the + * buffer size. If just limiting the aggregate size, this would be + * possible with a buf_size of 8: +@@ -3034,9 +3061,6 @@ enum ieee80211_reconfig_type { + * buffer size of 8. Correct ways to retransmit #1 would be: + * - TX: 1 or 18 or 81 + * Even "189" would be wrong since 1 could be lost again. +- * The @amsdu parameter is valid when the action is set to +- * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability +- * to receive A-MSDU within A-MPDU. + * + * Returns a negative error code on failure. + * The callback can sleep. 
+@@ -3378,9 +3402,7 @@ struct ieee80211_ops { + int (*tx_last_beacon)(struct ieee80211_hw *hw); + int (*ampdu_action)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, u16 *ssn, +- u8 buf_size, bool amsdu); ++ struct ieee80211_ampdu_params *params); + int (*get_survey)(struct ieee80211_hw *hw, int idx, + struct survey_info *survey); + void (*rfkill_poll)(struct ieee80211_hw *hw); +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h +index 97069ecabe49..5f9b62c129fc 100644 +--- a/include/target/target_core_fabric.h ++++ b/include/target/target_core_fabric.h +@@ -117,7 +117,7 @@ void __transport_register_session(struct se_portal_group *, + struct se_node_acl *, struct se_session *, void *); + void transport_register_session(struct se_portal_group *, + struct se_node_acl *, struct se_session *, void *); +-void target_get_session(struct se_session *); ++int target_get_session(struct se_session *); + void target_put_session(struct se_session *); + ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *); + void transport_free_session(struct se_session *); +@@ -172,8 +172,7 @@ bool target_tpg_has_node_acl(struct se_portal_group *tpg, + const char *); + struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, + unsigned char *); +-int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, +- unsigned char *, u32, int); ++int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32); + int core_tpg_set_initiator_node_tag(struct se_portal_group *, + struct se_node_acl *, const char *); + int core_tpg_register(struct se_wwn *, struct se_portal_group *, int); +diff --git a/kernel/padata.c b/kernel/padata.c +index 401227e3967c..ecc7b3f452c7 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -357,7 +357,7 @@ static int padata_setup_cpumasks(struct parallel_data *pd, + + cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); + if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { +- free_cpumask_var(pd->cpumask.cbcpu); ++ free_cpumask_var(pd->cpumask.pcpu); + return -ENOMEM; + } + +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c +index b1eb8c09a660..c842f40c1173 100644 +--- a/net/bluetooth/hci_sock.c ++++ b/net/bluetooth/hci_sock.c +@@ -1164,7 +1164,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + +- if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE)) ++ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE| ++ MSG_CMSG_COMPAT)) + return -EINVAL; + + if (len < 4 || len > HCI_MAX_FRAME_SIZE) +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c +index 367784be5df2..a830356b94ac 100644 +--- a/net/mac80211/agg-rx.c ++++ b/net/mac80211/agg-rx.c +@@ -7,6 +7,7 @@ + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2007-2010, Intel Corporation ++ * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as +@@ -61,6 +62,14 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + { + struct ieee80211_local *local = sta->local; + struct tid_ampdu_rx *tid_rx; ++ struct ieee80211_ampdu_params params = { ++ .sta = &sta->sta, ++ .action = IEEE80211_AMPDU_RX_STOP, ++ .tid = tid, ++ .amsdu = false, ++ .timeout = 0, ++ .ssn = 0, ++ }; + + 
lockdep_assert_held(&sta->ampdu_mlme.mtx); + +@@ -78,8 +87,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator", + (int)reason); + +- if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, +- &sta->sta, tid, NULL, 0, false)) ++ if (drv_ampdu_action(local, sta->sdata, ¶ms)) + sdata_info(sta->sdata, + "HW problem - can not stop rx aggregation for %pM tid %d\n", + sta->sta.addr, tid); +@@ -237,6 +245,15 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, + { + struct ieee80211_local *local = sta->sdata->local; + struct tid_ampdu_rx *tid_agg_rx; ++ struct ieee80211_ampdu_params params = { ++ .sta = &sta->sta, ++ .action = IEEE80211_AMPDU_RX_START, ++ .tid = tid, ++ .amsdu = false, ++ .timeout = timeout, ++ .ssn = start_seq_num, ++ }; ++ + int i, ret = -EOPNOTSUPP; + u16 status = WLAN_STATUS_REQUEST_DECLINED; + +@@ -273,8 +290,12 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, + buf_size = IEEE80211_MAX_AMPDU_BUF; + + /* make sure the size doesn't exceed the maximum supported by the hw */ +- if (buf_size > local->hw.max_rx_aggregation_subframes) +- buf_size = local->hw.max_rx_aggregation_subframes; ++ if (buf_size > sta->sta.max_rx_aggregation_subframes) ++ buf_size = sta->sta.max_rx_aggregation_subframes; ++ params.buf_size = buf_size; ++ ++ ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n", ++ buf_size, sta->sta.addr); + + /* examine state machine */ + mutex_lock(&sta->ampdu_mlme.mtx); +@@ -322,8 +343,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, + for (i = 0; i < buf_size; i++) + __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]); + +- ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, +- &sta->sta, tid, &start_seq_num, 0, false); ++ ret = drv_ampdu_action(local, sta->sdata, ¶ms); + ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n", + sta->sta.addr, tid, ret); + if (ret) { +diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c +index ff757181b0a8..4932e9f243a2 100644 +--- a/net/mac80211/agg-tx.c ++++ b/net/mac80211/agg-tx.c +@@ -7,6 +7,7 @@ + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2007-2010, Intel Corporation ++ * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as +@@ -295,7 +296,14 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + { + struct ieee80211_local *local = sta->local; + struct tid_ampdu_tx *tid_tx; +- enum ieee80211_ampdu_mlme_action action; ++ struct ieee80211_ampdu_params params = { ++ .sta = &sta->sta, ++ .tid = tid, ++ .buf_size = 0, ++ .amsdu = false, ++ .timeout = 0, ++ .ssn = 0, ++ }; + int ret; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); +@@ -304,10 +312,10 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + case AGG_STOP_DECLINED: + case AGG_STOP_LOCAL_REQUEST: + case AGG_STOP_PEER_REQUEST: +- action = IEEE80211_AMPDU_TX_STOP_CONT; ++ params.action = IEEE80211_AMPDU_TX_STOP_CONT; + break; + case AGG_STOP_DESTROY_STA: +- action = IEEE80211_AMPDU_TX_STOP_FLUSH; ++ params.action = IEEE80211_AMPDU_TX_STOP_FLUSH; + break; + default: + WARN_ON_ONCE(1); +@@ -330,9 +338,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + spin_unlock_bh(&sta->lock); + if (reason != AGG_STOP_DESTROY_STA) + return -EALREADY; +- ret = drv_ampdu_action(local, sta->sdata, +- 
IEEE80211_AMPDU_TX_STOP_FLUSH_CONT, +- &sta->sta, tid, NULL, 0, false); ++ params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT; ++ ret = drv_ampdu_action(local, sta->sdata, ¶ms); + WARN_ON_ONCE(ret); + return 0; + } +@@ -381,8 +388,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + WLAN_BACK_INITIATOR; + tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST; + +- ret = drv_ampdu_action(local, sta->sdata, action, +- &sta->sta, tid, NULL, 0, false); ++ ret = drv_ampdu_action(local, sta->sdata, ¶ms); + + /* HW shall not deny going back to legacy */ + if (WARN_ON(ret)) { +@@ -445,7 +451,14 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) + struct tid_ampdu_tx *tid_tx; + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; +- u16 start_seq_num; ++ struct ieee80211_ampdu_params params = { ++ .sta = &sta->sta, ++ .action = IEEE80211_AMPDU_TX_START, ++ .tid = tid, ++ .buf_size = 0, ++ .amsdu = false, ++ .timeout = 0, ++ }; + int ret; + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); +@@ -467,10 +480,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) + */ + synchronize_net(); + +- start_seq_num = sta->tid_seq[tid] >> 4; +- +- ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, +- &sta->sta, tid, &start_seq_num, 0, false); ++ params.ssn = sta->tid_seq[tid] >> 4; ++ ret = drv_ampdu_action(local, sdata, ¶ms); + if (ret) { + ht_dbg(sdata, + "BA request denied - HW unavailable for %pM tid %d\n", +@@ -499,7 +510,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) + + /* send AddBA request */ + ieee80211_send_addba_request(sdata, sta->sta.addr, tid, +- tid_tx->dialog_token, start_seq_num, ++ tid_tx->dialog_token, params.ssn, + IEEE80211_MAX_AMPDU_BUF, + tid_tx->timeout); + } +@@ -684,18 +695,24 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, + struct sta_info *sta, u16 tid) + { + struct tid_ampdu_tx *tid_tx; ++ struct ieee80211_ampdu_params params = { ++ .sta = &sta->sta, ++ .action = IEEE80211_AMPDU_TX_OPERATIONAL, ++ .tid = tid, ++ .timeout = 0, ++ .ssn = 0, ++ }; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + tid_tx = rcu_dereference_protected_tid_tx(sta, tid); ++ params.buf_size = tid_tx->buf_size; ++ params.amsdu = tid_tx->amsdu; + + ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n", + sta->sta.addr, tid); + +- drv_ampdu_action(local, sta->sdata, +- IEEE80211_AMPDU_TX_OPERATIONAL, +- &sta->sta, tid, NULL, tid_tx->buf_size, +- tid_tx->amsdu); ++ drv_ampdu_action(local, sta->sdata, ¶ms); + + /* + * synchronize with TX path, while splicing the TX path +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c +index ca1fe5576103..c258f1041d33 100644 +--- a/net/mac80211/driver-ops.c ++++ b/net/mac80211/driver-ops.c +@@ -284,9 +284,7 @@ int drv_switch_vif_chanctx(struct ieee80211_local *local, + + int drv_ampdu_action(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, +- u16 *ssn, u8 buf_size, bool amsdu) ++ struct ieee80211_ampdu_params *params) + { + int ret = -EOPNOTSUPP; + +@@ -296,12 +294,10 @@ int drv_ampdu_action(struct ieee80211_local *local, + if (!check_sdata_in_driver(sdata)) + return -EIO; + +- trace_drv_ampdu_action(local, sdata, action, sta, tid, +- ssn, buf_size, amsdu); ++ trace_drv_ampdu_action(local, sdata, params); + + if (local->ops->ampdu_action) +- ret = 
local->ops->ampdu_action(&local->hw, &sdata->vif, action, +- sta, tid, ssn, buf_size, amsdu); ++ ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params); + + trace_drv_return_int(local, ret); + +diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h +index 154ce4b13406..18b0d65baff0 100644 +--- a/net/mac80211/driver-ops.h ++++ b/net/mac80211/driver-ops.h +@@ -585,9 +585,7 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local) + + int drv_ampdu_action(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, +- u16 *ssn, u8 buf_size, bool amsdu); ++ struct ieee80211_ampdu_params *params); + + static inline int drv_get_survey(struct ieee80211_local *local, int idx, + struct survey_info *survey) +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c +index 67066d048e6f..63ea6cbac5ad 100644 +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -329,6 +329,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + + memcpy(sta->addr, addr, ETH_ALEN); + memcpy(sta->sta.addr, addr, ETH_ALEN); ++ sta->sta.max_rx_aggregation_subframes = ++ local->hw.max_rx_aggregation_subframes; ++ + sta->local = local; + sta->sdata = sdata; + sta->rx_stats.last_rx = jiffies; +diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h +index 56c6d6cfa5a1..913e959b03cf 100644 +--- a/net/mac80211/trace.h ++++ b/net/mac80211/trace.h +@@ -80,7 +80,23 @@ + #define KEY_PR_FMT " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d" + #define KEY_PR_ARG __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx + +- ++#define AMPDU_ACTION_ENTRY __field(enum ieee80211_ampdu_mlme_action, \ ++ ieee80211_ampdu_mlme_action) \ ++ STA_ENTRY \ ++ __field(u16, tid) \ ++ __field(u16, ssn) \ ++ __field(u8, buf_size) \ ++ __field(bool, amsdu) \ ++ __field(u16, timeout) ++#define AMPDU_ACTION_ASSIGN STA_NAMED_ASSIGN(params->sta); \ ++ __entry->tid = params->tid; \ ++ __entry->ssn = params->ssn; \ ++ __entry->buf_size = params->buf_size; \ ++ __entry->amsdu = params->amsdu; \ ++ __entry->timeout = params->timeout; ++#define AMPDU_ACTION_PR_FMT STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d" ++#define AMPDU_ACTION_PR_ARG STA_PR_ARG, __entry->tid, __entry->ssn, \ ++ __entry->buf_size, __entry->amsdu, __entry->timeout + + /* + * Tracing for driver callbacks. +@@ -970,38 +986,25 @@ DEFINE_EVENT(local_only_evt, drv_tx_last_beacon, + TRACE_EVENT(drv_ampdu_action, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, +- enum ieee80211_ampdu_mlme_action action, +- struct ieee80211_sta *sta, u16 tid, +- u16 *ssn, u8 buf_size, bool amsdu), ++ struct ieee80211_ampdu_params *params), + +- TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu), ++ TP_ARGS(local, sdata, params), + + TP_STRUCT__entry( + LOCAL_ENTRY +- STA_ENTRY +- __field(u32, action) +- __field(u16, tid) +- __field(u16, ssn) +- __field(u8, buf_size) +- __field(bool, amsdu) + VIF_ENTRY ++ AMPDU_ACTION_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; +- STA_ASSIGN; +- __entry->action = action; +- __entry->tid = tid; +- __entry->ssn = ssn ? 
*ssn : 0; +- __entry->buf_size = buf_size; +- __entry->amsdu = amsdu; ++ AMPDU_ACTION_ASSIGN; + ), + + TP_printk( +- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d", +- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, +- __entry->tid, __entry->buf_size, __entry->amsdu ++ LOCAL_PR_FMT VIF_PR_FMT AMPDU_ACTION_PR_FMT, ++ LOCAL_PR_ARG, VIF_PR_ARG, AMPDU_ACTION_PR_ARG + ) + ); + +diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c +index 31a3035cd4eb..923e59eb82c7 100644 +--- a/tools/testing/selftests/x86/ldt_gdt.c ++++ b/tools/testing/selftests/x86/ldt_gdt.c +@@ -394,6 +394,51 @@ static void *threadproc(void *ctx) + } + } + ++#ifdef __i386__ ++ ++#ifndef SA_RESTORE ++#define SA_RESTORER 0x04000000 ++#endif ++ ++/* ++ * The UAPI header calls this 'struct sigaction', which conflicts with ++ * glibc. Sigh. ++ */ ++struct fake_ksigaction { ++ void *handler; /* the real type is nasty */ ++ unsigned long sa_flags; ++ void (*sa_restorer)(void); ++ unsigned char sigset[8]; ++}; ++ ++static void fix_sa_restorer(int sig) ++{ ++ struct fake_ksigaction ksa; ++ ++ if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) { ++ /* ++ * glibc has a nasty bug: it sometimes writes garbage to ++ * sa_restorer. This interacts quite badly with anything ++ * that fiddles with SS because it can trigger legacy ++ * stack switching. Patch it up. See: ++ * ++ * https://sourceware.org/bugzilla/show_bug.cgi?id=21269 ++ */ ++ if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) { ++ ksa.sa_restorer = NULL; ++ if (syscall(SYS_rt_sigaction, sig, &ksa, NULL, ++ sizeof(ksa.sigset)) != 0) ++ err(1, "rt_sigaction"); ++ } ++ } ++} ++#else ++static void fix_sa_restorer(int sig) ++{ ++ /* 64-bit glibc works fine. */ ++} ++#endif ++ + static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), + int flags) + { +@@ -405,6 +450,7 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), + if (sigaction(sig, &sa, 0)) + err(1, "sigaction"); + ++ fix_sa_restorer(sig); + } + + static jmp_buf jmpbuf; diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.69-70.patch b/patch/kernel/mvebu64-default/03-patch-4.4.69-70.patch new file mode 100644 index 000000000..948006ddb --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.69-70.patch @@ -0,0 +1,3739 @@ +diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt +index d9995f1f51b3..a25a99e82bb1 100644 +--- a/Documentation/arm64/tagged-pointers.txt ++++ b/Documentation/arm64/tagged-pointers.txt +@@ -11,24 +11,56 @@ in AArch64 Linux. + The kernel configures the translation tables so that translations made + via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of + the virtual address ignored by the translation hardware. This frees up +-this byte for application use, with the following caveats: ++this byte for application use. + +- (1) The kernel requires that all user addresses passed to EL1 +- are tagged with tag 0x00. This means that any syscall +- parameters containing user virtual addresses *must* have +- their top byte cleared before trapping to the kernel. + +- (2) Non-zero tags are not preserved when delivering signals. +- This means that signal handlers in applications making use +- of tags cannot rely on the tag information for user virtual +- addresses being maintained for fields inside siginfo_t. 
+- One exception to this rule is for signals raised in response +- to watchpoint debug exceptions, where the tag information +- will be preserved. ++Passing tagged addresses to the kernel ++-------------------------------------- + +- (3) Special care should be taken when using tagged pointers, +- since it is likely that C compilers will not hazard two +- virtual addresses differing only in the upper byte. ++All interpretation of userspace memory addresses by the kernel assumes ++an address tag of 0x00. ++ ++This includes, but is not limited to, addresses found in: ++ ++ - pointer arguments to system calls, including pointers in structures ++ passed to system calls, ++ ++ - the stack pointer (sp), e.g. when interpreting it to deliver a ++ signal, ++ ++ - the frame pointer (x29) and frame records, e.g. when interpreting ++ them to generate a backtrace or call graph. ++ ++Using non-zero address tags in any of these locations may result in an ++error code being returned, a (fatal) signal being raised, or other modes ++of failure. ++ ++For these reasons, passing non-zero address tags to the kernel via ++system calls is forbidden, and using a non-zero address tag for sp is ++strongly discouraged. ++ ++Programs maintaining a frame pointer and frame records that use non-zero ++address tags may suffer impaired or inaccurate debug and profiling ++visibility. ++ ++ ++Preserving tags ++--------------- ++ ++Non-zero tags are not preserved when delivering signals. This means that ++signal handlers in applications making use of tags cannot rely on the ++tag information for user virtual addresses being maintained for fields ++inside siginfo_t. One exception to this rule is for signals raised in ++response to watchpoint debug exceptions, where the tag information will ++be preserved. + + The architecture prevents the use of a tagged PC, so the upper byte will + be set to a sign-extension of bit 55 on exception return. ++ ++ ++Other considerations ++-------------------- ++ ++Special care should be taken when using tagged pointers, since it is ++likely that C compilers will not hazard two virtual addresses differing ++only in the upper byte. +diff --git a/Makefile b/Makefile +index dc5df61ea4be..a5ecb29c6ed3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 69 ++SUBLEVEL = 70 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c +index 6cc08166ff00..63f06a2b1f7f 100644 +--- a/arch/alpha/kernel/osf_sys.c ++++ b/arch/alpha/kernel/osf_sys.c +@@ -1188,8 +1188,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, + if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) + return -EFAULT; + +- err = 0; +- err |= put_user(status, ustatus); ++ err = put_user(status, ustatus); ++ if (ret < 0) ++ return err ? 
err : ret; ++ + err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); + err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); + err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); +diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts +index f3e2b96c06a3..0bd325c314e1 100644 +--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts +@@ -162,9 +162,10 @@ + }; + + adc0: adc@f8018000 { ++ atmel,adc-vref = <3300>; ++ atmel,adc-channels-used = <0xfe>; + pinctrl-0 = < + &pinctrl_adc0_adtrg +- &pinctrl_adc0_ad0 + &pinctrl_adc0_ad1 + &pinctrl_adc0_ad2 + &pinctrl_adc0_ad3 +@@ -172,8 +173,6 @@ + &pinctrl_adc0_ad5 + &pinctrl_adc0_ad6 + &pinctrl_adc0_ad7 +- &pinctrl_adc0_ad8 +- &pinctrl_adc0_ad9 + >; + status = "okay"; + }; +diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts +index ed7e1009326c..d9ee0fd817e9 100644 +--- a/arch/arm/boot/dts/tegra20-paz00.dts ++++ b/arch/arm/boot/dts/tegra20-paz00.dts +@@ -565,6 +565,7 @@ + regulator-name = "+3VS,vdd_pnl"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; ++ regulator-boot-on; + gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h +index 9ea611ea69df..91ceeb7b4530 100644 +--- a/arch/arm64/include/asm/cmpxchg.h ++++ b/arch/arm64/include/asm/cmpxchg.h +@@ -49,7 +49,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \ + " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \ + " nop\n" \ + " " #nop_lse) \ +- : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \ ++ : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \ + : "r" (x) \ + : cl); \ + \ +diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h +index b2ede967fe7d..d9ca1f2c0ea8 100644 +--- a/arch/arm64/include/asm/uaccess.h ++++ b/arch/arm64/include/asm/uaccess.h +@@ -92,11 +92,12 @@ static inline void set_fs(mm_segment_t fs) + */ + #define __range_ok(addr, size) \ + ({ \ ++ unsigned long __addr = (unsigned long __force)(addr); \ + unsigned long flag, roksum; \ + __chk_user_ptr(addr); \ + asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ + : "=&r" (flag), "=&r" (roksum) \ +- : "1" (addr), "Ir" (size), \ ++ : "1" (__addr), "Ir" (size), \ + "r" (current_thread_info()->addr_limit) \ + : "cc"); \ + flag; \ +diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h +index 07238b39638c..3db381205928 100644 +--- a/arch/metag/include/asm/uaccess.h ++++ b/arch/metag/include/asm/uaccess.h +@@ -28,24 +28,32 @@ + + #define segment_eq(a, b) ((a).seg == (b).seg) + +-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +-/* +- * Explicitly allow NULL pointers here. Parts of the kernel such +- * as readv/writev use access_ok to validate pointers, but want +- * to allow NULL pointers for various reasons. NULL pointers are +- * safe to allow through because the first page is not mappable on +- * Meta. +- * +- * We also wish to avoid letting user code access the system area +- * and the kernel half of the address space. 
+- */ +-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \ +- ((addr) > PAGE_OFFSET && \ +- (addr) < LINCORE_BASE)) +- + static inline int __access_ok(unsigned long addr, unsigned long size) + { +- return __kernel_ok || !__user_bad(addr, size); ++ /* ++ * Allow access to the user mapped memory area, but not the system area ++ * before it. The check extends to the top of the address space when ++ * kernel access is allowed (there's no real reason to user copy to the ++ * system area in any case). ++ */ ++ if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg && ++ size <= get_fs().seg - addr)) ++ return true; ++ /* ++ * Explicitly allow NULL pointers here. Parts of the kernel such ++ * as readv/writev use access_ok to validate pointers, but want ++ * to allow NULL pointers for various reasons. NULL pointers are ++ * safe to allow through because the first page is not mappable on ++ * Meta. ++ */ ++ if (!addr) ++ return true; ++ /* Allow access to core code memory area... */ ++ if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT && ++ size <= LINCORE_CODE_LIMIT + 1 - addr) ++ return true; ++ /* ... but no other areas. */ ++ return false; + } + + #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ +@@ -186,8 +194,13 @@ do { \ + extern long __must_check __strncpy_from_user(char *dst, const char __user *src, + long count); + +-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count) +- ++static inline long ++strncpy_from_user(char *dst, const char __user *src, long count) ++{ ++ if (!access_ok(VERIFY_READ, src, 1)) ++ return -EFAULT; ++ return __strncpy_from_user(dst, src, count); ++} + /* + * Return the size of a string (including the ending 0) + * +diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S +index 488e6314f993..5cc93f0b52ca 100644 +--- a/arch/powerpc/kernel/exceptions-64e.S ++++ b/arch/powerpc/kernel/exceptions-64e.S +@@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + andis. r15,r14,(DBSR_IC|DBSR_BT)@h + beq+ 1f + ++#ifdef CONFIG_RELOCATABLE ++ ld r15,PACATOC(r13) ++ ld r14,interrupt_base_book3e@got(r15) ++ ld r15,__end_interrupts@got(r15) ++#else + LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) + LOAD_REG_IMMEDIATE(r15,__end_interrupts) ++#endif + cmpld cr0,r10,r14 + cmpld cr1,r10,r15 + blt+ cr0,1f +@@ -799,8 +805,14 @@ kernel_dbg_exc: + andis. r15,r14,(DBSR_IC|DBSR_BT)@h + beq+ 1f + ++#ifdef CONFIG_RELOCATABLE ++ ld r15,PACATOC(r13) ++ ld r14,interrupt_base_book3e@got(r15) ++ ld r15,__end_interrupts@got(r15) ++#else + LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) + LOAD_REG_IMMEDIATE(r15,__end_interrupts) ++#endif + cmpld cr0,r10,r14 + cmpld cr1,r10,r15 + blt+ cr0,1f +diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c +index b2eb4686bd8f..da3c4c3f4ec8 100644 +--- a/arch/powerpc/kernel/mce.c ++++ b/arch/powerpc/kernel/mce.c +@@ -204,6 +204,8 @@ static void machine_check_process_queued_event(struct irq_work *work) + { + int index; + ++ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); ++ + /* + * For now just print it to console. + * TODO: log this error event to FSP or nvram. 
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c +index 37de90f8a845..e4dcb0a43e3f 100644 +--- a/arch/powerpc/kernel/traps.c ++++ b/arch/powerpc/kernel/traps.c +@@ -297,8 +297,6 @@ long machine_check_early(struct pt_regs *regs) + + __this_cpu_inc(irq_stat.mce_exceptions); + +- add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); +- + if (cur_cpu_spec && cur_cpu_spec->machine_check_early) + handled = cur_cpu_spec->machine_check_early(regs); + return handled; +@@ -704,6 +702,8 @@ void machine_check_exception(struct pt_regs *regs) + + __this_cpu_inc(irq_stat.mce_exceptions); + ++ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); ++ + /* See if any machine dependent calls. In theory, we would want + * to call the CPU first, and call the ppc_md. one if the CPU + * one returns a positive number. However there is existing code +diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c +index f244dcb4f2cf..96536c969c9c 100644 +--- a/arch/powerpc/platforms/pseries/dlpar.c ++++ b/arch/powerpc/platforms/pseries/dlpar.c +@@ -280,7 +280,6 @@ int dlpar_detach_node(struct device_node *dn) + if (rc) + return rc; + +- of_node_put(dn); /* Must decrement the refcount */ + return 0; + } + +diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c +index f7c3a61040bd..df4685905015 100644 +--- a/arch/s390/kernel/crash_dump.c ++++ b/arch/s390/kernel/crash_dump.c +@@ -464,6 +464,20 @@ static void *nt_vmcoreinfo(void *ptr) + } + + /* ++ * Initialize final note (needed for /proc/vmcore code) ++ */ ++static void *nt_final(void *ptr) ++{ ++ Elf64_Nhdr *note; ++ ++ note = (Elf64_Nhdr *) ptr; ++ note->n_namesz = 0; ++ note->n_descsz = 0; ++ note->n_type = 0; ++ return PTR_ADD(ptr, sizeof(Elf64_Nhdr)); ++} ++ ++/* + * Initialize ELF header (new kernel) + */ + static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) +@@ -553,6 +567,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) + ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs); + } + ptr = nt_vmcoreinfo(ptr); ++ ptr = nt_final(ptr); + memset(phdr, 0, sizeof(*phdr)); + phdr->p_type = PT_NOTE; + phdr->p_offset = notes_offset; +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S +index 424e6809ad07..7460df3eec6b 100644 +--- a/arch/s390/kernel/entry.S ++++ b/arch/s390/kernel/entry.S +@@ -308,6 +308,7 @@ ENTRY(system_call) + lg %r14,__LC_VDSO_PER_CPU + lmg %r0,%r10,__PT_R0(%r11) + mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) ++.Lsysc_exit_timer: + stpt __LC_EXIT_TIMER + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER + lmg %r11,%r15,__PT_R11(%r11) +@@ -593,6 +594,7 @@ ENTRY(io_int_handler) + lg %r14,__LC_VDSO_PER_CPU + lmg %r0,%r10,__PT_R0(%r11) + mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) ++.Lio_exit_timer: + stpt __LC_EXIT_TIMER + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER + lmg %r11,%r15,__PT_R11(%r11) +@@ -1118,15 +1120,23 @@ cleanup_critical: + br %r14 + + .Lcleanup_sysc_restore: ++ # check if stpt has been executed + clg %r9,BASED(.Lcleanup_sysc_restore_insn) ++ jh 0f ++ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER ++ cghi %r11,__LC_SAVE_AREA_ASYNC + je 0f ++ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER ++0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8) ++ je 1f + lg %r9,24(%r11) # get saved pointer to pt_regs + mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) + mvc 0(64,%r11),__PT_R8(%r9) + lmg %r0,%r7,__PT_R0(%r9) +-0: lmg %r8,%r9,__LC_RETURN_PSW ++1: lmg %r8,%r9,__LC_RETURN_PSW + br %r14 + .Lcleanup_sysc_restore_insn: ++ .quad .Lsysc_exit_timer + 
.quad .Lsysc_done - 4 + + .Lcleanup_io_tif: +@@ -1134,15 +1144,20 @@ cleanup_critical: + br %r14 + + .Lcleanup_io_restore: ++ # check if stpt has been executed + clg %r9,BASED(.Lcleanup_io_restore_insn) +- je 0f ++ jh 0f ++ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER ++0: clg %r9,BASED(.Lcleanup_io_restore_insn+8) ++ je 1f + lg %r9,24(%r11) # get saved r11 pointer to pt_regs + mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) + mvc 0(64,%r11),__PT_R8(%r9) + lmg %r0,%r7,__PT_R0(%r9) +-0: lmg %r8,%r9,__LC_RETURN_PSW ++1: lmg %r8,%r9,__LC_RETURN_PSW + br %r14 + .Lcleanup_io_restore_insn: ++ .quad .Lio_exit_timer + .quad .Lio_done - 4 + + .Lcleanup_idle: +diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c +index be39b5fde4b9..1011c05b1bd5 100644 +--- a/arch/x86/kernel/fpu/init.c ++++ b/arch/x86/kernel/fpu/init.c +@@ -96,6 +96,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) + * Boot time FPU feature detection code: + */ + unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; ++EXPORT_SYMBOL_GPL(mxcsr_feature_mask); + + static void __init fpu__init_system_mxcsr(void) + { +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 281899da19d4..ae2b9cd358f2 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -3140,11 +3140,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, + } + } + ++#define XSAVE_MXCSR_OFFSET 24 ++ + static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, + struct kvm_xsave *guest_xsave) + { + u64 xstate_bv = + *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; ++ u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; + + if (cpu_has_xsave) { + /* +@@ -3152,11 +3155,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, + * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility + * with old userspace. 
+ */ +- if (xstate_bv & ~kvm_supported_xcr0()) ++ if (xstate_bv & ~kvm_supported_xcr0() || ++ mxcsr & ~mxcsr_feature_mask) + return -EINVAL; + load_xsave(vcpu, (u8 *)guest_xsave->region); + } else { +- if (xstate_bv & ~XFEATURE_MASK_FPSSE) ++ if (xstate_bv & ~XFEATURE_MASK_FPSSE || ++ mxcsr & ~mxcsr_feature_mask) + return -EINVAL; + memcpy(&vcpu->arch.guest_fpu.state.fxsave, + guest_xsave->region, sizeof(struct fxregs_state)); +@@ -4603,16 +4608,20 @@ emul_write: + + static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) + { +- /* TODO: String I/O for in kernel device */ +- int r; ++ int r = 0, i; + +- if (vcpu->arch.pio.in) +- r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, +- vcpu->arch.pio.size, pd); +- else +- r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, +- vcpu->arch.pio.port, vcpu->arch.pio.size, +- pd); ++ for (i = 0; i < vcpu->arch.pio.count; i++) { ++ if (vcpu->arch.pio.in) ++ r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, ++ vcpu->arch.pio.size, pd); ++ else ++ r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, ++ vcpu->arch.pio.port, vcpu->arch.pio.size, ++ pd); ++ if (r) ++ break; ++ pd += vcpu->arch.pio.size; ++ } + return r; + } + +@@ -4650,6 +4659,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, + if (vcpu->arch.pio.count) + goto data_avail; + ++ memset(vcpu->arch.pio_data, 0, size * count); ++ + ret = emulator_pio_in_out(vcpu, size, port, val, count, true); + if (ret) { + data_avail: +diff --git a/drivers/char/lp.c b/drivers/char/lp.c +index c4094c4e22c1..34ef474a3923 100644 +--- a/drivers/char/lp.c ++++ b/drivers/char/lp.c +@@ -859,7 +859,11 @@ static int __init lp_setup (char *str) + } else if (!strcmp(str, "auto")) { + parport_nr[0] = LP_PARPORT_AUTO; + } else if (!strcmp(str, "none")) { +- parport_nr[parport_ptr++] = LP_PARPORT_NONE; ++ if (parport_ptr < LP_NO) ++ parport_nr[parport_ptr++] = LP_PARPORT_NONE; ++ else ++ printk(KERN_INFO "lp: too many ports, %s ignored.\n", ++ str); + } else if (!strcmp(str, "reset")) { + reset = 1; + } +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index e901463d4972..0975d23031ea 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -343,6 +343,11 @@ static const struct vm_operations_struct mmap_mem_ops = { + static int mmap_mem(struct file *file, struct vm_area_struct *vma) + { + size_t size = vma->vm_end - vma->vm_start; ++ phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; ++ ++ /* It's illegal to wrap around the end of the physical address space. 
*/ ++ if (offset + (phys_addr_t)size < offset) ++ return -EINVAL; + + if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) + return -EINVAL; +diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c +index 2b21398c3adc..35308dfff754 100644 +--- a/drivers/char/tpm/tpm_crb.c ++++ b/drivers/char/tpm/tpm_crb.c +@@ -118,8 +118,7 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) + + memcpy_fromio(buf, priv->rsp, 6); + expected = be32_to_cpup((__be32 *) &buf[2]); +- +- if (expected > count) ++ if (expected > count || expected < 6) + return -EIO; + + memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6); +diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +index 5b261adb4b69..3a25da4a6e60 100644 +--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +@@ -1126,23 +1126,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm) + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); ++ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); ++ tmp = min(dfixed_trunc(a), tmp); + +- b.full = dfixed_const(mc_latency + 512); +- c.full = dfixed_const(wm->disp_clk); +- b.full = dfixed_div(b, c); +- +- c.full = dfixed_const(dmif_size); +- b.full = dfixed_div(c, b); +- +- tmp = min(dfixed_trunc(a), dfixed_trunc(b)); +- +- b.full = dfixed_const(1000); +- c.full = dfixed_const(wm->disp_clk); +- b.full = dfixed_div(c, b); +- c.full = dfixed_const(wm->bytes_per_pixel); +- b.full = dfixed_mul(b, c); +- +- lb_fill_bw = min(tmp, dfixed_trunc(b)); ++ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); +@@ -1250,14 +1237,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, + { + struct drm_display_mode *mode = &amdgpu_crtc->base.mode; + struct dce10_wm_params wm_low, wm_high; +- u32 pixel_period; ++ u32 active_time; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; + + if (amdgpu_crtc->base.enabled && num_heads && mode) { +- pixel_period = 1000000 / (u32)mode->clock; +- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); ++ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; ++ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + + /* watermark for high clocks */ + if (adev->pm.dpm_enabled) { +@@ -1272,7 +1259,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; +- wm_high.active_time = mode->crtc_hdisplay * pixel_period; ++ wm_high.active_time = active_time; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) +@@ -1311,7 +1298,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; +- wm_low.active_time = mode->crtc_hdisplay * pixel_period; ++ wm_low.active_time = active_time; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) +diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +index 267749a94c5a..d6d3cda77762 100644 +--- 
a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +@@ -1114,23 +1114,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); ++ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); ++ tmp = min(dfixed_trunc(a), tmp); + +- b.full = dfixed_const(mc_latency + 512); +- c.full = dfixed_const(wm->disp_clk); +- b.full = dfixed_div(b, c); +- +- c.full = dfixed_const(dmif_size); +- b.full = dfixed_div(c, b); +- +- tmp = min(dfixed_trunc(a), dfixed_trunc(b)); +- +- b.full = dfixed_const(1000); +- c.full = dfixed_const(wm->disp_clk); +- b.full = dfixed_div(c, b); +- c.full = dfixed_const(wm->bytes_per_pixel); +- b.full = dfixed_mul(b, c); +- +- lb_fill_bw = min(tmp, dfixed_trunc(b)); ++ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); +@@ -1238,14 +1225,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, + { + struct drm_display_mode *mode = &amdgpu_crtc->base.mode; + struct dce10_wm_params wm_low, wm_high; +- u32 pixel_period; ++ u32 active_time; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; + + if (amdgpu_crtc->base.enabled && num_heads && mode) { +- pixel_period = 1000000 / (u32)mode->clock; +- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); ++ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; ++ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + + /* watermark for high clocks */ + if (adev->pm.dpm_enabled) { +@@ -1260,7 +1247,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; +- wm_high.active_time = mode->crtc_hdisplay * pixel_period; ++ wm_high.active_time = active_time; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) +@@ -1299,7 +1286,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; +- wm_low.active_time = mode->crtc_hdisplay * pixel_period; ++ wm_low.active_time = active_time; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) +diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +index 9b4dcf76ce6c..d6e51d4b04f0 100644 +--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +@@ -1096,23 +1096,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm) + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); ++ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); ++ tmp = min(dfixed_trunc(a), tmp); + +- b.full = dfixed_const(mc_latency + 512); +- c.full = dfixed_const(wm->disp_clk); +- b.full = dfixed_div(b, c); +- +- c.full = dfixed_const(dmif_size); +- b.full = dfixed_div(c, b); +- +- tmp = min(dfixed_trunc(a), dfixed_trunc(b)); +- +- b.full = dfixed_const(1000); +- c.full = dfixed_const(wm->disp_clk); +- b.full = dfixed_div(c, b); +- c.full = 
dfixed_const(wm->bytes_per_pixel); +- b.full = dfixed_mul(b, c); +- +- lb_fill_bw = min(tmp, dfixed_trunc(b)); ++ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); +@@ -1220,14 +1207,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, + { + struct drm_display_mode *mode = &amdgpu_crtc->base.mode; + struct dce8_wm_params wm_low, wm_high; +- u32 pixel_period; ++ u32 active_time; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; + + if (amdgpu_crtc->base.enabled && num_heads && mode) { +- pixel_period = 1000000 / (u32)mode->clock; +- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); ++ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; ++ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + + /* watermark for high clocks */ + if (adev->pm.dpm_enabled) { +@@ -1242,7 +1229,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; +- wm_high.active_time = mode->crtc_hdisplay * pixel_period; ++ wm_high.active_time = active_time; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) +@@ -1281,7 +1268,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; +- wm_low.active_time = mode->crtc_hdisplay * pixel_period; ++ wm_low.active_time = active_time; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index cc1e16fd7e76..c0106fd9fae9 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -75,6 +75,8 @@ + #define EDID_QUIRK_FORCE_12BPC (1 << 9) + /* Force 6bpc */ + #define EDID_QUIRK_FORCE_6BPC (1 << 10) ++/* Force 10bpc */ ++#define EDID_QUIRK_FORCE_10BPC (1 << 11) + + struct detailed_mode_closure { + struct drm_connector *connector; +@@ -117,6 +119,9 @@ static struct edid_quirk { + { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | + EDID_QUIRK_DETAILED_IN_CM }, + ++ /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */ ++ { "LGD", 764, EDID_QUIRK_FORCE_10BPC }, ++ + /* LG Philips LCD LP154W01-A5 */ + { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, + { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, +@@ -3834,6 +3839,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) + if (quirks & EDID_QUIRK_FORCE_8BPC) + connector->display_info.bpc = 8; + ++ if (quirks & EDID_QUIRK_FORCE_10BPC) ++ connector->display_info.bpc = 10; ++ + if (quirks & EDID_QUIRK_FORCE_12BPC) + connector->display_info.bpc = 12; + +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +index 949dc6101a58..7c0b58613747 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +@@ -130,7 +130,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode) + poll = false; + } + +- if (list_empty(&therm->alarm.head) && poll) ++ if (poll) + nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm); + spin_unlock_irqrestore(&therm->lock, flags); 
+ +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c +index 91198d79393a..e2feccec25f5 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c +@@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target) + spin_unlock_irqrestore(&fan->lock, flags); + + /* schedule next fan update, if not at target speed already */ +- if (list_empty(&fan->alarm.head) && target != duty) { ++ if (target != duty) { + u16 bump_period = fan->bios.bump_period; + u16 slow_down_period = fan->bios.slow_down_period; + u64 delay; +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c +index 59701b7a6597..ff9fbe7950e5 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c +@@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent) + duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff); + nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty); + +- if (list_empty(&fan->alarm.head) && percent != (duty * 100)) { ++ if (percent != (duty * 100)) { + u64 next_change = (percent * fan->period_us) / 100; + if (!duty) + next_change = fan->period_us - next_change; +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c +index b9703c02d8ca..9a79e91fdfdc 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c +@@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm) + spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags); + + /* schedule the next poll in one second */ +- if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head)) ++ if (therm->func->temp_get(therm) >= 0) + nvkm_timer_alarm(tmr, 1000000000ULL, alarm); + } + +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +index d4dae1f12d62..79fcdb43e174 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +@@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) + unsigned long flags; + LIST_HEAD(exec); + +- /* move any due alarms off the pending list */ ++ /* Process pending alarms. */ + spin_lock_irqsave(&tmr->lock, flags); + list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) { +- if (alarm->timestamp <= nvkm_timer_read(tmr)) +- list_move_tail(&alarm->head, &exec); ++ /* Have we hit the earliest alarm that hasn't gone off? */ ++ if (alarm->timestamp > nvkm_timer_read(tmr)) { ++ /* Schedule it. If we didn't race, we're done. */ ++ tmr->func->alarm_init(tmr, alarm->timestamp); ++ if (alarm->timestamp > nvkm_timer_read(tmr)) ++ break; ++ } ++ ++ /* Move to completed list. We'll drop the lock before ++ * executing the callback so it can reschedule itself. ++ */ ++ list_move_tail(&alarm->head, &exec); + } + +- /* reschedule interrupt for next alarm time */ +- if (!list_empty(&tmr->alarms)) { +- alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head); +- tmr->func->alarm_init(tmr, alarm->timestamp); +- } else { ++ /* Shut down interrupt if no more pending alarms. */ ++ if (list_empty(&tmr->alarms)) + tmr->func->alarm_fini(tmr); +- } + spin_unlock_irqrestore(&tmr->lock, flags); + +- /* execute any pending alarm handlers */ ++ /* Execute completed callbacks. 
*/ + list_for_each_entry_safe(alarm, atemp, &exec, head) { + list_del_init(&alarm->head); + alarm->func(alarm); +@@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm) + struct nvkm_alarm *list; + unsigned long flags; + +- alarm->timestamp = nvkm_timer_read(tmr) + nsec; +- +- /* append new alarm to list, in soonest-alarm-first order */ ++ /* Remove alarm from pending list. ++ * ++ * This both protects against the corruption of the list, ++ * and implements alarm rescheduling/cancellation. ++ */ + spin_lock_irqsave(&tmr->lock, flags); +- if (!nsec) { +- if (!list_empty(&alarm->head)) +- list_del(&alarm->head); +- } else { ++ list_del_init(&alarm->head); ++ ++ if (nsec) { ++ /* Insert into pending list, ordered earliest to latest. */ ++ alarm->timestamp = nvkm_timer_read(tmr) + nsec; + list_for_each_entry(list, &tmr->alarms, head) { + if (list->timestamp > alarm->timestamp) + break; + } ++ + list_add_tail(&alarm->head, &list->head); ++ ++ /* Update HW if this is now the earliest alarm. */ ++ list = list_first_entry(&tmr->alarms, typeof(*list), head); ++ if (list == alarm) { ++ tmr->func->alarm_init(tmr, alarm->timestamp); ++ /* This shouldn't happen if callers aren't stupid. ++ * ++ * Worst case scenario is that it'll take roughly ++ * 4 seconds for the next alarm to trigger. ++ */ ++ WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr)); ++ } + } + spin_unlock_irqrestore(&tmr->lock, flags); +- +- /* process pending alarms */ +- nvkm_timer_alarm_trigger(tmr); + } + + void +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c +index 7b9ce87f0617..7f48249f41de 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c +@@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr) + u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0); + + if (stat & 0x00000001) { +- nvkm_timer_alarm_trigger(tmr); + nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001); ++ nvkm_timer_alarm_trigger(tmr); + stat &= ~0x00000001; + } + +diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c +index e690dd11e99f..4b0f942b8914 100644 +--- a/drivers/iio/dac/ad7303.c ++++ b/drivers/iio/dac/ad7303.c +@@ -184,9 +184,9 @@ static const struct iio_chan_spec_ext_info ad7303_ext_info[] = { + .address = (chan), \ + .scan_type = { \ + .sign = 'u', \ +- .realbits = '8', \ +- .storagebits = '8', \ +- .shift = '0', \ ++ .realbits = 8, \ ++ .storagebits = 8, \ ++ .shift = 0, \ + }, \ + .ext_info = ad7303_ext_info, \ + } +diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c +index a0aedda7dfd7..bf0bd7e03aff 100644 +--- a/drivers/iio/proximity/as3935.c ++++ b/drivers/iio/proximity/as3935.c +@@ -50,7 +50,6 @@ + #define AS3935_TUNE_CAP 0x08 + #define AS3935_CALIBRATE 0x3D + +-#define AS3935_WRITE_DATA BIT(15) + #define AS3935_READ_DATA BIT(14) + #define AS3935_ADDRESS(x) ((x) << 8) + +@@ -105,7 +104,7 @@ static int as3935_write(struct as3935_state *st, + { + u8 *buf = st->buf; + +- buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8; ++ buf[0] = AS3935_ADDRESS(reg) >> 8; + buf[1] = val; + + return spi_write(st->spi, buf, 2); +diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c +index 34b1adad07aa..6a8024d9d742 100644 +--- a/drivers/infiniband/core/addr.c ++++ b/drivers/infiniband/core/addr.c +@@ -277,8 +277,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, + fl6.saddr = src_in->sin6_addr; + fl6.flowi6_oif = 
addr->bound_dev_if; + +- dst = ip6_route_output(addr->net, NULL, &fl6); +- if ((ret = dst->error)) ++ ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); ++ if (ret < 0) + goto put; + + if (ipv6_addr_any(&fl6.saddr)) { +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 0628372f3591..b92b8a724efb 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -2005,11 +2005,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain, + if (context_copied(context)) { + u16 did_old = context_domain_id(context); + +- if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) ++ if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) { + iommu->flush.flush_context(iommu, did_old, + (((u16)bus) << 8) | devfn, + DMA_CCMD_MASK_NOBIT, + DMA_CCMD_DEVICE_INVL); ++ iommu->flush.flush_iotlb(iommu, did_old, 0, 0, ++ DMA_TLB_DSI_FLUSH); ++ } + } + + pgd = domain->pgd; +diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig +index 7913fdcfc849..108249f52fa9 100644 +--- a/drivers/md/Kconfig ++++ b/drivers/md/Kconfig +@@ -357,6 +357,7 @@ config DM_LOG_USERSPACE + config DM_RAID + tristate "RAID 1/4/5/6/10 target" + depends on BLK_DEV_DM ++ select MD_RAID0 + select MD_RAID1 + select MD_RAID10 + select MD_RAID456 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c +index 2dd33085b331..cdceefd0e57d 100644 +--- a/drivers/md/dm-bufio.c ++++ b/drivers/md/dm-bufio.c +@@ -222,7 +222,7 @@ static DEFINE_SPINLOCK(param_spinlock); + * Buffers are freed after this timeout + */ + static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; +-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; ++static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; + + static unsigned long dm_bufio_peak_allocated; + static unsigned long dm_bufio_allocated_kmem_cache; +@@ -914,10 +914,11 @@ static void __get_memory_limit(struct dm_bufio_client *c, + { + unsigned long buffers; + +- if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) { +- mutex_lock(&dm_bufio_clients_lock); +- __cache_size_refresh(); +- mutex_unlock(&dm_bufio_clients_lock); ++ if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { ++ if (mutex_trylock(&dm_bufio_clients_lock)) { ++ __cache_size_refresh(); ++ mutex_unlock(&dm_bufio_clients_lock); ++ } + } + + buffers = dm_bufio_cache_size_per_client >> +@@ -1513,10 +1514,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) + return true; + } + +-static unsigned get_retain_buffers(struct dm_bufio_client *c) ++static unsigned long get_retain_buffers(struct dm_bufio_client *c) + { +- unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); +- return retain_bytes / c->block_size; ++ unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); ++ return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); + } + + static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, +@@ -1526,7 +1527,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, + struct dm_buffer *b, *tmp; + unsigned long freed = 0; + unsigned long count = nr_to_scan; +- unsigned retain_target = get_retain_buffers(c); ++ unsigned long retain_target = get_retain_buffers(c); + + for (l = 0; l < LIST_SIZE; l++) { + list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { +@@ -1752,11 +1753,19 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz) + static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long 
age_hz) + { + struct dm_buffer *b, *tmp; +- unsigned retain_target = get_retain_buffers(c); +- unsigned count; ++ unsigned long retain_target = get_retain_buffers(c); ++ unsigned long count; ++ LIST_HEAD(write_list); + + dm_bufio_lock(c); + ++ __check_watermark(c, &write_list); ++ if (unlikely(!list_empty(&write_list))) { ++ dm_bufio_unlock(c); ++ __flush_write_list(&write_list); ++ dm_bufio_lock(c); ++ } ++ + count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; + list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { + if (count <= retain_target) +@@ -1781,6 +1790,8 @@ static void cleanup_old_buffers(void) + + mutex_lock(&dm_bufio_clients_lock); + ++ __cache_size_refresh(); ++ + list_for_each_entry(c, &dm_bufio_all_clients, client_list) + __evict_old_buffers(c, max_age_hz); + +@@ -1904,7 +1915,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache"); + module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR); + MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); + +-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR); ++module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR); + MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); + + module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR); +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c +index 3970cda10080..d3c55d7754af 100644 +--- a/drivers/md/dm-cache-metadata.c ++++ b/drivers/md/dm-cache-metadata.c +@@ -1326,17 +1326,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd, + + int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown) + { +- int r; ++ int r = -EINVAL; + flags_mutator mutator = (clean_shutdown ? 
set_clean_shutdown : + clear_clean_shutdown); + + WRITE_LOCK(cmd); ++ if (cmd->fail_io) ++ goto out; ++ + r = __commit_transaction(cmd, mutator); + if (r) + goto out; + + r = __begin_transaction(cmd); +- + out: + WRITE_UNLOCK(cmd); + return r; +@@ -1348,7 +1350,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd, + int r = -EINVAL; + + READ_LOCK(cmd); +- r = dm_sm_get_nr_free(cmd->metadata_sm, result); ++ if (!cmd->fail_io) ++ r = dm_sm_get_nr_free(cmd->metadata_sm, result); + READ_UNLOCK(cmd); + + return r; +@@ -1360,7 +1363,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd, + int r = -EINVAL; + + READ_LOCK(cmd); +- r = dm_sm_get_nr_blocks(cmd->metadata_sm, result); ++ if (!cmd->fail_io) ++ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result); + READ_UNLOCK(cmd); + + return r; +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index 911ada643364..3b67afda430b 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -485,11 +485,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) + if (r < 0) + return r; + +- r = save_sm_roots(pmd); ++ r = dm_tm_pre_commit(pmd->tm); + if (r < 0) + return r; + +- r = dm_tm_pre_commit(pmd->tm); ++ r = save_sm_roots(pmd); + if (r < 0) + return r; + +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index b1ced58eb5e1..a1a68209bd36 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -887,8 +887,12 @@ static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest, + else + *result_key = le64_to_cpu(ro_node(s)->keys[0]); + +- if (next_block || flags & INTERNAL_NODE) +- block = value64(ro_node(s), i); ++ if (next_block || flags & INTERNAL_NODE) { ++ if (find_highest) ++ block = value64(ro_node(s), i); ++ else ++ block = value64(ro_node(s), 0); ++ } + + } while (flags & INTERNAL_NODE); + +diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c +index ebb280a14325..32adf6b4a9c7 100644 +--- a/drivers/md/persistent-data/dm-space-map-disk.c ++++ b/drivers/md/persistent-data/dm-space-map-disk.c +@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b) + + static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b) + { ++ int r; ++ uint32_t old_count; + enum allocation_event ev; + struct sm_disk *smd = container_of(sm, struct sm_disk, sm); + +- return sm_ll_dec(&smd->ll, b, &ev); ++ r = sm_ll_dec(&smd->ll, b, &ev); ++ if (!r && (ev == SM_FREE)) { ++ /* ++ * It's only free if it's also free in the last ++ * transaction. 
++ */ ++ r = sm_ll_lookup(&smd->old_ll, b, &old_count); ++ if (!r && !old_count) ++ smd->nr_allocated_this_transaction--; ++ } ++ ++ return r; + } + + static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 7af976934441..4384b46cee1a 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2232,6 +2232,10 @@ static int resize_stripes(struct r5conf *conf, int newsize) + err = -ENOMEM; + + mutex_unlock(&conf->cache_size_mutex); ++ ++ conf->slab_cache = sc; ++ conf->active_name = 1-conf->active_name; ++ + /* Step 4, return new stripes to service */ + while(!list_empty(&newstripes)) { + nsh = list_entry(newstripes.next, struct stripe_head, lru); +@@ -2249,8 +2253,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) + } + /* critical section pass, GFP_NOIO no longer needed */ + +- conf->slab_cache = sc; +- conf->active_name = 1-conf->active_name; + if (!err) + conf->pool_size = newsize; + return err; +diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c +index fdffb2f0ded8..107853b0fddd 100644 +--- a/drivers/media/dvb-frontends/cxd2841er.c ++++ b/drivers/media/dvb-frontends/cxd2841er.c +@@ -2678,7 +2678,9 @@ static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops = { + FE_CAN_MUTE_TS | + FE_CAN_2G_MODULATION, + .frequency_min = 42000000, +- .frequency_max = 1002000000 ++ .frequency_max = 1002000000, ++ .symbol_rate_min = 870000, ++ .symbol_rate_max = 11700000 + }, + .init = cxd2841er_init_tc, + .sleep = cxd2841er_sleep_tc, +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c +index c8946f98ced4..7727789dbda1 100644 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c +@@ -173,6 +173,7 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work) + } + s5p_mfc_clock_on(); + ret = s5p_mfc_init_hw(dev); ++ s5p_mfc_clock_off(); + if (ret) + mfc_err("Failed to reinit FW\n"); + } +diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c +index 2cdb740cde48..f838d9c7ed12 100644 +--- a/drivers/media/rc/mceusb.c ++++ b/drivers/media/rc/mceusb.c +@@ -1321,8 +1321,8 @@ static int mceusb_dev_probe(struct usb_interface *intf, + } + } + } +- if (ep_in == NULL) { +- dev_dbg(&intf->dev, "inbound and/or endpoint not found"); ++ if (!ep_in || !ep_out) { ++ dev_dbg(&intf->dev, "required endpoints not found\n"); + return -ENODEV; + } + +diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c +index 317ef63ee789..8d96a22647b3 100644 +--- a/drivers/media/tuners/tuner-xc2028.c ++++ b/drivers/media/tuners/tuner-xc2028.c +@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv) + int i; + tuner_dbg("%s called\n", __func__); + ++ /* free allocated f/w string */ ++ if (priv->fname != firmware_name) ++ kfree(priv->fname); ++ priv->fname = NULL; ++ ++ priv->state = XC2028_NO_FIRMWARE; ++ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); ++ + if (!priv->firm) + return; + +@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv) + + priv->firm = NULL; + priv->firm_size = 0; +- priv->state = XC2028_NO_FIRMWARE; +- +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + } + + static int load_all_firmwares(struct dvb_frontend *fe, +@@ -884,9 +889,8 @@ read_not_reliable: + return 0; + + fail: +- priv->state = XC2028_NO_FIRMWARE; ++ free_firmware(priv); + +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + if (retry_count < 8) { + msleep(50); + 
retry_count++; +@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe) + mutex_lock(&xc2028_list_mutex); + + /* only perform final cleanup if this is the last instance */ +- if (hybrid_tuner_report_instance_count(priv) == 1) { ++ if (hybrid_tuner_report_instance_count(priv) == 1) + free_firmware(priv); +- kfree(priv->ctrl.fname); +- priv->ctrl.fname = NULL; +- } + + if (priv) + hybrid_tuner_release_state(priv); +@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + + /* + * Copy the config data. +- * For the firmware name, keep a local copy of the string, +- * in order to avoid troubles during device release. + */ +- kfree(priv->ctrl.fname); +- priv->ctrl.fname = NULL; + memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); +- if (p->fname) { +- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); +- if (priv->ctrl.fname == NULL) { +- rc = -ENOMEM; +- goto unlock; +- } +- } + + /* + * If firmware name changed, frees firmware. As free_firmware will +@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + + if (priv->state == XC2028_NO_FIRMWARE) { + if (!firmware_name[0]) +- priv->fname = priv->ctrl.fname; ++ priv->fname = kstrdup(p->fname, GFP_KERNEL); + else + priv->fname = firmware_name; + ++ if (!priv->fname) { ++ rc = -ENOMEM; ++ goto unlock; ++ } ++ + rc = request_firmware_nowait(THIS_MODULE, 1, + priv->fname, + priv->i2c_props.adap->dev.parent, +diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c +index de4ae5eb4830..10d8a08e36e6 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-audio.c ++++ b/drivers/media/usb/cx231xx/cx231xx-audio.c +@@ -671,10 +671,8 @@ static int cx231xx_audio_init(struct cx231xx *dev) + + spin_lock_init(&adev->slock); + err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm); +- if (err < 0) { +- snd_card_free(card); +- return err; +- } ++ if (err < 0) ++ goto err_free_card; + + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, + &snd_cx231xx_pcm_capture); +@@ -688,10 +686,9 @@ static int cx231xx_audio_init(struct cx231xx *dev) + INIT_WORK(&dev->wq_trigger, audio_trigger); + + err = snd_card_register(card); +- if (err < 0) { +- snd_card_free(card); +- return err; +- } ++ if (err < 0) ++ goto err_free_card; ++ + adev->sndcard = card; + adev->udev = dev->udev; + +@@ -701,6 +698,11 @@ static int cx231xx_audio_init(struct cx231xx *dev) + hs_config_info[0].interface_info. + audio_index + 1]; + ++ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) { ++ err = -ENODEV; ++ goto err_free_card; ++ } ++ + adev->end_point_addr = + uif->altsetting[0].endpoint[isoc_pipe].desc. + bEndpointAddress; +@@ -710,13 +712,20 @@ static int cx231xx_audio_init(struct cx231xx *dev) + "audio EndPoint Addr 0x%x, Alternate settings: %i\n", + adev->end_point_addr, adev->num_alt); + adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL); +- +- if (adev->alt_max_pkt_size == NULL) +- return -ENOMEM; ++ if (!adev->alt_max_pkt_size) { ++ err = -ENOMEM; ++ goto err_free_card; ++ } + + for (i = 0; i < adev->num_alt; i++) { +- u16 tmp = +- le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc. ++ u16 tmp; ++ ++ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) { ++ err = -ENODEV; ++ goto err_free_pkt_size; ++ } ++ ++ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc. 
+ wMaxPacketSize); + adev->alt_max_pkt_size[i] = + (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); +@@ -726,6 +735,13 @@ static int cx231xx_audio_init(struct cx231xx *dev) + } + + return 0; ++ ++err_free_pkt_size: ++ kfree(adev->alt_max_pkt_size); ++err_free_card: ++ snd_card_free(card); ++ ++ return err; + } + + static int cx231xx_audio_fini(struct cx231xx *dev) +diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c +index 8389c162bc89..2c5f76d588ac 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-cards.c ++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c +@@ -1447,6 +1447,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, + + uif = udev->actconfig->interface[idx]; + ++ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) ++ return -ENODEV; ++ + dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress; + dev->video_mode.num_alt = uif->num_altsetting; + +@@ -1460,7 +1463,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, + return -ENOMEM; + + for (i = 0; i < dev->video_mode.num_alt; i++) { +- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize); ++ u16 tmp; ++ ++ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) ++ return -ENODEV; ++ ++ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize); + dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); + dev_dbg(dev->dev, + "Alternate setting %i, max size= %i\n", i, +@@ -1477,6 +1485,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, + } + uif = udev->actconfig->interface[idx]; + ++ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) ++ return -ENODEV; ++ + dev->vbi_mode.end_point_addr = + uif->altsetting[0].endpoint[isoc_pipe].desc. + bEndpointAddress; +@@ -1493,8 +1504,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, + return -ENOMEM; + + for (i = 0; i < dev->vbi_mode.num_alt; i++) { +- u16 tmp = +- le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. ++ u16 tmp; ++ ++ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) ++ return -ENODEV; ++ ++ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. + desc.wMaxPacketSize); + dev->vbi_mode.alt_max_pkt_size[i] = + (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); +@@ -1514,6 +1529,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, + } + uif = udev->actconfig->interface[idx]; + ++ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) ++ return -ENODEV; ++ + dev->sliced_cc_mode.end_point_addr = + uif->altsetting[0].endpoint[isoc_pipe].desc. + bEndpointAddress; +@@ -1528,7 +1546,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, + return -ENOMEM; + + for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) { +- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. ++ u16 tmp; ++ ++ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) ++ return -ENODEV; ++ ++ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. + desc.wMaxPacketSize); + dev->sliced_cc_mode.alt_max_pkt_size[i] = + (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); +@@ -1693,6 +1716,11 @@ static int cx231xx_usb_probe(struct usb_interface *interface, + } + uif = udev->actconfig->interface[idx]; + ++ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) { ++ retval = -ENODEV; ++ goto err_video_alt; ++ } ++ + dev->ts1_mode.end_point_addr = + uif->altsetting[0].endpoint[isoc_pipe]. 
+ desc.bEndpointAddress; +@@ -1710,7 +1738,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface, + } + + for (i = 0; i < dev->ts1_mode.num_alt; i++) { +- u16 tmp = le16_to_cpu(uif->altsetting[i]. ++ u16 tmp; ++ ++ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) { ++ retval = -ENODEV; ++ goto err_video_alt; ++ } ++ ++ tmp = le16_to_cpu(uif->altsetting[i]. + endpoint[isoc_pipe].desc. + wMaxPacketSize); + dev->ts1_mode.alt_max_pkt_size[i] = +diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c +index ab58f0b9da5c..d1b4b729e814 100644 +--- a/drivers/media/usb/dvb-usb/dib0700_core.c ++++ b/drivers/media/usb/dvb-usb/dib0700_core.c +@@ -783,6 +783,9 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf) + + /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */ + ++ if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1) ++ return -ENODEV; ++ + purb = usb_alloc_urb(0, GFP_KERNEL); + if (purb == NULL) { + err("rc usb alloc urb failed"); +diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c +index f10717311e05..dd93c2c8fea9 100644 +--- a/drivers/media/usb/dvb-usb/ttusb2.c ++++ b/drivers/media/usb/dvb-usb/ttusb2.c +@@ -78,6 +78,9 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd, + u8 *s, *r = NULL; + int ret = 0; + ++ if (4 + rlen > 64) ++ return -EIO; ++ + s = kzalloc(wlen+4, GFP_KERNEL); + if (!s) + return -ENOMEM; +@@ -381,6 +384,22 @@ static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num + write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD); + read = msg[i].flags & I2C_M_RD; + ++ if (3 + msg[i].len > sizeof(obuf)) { ++ err("i2c wr len=%d too high", msg[i].len); ++ break; ++ } ++ if (write_read) { ++ if (3 + msg[i+1].len > sizeof(ibuf)) { ++ err("i2c rd len=%d too high", msg[i+1].len); ++ break; ++ } ++ } else if (read) { ++ if (3 + msg[i].len > sizeof(ibuf)) { ++ err("i2c rd len=%d too high", msg[i].len); ++ break; ++ } ++ } ++ + obuf[0] = (msg[i].addr << 1) | (write_read | read); + if (read) + obuf[1] = 0; +diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c +index 0712b1bc90b4..0f6d57fbf91b 100644 +--- a/drivers/media/usb/gspca/konica.c ++++ b/drivers/media/usb/gspca/konica.c +@@ -188,6 +188,9 @@ static int sd_start(struct gspca_dev *gspca_dev) + return -EIO; + } + ++ if (alt->desc.bNumEndpoints < 2) ++ return -ENODEV; ++ + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); + + n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; +diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c +index d1dc1a198e3e..91d709efef7a 100644 +--- a/drivers/media/usb/usbvision/usbvision-video.c ++++ b/drivers/media/usb/usbvision/usbvision-video.c +@@ -1523,7 +1523,14 @@ static int usbvision_probe(struct usb_interface *intf, + } + + for (i = 0; i < usbvision->num_alt; i++) { +- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc. ++ u16 tmp; ++ ++ if (uif->altsetting[i].desc.bNumEndpoints < 2) { ++ ret = -ENODEV; ++ goto err_pkt; ++ } ++ ++ tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc. 
+ wMaxPacketSize); + usbvision->alt_max_pkt_size[i] = + (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); +diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c +index 7433ba5c4bad..fd6a3b36208e 100644 +--- a/drivers/media/usb/zr364xx/zr364xx.c ++++ b/drivers/media/usb/zr364xx/zr364xx.c +@@ -604,6 +604,14 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam, + ptr = pdest = frm->lpvbits; + + if (frm->ulState == ZR364XX_READ_IDLE) { ++ if (purb->actual_length < 128) { ++ /* header incomplete */ ++ dev_info(&cam->udev->dev, ++ "%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n", ++ __func__, purb->actual_length); ++ return -EINVAL; ++ } ++ + frm->ulState = ZR364XX_READ_FRAME; + frm->cur_size = 0; + +diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c +index 25f21968fa5c..de2ea9f2f966 100644 +--- a/drivers/net/irda/irda-usb.c ++++ b/drivers/net/irda/irda-usb.c +@@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self) + * are "42101001.sb" or "42101002.sb" + */ + sprintf(stir421x_fw_name, "4210%4X.sb", +- self->usbdev->descriptor.bcdDevice); ++ le16_to_cpu(self->usbdev->descriptor.bcdDevice)); + ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); + if (ret < 0) + return ret; +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 09052f9e324f..c6f5d9a6bec6 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -754,6 +754,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ ++ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ + + /* 4. 
Gobi 1000 devices */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c +index 165dd202c365..c92564b3ec85 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.c ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c +@@ -37,6 +37,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { + { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */ + { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */ + { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */ ++ { USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */ + + { USB_DEVICE(0x0cf3, 0x7015), + .driver_info = AR9287_USB }, /* Atheros */ +@@ -1216,6 +1217,9 @@ static int send_eject_command(struct usb_interface *interface) + u8 bulk_out_ep; + int r; + ++ if (iface_desc->desc.bNumEndpoints < 2) ++ return -ENODEV; ++ + /* Find bulk out endpoint */ + for (r = 1; r >= 0; r--) { + endpoint = &iface_desc->endpoint[r].desc; +diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c +index 21192b6f9c64..268e50ba88a5 100644 +--- a/drivers/net/wireless/mwifiex/pcie.c ++++ b/drivers/net/wireless/mwifiex/pcie.c +@@ -947,6 +947,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) + if (card && card->cmd_buf) { + mwifiex_unmap_pci_memory(adapter, card->cmd_buf, + PCI_DMA_TODEVICE); ++ dev_kfree_skb_any(card->cmd_buf); + } + return 0; + } +@@ -1513,6 +1514,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) + return -1; + + card->cmd_buf = skb; ++ /* ++ * Need to keep a reference, since core driver might free up this ++ * buffer before we've unmapped it. ++ */ ++ skb_get(skb); + + /* To send a command, the driver will: + 1. 
Write the 64bit physical address of the data buffer to +@@ -1610,6 +1616,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) + if (card->cmd_buf) { + mwifiex_unmap_pci_memory(adapter, card->cmd_buf, + PCI_DMA_TODEVICE); ++ dev_kfree_skb_any(card->cmd_buf); + card->cmd_buf = NULL; + } + +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +index 9b4d8a637915..4b354918e183 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +@@ -359,6 +359,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw) + return rtl8821ae_phy_rf6052_config(hw); + } + ++static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw) ++{ ++ struct rtl_priv *rtlpriv = rtl_priv(hw); ++ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); ++ u8 tmp; ++ ++ switch (rtlhal->rfe_type) { ++ case 3: ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770); ++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); ++ rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1); ++ break; ++ case 4: ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); ++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001); ++ break; ++ case 5: ++ rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); ++ tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3); ++ rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); ++ break; ++ case 1: ++ if (rtlpriv->btcoexist.bt_coexistence) { ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, ++ 0x77777777); ++ rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); ++ break; ++ } ++ case 0: ++ case 2: ++ default: ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); ++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); ++ break; ++ } ++} ++ ++static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw) ++{ ++ struct rtl_priv *rtlpriv = rtl_priv(hw); ++ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); ++ u8 tmp; ++ ++ switch (rtlhal->rfe_type) { ++ case 0: ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717); ++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); ++ break; ++ case 1: ++ if (rtlpriv->btcoexist.bt_coexistence) { ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, ++ 0x77337717); ++ rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); ++ } else { ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, ++ 0x77337717); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, ++ 0x77337717); ++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); ++ } ++ break; ++ case 3: ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717); ++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 
0x010); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); ++ rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1); ++ break; ++ case 5: ++ rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); ++ tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3); ++ rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); ++ break; ++ case 2: ++ case 4: ++ default: ++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777); ++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); ++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); ++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); ++ break; ++ } ++} ++ + u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, + u8 rf_path) + { +@@ -553,14 +654,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) + /* 0x82C[1:0] = 2b'00 */ + rtl_set_bbreg(hw, 0x82c, 0x3, 0); + } +- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { +- rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, +- 0x77777777); +- rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, +- 0x77777777); +- rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000); +- rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000); +- } ++ ++ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) ++ _rtl8812ae_phy_set_rfe_reg_24g(hw); + + rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1); + rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1); +@@ -615,14 +711,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) + /* 0x82C[1:0] = 2'b00 */ + rtl_set_bbreg(hw, 0x82c, 0x3, 1); + +- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { +- rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, +- 0x77337777); +- rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, +- 0x77337777); +- rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010); +- rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010); +- } ++ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) ++ _rtl8812ae_phy_set_rfe_reg_5g(hw); + + rtl_set_bbreg(hw, RTXPATH, 0xf0, 0); + rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf); +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h +index 1d6110f9c1fb..ed69dbe178ff 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h +@@ -2424,6 +2424,7 @@ + #define BMASKH4BITS 0xf0000000 + #define BMASKOFDM_D 0xffc00000 + #define BMASKCCK 0x3f3f3f3f ++#define BMASKRFEINV 0x3ff00000 + + #define BRFREGOFFSETMASK 0xfffff + +diff --git a/drivers/of/address.c b/drivers/of/address.c +index 9582c5703b3c..4fe5fe21cd49 100644 +--- a/drivers/of/address.c ++++ b/drivers/of/address.c +@@ -260,7 +260,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, + if (!parser->range || parser->range + parser->np > parser->end) + return NULL; + +- range->pci_space = parser->range[0]; ++ range->pci_space = be32_to_cpup(parser->range); + range->flags = of_bus_pci_get_flags(parser->range); + range->pci_addr = of_read_number(parser->range + 1, ns); + range->cpu_addr = of_translate_address(parser->node, +diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c +index d7508704c992..f8b2b5987ea9 100644 +--- a/drivers/pci/pci-sysfs.c ++++ b/drivers/pci/pci-sysfs.c +@@ -973,15 +973,19 @@ void pci_remove_legacy_files(struct pci_bus *b) + int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, + enum pci_mmap_api mmap_api) + { +- unsigned long nr, start, size, pci_start; ++ unsigned long nr, start, size; ++ 
resource_size_t pci_start = 0, pci_end; + + if (pci_resource_len(pdev, resno) == 0) + return 0; + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; +- pci_start = (mmap_api == PCI_MMAP_PROCFS) ? +- pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; ++ if (mmap_api == PCI_MMAP_PROCFS) { ++ pci_resource_to_user(pdev, resno, &pdev->resource[resno], ++ &pci_start, &pci_end); ++ pci_start >>= PAGE_SHIFT; ++ } + if (start >= pci_start && start < pci_start + size && + start + nr <= pci_start + size) + return 1; +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 0e53488f8ec1..1a14ca8965e6 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -1732,8 +1732,8 @@ static void pci_pme_list_scan(struct work_struct *work) + } + } + if (!list_empty(&pci_pme_list)) +- schedule_delayed_work(&pci_pme_work, +- msecs_to_jiffies(PME_TIMEOUT)); ++ queue_delayed_work(system_freezable_wq, &pci_pme_work, ++ msecs_to_jiffies(PME_TIMEOUT)); + mutex_unlock(&pci_pme_list_mutex); + } + +@@ -1798,8 +1798,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable) + mutex_lock(&pci_pme_list_mutex); + list_add(&pme_dev->list, &pci_pme_list); + if (list_is_singular(&pci_pme_list)) +- schedule_delayed_work(&pci_pme_work, +- msecs_to_jiffies(PME_TIMEOUT)); ++ queue_delayed_work(system_freezable_wq, ++ &pci_pme_work, ++ msecs_to_jiffies(PME_TIMEOUT)); + mutex_unlock(&pci_pme_list_mutex); + } else { + mutex_lock(&pci_pme_list_mutex); +diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c +index d2c3d7cc35f5..5ca6d2130593 100644 +--- a/drivers/regulator/tps65023-regulator.c ++++ b/drivers/regulator/tps65023-regulator.c +@@ -311,8 +311,7 @@ static int tps_65023_probe(struct i2c_client *client, + + /* Enable setting output voltage by I2C */ + regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2, +- TPS65023_REG_CTRL2_CORE_ADJ, +- TPS65023_REG_CTRL2_CORE_ADJ); ++ TPS65023_REG_CTRL2_CORE_ADJ, 0); + + return 0; + } +diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +index e9c4f973bba9..79bf13f5c0d1 100644 +--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c ++++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +@@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val) + + switch (variable) { + case HW_VAR_BSSID: +- rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]); +- rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]); ++ /* BSSIDR 2 byte alignment */ ++ rtl92e_writew(dev, BSSIDR, *(u16 *)val); ++ rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2)); + break; + + case HW_VAR_MEDIA_STATUS: +@@ -626,7 +627,7 @@ void rtl92e_get_eeprom_size(struct net_device *dev) + struct r8192_priv *priv = rtllib_priv(dev); + + RT_TRACE(COMP_INIT, "===========>%s()\n", __func__); +- curCR = rtl92e_readl(dev, EPROM_CMD); ++ curCR = rtl92e_readw(dev, EPROM_CMD); + RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, + curCR); + priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? 
EEPROM_93C56 : +@@ -963,8 +964,8 @@ static void _rtl92e_net_update(struct net_device *dev) + rtl92e_config_rate(dev, &rate_config); + priv->dot11CurrentPreambleMode = PREAMBLE_AUTO; + priv->basic_rate = rate_config &= 0x15f; +- rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]); +- rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]); ++ rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid); ++ rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2)); + + if (priv->rtllib->iw_mode == IW_MODE_ADHOC) { + rtl92e_writew(dev, ATIMWND, 2); +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 96849e2e7435..0b7194086c5a 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -311,6 +311,12 @@ static void acm_ctrl_irq(struct urb *urb) + break; + + case USB_CDC_NOTIFY_SERIAL_STATE: ++ if (le16_to_cpu(dr->wLength) != 2) { ++ dev_dbg(&acm->control->dev, ++ "%s - malformed serial state\n", __func__); ++ break; ++ } ++ + newctrl = get_unaligned_le16(data); + + if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { +@@ -347,11 +353,10 @@ static void acm_ctrl_irq(struct urb *urb) + + default: + dev_dbg(&acm->control->dev, +- "%s - unknown notification %d received: index %d " +- "len %d data0 %d data1 %d\n", ++ "%s - unknown notification %d received: index %d len %d\n", + __func__, +- dr->bNotificationType, dr->wIndex, +- dr->wLength, data[0], data[1]); ++ dr->bNotificationType, dr->wIndex, dr->wLength); ++ + break; + } + exit: +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index f5c92d904ded..54d2d6b604c0 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -373,11 +373,11 @@ static void snoop_urb(struct usb_device *udev, + + if (userurb) { /* Async */ + if (when == SUBMIT) +- dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " ++ dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " + "length %u\n", + userurb, ep, t, d, length); + else +- dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " ++ dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " + "actual_length %u status %d\n", + userurb, ep, t, d, length, + timeout_or_status); +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index ca2cbdb3aa67..c3f4f2ab7b33 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -1696,7 +1696,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) + if (retval == 0) + retval = -EINPROGRESS; + else if (retval != -EIDRM && retval != -EBUSY) +- dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n", ++ dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n", + urb, retval); + usb_put_dev(udev); + } +@@ -1863,7 +1863,7 @@ rescan: + /* kick hcd */ + unlink1(hcd, urb, -ESHUTDOWN); + dev_dbg (hcd->self.controller, +- "shutdown urb %p ep%d%s%s\n", ++ "shutdown urb %pK ep%d%s%s\n", + urb, usb_endpoint_num(&ep->desc), + is_in ? 
"in" : "out", + ({ char *s; +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 67961231cbbd..b627392ad52a 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -358,7 +358,8 @@ static void usb_set_lpm_parameters(struct usb_device *udev) + } + + /* USB 2.0 spec Section 11.24.4.5 */ +-static int get_hub_descriptor(struct usb_device *hdev, void *data) ++static int get_hub_descriptor(struct usb_device *hdev, ++ struct usb_hub_descriptor *desc) + { + int i, ret, size; + unsigned dtype; +@@ -374,10 +375,18 @@ static int get_hub_descriptor(struct usb_device *hdev, void *data) + for (i = 0; i < 3; i++) { + ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), + USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, +- dtype << 8, 0, data, size, ++ dtype << 8, 0, desc, size, + USB_CTRL_GET_TIMEOUT); +- if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2)) ++ if (hub_is_superspeed(hdev)) { ++ if (ret == size) ++ return ret; ++ } else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) { ++ /* Make sure we have the DeviceRemovable field. */ ++ size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1; ++ if (ret < size) ++ return -EMSGSIZE; + return ret; ++ } + } + return -EINVAL; + } +@@ -1295,7 +1304,7 @@ static int hub_configure(struct usb_hub *hub, + } + mutex_init(&hub->status_mutex); + +- hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); ++ hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL); + if (!hub->descriptor) { + ret = -ENOMEM; + goto fail; +@@ -1303,7 +1312,7 @@ static int hub_configure(struct usb_hub *hub, + + /* Request the entire hub descriptor. + * hub->descriptor can handle USB_MAXCHILDREN ports, +- * but the hub can/will return fewer bytes here. ++ * but a (non-SS) hub can/will return fewer bytes here. + */ + ret = get_hub_descriptor(hdev, hub->descriptor); + if (ret < 0) { +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c +index c601e25b609f..e43ef7d2d00e 100644 +--- a/drivers/usb/core/urb.c ++++ b/drivers/usb/core/urb.c +@@ -335,7 +335,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) + if (!urb || !urb->complete) + return -EINVAL; + if (urb->hcpriv) { +- WARN_ONCE(1, "URB %p submitted while active\n", urb); ++ WARN_ONCE(1, "URB %pK submitted while active\n", urb); + return -EBUSY; + } + +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 5d70d46239bb..cf6bbaff42d0 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -1709,7 +1709,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) + xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); + for (i = 0; i < num_sp; i++) { + dma_addr_t dma; +- void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, ++ void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, + flags); + if (!buf) + goto fail_sp5; +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index dd262f418140..30c4ae80c8f9 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -52,6 +52,7 @@ + #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 + #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 + #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 ++#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 + + static const char hcd_name[] = "xhci_hcd"; + +@@ -167,12 +168,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 
|| +- pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) { ++ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || ++ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) { + xhci->quirks |= XHCI_PME_STUCK_QUIRK; + } + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || +- pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) ++ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || ++ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) + xhci->quirks |= XHCI_MISSING_CAS; + + if (pdev->vendor == PCI_VENDOR_ID_ETRON && +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 268829db9e88..062cf8a84a59 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -92,7 +92,7 @@ static int xhci_plat_probe(struct platform_device *pdev) + + irq = platform_get_irq(pdev, 0); + if (irq < 0) +- return -ENODEV; ++ return irq; + + /* Try to set 64-bit DMA first */ + if (WARN_ON(!pdev->dev.dma_mask)) +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c +index 775690bed4c0..5e43fd881a9c 100644 +--- a/drivers/usb/misc/iowarrior.c ++++ b/drivers/usb/misc/iowarrior.c +@@ -557,7 +557,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, + info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); + + /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ +- info.speed = le16_to_cpu(dev->udev->speed); ++ info.speed = dev->udev->speed; + info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; + info.report_size = dev->report_size; + +diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c +index 4dd531ac5a7f..0ec9ee573ffa 100644 +--- a/drivers/usb/misc/legousbtower.c ++++ b/drivers/usb/misc/legousbtower.c +@@ -317,9 +317,16 @@ static int tower_open (struct inode *inode, struct file *file) + int subminor; + int retval = 0; + struct usb_interface *interface; +- struct tower_reset_reply reset_reply; ++ struct tower_reset_reply *reset_reply; + int result; + ++ reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL); ++ ++ if (!reset_reply) { ++ retval = -ENOMEM; ++ goto exit; ++ } ++ + nonseekable_open(inode, file); + subminor = iminor(inode); + +@@ -364,8 +371,8 @@ static int tower_open (struct inode *inode, struct file *file) + USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, + 0, + 0, +- &reset_reply, +- sizeof(reset_reply), ++ reset_reply, ++ sizeof(*reset_reply), + 1000); + if (result < 0) { + dev_err(&dev->udev->dev, +@@ -406,6 +413,7 @@ unlock_exit: + mutex_unlock(&dev->lock); + + exit: ++ kfree(reset_reply); + return retval; + } + +@@ -808,7 +816,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device + struct lego_usb_tower *dev = NULL; + struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor* endpoint; +- struct tower_get_version_reply get_version_reply; ++ struct tower_get_version_reply *get_version_reply = NULL; + int i; + int retval = -ENOMEM; + int result; +@@ -898,6 +906,13 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device + dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval; + dev->interrupt_out_interval = interrupt_out_interval ? 
interrupt_out_interval : dev->interrupt_out_endpoint->bInterval; + ++ get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL); ++ ++ if (!get_version_reply) { ++ retval = -ENOMEM; ++ goto error; ++ } ++ + /* get the firmware version and log it */ + result = usb_control_msg (udev, + usb_rcvctrlpipe(udev, 0), +@@ -905,18 +920,19 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device + USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, + 0, + 0, +- &get_version_reply, +- sizeof(get_version_reply), ++ get_version_reply, ++ sizeof(*get_version_reply), + 1000); + if (result < 0) { + dev_err(idev, "LEGO USB Tower get version control request failed\n"); + retval = result; + goto error; + } +- dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d " +- "build %d\n", get_version_reply.major, +- get_version_reply.minor, +- le16_to_cpu(get_version_reply.build_no)); ++ dev_info(&interface->dev, ++ "LEGO USB Tower firmware version is %d.%d build %d\n", ++ get_version_reply->major, ++ get_version_reply->minor, ++ le16_to_cpu(get_version_reply->build_no)); + + /* we can register the device now, as it is ready */ + usb_set_intfdata (interface, dev); +@@ -937,9 +953,11 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device + USB_MAJOR, dev->minor); + + exit: ++ kfree(get_version_reply); + return retval; + + error: ++ kfree(get_version_reply); + tower_delete(dev); + return retval; + } +diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c +index 4c82077da475..6020024cb87c 100644 +--- a/drivers/usb/musb/tusb6010_omap.c ++++ b/drivers/usb/musb/tusb6010_omap.c +@@ -220,6 +220,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, + u32 dma_remaining; + int src_burst, dst_burst; + u16 csr; ++ u32 psize; + int ch; + s8 dmareq; + s8 sync_dev; +@@ -391,15 +392,19 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, + + if (chdat->tx) { + /* Send transfer_packet_sz packets at a time */ +- musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, +- chdat->transfer_packet_sz); ++ psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET); ++ psize &= ~0x7ff; ++ psize |= chdat->transfer_packet_sz; ++ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize); + + musb_writel(ep_conf, TUSB_EP_TX_OFFSET, + TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); + } else { + /* Receive transfer_packet_sz packets at a time */ +- musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, +- chdat->transfer_packet_sz << 16); ++ psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET); ++ psize &= ~(0x7ff << 16); ++ psize |= (chdat->transfer_packet_sz << 16); ++ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize); + + musb_writel(ep_conf, TUSB_EP_RX_OFFSET, + TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index dbd441c1c2ad..e0385d6c0abb 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -809,10 +809,10 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, + { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, +- { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), +- .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, +- { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), +- .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, ++ { 
USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) }, ++ { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) }, ++ { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) }, ++ { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) }, + { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), +@@ -1508,9 +1508,9 @@ static int set_serial_info(struct tty_struct *tty, + (new_serial.flags & ASYNC_FLAGS)); + priv->custom_divisor = new_serial.custom_divisor; + ++check_and_exit: + write_latency_timer(port); + +-check_and_exit: + if ((old_priv.flags & ASYNC_SPD_MASK) != + (priv->flags & ASYNC_SPD_MASK)) { + if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 71fb9e59db71..4fcf1cecb6d7 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -882,6 +882,8 @@ + /* Olimex */ + #define OLIMEX_VID 0x15BA + #define OLIMEX_ARM_USB_OCD_PID 0x0003 ++#define OLIMEX_ARM_USB_TINY_PID 0x0004 ++#define OLIMEX_ARM_USB_TINY_H_PID 0x002a + #define OLIMEX_ARM_USB_OCD_H_PID 0x002b + + /* +diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c +index f1a8fdcd8674..e98532feb0cc 100644 +--- a/drivers/usb/serial/io_ti.c ++++ b/drivers/usb/serial/io_ti.c +@@ -2349,8 +2349,11 @@ static void change_port_settings(struct tty_struct *tty, + if (!baud) { + /* pick a default, any default... */ + baud = 9600; +- } else ++ } else { ++ /* Avoid a zero divisor. */ ++ baud = min(baud, 461550); + tty_encode_baud_rate(tty, baud, baud); ++ } + + edge_port->baud_rate = baud; + config->wBaudRate = (__u16)((461550L + baud/2) / baud); +diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c +index 9bf82c262c5b..a6c07c6be25f 100644 +--- a/drivers/usb/serial/mct_u232.c ++++ b/drivers/usb/serial/mct_u232.c +@@ -189,7 +189,7 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty, + return -ENOMEM; + + divisor = mct_u232_calculate_baud_rate(serial, value, &speed); +- put_unaligned_le32(cpu_to_le32(divisor), buf); ++ put_unaligned_le32(divisor, buf); + rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), + MCT_U232_SET_BAUD_RATE_REQUEST, + MCT_U232_SET_REQUEST_TYPE, +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index af67a0de6b5d..3bf61acfc26b 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -281,6 +281,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 + #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 ++#define TELIT_PRODUCT_ME910 0x1100 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 +@@ -640,6 +641,11 @@ static const struct option_blacklist_info simcom_sim7100e_blacklist = { + .reserved = BIT(5) | BIT(6), + }; + ++static const struct option_blacklist_info telit_me910_blacklist = { ++ .sendsetup = BIT(0), ++ .reserved = BIT(1) | BIT(3), ++}; ++ + static const struct option_blacklist_info telit_le910_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(2), +@@ -1235,6 +1241,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), + 
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), ++ .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 38b3f0d8cd58..fd509ed6cf70 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ + {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ + {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ ++ {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ ++ {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ + {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ +diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c +index f3cf4cecd2b7..091e8ec7a6c0 100644 +--- a/drivers/usb/storage/ene_ub6250.c ++++ b/drivers/usb/storage/ene_ub6250.c +@@ -446,6 +446,10 @@ struct ms_lib_ctrl { + #define SD_BLOCK_LEN 9 + + struct ene_ub6250_info { ++ ++ /* I/O bounce buffer */ ++ u8 *bbuf; ++ + /* for 6250 code */ + struct SD_STATUS SD_Status; + struct MS_STATUS MS_Status; +@@ -493,8 +497,11 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag); + + static void ene_ub6250_info_destructor(void *extra) + { ++ struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra; ++ + if (!extra) + return; ++ kfree(info->bbuf); + } + + static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) +@@ -858,8 +865,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, + u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) + { + struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; ++ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; ++ u8 *bbuf = info->bbuf; + int result; +- u8 ExtBuf[4]; + u32 bn = PhyBlockAddr * 0x20 + PageNum; + + /* printk(KERN_INFO "MS --- MS_ReaderReadPage, +@@ -902,7 +910,7 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, + bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); + bcb->CDB[6] = 0x01; + +- result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); ++ result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); + if (result != USB_STOR_XFER_GOOD) + return USB_STOR_TRANSPORT_ERROR; + +@@ -911,9 +919,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, + ExtraDat->status0 = 0x10; /* Not yet,fireware support */ + + ExtraDat->status1 = 0x00; /* Not yet,fireware support */ +- ExtraDat->ovrflg = ExtBuf[0]; +- ExtraDat->mngflg = ExtBuf[1]; +- ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); ++ ExtraDat->ovrflg = bbuf[0]; ++ ExtraDat->mngflg = bbuf[1]; ++ ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); + + return USB_STOR_TRANSPORT_GOOD; + } +@@ -1339,8 +1347,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, + u8 PageNum, struct ms_lib_type_extdat *ExtraDat) + { + struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; ++ struct ene_ub6250_info *info = (struct ene_ub6250_info *) 
us->extra; ++ u8 *bbuf = info->bbuf; + int result; +- u8 ExtBuf[4]; + + /* printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); */ + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); +@@ -1355,7 +1364,7 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, + bcb->CDB[2] = (unsigned char)(PhyBlock>>16); + bcb->CDB[6] = 0x01; + +- result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); ++ result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); + if (result != USB_STOR_XFER_GOOD) + return USB_STOR_TRANSPORT_ERROR; + +@@ -1363,9 +1372,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, + ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ + ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ + ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ +- ExtraDat->ovrflg = ExtBuf[0]; +- ExtraDat->mngflg = ExtBuf[1]; +- ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); ++ ExtraDat->ovrflg = bbuf[0]; ++ ExtraDat->mngflg = bbuf[1]; ++ ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); + + return USB_STOR_TRANSPORT_GOOD; + } +@@ -1569,9 +1578,9 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st) + u16 PhyBlock, newblk, i; + u16 LogStart, LogEnde; + struct ms_lib_type_extdat extdat; +- u8 buf[0x200]; + u32 count = 0, index = 0; + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; ++ u8 *bbuf = info->bbuf; + + for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { + ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); +@@ -1585,14 +1594,16 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st) + } + + if (count == PhyBlock) { +- ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf); ++ ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, ++ bbuf); + count += 0x80; + } + index = (PhyBlock % 0x80) * 4; + +- extdat.ovrflg = buf[index]; +- extdat.mngflg = buf[index+1]; +- extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]); ++ extdat.ovrflg = bbuf[index]; ++ extdat.mngflg = bbuf[index+1]; ++ extdat.logadr = memstick_logaddr(bbuf[index+2], ++ bbuf[index+3]); + + if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { + ms_lib_setacquired_errorblock(us, PhyBlock); +@@ -2075,9 +2086,9 @@ static int ene_ms_init(struct us_data *us) + { + struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; + int result; +- u8 buf[0x200]; + u16 MSP_BlockSize, MSP_UserAreaBlocks; + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; ++ u8 *bbuf = info->bbuf; + + printk(KERN_INFO "transport --- ENE_MSInit\n"); + +@@ -2096,13 +2107,13 @@ static int ene_ms_init(struct us_data *us) + bcb->CDB[0] = 0xF1; + bcb->CDB[1] = 0x01; + +- result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); ++ result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); + if (result != USB_STOR_XFER_GOOD) { + printk(KERN_ERR "Execution MS Init Code Fail !!\n"); + return USB_STOR_TRANSPORT_ERROR; + } + /* the same part to test ENE */ +- info->MS_Status = *(struct MS_STATUS *)&buf[0]; ++ info->MS_Status = *(struct MS_STATUS *) bbuf; + + if (info->MS_Status.Insert && info->MS_Status.Ready) { + printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); +@@ -2111,15 +2122,15 @@ static int ene_ms_init(struct us_data *us) + printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); + printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); + if (info->MS_Status.IsMSPro) { +- MSP_BlockSize = (buf[6] << 8) | buf[7]; +- MSP_UserAreaBlocks = (buf[10] << 8) | buf[11]; 
++ MSP_BlockSize = (bbuf[6] << 8) | bbuf[7]; ++ MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11]; + info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; + } else { + ms_card_init(us); /* Card is MS (to ms.c)*/ + } + usb_stor_dbg(us, "MS Init Code OK !!\n"); + } else { +- usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]); ++ usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]); + return USB_STOR_TRANSPORT_ERROR; + } + +@@ -2129,9 +2140,9 @@ static int ene_ms_init(struct us_data *us) + static int ene_sd_init(struct us_data *us) + { + int result; +- u8 buf[0x200]; + struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; ++ u8 *bbuf = info->bbuf; + + usb_stor_dbg(us, "transport --- ENE_SDInit\n"); + /* SD Init Part-1 */ +@@ -2165,17 +2176,17 @@ static int ene_sd_init(struct us_data *us) + bcb->Flags = US_BULK_FLAG_IN; + bcb->CDB[0] = 0xF1; + +- result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); ++ result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); + if (result != USB_STOR_XFER_GOOD) { + usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); + return USB_STOR_TRANSPORT_ERROR; + } + +- info->SD_Status = *(struct SD_STATUS *)&buf[0]; ++ info->SD_Status = *(struct SD_STATUS *) bbuf; + if (info->SD_Status.Insert && info->SD_Status.Ready) { + struct SD_STATUS *s = &info->SD_Status; + +- ene_get_card_status(us, (unsigned char *)&buf); ++ ene_get_card_status(us, bbuf); + usb_stor_dbg(us, "Insert = %x\n", s->Insert); + usb_stor_dbg(us, "Ready = %x\n", s->Ready); + usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); +@@ -2183,7 +2194,7 @@ static int ene_sd_init(struct us_data *us) + usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); + usb_stor_dbg(us, "WtP = %x\n", s->WtP); + } else { +- usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]); ++ usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]); + return USB_STOR_TRANSPORT_ERROR; + } + return USB_STOR_TRANSPORT_GOOD; +@@ -2193,13 +2204,15 @@ static int ene_sd_init(struct us_data *us) + static int ene_init(struct us_data *us) + { + int result; +- u8 misc_reg03 = 0; ++ u8 misc_reg03; + struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); ++ u8 *bbuf = info->bbuf; + +- result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); ++ result = ene_get_card_type(us, REG_CARD_STATUS, bbuf); + if (result != USB_STOR_XFER_GOOD) + return USB_STOR_TRANSPORT_ERROR; + ++ misc_reg03 = bbuf[0]; + if (misc_reg03 & 0x01) { + if (!info->SD_Status.Ready) { + result = ene_sd_init(us); +@@ -2316,8 +2329,9 @@ static int ene_ub6250_probe(struct usb_interface *intf, + const struct usb_device_id *id) + { + int result; +- u8 misc_reg03 = 0; ++ u8 misc_reg03; + struct us_data *us; ++ struct ene_ub6250_info *info; + + result = usb_stor_probe1(&us, intf, id, + (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, +@@ -2326,11 +2340,16 @@ static int ene_ub6250_probe(struct usb_interface *intf, + return result; + + /* FIXME: where should the code alloc extra buf ? 
*/ +- if (!us->extra) { +- us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); +- if (!us->extra) +- return -ENOMEM; +- us->extra_destructor = ene_ub6250_info_destructor; ++ us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); ++ if (!us->extra) ++ return -ENOMEM; ++ us->extra_destructor = ene_ub6250_info_destructor; ++ ++ info = (struct ene_ub6250_info *)(us->extra); ++ info->bbuf = kmalloc(512, GFP_KERNEL); ++ if (!info->bbuf) { ++ kfree(us->extra); ++ return -ENOMEM; + } + + us->transport_name = "ene_ub6250"; +@@ -2342,12 +2361,13 @@ static int ene_ub6250_probe(struct usb_interface *intf, + return result; + + /* probe card type */ +- result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); ++ result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf); + if (result != USB_STOR_XFER_GOOD) { + usb_stor_disconnect(intf); + return USB_STOR_TRANSPORT_ERROR; + } + ++ misc_reg03 = info->bbuf[0]; + if (!(misc_reg03 & 0x01)) { + pr_info("ums_eneub6250: This driver only supports SD/MS cards. " + "It does not support SM cards.\n"); +diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c +index 6345e85822a4..a50cf45e530f 100644 +--- a/drivers/uwb/i1480/dfu/usb.c ++++ b/drivers/uwb/i1480/dfu/usb.c +@@ -341,6 +341,7 @@ error_submit_ep1: + static + int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) + { ++ struct usb_device *udev = interface_to_usbdev(iface); + struct i1480_usb *i1480_usb; + struct i1480 *i1480; + struct device *dev = &iface->dev; +@@ -352,8 +353,8 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) + iface->cur_altsetting->desc.bInterfaceNumber); + goto error; + } +- if (iface->num_altsetting > 1 +- && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) { ++ if (iface->num_altsetting > 1 && ++ le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) { + /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ + result = usb_set_interface(interface_to_usbdev(iface), 0, 1); + if (result < 0) +diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c +index 1a11aedc4fe8..9eb5b314ba06 100644 +--- a/drivers/watchdog/pcwd_usb.c ++++ b/drivers/watchdog/pcwd_usb.c +@@ -630,6 +630,9 @@ static int usb_pcwd_probe(struct usb_interface *interface, + return -ENODEV; + } + ++ if (iface_desc->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + /* check out the endpoint: it has to be Interrupt & IN */ + endpoint = &iface_desc->endpoint[0].desc; + +diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c +index 4d8caeb94a11..bdb9c94335f1 100644 +--- a/fs/ceph/acl.c ++++ b/fs/ceph/acl.c +@@ -128,7 +128,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) + if (new_mode != old_mode) { + newattrs.ia_mode = new_mode; + newattrs.ia_valid = ATTR_MODE; +- ret = ceph_setattr(dentry, &newattrs); ++ ret = __ceph_setattr(dentry, &newattrs); + if (ret) + goto out_dput; + } +@@ -138,7 +138,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) + if (new_mode != old_mode) { + newattrs.ia_mode = old_mode; + newattrs.ia_valid = ATTR_MODE; +- ceph_setattr(dentry, &newattrs); ++ __ceph_setattr(dentry, &newattrs); + } + goto out_dput; + } +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c +index d98536c8abfc..9f0d99094cc1 100644 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@ -1773,7 +1773,7 @@ static const struct inode_operations ceph_symlink_iops = { + /* + * setattr + */ +-int ceph_setattr(struct dentry *dentry, struct iattr *attr) ++int __ceph_setattr(struct dentry *dentry, 
struct iattr *attr) + { + struct inode *inode = d_inode(dentry); + struct ceph_inode_info *ci = ceph_inode(inode); +@@ -1975,11 +1975,6 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) + if (inode_dirty_flags) + __mark_inode_dirty(inode, inode_dirty_flags); + +- if (ia_valid & ATTR_MODE) { +- err = posix_acl_chmod(inode, attr->ia_mode); +- if (err) +- goto out_put; +- } + + if (mask) { + req->r_inode = inode; +@@ -1993,13 +1988,23 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) + ceph_cap_string(dirtied), mask); + + ceph_mdsc_put_request(req); +- if (mask & CEPH_SETATTR_SIZE) +- __ceph_do_pending_vmtruncate(inode); + ceph_free_cap_flush(prealloc_cf); ++ ++ if (err >= 0 && (mask & CEPH_SETATTR_SIZE)) ++ __ceph_do_pending_vmtruncate(inode); ++ + return err; +-out_put: +- ceph_mdsc_put_request(req); +- ceph_free_cap_flush(prealloc_cf); ++} ++ ++int ceph_setattr(struct dentry *dentry, struct iattr *attr) ++{ ++ int err; ++ ++ err = __ceph_setattr(dentry, attr); ++ ++ if (err >= 0 && (attr->ia_valid & ATTR_MODE)) ++ err = posix_acl_chmod(d_inode(dentry), attr->ia_mode); ++ + return err; + } + +diff --git a/fs/ceph/super.h b/fs/ceph/super.h +index 75b7d125ce66..8c8cb8fe3d32 100644 +--- a/fs/ceph/super.h ++++ b/fs/ceph/super.h +@@ -788,6 +788,7 @@ static inline int ceph_do_getattr(struct inode *inode, int mask, bool force) + return __ceph_do_getattr(inode, NULL, mask, force); + } + extern int ceph_permission(struct inode *inode, int mask); ++extern int __ceph_setattr(struct dentry *dentry, struct iattr *attr); + extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); + extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat); +diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c +index 9d6c2dcf1bd0..f240cef8b326 100644 +--- a/fs/ext4/crypto.c ++++ b/fs/ext4/crypto.c +@@ -94,7 +94,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx) + * Return: An allocated and initialized encryption context on success; error + * value or NULL otherwise. 
+ */ +-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode) ++struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode, ++ gfp_t gfp_flags) + { + struct ext4_crypto_ctx *ctx = NULL; + int res = 0; +@@ -121,7 +122,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode) + list_del(&ctx->free_list); + spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); + if (!ctx) { +- ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS); ++ ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags); + if (!ctx) { + res = -ENOMEM; + goto out; +@@ -258,7 +259,8 @@ static int ext4_page_crypto(struct inode *inode, + ext4_direction_t rw, + pgoff_t index, + struct page *src_page, +- struct page *dest_page) ++ struct page *dest_page, ++ gfp_t gfp_flags) + + { + u8 xts_tweak[EXT4_XTS_TWEAK_SIZE]; +@@ -269,7 +271,7 @@ static int ext4_page_crypto(struct inode *inode, + struct crypto_ablkcipher *tfm = ci->ci_ctfm; + int res = 0; + +- req = ablkcipher_request_alloc(tfm, GFP_NOFS); ++ req = ablkcipher_request_alloc(tfm, gfp_flags); + if (!req) { + printk_ratelimited(KERN_ERR + "%s: crypto_request_alloc() failed\n", +@@ -310,9 +312,10 @@ static int ext4_page_crypto(struct inode *inode, + return 0; + } + +-static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx) ++static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx, ++ gfp_t gfp_flags) + { +- ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT); ++ ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags); + if (ctx->w.bounce_page == NULL) + return ERR_PTR(-ENOMEM); + ctx->flags |= EXT4_WRITE_PATH_FL; +@@ -335,7 +338,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx) + * error value or NULL. + */ + struct page *ext4_encrypt(struct inode *inode, +- struct page *plaintext_page) ++ struct page *plaintext_page, ++ gfp_t gfp_flags) + { + struct ext4_crypto_ctx *ctx; + struct page *ciphertext_page = NULL; +@@ -343,17 +347,17 @@ struct page *ext4_encrypt(struct inode *inode, + + BUG_ON(!PageLocked(plaintext_page)); + +- ctx = ext4_get_crypto_ctx(inode); ++ ctx = ext4_get_crypto_ctx(inode, gfp_flags); + if (IS_ERR(ctx)) + return (struct page *) ctx; + + /* The encryption operation will require a bounce page. 
*/ +- ciphertext_page = alloc_bounce_page(ctx); ++ ciphertext_page = alloc_bounce_page(ctx, gfp_flags); + if (IS_ERR(ciphertext_page)) + goto errout; + ctx->w.control_page = plaintext_page; + err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index, +- plaintext_page, ciphertext_page); ++ plaintext_page, ciphertext_page, gfp_flags); + if (err) { + ciphertext_page = ERR_PTR(err); + errout: +@@ -381,8 +385,8 @@ int ext4_decrypt(struct page *page) + { + BUG_ON(!PageLocked(page)); + +- return ext4_page_crypto(page->mapping->host, +- EXT4_DECRYPT, page->index, page, page); ++ return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT, ++ page->index, page, page, GFP_NOFS); + } + + int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex) +@@ -403,11 +407,11 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex) + + BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); + +- ctx = ext4_get_crypto_ctx(inode); ++ ctx = ext4_get_crypto_ctx(inode, GFP_NOFS); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + +- ciphertext_page = alloc_bounce_page(ctx); ++ ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT); + if (IS_ERR(ciphertext_page)) { + err = PTR_ERR(ciphertext_page); + goto errout; +@@ -415,11 +419,12 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex) + + while (len--) { + err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk, +- ZERO_PAGE(0), ciphertext_page); ++ ZERO_PAGE(0), ciphertext_page, ++ GFP_NOFS); + if (err) + goto errout; + +- bio = bio_alloc(GFP_KERNEL, 1); ++ bio = bio_alloc(GFP_NOWAIT, 1); + if (!bio) { + err = -ENOMEM; + goto errout; +diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c +index 2fbef8a14760..2cfe3ffc276f 100644 +--- a/fs/ext4/crypto_fname.c ++++ b/fs/ext4/crypto_fname.c +@@ -343,7 +343,7 @@ int _ext4_fname_disk_to_usr(struct inode *inode, + memcpy(buf+4, &hinfo->minor_hash, 4); + } else + memset(buf, 0, 8); +- memcpy(buf + 8, iname->name + iname->len - 16, 16); ++ memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16); + oname->name[0] = '_'; + ret = digest_encode(buf, 24, oname->name+1); + oname->len = ret + 1; +diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c +index dd561f916f0b..e4f4fc4e56ab 100644 +--- a/fs/ext4/crypto_policy.c ++++ b/fs/ext4/crypto_policy.c +@@ -148,26 +148,38 @@ int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy) + int ext4_is_child_context_consistent_with_parent(struct inode *parent, + struct inode *child) + { +- struct ext4_crypt_info *parent_ci, *child_ci; ++ const struct ext4_crypt_info *parent_ci, *child_ci; ++ struct ext4_encryption_context parent_ctx, child_ctx; + int res; + +- if ((parent == NULL) || (child == NULL)) { +- pr_err("parent %p child %p\n", parent, child); +- WARN_ON(1); /* Should never happen */ +- return 0; +- } +- + /* No restrictions on file types which are never encrypted */ + if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && + !S_ISLNK(child->i_mode)) + return 1; + +- /* no restrictions if the parent directory is not encrypted */ ++ /* No restrictions if the parent directory is unencrypted */ + if (!ext4_encrypted_inode(parent)) + return 1; +- /* if the child directory is not encrypted, this is always a problem */ ++ ++ /* Encrypted directories must not contain unencrypted files */ + if (!ext4_encrypted_inode(child)) + return 0; ++ ++ /* ++ * Both parent and child are encrypted, so verify they use the same ++ * encryption policy. 
Compare the fscrypt_info structs if the keys are ++ * available, otherwise retrieve and compare the fscrypt_contexts. ++ * ++ * Note that the fscrypt_context retrieval will be required frequently ++ * when accessing an encrypted directory tree without the key. ++ * Performance-wise this is not a big deal because we already don't ++ * really optimize for file access without the key (to the extent that ++ * such access is even possible), given that any attempted access ++ * already causes a fscrypt_context retrieval and keyring search. ++ * ++ * In any case, if an unexpected error occurs, fall back to "forbidden". ++ */ ++ + res = ext4_get_encryption_info(parent); + if (res) + return 0; +@@ -176,17 +188,35 @@ int ext4_is_child_context_consistent_with_parent(struct inode *parent, + return 0; + parent_ci = EXT4_I(parent)->i_crypt_info; + child_ci = EXT4_I(child)->i_crypt_info; +- if (!parent_ci && !child_ci) +- return 1; +- if (!parent_ci || !child_ci) ++ if (parent_ci && child_ci) { ++ return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key, ++ EXT4_KEY_DESCRIPTOR_SIZE) == 0 && ++ (parent_ci->ci_data_mode == child_ci->ci_data_mode) && ++ (parent_ci->ci_filename_mode == ++ child_ci->ci_filename_mode) && ++ (parent_ci->ci_flags == child_ci->ci_flags); ++ } ++ ++ res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION, ++ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ++ &parent_ctx, sizeof(parent_ctx)); ++ if (res != sizeof(parent_ctx)) ++ return 0; ++ ++ res = ext4_xattr_get(child, EXT4_XATTR_INDEX_ENCRYPTION, ++ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ++ &child_ctx, sizeof(child_ctx)); ++ if (res != sizeof(child_ctx)) + return 0; + +- return (memcmp(parent_ci->ci_master_key, +- child_ci->ci_master_key, +- EXT4_KEY_DESCRIPTOR_SIZE) == 0 && +- (parent_ci->ci_data_mode == child_ci->ci_data_mode) && +- (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) && +- (parent_ci->ci_flags == child_ci->ci_flags)); ++ return memcmp(parent_ctx.master_key_descriptor, ++ child_ctx.master_key_descriptor, ++ EXT4_KEY_DESCRIPTOR_SIZE) == 0 && ++ (parent_ctx.contents_encryption_mode == ++ child_ctx.contents_encryption_mode) && ++ (parent_ctx.filenames_encryption_mode == ++ child_ctx.filenames_encryption_mode) && ++ (parent_ctx.flags == child_ctx.flags); + } + + /** +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 3de9bb357b4f..c8ad14c697c4 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -2261,11 +2261,13 @@ extern struct kmem_cache *ext4_crypt_info_cachep; + bool ext4_valid_contents_enc_mode(uint32_t mode); + uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size); + extern struct workqueue_struct *ext4_read_workqueue; +-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode); ++struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode, ++ gfp_t gfp_flags); + void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx); + void ext4_restore_control_page(struct page *data_page); + struct page *ext4_encrypt(struct inode *inode, +- struct page *plaintext_page); ++ struct page *plaintext_page, ++ gfp_t gfp_flags); + int ext4_decrypt(struct page *page); + int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex); + extern const struct dentry_operations ext4_encrypted_d_ops; +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index fafa903ab3c0..1d007e853f5c 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -1243,9 +1243,9 @@ static inline int ext4_match(struct ext4_filename *fname, + if (unlikely(!name)) { + if (fname->usr_fname->name[0] == '_') { + int ret; +- 
if (de->name_len < 16) ++ if (de->name_len <= 32) + return 0; +- ret = memcmp(de->name + de->name_len - 16, ++ ret = memcmp(de->name + ((de->name_len - 17) & ~15), + fname->crypto_buf.name + 8, 16); + return (ret == 0) ? 1 : 0; + } +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c +index 17fbe3882b8e..6ca56f5f72b5 100644 +--- a/fs/ext4/page-io.c ++++ b/fs/ext4/page-io.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #include "ext4_jbd2.h" + #include "xattr.h" +@@ -485,9 +486,20 @@ int ext4_bio_write_page(struct ext4_io_submit *io, + + if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) && + nr_to_submit) { +- data_page = ext4_encrypt(inode, page); ++ gfp_t gfp_flags = GFP_NOFS; ++ ++ retry_encrypt: ++ data_page = ext4_encrypt(inode, page, gfp_flags); + if (IS_ERR(data_page)) { + ret = PTR_ERR(data_page); ++ if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) { ++ if (io->io_bio) { ++ ext4_io_submit(io); ++ congestion_wait(BLK_RW_ASYNC, HZ/50); ++ } ++ gfp_flags |= __GFP_NOFAIL; ++ goto retry_encrypt; ++ } + data_page = NULL; + goto out; + } +diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c +index 5dc5e95063de..bc7642f57dc8 100644 +--- a/fs/ext4/readpage.c ++++ b/fs/ext4/readpage.c +@@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_space *mapping, + + if (ext4_encrypted_inode(inode) && + S_ISREG(inode->i_mode)) { +- ctx = ext4_get_crypto_ctx(inode); ++ ctx = ext4_get_crypto_ctx(inode, GFP_NOFS); + if (IS_ERR(ctx)) + goto set_error_page; + } +diff --git a/fs/f2fs/crypto_fname.c b/fs/f2fs/crypto_fname.c +index ab377d496a39..38349ed5ea51 100644 +--- a/fs/f2fs/crypto_fname.c ++++ b/fs/f2fs/crypto_fname.c +@@ -333,7 +333,7 @@ int f2fs_fname_disk_to_usr(struct inode *inode, + memset(buf + 4, 0, 4); + } else + memset(buf, 0, 8); +- memcpy(buf + 8, iname->name + iname->len - 16, 16); ++ memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16); + oname->name[0] = '_'; + ret = digest_encode(buf, 24, oname->name + 1); + oname->len = ret + 1; +diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c +index 5bbd1989d5e6..884f3f0fe29d 100644 +--- a/fs/f2fs/crypto_policy.c ++++ b/fs/f2fs/crypto_policy.c +@@ -141,25 +141,38 @@ int f2fs_get_policy(struct inode *inode, struct f2fs_encryption_policy *policy) + int f2fs_is_child_context_consistent_with_parent(struct inode *parent, + struct inode *child) + { +- struct f2fs_crypt_info *parent_ci, *child_ci; ++ const struct f2fs_crypt_info *parent_ci, *child_ci; ++ struct f2fs_encryption_context parent_ctx, child_ctx; + int res; + +- if ((parent == NULL) || (child == NULL)) { +- pr_err("parent %p child %p\n", parent, child); +- BUG_ON(1); +- } +- + /* No restrictions on file types which are never encrypted */ + if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && + !S_ISLNK(child->i_mode)) + return 1; + +- /* no restrictions if the parent directory is not encrypted */ ++ /* No restrictions if the parent directory is unencrypted */ + if (!f2fs_encrypted_inode(parent)) + return 1; +- /* if the child directory is not encrypted, this is always a problem */ ++ ++ /* Encrypted directories must not contain unencrypted files */ + if (!f2fs_encrypted_inode(child)) + return 0; ++ ++ /* ++ * Both parent and child are encrypted, so verify they use the same ++ * encryption policy. Compare the fscrypt_info structs if the keys are ++ * available, otherwise retrieve and compare the fscrypt_contexts. 
++ * ++ * Note that the fscrypt_context retrieval will be required frequently ++ * when accessing an encrypted directory tree without the key. ++ * Performance-wise this is not a big deal because we already don't ++ * really optimize for file access without the key (to the extent that ++ * such access is even possible), given that any attempted access ++ * already causes a fscrypt_context retrieval and keyring search. ++ * ++ * In any case, if an unexpected error occurs, fall back to "forbidden". ++ */ ++ + res = f2fs_get_encryption_info(parent); + if (res) + return 0; +@@ -168,17 +181,35 @@ int f2fs_is_child_context_consistent_with_parent(struct inode *parent, + return 0; + parent_ci = F2FS_I(parent)->i_crypt_info; + child_ci = F2FS_I(child)->i_crypt_info; +- if (!parent_ci && !child_ci) +- return 1; +- if (!parent_ci || !child_ci) ++ if (parent_ci && child_ci) { ++ return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key, ++ F2FS_KEY_DESCRIPTOR_SIZE) == 0 && ++ (parent_ci->ci_data_mode == child_ci->ci_data_mode) && ++ (parent_ci->ci_filename_mode == ++ child_ci->ci_filename_mode) && ++ (parent_ci->ci_flags == child_ci->ci_flags); ++ } ++ ++ res = f2fs_getxattr(parent, F2FS_XATTR_INDEX_ENCRYPTION, ++ F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, ++ &parent_ctx, sizeof(parent_ctx), NULL); ++ if (res != sizeof(parent_ctx)) ++ return 0; ++ ++ res = f2fs_getxattr(child, F2FS_XATTR_INDEX_ENCRYPTION, ++ F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, ++ &child_ctx, sizeof(child_ctx), NULL); ++ if (res != sizeof(child_ctx)) + return 0; + +- return (memcmp(parent_ci->ci_master_key, +- child_ci->ci_master_key, +- F2FS_KEY_DESCRIPTOR_SIZE) == 0 && +- (parent_ci->ci_data_mode == child_ci->ci_data_mode) && +- (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) && +- (parent_ci->ci_flags == child_ci->ci_flags)); ++ return memcmp(parent_ctx.master_key_descriptor, ++ child_ctx.master_key_descriptor, ++ F2FS_KEY_DESCRIPTOR_SIZE) == 0 && ++ (parent_ctx.contents_encryption_mode == ++ child_ctx.contents_encryption_mode) && ++ (parent_ctx.filenames_encryption_mode == ++ child_ctx.filenames_encryption_mode) && ++ (parent_ctx.flags == child_ctx.flags); + } + + /** +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c +index 7c1678ba8f92..60972a559685 100644 +--- a/fs/f2fs/dir.c ++++ b/fs/f2fs/dir.c +@@ -124,19 +124,29 @@ struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *fname, + + de = &d->dentry[bit_pos]; + +- /* encrypted case */ ++ if (de->hash_code != namehash) ++ goto not_match; ++ + de_name.name = d->filename[bit_pos]; + de_name.len = le16_to_cpu(de->name_len); + +- /* show encrypted name */ +- if (fname->hash) { +- if (de->hash_code == fname->hash) +- goto found; +- } else if (de_name.len == name->len && +- de->hash_code == namehash && +- !memcmp(de_name.name, name->name, name->len)) ++#ifdef CONFIG_F2FS_FS_ENCRYPTION ++ if (unlikely(!name->name)) { ++ if (fname->usr_fname->name[0] == '_') { ++ if (de_name.len > 32 && ++ !memcmp(de_name.name + ((de_name.len - 17) & ~15), ++ fname->crypto_buf.name + 8, 16)) ++ goto found; ++ goto not_match; ++ } ++ name->name = fname->crypto_buf.name; ++ name->len = fname->crypto_buf.len; ++ } ++#endif ++ if (de_name.len == name->len && ++ !memcmp(de_name.name, name->name, name->len)) + goto found; +- ++not_match: + if (max_slots && max_len > *max_slots) + *max_slots = max_len; + max_len = 0; +@@ -170,7 +180,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, + int max_slots; + f2fs_hash_t namehash; + +- namehash = f2fs_dentry_hash(&name); ++ namehash = 
f2fs_dentry_hash(&name, fname); + + f2fs_bug_on(F2FS_I_SB(dir), level > MAX_DIR_HASH_DEPTH); + +@@ -547,7 +557,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, + + level = 0; + slots = GET_DENTRY_SLOTS(new_name.len); +- dentry_hash = f2fs_dentry_hash(&new_name); ++ dentry_hash = f2fs_dentry_hash(&new_name, NULL); + + current_depth = F2FS_I(dir)->i_current_depth; + if (F2FS_I(dir)->chash == dentry_hash) { +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index b1aeca83f4be..2871576fbca4 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -1722,7 +1722,8 @@ void f2fs_msg(struct super_block *, const char *, const char *, ...); + /* + * hash.c + */ +-f2fs_hash_t f2fs_dentry_hash(const struct qstr *); ++f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info, ++ struct f2fs_filename *fname); + + /* + * node.c +diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c +index 71b7206c431e..b238d2fec3e5 100644 +--- a/fs/f2fs/hash.c ++++ b/fs/f2fs/hash.c +@@ -70,7 +70,8 @@ static void str2hashbuf(const unsigned char *msg, size_t len, + *buf++ = pad; + } + +-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info) ++f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info, ++ struct f2fs_filename *fname) + { + __u32 hash; + f2fs_hash_t f2fs_hash; +@@ -79,6 +80,10 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info) + const unsigned char *name = name_info->name; + size_t len = name_info->len; + ++ /* encrypted bigname case */ ++ if (fname && !fname->disk_name.name) ++ return cpu_to_le32(fname->hash); ++ + if (is_dot_dotdot(name_info)) + return 0; + +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c +index bda7126466c0..ad80f916b64d 100644 +--- a/fs/f2fs/inline.c ++++ b/fs/f2fs/inline.c +@@ -303,7 +303,7 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir, + if (IS_ERR(ipage)) + return NULL; + +- namehash = f2fs_dentry_hash(&name); ++ namehash = f2fs_dentry_hash(&name, fname); + + inline_dentry = inline_data_addr(ipage); + +@@ -468,7 +468,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, + + f2fs_wait_on_page_writeback(ipage, NODE); + +- name_hash = f2fs_dentry_hash(name); ++ name_hash = f2fs_dentry_hash(name, NULL); + make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2); + f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos); + +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 12935209deca..c3e1cb481fe0 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -4041,8 +4041,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_getdeviceinfo *gdev) + { + struct xdr_stream *xdr = &resp->xdr; +- const struct nfsd4_layout_ops *ops = +- nfsd4_layout_ops[gdev->gd_layout_type]; ++ const struct nfsd4_layout_ops *ops; + u32 starting_len = xdr->buf->len, needed_len; + __be32 *p; + +@@ -4059,6 +4058,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr, + + /* If maxcount is 0 then just update notifications */ + if (gdev->gd_maxcount != 0) { ++ ops = nfsd4_layout_ops[gdev->gd_layout_type]; + nfserr = ops->encode_getdeviceinfo(xdr, gdev); + if (nfserr) { + /* +@@ -4111,8 +4111,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_layoutget *lgp) + { + struct xdr_stream *xdr = &resp->xdr; +- const struct nfsd4_layout_ops *ops = +- nfsd4_layout_ops[lgp->lg_layout_type]; ++ const struct nfsd4_layout_ops *ops; + __be32 *p; + + dprintk("%s: err %d\n", __func__, nfserr); +@@ -4135,6 +4134,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, 
__be32 nfserr, + *p++ = cpu_to_be32(lgp->lg_seg.iomode); + *p++ = cpu_to_be32(lgp->lg_layout_type); + ++ ops = nfsd4_layout_ops[lgp->lg_layout_type]; + nfserr = ops->encode_layoutget(xdr, lgp); + out: + kfree(lgp->lg_content); +diff --git a/fs/proc/generic.c b/fs/proc/generic.c +index ff3ffc76a937..3773335791da 100644 +--- a/fs/proc/generic.c ++++ b/fs/proc/generic.c +@@ -469,6 +469,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name) + ent->data = NULL; + ent->proc_fops = NULL; + ent->proc_iops = NULL; ++ parent->nlink++; + if (proc_register(parent, ent) < 0) { + kfree(ent); + parent->nlink--; +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index 8f6849084248..e23392517db9 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -330,7 +330,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, + int write, void __user *buffer, + size_t *length, loff_t *ppos); + #endif +- ++extern void wait_for_kprobe_optimizer(void); ++#else ++static inline void wait_for_kprobe_optimizer(void) { } + #endif /* CONFIG_OPTPROBES */ + #ifdef CONFIG_KPROBES_ON_FTRACE + extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, +diff --git a/kernel/fork.c b/kernel/fork.c +index 278a2ddad351..0ee630f3ad4b 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1590,11 +1590,13 @@ static struct task_struct *copy_process(unsigned long clone_flags, + */ + recalc_sigpending(); + if (signal_pending(current)) { +- spin_unlock(¤t->sighand->siglock); +- write_unlock_irq(&tasklist_lock); + retval = -ERESTARTNOINTR; + goto bad_fork_cancel_cgroup; + } ++ if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) { ++ retval = -ENOMEM; ++ goto bad_fork_cancel_cgroup; ++ } + + if (likely(p->pid)) { + ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); +@@ -1645,6 +1647,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, + return p; + + bad_fork_cancel_cgroup: ++ spin_unlock(¤t->sighand->siglock); ++ write_unlock_irq(&tasklist_lock); + cgroup_cancel_fork(p, cgrp_ss_priv); + bad_fork_free_pid: + threadgroup_change_end(current); +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c +index 15206453b12a..e4453d9f788c 100644 +--- a/kernel/irq/chip.c ++++ b/kernel/irq/chip.c +@@ -810,8 +810,8 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, + if (!desc) + return; + +- __irq_do_set_handler(desc, handle, 1, NULL); + desc->irq_common_data.handler_data = data; ++ __irq_do_set_handler(desc, handle, 1, NULL); + + irq_put_desc_busunlock(desc, flags); + } +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index d10ab6b9b5e0..695763516908 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -563,7 +563,7 @@ static void kprobe_optimizer(struct work_struct *work) + } + + /* Wait for completing optimization and unoptimization */ +-static void wait_for_kprobe_optimizer(void) ++void wait_for_kprobe_optimizer(void) + { + mutex_lock(&kprobe_mutex); + +diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c +index a65ba137fd15..567ecc826bc8 100644 +--- a/kernel/pid_namespace.c ++++ b/kernel/pid_namespace.c +@@ -255,7 +255,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) + * if reparented. 
+ */ + for (;;) { +- set_current_state(TASK_UNINTERRUPTIBLE); ++ set_current_state(TASK_INTERRUPTIBLE); + if (pid_ns->nr_hashed == init_pids) + break; + schedule(); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 8f258f437ac2..812069b66f47 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -3918,6 +3918,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) + if (!cfs_bandwidth_used()) + return; + ++ /* Synchronize hierarchical throttle counter: */ ++ if (unlikely(!cfs_rq->throttle_uptodate)) { ++ struct rq *rq = rq_of(cfs_rq); ++ struct cfs_rq *pcfs_rq; ++ struct task_group *tg; ++ ++ cfs_rq->throttle_uptodate = 1; ++ ++ /* Get closest up-to-date node, because leaves go first: */ ++ for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) { ++ pcfs_rq = tg->cfs_rq[cpu_of(rq)]; ++ if (pcfs_rq->throttle_uptodate) ++ break; ++ } ++ if (tg) { ++ cfs_rq->throttle_count = pcfs_rq->throttle_count; ++ cfs_rq->throttled_clock_task = rq_clock_task(rq); ++ } ++ } ++ + /* an active group must be handled by the update_curr()->put() path */ + if (!cfs_rq->runtime_enabled || cfs_rq->curr) + return; +@@ -4233,15 +4253,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + + /* Don't dequeue parent if it has other entities besides us */ + if (cfs_rq->load.weight) { ++ /* Avoid re-evaluating load for this entity: */ ++ se = parent_entity(se); + /* + * Bias pick_next to pick a task from this cfs_rq, as + * p is sleeping when it is within its sched_slice. + */ +- if (task_sleep && parent_entity(se)) +- set_next_buddy(parent_entity(se)); +- +- /* avoid re-evaluating load for this entity */ +- se = parent_entity(se); ++ if (task_sleep && se && !throttled_hierarchy(cfs_rq)) ++ set_next_buddy(se); + break; + } + flags |= DEQUEUE_SLEEP; +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 0517abd7dd73..4e5db65d1aab 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -417,7 +417,7 @@ struct cfs_rq { + + u64 throttled_clock, throttled_clock_task; + u64 throttled_clock_task_time; +- int throttled, throttle_count; ++ int throttled, throttle_count, throttle_uptodate; + struct list_head throttled_list; + #endif /* CONFIG_CFS_BANDWIDTH */ + #endif /* CONFIG_FAIR_GROUP_SCHED */ +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index c9956440d0e6..12ea4ea619ee 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -1471,6 +1471,11 @@ static __init int kprobe_trace_self_tests_init(void) + + end: + release_all_trace_kprobes(); ++ /* ++ * Wait for the optimizer work to finish. Otherwise it might fiddle ++ * with probes in already freed __init text. ++ */ ++ wait_for_kprobe_optimizer(); + if (warn) + pr_cont("NG: Some tests are failed. Please check them.\n"); + else +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 530e6427f823..47b469663822 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1269,6 +1269,16 @@ out_unlock: + return ret; + } + ++/* ++ * FOLL_FORCE can write to even unwritable pmd's, but only ++ * after we've gone through a COW cycle and they are dirty. 
++ */ ++static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) ++{ ++ return pmd_write(pmd) || ++ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); ++} ++ + struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + unsigned long addr, + pmd_t *pmd, +@@ -1279,7 +1289,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + + assert_spin_locked(pmd_lockptr(mm, pmd)); + +- if (flags & FOLL_WRITE && !pmd_write(*pmd)) ++ if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) + goto out; + + /* Avoid dumping huge zero page */ +diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c +index 48d0dc89b58d..e735f781e4f3 100644 +--- a/net/ipx/af_ipx.c ++++ b/net/ipx/af_ipx.c +@@ -1168,11 +1168,10 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg) + sipx->sipx_network = ipxif->if_netnum; + memcpy(sipx->sipx_node, ipxif->if_node, + sizeof(sipx->sipx_node)); +- rc = -EFAULT; ++ rc = 0; + if (copy_to_user(arg, &ifr, sizeof(ifr))) +- break; ++ rc = -EFAULT; + ipxitf_put(ipxif); +- rc = 0; + break; + } + case SIOCAIPXITFCRT: +diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c +index ed5a9c110b3a..9ce9d5003dcc 100644 +--- a/security/integrity/ima/ima_appraise.c ++++ b/security/integrity/ima/ima_appraise.c +@@ -203,10 +203,11 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, + + cause = "missing-hash"; + status = INTEGRITY_NOLABEL; +- if (opened & FILE_CREATED) { ++ if (opened & FILE_CREATED) + iint->flags |= IMA_NEW_FILE; ++ if ((iint->flags & IMA_NEW_FILE) && ++ !(iint->flags & IMA_DIGSIG_REQUIRED)) + status = INTEGRITY_PASS; +- } + goto out; + } + diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.70-71.patch b/patch/kernel/mvebu64-default/03-patch-4.4.70-71.patch new file mode 100644 index 000000000..34a572eb6 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.70-71.patch @@ -0,0 +1,2203 @@ +diff --git a/Makefile b/Makefile +index a5ecb29c6ed3..ad91a79aed51 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 70 ++SUBLEVEL = 71 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h +index 91b963a887b7..29c3b400f949 100644 +--- a/arch/sparc/include/asm/pgtable_32.h ++++ b/arch/sparc/include/asm/pgtable_32.h +@@ -91,9 +91,9 @@ extern unsigned long pfn_base; + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. 
+ */ +-extern unsigned long empty_zero_page; ++extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + +-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) ++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + + /* + * In general all page table modifications should use the V8 atomic +diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h +index 29d64b1758ed..be0cc1beed41 100644 +--- a/arch/sparc/include/asm/setup.h ++++ b/arch/sparc/include/asm/setup.h +@@ -16,7 +16,7 @@ extern char reboot_command[]; + */ + extern unsigned char boot_cpu_id; + +-extern unsigned long empty_zero_page; ++extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + + extern int serial_console; + static inline int con_is_present(void) +diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c +index eb8287155279..3b7092d9ea8f 100644 +--- a/arch/sparc/mm/init_32.c ++++ b/arch/sparc/mm/init_32.c +@@ -301,7 +301,7 @@ void __init mem_init(void) + + + /* Saves us work later. */ +- memset((void *)&empty_zero_page, 0, PAGE_SIZE); ++ memset((void *)empty_zero_page, 0, PAGE_SIZE); + + i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); + i += 1; +diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c +index fc061f7c2bd1..a7de8ae185a5 100644 +--- a/drivers/char/pcmcia/cm4040_cs.c ++++ b/drivers/char/pcmcia/cm4040_cs.c +@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, + + rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); + if (rc <= 0) { +- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); ++ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; +@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, + for (i = 0; i < bytes_to_write; i++) { + rc = wait_for_bulk_out_ready(dev); + if (rc <= 0) { +- DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n", ++ DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n", + rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) +@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, + rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); + + if (rc <= 0) { +- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); ++ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; +diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c +index ce0645d0c1e5..61e3a097a478 100644 +--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c ++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c +@@ -783,20 +783,23 @@ void psb_intel_lvds_init(struct drm_device *dev, + if (scan->type & DRM_MODE_TYPE_PREFERRED) { + mode_dev->panel_fixed_mode = + drm_mode_duplicate(dev, scan); ++ DRM_DEBUG_KMS("Using mode from DDC\n"); + goto out; /* FIXME: check for quirks */ + } + } + + /* Failed to get EDID, what about VBT? do we need this? 
*/ +- if (mode_dev->vbt_mode) ++ if (dev_priv->lfp_lvds_vbt_mode) { + mode_dev->panel_fixed_mode = +- drm_mode_duplicate(dev, mode_dev->vbt_mode); ++ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + +- if (!mode_dev->panel_fixed_mode) +- if (dev_priv->lfp_lvds_vbt_mode) +- mode_dev->panel_fixed_mode = +- drm_mode_duplicate(dev, +- dev_priv->lfp_lvds_vbt_mode); ++ if (mode_dev->panel_fixed_mode) { ++ mode_dev->panel_fixed_mode->type |= ++ DRM_MODE_TYPE_PREFERRED; ++ DRM_DEBUG_KMS("Using mode from VBT\n"); ++ goto out; ++ } ++ } + + /* + * If we didn't get EDID, try checking if the panel is already turned +@@ -813,6 +816,7 @@ void psb_intel_lvds_init(struct drm_device *dev, + if (mode_dev->panel_fixed_mode) { + mode_dev->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; ++ DRM_DEBUG_KMS("Using pre-programmed mode\n"); + goto out; /* FIXME: check for quirks */ + } + } +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c +index 4a09947be244..3c32f095a873 100644 +--- a/drivers/gpu/drm/radeon/ci_dpm.c ++++ b/drivers/gpu/drm/radeon/ci_dpm.c +@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev) + u32 vblank_time = r600_dpm_get_vblank_time(rdev); + u32 switch_limit = pi->mem_gddr5 ? 450 : 300; + ++ /* disable mclk switching if the refresh is >120Hz, even if the ++ * blanking period would allow it ++ */ ++ if (r600_dpm_get_vrefresh(rdev) > 120) ++ return true; ++ + if (vblank_time < switch_limit) + return true; + else +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index f81fb2641097..134874cab4c7 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -7762,7 +7762,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +@@ -7792,7 +7792,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c +index 32491355a1d4..ba9e6ed4ae54 100644 +--- a/drivers/gpu/drm/radeon/evergreen.c ++++ b/drivers/gpu/drm/radeon/evergreen.c +@@ -4924,7 +4924,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +@@ -4955,7 +4955,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c +index cc2fdf0be37a..0e20c08f8977 100644 +--- a/drivers/gpu/drm/radeon/r600.c ++++ b/drivers/gpu/drm/radeon/r600.c +@@ -3945,7 +3945,7 @@ static void r600_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if 
(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c +index f878d6962da5..5cf3a2cbc07e 100644 +--- a/drivers/gpu/drm/radeon/si.c ++++ b/drivers/gpu/drm/radeon/si.c +@@ -6335,7 +6335,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +@@ -6366,7 +6366,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index 35e3fd9fadf6..b62c50d1b1e4 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -1440,37 +1440,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) + { + unsigned char *data = wacom->data; + +- if (wacom->pen_input) ++ if (wacom->pen_input) { + dev_dbg(wacom->pen_input->dev.parent, + "%s: received report #%d\n", __func__, data[0]); +- else if (wacom->touch_input) ++ ++ if (len == WACOM_PKGLEN_PENABLED || ++ data[0] == WACOM_REPORT_PENABLED) ++ return wacom_tpc_pen(wacom); ++ } ++ else if (wacom->touch_input) { + dev_dbg(wacom->touch_input->dev.parent, + "%s: received report #%d\n", __func__, data[0]); + +- switch (len) { +- case WACOM_PKGLEN_TPC1FG: +- return wacom_tpc_single_touch(wacom, len); ++ switch (len) { ++ case WACOM_PKGLEN_TPC1FG: ++ return wacom_tpc_single_touch(wacom, len); + +- case WACOM_PKGLEN_TPC2FG: +- return wacom_tpc_mt_touch(wacom); ++ case WACOM_PKGLEN_TPC2FG: ++ return wacom_tpc_mt_touch(wacom); + +- case WACOM_PKGLEN_PENABLED: +- return wacom_tpc_pen(wacom); ++ default: ++ switch (data[0]) { ++ case WACOM_REPORT_TPC1FG: ++ case WACOM_REPORT_TPCHID: ++ case WACOM_REPORT_TPCST: ++ case WACOM_REPORT_TPC1FGE: ++ return wacom_tpc_single_touch(wacom, len); + +- default: +- switch (data[0]) { +- case WACOM_REPORT_TPC1FG: +- case WACOM_REPORT_TPCHID: +- case WACOM_REPORT_TPCST: +- case WACOM_REPORT_TPC1FGE: +- return wacom_tpc_single_touch(wacom, len); +- +- case WACOM_REPORT_TPCMT: +- case WACOM_REPORT_TPCMT2: +- return wacom_mt_touch(wacom); ++ case WACOM_REPORT_TPCMT: ++ case WACOM_REPORT_TPCMT2: ++ return wacom_mt_touch(wacom); + +- case WACOM_REPORT_PENABLED: +- return wacom_tpc_pen(wacom); ++ } + } + } + +diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c +index 0ed77eeff31e..a2e3dd715380 100644 +--- a/drivers/i2c/busses/i2c-tiny-usb.c ++++ b/drivers/i2c/busses/i2c-tiny-usb.c +@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd, + int value, int index, void *data, int len) + { + struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; ++ void *dmadata = kmalloc(len, GFP_KERNEL); ++ int ret; ++ ++ if (!dmadata) ++ return -ENOMEM; + + /* do control transfer */ +- return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), ++ ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), + cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE 
| +- USB_DIR_IN, value, index, data, len, 2000); ++ USB_DIR_IN, value, index, dmadata, len, 2000); ++ ++ memcpy(data, dmadata, len); ++ kfree(dmadata); ++ return ret; + } + + static int usb_write(struct i2c_adapter *adapter, int cmd, + int value, int index, void *data, int len) + { + struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; ++ void *dmadata = kmemdup(data, len, GFP_KERNEL); ++ int ret; ++ ++ if (!dmadata) ++ return -ENOMEM; + + /* do control transfer */ +- return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), ++ ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), + cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, +- value, index, data, len, 2000); ++ value, index, dmadata, len, 2000); ++ ++ kfree(dmadata); ++ return ret; + } + + static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev) +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c +index 3b423b0ad8e7..f280744578e4 100644 +--- a/drivers/mmc/host/sdhci-iproc.c ++++ b/drivers/mmc/host/sdhci-iproc.c +@@ -156,7 +156,8 @@ static const struct sdhci_ops sdhci_iproc_ops = { + }; + + static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { +- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, ++ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | ++ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, + .ops = &sdhci_iproc_ops, + }; +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c +index 8a1d9fffd7d6..26255862d1cf 100644 +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -5260,9 +5260,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb, + struct be_adapter *adapter = netdev_priv(dev); + u8 l4_hdr = 0; + +- /* The code below restricts offload features for some tunneled packets. ++ /* The code below restricts offload features for some tunneled and ++ * Q-in-Q packets. + * Offload features for normal (non tunnel) packets are unchanged. 
+ */ ++ features = vlan_features_check(skb, features); + if (!skb->encapsulation || + !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) + return features; +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index c6f5d9a6bec6..582d8f0c6266 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -730,6 +730,8 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ + {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ ++ {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ ++ {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 0e2a19e58923..7f7c87762bc6 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1415,6 +1415,7 @@ static const struct net_device_ops virtnet_netdev = { + #ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = virtnet_busy_poll, + #endif ++ .ndo_features_check = passthru_features_check, + }; + + static void virtnet_config_changed_work(struct work_struct *work) +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h +index 1766a20ebcb1..741f3ee81cfe 100644 +--- a/drivers/s390/net/qeth_core.h ++++ b/drivers/s390/net/qeth_core.h +@@ -717,6 +717,7 @@ enum qeth_discipline_id { + }; + + struct qeth_discipline { ++ const struct device_type *devtype; + void (*start_poll)(struct ccw_device *, int, unsigned long); + qdio_handler_t *input_handler; + qdio_handler_t *output_handler; +@@ -881,6 +882,9 @@ extern struct qeth_discipline qeth_l2_discipline; + extern struct qeth_discipline qeth_l3_discipline; + extern const struct attribute_group *qeth_generic_attr_groups[]; + extern const struct attribute_group *qeth_osn_attr_groups[]; ++extern const struct attribute_group qeth_device_attr_group; ++extern const struct attribute_group qeth_device_blkt_group; ++extern const struct device_type qeth_generic_devtype; + extern struct workqueue_struct *qeth_wq; + + int qeth_card_hw_is_reachable(struct qeth_card *); +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c +index 31ac53fa5cee..d10bf3da8e5f 100644 +--- a/drivers/s390/net/qeth_core_main.c ++++ b/drivers/s390/net/qeth_core_main.c +@@ -5449,10 +5449,12 @@ void qeth_core_free_discipline(struct qeth_card *card) + card->discipline = NULL; + } + +-static const struct device_type qeth_generic_devtype = { ++const struct device_type qeth_generic_devtype = { + .name = "qeth_generic", + .groups = qeth_generic_attr_groups, + }; ++EXPORT_SYMBOL_GPL(qeth_generic_devtype); ++ + static const struct device_type qeth_osn_devtype = { + .name = "qeth_osn", + .groups = qeth_osn_attr_groups, +@@ -5578,23 +5580,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) + goto err_card; + } + +- if (card->info.type == QETH_CARD_TYPE_OSN) +- gdev->dev.type = &qeth_osn_devtype; +- else +- gdev->dev.type = &qeth_generic_devtype; +- + switch (card->info.type) { + case QETH_CARD_TYPE_OSN: + case QETH_CARD_TYPE_OSM: + rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); + if (rc) + goto err_card; ++ ++ gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN) ++ ? 
card->discipline->devtype ++ : &qeth_osn_devtype; + rc = card->discipline->setup(card->gdev); + if (rc) + goto err_disc; +- case QETH_CARD_TYPE_OSD: +- case QETH_CARD_TYPE_OSX: ++ break; + default: ++ gdev->dev.type = &qeth_generic_devtype; + break; + } + +@@ -5650,8 +5651,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev) + if (rc) + goto err; + rc = card->discipline->setup(card->gdev); +- if (rc) ++ if (rc) { ++ qeth_core_free_discipline(card); + goto err; ++ } + } + rc = card->discipline->set_online(gdev); + err: +diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c +index e6e5b9671bf2..fa844b0ff847 100644 +--- a/drivers/s390/net/qeth_core_sys.c ++++ b/drivers/s390/net/qeth_core_sys.c +@@ -409,12 +409,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, + + if (card->options.layer2 == newdis) + goto out; +- else { +- card->info.mac_bits = 0; +- if (card->discipline) { +- card->discipline->remove(card->gdev); +- qeth_core_free_discipline(card); +- } ++ if (card->info.type == QETH_CARD_TYPE_OSM) { ++ /* fixed layer, can't switch */ ++ rc = -EOPNOTSUPP; ++ goto out; ++ } ++ ++ card->info.mac_bits = 0; ++ if (card->discipline) { ++ card->discipline->remove(card->gdev); ++ qeth_core_free_discipline(card); + } + + rc = qeth_core_load_discipline(card, newdis); +@@ -422,6 +426,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, + goto out; + + rc = card->discipline->setup(card->gdev); ++ if (rc) ++ qeth_core_free_discipline(card); + out: + mutex_unlock(&card->discipline_mutex); + return rc ? rc : count; +@@ -699,10 +705,11 @@ static struct attribute *qeth_blkt_device_attrs[] = { + &dev_attr_inter_jumbo.attr, + NULL, + }; +-static struct attribute_group qeth_device_blkt_group = { ++const struct attribute_group qeth_device_blkt_group = { + .name = "blkt", + .attrs = qeth_blkt_device_attrs, + }; ++EXPORT_SYMBOL_GPL(qeth_device_blkt_group); + + static struct attribute *qeth_device_attrs[] = { + &dev_attr_state.attr, +@@ -722,9 +729,10 @@ static struct attribute *qeth_device_attrs[] = { + &dev_attr_switch_attrs.attr, + NULL, + }; +-static struct attribute_group qeth_device_attr_group = { ++const struct attribute_group qeth_device_attr_group = { + .attrs = qeth_device_attrs, + }; ++EXPORT_SYMBOL_GPL(qeth_device_attr_group); + + const struct attribute_group *qeth_generic_attr_groups[] = { + &qeth_device_attr_group, +diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h +index 0767556404bd..eb87bf97d38a 100644 +--- a/drivers/s390/net/qeth_l2.h ++++ b/drivers/s390/net/qeth_l2.h +@@ -8,6 +8,8 @@ + + #include "qeth_core.h" + ++extern const struct attribute_group *qeth_l2_attr_groups[]; ++ + int qeth_l2_create_device_attributes(struct device *); + void qeth_l2_remove_device_attributes(struct device *); + void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c +index df036b872b05..bf1e0e39334d 100644 +--- a/drivers/s390/net/qeth_l2_main.c ++++ b/drivers/s390/net/qeth_l2_main.c +@@ -1027,11 +1027,21 @@ static int qeth_l2_stop(struct net_device *dev) + return 0; + } + ++static const struct device_type qeth_l2_devtype = { ++ .name = "qeth_layer2", ++ .groups = qeth_l2_attr_groups, ++}; ++ + static int qeth_l2_probe_device(struct ccwgroup_device *gdev) + { + struct qeth_card *card = dev_get_drvdata(&gdev->dev); ++ int rc; + +- qeth_l2_create_device_attributes(&gdev->dev); ++ if (gdev->dev.type == &qeth_generic_devtype) { ++ rc = 
qeth_l2_create_device_attributes(&gdev->dev); ++ if (rc) ++ return rc; ++ } + INIT_LIST_HEAD(&card->vid_list); + hash_init(card->mac_htable); + card->options.layer2 = 1; +@@ -1043,7 +1053,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) + { + struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + +- qeth_l2_remove_device_attributes(&cgdev->dev); ++ if (cgdev->dev.type == &qeth_generic_devtype) ++ qeth_l2_remove_device_attributes(&cgdev->dev); + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + +@@ -1101,7 +1112,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) + case QETH_CARD_TYPE_OSN: + card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, + ether_setup); +- card->dev->flags |= IFF_NOARP; + break; + default: + card->dev = alloc_etherdev(0); +@@ -1114,9 +1124,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) + card->dev->watchdog_timeo = QETH_TX_TIMEOUT; + card->dev->mtu = card->info.initial_mtu; + card->dev->netdev_ops = &qeth_l2_netdev_ops; +- card->dev->ethtool_ops = +- (card->info.type != QETH_CARD_TYPE_OSN) ? +- &qeth_l2_ethtool_ops : &qeth_l2_osn_ops; ++ if (card->info.type == QETH_CARD_TYPE_OSN) { ++ card->dev->ethtool_ops = &qeth_l2_osn_ops; ++ card->dev->flags |= IFF_NOARP; ++ } else { ++ card->dev->ethtool_ops = &qeth_l2_ethtool_ops; ++ } + card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { + card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; +@@ -1429,6 +1442,7 @@ static int qeth_l2_control_event(struct qeth_card *card, + } + + struct qeth_discipline qeth_l2_discipline = { ++ .devtype = &qeth_l2_devtype, + .start_poll = qeth_qdio_start_poll, + .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, + .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, +diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c +index 692db49e3d2a..a48ed9e7e168 100644 +--- a/drivers/s390/net/qeth_l2_sys.c ++++ b/drivers/s390/net/qeth_l2_sys.c +@@ -272,3 +272,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) + } else + qeth_bridgeport_an_set(card, 0); + } ++ ++const struct attribute_group *qeth_l2_attr_groups[] = { ++ &qeth_device_attr_group, ++ &qeth_device_blkt_group, ++ /* l2 specific, see l2_{create,remove}_device_attributes(): */ ++ &qeth_l2_bridgeport_attr_group, ++ NULL, ++}; +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c +index cc4d3c3d8cc5..285fe0b2c753 100644 +--- a/drivers/s390/net/qeth_l3_main.c ++++ b/drivers/s390/net/qeth_l3_main.c +@@ -3227,8 +3227,11 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) + static int qeth_l3_probe_device(struct ccwgroup_device *gdev) + { + struct qeth_card *card = dev_get_drvdata(&gdev->dev); ++ int rc; + +- qeth_l3_create_device_attributes(&gdev->dev); ++ rc = qeth_l3_create_device_attributes(&gdev->dev); ++ if (rc) ++ return rc; + card->options.layer2 = 0; + card->info.hwtrap = 0; + return 0; +@@ -3519,6 +3522,7 @@ static int qeth_l3_control_event(struct qeth_card *card, + } + + struct qeth_discipline qeth_l3_discipline = { ++ .devtype = &qeth_generic_devtype, + .start_poll = qeth_qdio_start_poll, + .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, + .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index 8a5fbdb45cfd..e333029e4b6c 100644 +--- 
a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -4452,6 +4452,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 response_code = 0; + unsigned long flags; ++ unsigned int sector_sz; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); +@@ -4510,6 +4511,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + } + + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); ++ ++ /* In case of bogus fw or device, we could end up having ++ * unaligned partial completion. We can force alignment here, ++ * then scsi-ml does not need to handle this misbehavior. ++ */ ++ sector_sz = scmd->device->sector_size; ++ if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz && ++ xfer_cnt % sector_sz)) { ++ sdev_printk(KERN_INFO, scmd->device, ++ "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n", ++ xfer_cnt, sector_sz); ++ xfer_cnt = round_down(xfer_cnt, sector_sz); ++ } ++ + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c +index 119c2422aac7..75884aecf920 100644 +--- a/fs/xfs/libxfs/xfs_bmap.c ++++ b/fs/xfs/libxfs/xfs_bmap.c +@@ -2179,8 +2179,10 @@ xfs_bmap_add_extent_delay_real( + } + temp = xfs_bmap_worst_indlen(bma->ip, temp); + temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); +- diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - +- (bma->cur ? bma->cur->bc_private.b.allocated : 0)); ++ diff = (int)(temp + temp2 - ++ (startblockval(PREV.br_startblock) - ++ (bma->cur ? ++ bma->cur->bc_private.b.allocated : 0))); + if (diff > 0) { + error = xfs_mod_fdblocks(bma->ip->i_mount, + -((int64_t)diff), false); +@@ -2232,7 +2234,6 @@ xfs_bmap_add_extent_delay_real( + temp = da_new; + if (bma->cur) + temp += bma->cur->bc_private.b.allocated; +- ASSERT(temp <= da_old); + if (temp < da_old) + xfs_mod_fdblocks(bma->ip->i_mount, + (int64_t)(da_old - temp), false); +diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c +index af1bbee5586e..28bc5e78b110 100644 +--- a/fs/xfs/libxfs/xfs_btree.c ++++ b/fs/xfs/libxfs/xfs_btree.c +@@ -4064,7 +4064,7 @@ xfs_btree_change_owner( + xfs_btree_readahead_ptr(cur, ptr, 1); + + /* save for the next iteration of the loop */ +- lptr = *ptr; ++ xfs_btree_copy_ptrs(cur, &lptr, ptr, 1); + } + + /* for each buffer in the level */ +diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h +index dd4824589470..234331227c0c 100644 +--- a/fs/xfs/xfs_attr.h ++++ b/fs/xfs/xfs_attr.h +@@ -112,6 +112,7 @@ typedef struct attrlist_cursor_kern { + *========================================================================*/ + + ++/* Return 0 on success, or -errno; other state communicated via *context */ + typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int, + unsigned char *, int, int, unsigned char *); + +diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c +index 4fa14820e2e2..c8be331a3196 100644 +--- a/fs/xfs/xfs_attr_list.c ++++ b/fs/xfs/xfs_attr_list.c +@@ -108,16 +108,14 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) + (int)sfe->namelen, + (int)sfe->valuelen, + &sfe->nameval[sfe->namelen]); +- ++ if (error) ++ return error; + /* + * Either search callback finished early or + * didn't fit it all in the buffer after all. 
+ */ + if (context->seen_enough) + break; +- +- if (error) +- return error; + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + } + trace_xfs_attr_list_sf_all(context); +@@ -581,7 +579,7 @@ xfs_attr_put_listent( + trace_xfs_attr_list_full(context); + alist->al_more = 1; + context->seen_enough = 1; +- return 1; ++ return 0; + } + + aep = (attrlist_ent_t *)&context->alist[context->firstu]; +diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c +index 832764ee035a..863e1bff403b 100644 +--- a/fs/xfs/xfs_bmap_util.c ++++ b/fs/xfs/xfs_bmap_util.c +@@ -682,7 +682,7 @@ xfs_getbmap( + * extents. + */ + if (map[i].br_startblock == DELAYSTARTBLOCK && +- map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) ++ map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) + ASSERT((iflags & BMV_IF_DELALLOC) != 0); + + if (map[i].br_startblock == HOLESTARTBLOCK && +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c +index 8146b0cf20ce..dcb70969ff1c 100644 +--- a/fs/xfs/xfs_buf.c ++++ b/fs/xfs/xfs_buf.c +@@ -979,6 +979,8 @@ void + xfs_buf_unlock( + struct xfs_buf *bp) + { ++ ASSERT(xfs_buf_islocked(bp)); ++ + XB_CLEAR_OWNER(bp); + up(&bp->b_sema); + +@@ -1713,6 +1715,28 @@ error: + } + + /* ++ * Cancel a delayed write list. ++ * ++ * Remove each buffer from the list, clear the delwri queue flag and drop the ++ * associated buffer reference. ++ */ ++void ++xfs_buf_delwri_cancel( ++ struct list_head *list) ++{ ++ struct xfs_buf *bp; ++ ++ while (!list_empty(list)) { ++ bp = list_first_entry(list, struct xfs_buf, b_list); ++ ++ xfs_buf_lock(bp); ++ bp->b_flags &= ~_XBF_DELWRI_Q; ++ list_del_init(&bp->b_list); ++ xfs_buf_relse(bp); ++ } ++} ++ ++/* + * Add a buffer to the delayed write list. + * + * This queues a buffer for writeout if it hasn't already been. Note that +diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h +index c75721acd867..149bbd451731 100644 +--- a/fs/xfs/xfs_buf.h ++++ b/fs/xfs/xfs_buf.h +@@ -304,6 +304,7 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, + extern void *xfs_buf_offset(struct xfs_buf *, size_t); + + /* Delayed Write Buffer Routines */ ++extern void xfs_buf_delwri_cancel(struct list_head *); + extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *); + extern int xfs_buf_delwri_submit(struct list_head *); + extern int xfs_buf_delwri_submit_nowait(struct list_head *); +diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c +index 642d55d10075..2fbf643fa10a 100644 +--- a/fs/xfs/xfs_dir2_readdir.c ++++ b/fs/xfs/xfs_dir2_readdir.c +@@ -406,6 +406,7 @@ xfs_dir2_leaf_readbuf( + + /* + * Do we need more readahead? ++ * Each loop tries to process 1 full dir blk; last may be partial. + */ + blk_start_plug(&plug); + for (mip->ra_index = mip->ra_offset = i = 0; +@@ -416,7 +417,8 @@ xfs_dir2_leaf_readbuf( + * Read-ahead a contiguous directory block. + */ + if (i > mip->ra_current && +- map[mip->ra_index].br_blockcount >= geo->fsbcount) { ++ (map[mip->ra_index].br_blockcount - mip->ra_offset) >= ++ geo->fsbcount) { + xfs_dir3_data_readahead(dp, + map[mip->ra_index].br_startoff + mip->ra_offset, + XFS_FSB_TO_DADDR(dp->i_mount, +@@ -437,14 +439,19 @@ xfs_dir2_leaf_readbuf( + } + + /* +- * Advance offset through the mapping table. ++ * Advance offset through the mapping table, processing a full ++ * dir block even if it is fragmented into several extents. ++ * But stop if we have consumed all valid mappings, even if ++ * it's not yet a full directory block. 
+ */ +- for (j = 0; j < geo->fsbcount; j += length ) { ++ for (j = 0; ++ j < geo->fsbcount && mip->ra_index < mip->map_valid; ++ j += length ) { + /* + * The rest of this extent but not more than a dir + * block. + */ +- length = min_t(int, geo->fsbcount, ++ length = min_t(int, geo->fsbcount - j, + map[mip->ra_index].br_blockcount - + mip->ra_offset); + mip->ra_offset += length; +diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c +index f5392ab2def1..ceea444dafb4 100644 +--- a/fs/xfs/xfs_file.c ++++ b/fs/xfs/xfs_file.c +@@ -1208,7 +1208,7 @@ xfs_find_get_desired_pgoff( + unsigned nr_pages; + unsigned int i; + +- want = min_t(pgoff_t, end - index, PAGEVEC_SIZE); ++ want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1; + nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, + want); + /* +@@ -1235,17 +1235,6 @@ xfs_find_get_desired_pgoff( + break; + } + +- /* +- * At lease we found one page. If this is the first time we +- * step into the loop, and if the first page index offset is +- * greater than the given search offset, a hole was found. +- */ +- if (type == HOLE_OFF && lastoff == startoff && +- lastoff < page_offset(pvec.pages[0])) { +- found = true; +- break; +- } +- + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + loff_t b_offset; +@@ -1257,18 +1246,18 @@ xfs_find_get_desired_pgoff( + * file mapping. However, page->index will not change + * because we have a reference on the page. + * +- * Searching done if the page index is out of range. +- * If the current offset is not reaches the end of +- * the specified search range, there should be a hole +- * between them. ++ * If current page offset is beyond where we've ended, ++ * we've found a hole. + */ +- if (page->index > end) { +- if (type == HOLE_OFF && lastoff < endoff) { +- *offset = lastoff; +- found = true; +- } ++ if (type == HOLE_OFF && lastoff < endoff && ++ lastoff < page_offset(pvec.pages[i])) { ++ found = true; ++ *offset = lastoff; + goto out; + } ++ /* Searching done if the page index is out of range. */ ++ if (page->index > end) ++ goto out; + + lock_page(page); + /* +diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c +index d7a490f24ead..adbc1f59969a 100644 +--- a/fs/xfs/xfs_icache.c ++++ b/fs/xfs/xfs_icache.c +@@ -210,14 +210,17 @@ xfs_iget_cache_hit( + + error = inode_init_always(mp->m_super, inode); + if (error) { ++ bool wake; + /* + * Re-initializing the inode failed, and we are in deep + * trouble. Try to re-add it to the reclaim list. + */ + rcu_read_lock(); + spin_lock(&ip->i_flags_lock); +- ++ wake = !!__xfs_iflags_test(ip, XFS_INEW); + ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); ++ if (wake) ++ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); + ASSERT(ip->i_flags & XFS_IRECLAIMABLE); + trace_xfs_iget_reclaim_fail(ip); + goto out_error; +@@ -363,6 +366,22 @@ out_destroy: + return error; + } + ++static void ++xfs_inew_wait( ++ struct xfs_inode *ip) ++{ ++ wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); ++ DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); ++ ++ do { ++ prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); ++ if (!xfs_iflags_test(ip, XFS_INEW)) ++ break; ++ schedule(); ++ } while (true); ++ finish_wait(wq, &wait.wait); ++} ++ + /* + * Look up an inode by number in the given file system. + * The inode is looked up in the cache held in each AG. 
+@@ -467,9 +486,11 @@ out_error_or_again: + + STATIC int + xfs_inode_ag_walk_grab( +- struct xfs_inode *ip) ++ struct xfs_inode *ip, ++ int flags) + { + struct inode *inode = VFS_I(ip); ++ bool newinos = !!(flags & XFS_AGITER_INEW_WAIT); + + ASSERT(rcu_read_lock_held()); + +@@ -487,7 +508,8 @@ xfs_inode_ag_walk_grab( + goto out_unlock_noent; + + /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ +- if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) ++ if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) || ++ __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM)) + goto out_unlock_noent; + spin_unlock(&ip->i_flags_lock); + +@@ -515,7 +537,8 @@ xfs_inode_ag_walk( + void *args), + int flags, + void *args, +- int tag) ++ int tag, ++ int iter_flags) + { + uint32_t first_index; + int last_error = 0; +@@ -557,7 +580,7 @@ restart: + for (i = 0; i < nr_found; i++) { + struct xfs_inode *ip = batch[i]; + +- if (done || xfs_inode_ag_walk_grab(ip)) ++ if (done || xfs_inode_ag_walk_grab(ip, iter_flags)) + batch[i] = NULL; + + /* +@@ -585,6 +608,9 @@ restart: + for (i = 0; i < nr_found; i++) { + if (!batch[i]) + continue; ++ if ((iter_flags & XFS_AGITER_INEW_WAIT) && ++ xfs_iflags_test(batch[i], XFS_INEW)) ++ xfs_inew_wait(batch[i]); + error = execute(batch[i], flags, args); + IRELE(batch[i]); + if (error == -EAGAIN) { +@@ -637,12 +663,13 @@ xfs_eofblocks_worker( + } + + int +-xfs_inode_ag_iterator( ++xfs_inode_ag_iterator_flags( + struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, int flags, + void *args), + int flags, +- void *args) ++ void *args, ++ int iter_flags) + { + struct xfs_perag *pag; + int error = 0; +@@ -652,7 +679,8 @@ xfs_inode_ag_iterator( + ag = 0; + while ((pag = xfs_perag_get(mp, ag))) { + ag = pag->pag_agno + 1; +- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1); ++ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1, ++ iter_flags); + xfs_perag_put(pag); + if (error) { + last_error = error; +@@ -664,6 +692,17 @@ xfs_inode_ag_iterator( + } + + int ++xfs_inode_ag_iterator( ++ struct xfs_mount *mp, ++ int (*execute)(struct xfs_inode *ip, int flags, ++ void *args), ++ int flags, ++ void *args) ++{ ++ return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0); ++} ++ ++int + xfs_inode_ag_iterator_tag( + struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, int flags, +@@ -680,7 +719,8 @@ xfs_inode_ag_iterator_tag( + ag = 0; + while ((pag = xfs_perag_get_tag(mp, ag, tag))) { + ag = pag->pag_agno + 1; +- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag); ++ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag, ++ 0); + xfs_perag_put(pag); + if (error) { + last_error = error; +diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h +index 62f1f91c32cb..147a79212e63 100644 +--- a/fs/xfs/xfs_icache.h ++++ b/fs/xfs/xfs_icache.h +@@ -48,6 +48,11 @@ struct xfs_eofblocks { + #define XFS_IGET_UNTRUSTED 0x2 + #define XFS_IGET_DONTCACHE 0x4 + ++/* ++ * flags for AG inode iterator ++ */ ++#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */ ++ + int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino, + uint flags, uint lock_flags, xfs_inode_t **ipp); + +@@ -72,6 +77,9 @@ void xfs_eofblocks_worker(struct work_struct *); + int xfs_inode_ag_iterator(struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, int flags, void *args), + int flags, void *args); ++int xfs_inode_ag_iterator_flags(struct xfs_mount *mp, ++ int (*execute)(struct xfs_inode *ip, int flags, void *args), ++ int 
flags, void *args, int iter_flags); + int xfs_inode_ag_iterator_tag(struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, int flags, void *args), + int flags, void *args, int tag); +diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h +index ca9e11989cbd..ae1a49845744 100644 +--- a/fs/xfs/xfs_inode.h ++++ b/fs/xfs/xfs_inode.h +@@ -208,7 +208,8 @@ xfs_get_initial_prid(struct xfs_inode *dp) + #define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */ + #define XFS_ISTALE (1 << 1) /* inode has been staled */ + #define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */ +-#define XFS_INEW (1 << 3) /* inode has just been allocated */ ++#define __XFS_INEW_BIT 3 /* inode has just been allocated */ ++#define XFS_INEW (1 << __XFS_INEW_BIT) + #define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */ + #define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */ + #define __XFS_IFLOCK_BIT 7 /* inode is being flushed right now */ +@@ -453,6 +454,7 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip) + xfs_iflags_clear(ip, XFS_INEW); + barrier(); + unlock_new_inode(VFS_I(ip)); ++ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); + } + + static inline void xfs_setup_existing_inode(struct xfs_inode *ip) +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c +index d42738deec6d..e4a4f82ea13f 100644 +--- a/fs/xfs/xfs_ioctl.c ++++ b/fs/xfs/xfs_ioctl.c +@@ -403,6 +403,7 @@ xfs_attrlist_by_handle( + { + int error = -ENOMEM; + attrlist_cursor_kern_t *cursor; ++ struct xfs_fsop_attrlist_handlereq __user *p = arg; + xfs_fsop_attrlist_handlereq_t al_hreq; + struct dentry *dentry; + char *kbuf; +@@ -435,6 +436,11 @@ xfs_attrlist_by_handle( + if (error) + goto out_kfree; + ++ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) { ++ error = -EFAULT; ++ goto out_kfree; ++ } ++ + if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen)) + error = -EFAULT; + +@@ -1379,10 +1385,11 @@ xfs_ioc_getbmap( + unsigned int cmd, + void __user *arg) + { +- struct getbmapx bmx; ++ struct getbmapx bmx = { 0 }; + int error; + +- if (copy_from_user(&bmx, arg, sizeof(struct getbmapx))) ++ /* struct getbmap is a strict subset of struct getbmapx. 
*/ ++ if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags))) + return -EFAULT; + + if (bmx.bmv_count < 2) +diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c +index 532ab79d38fe..572b64a135b3 100644 +--- a/fs/xfs/xfs_qm.c ++++ b/fs/xfs/xfs_qm.c +@@ -1355,12 +1355,7 @@ xfs_qm_quotacheck( + mp->m_qflags |= flags; + + error_return: +- while (!list_empty(&buffer_list)) { +- struct xfs_buf *bp = +- list_first_entry(&buffer_list, struct xfs_buf, b_list); +- list_del_init(&bp->b_list); +- xfs_buf_relse(bp); +- } ++ xfs_buf_delwri_cancel(&buffer_list); + + if (error) { + xfs_warn(mp, +diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c +index 3640c6e896af..4d334440bd94 100644 +--- a/fs/xfs/xfs_qm_syscalls.c ++++ b/fs/xfs/xfs_qm_syscalls.c +@@ -764,5 +764,6 @@ xfs_qm_dqrele_all_inodes( + uint flags) + { + ASSERT(mp->m_quotainfo); +- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL); ++ xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL, ++ XFS_AGITER_INEW_WAIT); + } +diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c +index 839b35ca21c6..e6dae28dfa1a 100644 +--- a/fs/xfs/xfs_xattr.c ++++ b/fs/xfs/xfs_xattr.c +@@ -180,7 +180,7 @@ xfs_xattr_put_listent( + arraytop = context->count + prefix_len + namelen + 1; + if (arraytop > context->firstu) { + context->count = -1; /* insufficient space */ +- return 1; ++ return 0; + } + offset = (char *)context->alist + context->count; + strncpy(offset, xfs_xattr_prefix(flags), prefix_len); +@@ -222,12 +222,15 @@ list_one_attr(const char *name, const size_t len, void *data, + } + + ssize_t +-xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) ++xfs_vn_listxattr( ++ struct dentry *dentry, ++ char *data, ++ size_t size) + { + struct xfs_attr_list_context context; + struct attrlist_cursor_kern cursor = { 0 }; +- struct inode *inode = d_inode(dentry); +- int error; ++ struct inode *inode = d_inode(dentry); ++ int error; + + /* + * First read the regular on-disk attributes. +@@ -245,7 +248,9 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) + else + context.put_listent = xfs_xattr_put_listent_sizes; + +- xfs_attr_list_int(&context); ++ error = xfs_attr_list_int(&context); ++ if (error) ++ return error; + if (context.count < 0) + return -ERANGE; + +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h +index 67ce5bd3b56a..19db03dbbd00 100644 +--- a/include/linux/if_vlan.h ++++ b/include/linux/if_vlan.h +@@ -616,15 +616,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) + static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, + netdev_features_t features) + { +- if (skb_vlan_tagged_multi(skb)) +- features = netdev_intersect_features(features, +- NETIF_F_SG | +- NETIF_F_HIGHDMA | +- NETIF_F_FRAGLIST | +- NETIF_F_GEN_CSUM | +- NETIF_F_HW_VLAN_CTAG_TX | +- NETIF_F_HW_VLAN_STAG_TX); +- ++ if (skb_vlan_tagged_multi(skb)) { ++ /* In the case of multi-tagged packets, use a direct mask ++ * instead of using netdev_interesect_features(), to make ++ * sure that only devices supporting NETIF_F_HW_CSUM will ++ * have checksum offloading support. 
++ */ ++ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | ++ NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | ++ NETIF_F_HW_VLAN_STAG_TX; ++ } + return features; + } + +diff --git a/include/net/dst.h b/include/net/dst.h +index c7329dcd90cc..e4f450617919 100644 +--- a/include/net/dst.h ++++ b/include/net/dst.h +@@ -110,10 +110,16 @@ struct dst_entry { + }; + }; + ++struct dst_metrics { ++ u32 metrics[RTAX_MAX]; ++ atomic_t refcnt; ++}; ++extern const struct dst_metrics dst_default_metrics; ++ + u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); +-extern const u32 dst_default_metrics[]; + + #define DST_METRICS_READ_ONLY 0x1UL ++#define DST_METRICS_REFCOUNTED 0x2UL + #define DST_METRICS_FLAGS 0x3UL + #define __DST_METRICS_PTR(Y) \ + ((u32 *)((Y) & ~DST_METRICS_FLAGS)) +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h +index 3f98233388fb..bda1721e9622 100644 +--- a/include/net/ip_fib.h ++++ b/include/net/ip_fib.h +@@ -112,11 +112,11 @@ struct fib_info { + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_priority; +- u32 *fib_metrics; +-#define fib_mtu fib_metrics[RTAX_MTU-1] +-#define fib_window fib_metrics[RTAX_WINDOW-1] +-#define fib_rtt fib_metrics[RTAX_RTT-1] +-#define fib_advmss fib_metrics[RTAX_ADVMSS-1] ++ struct dst_metrics *fib_metrics; ++#define fib_mtu fib_metrics->metrics[RTAX_MTU-1] ++#define fib_window fib_metrics->metrics[RTAX_WINDOW-1] ++#define fib_rtt fib_metrics->metrics[RTAX_RTT-1] ++#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1] + int fib_nhs; + #ifdef CONFIG_IP_ROUTE_MULTIPATH + int fib_weight; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 750b7893ee3a..43aee7ab143e 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1619,12 +1619,8 @@ static int soft_offline_huge_page(struct page *page, int flags) + if (ret) { + pr_info("soft offline: %#lx: migration failed %d, type %lx\n", + pfn, ret, page->flags); +- /* +- * We know that soft_offline_huge_page() tries to migrate +- * only one hugepage pointed to by hpage, so we need not +- * run through the pagelist here. 
+- */ +- putback_active_hugepage(hpage); ++ if (!list_empty(&pagelist)) ++ putback_movable_pages(&pagelist); + if (ret > 0) + ret = -EIO; + } else { +diff --git a/mm/mlock.c b/mm/mlock.c +index d6006b146fea..9d2e773f3a95 100644 +--- a/mm/mlock.c ++++ b/mm/mlock.c +@@ -277,7 +277,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) + { + int i; + int nr = pagevec_count(pvec); +- int delta_munlocked; ++ int delta_munlocked = -nr; + struct pagevec pvec_putback; + int pgrescued = 0; + +@@ -297,6 +297,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) + continue; + else + __munlock_isolation_failed(page); ++ } else { ++ delta_munlocked++; + } + + /* +@@ -308,7 +310,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) + pagevec_add(&pvec_putback, pvec->pages[i]); + pvec->pages[i] = NULL; + } +- delta_munlocked = -nr + pagevec_count(&pvec_putback); + __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); + spin_unlock_irq(&zone->lru_lock); + +diff --git a/mm/slub.c b/mm/slub.c +index 65d5f92d51d2..4cf3a9c768b1 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -5261,6 +5261,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) + char mbuf[64]; + char *buf; + struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); ++ ssize_t len; + + if (!attr || !attr->store || !attr->show) + continue; +@@ -5285,8 +5286,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) + buf = buffer; + } + +- attr->show(root_cache, buf); +- attr->store(s, buf, strlen(buf)); ++ len = attr->show(root_cache, buf); ++ if (len > 0) ++ attr->store(s, buf, len); + } + + if (buffer) +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c +index 413d18e37083..ff8bb41d713f 100644 +--- a/net/bridge/br_netlink.c ++++ b/net/bridge/br_netlink.c +@@ -768,6 +768,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[]) + return -EPROTONOSUPPORT; + } + } ++ ++ if (data[IFLA_BR_VLAN_DEFAULT_PVID]) { ++ __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]); ++ ++ if (defpvid >= VLAN_VID_MASK) ++ return -EINVAL; ++ } + #endif + + return 0; +diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c +index 8a7ada8bb947..57be733a99bc 100644 +--- a/net/bridge/br_stp_if.c ++++ b/net/bridge/br_stp_if.c +@@ -166,6 +166,7 @@ static void br_stp_start(struct net_bridge *br) + br_debug(br, "using kernel STP\n"); + + /* To start timers on any ports left in blocking */ ++ mod_timer(&br->hello_timer, jiffies + br->hello_time); + br_port_state_selection(br); + } + +diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c +index 5f0f5af0ec35..7dbe6a5c31eb 100644 +--- a/net/bridge/br_stp_timer.c ++++ b/net/bridge/br_stp_timer.c +@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg) + if (br->dev->flags & IFF_UP) { + br_config_bpdu_generation(br); + +- if (br->stp_enabled != BR_USER_STP) ++ if (br->stp_enabled == BR_KERNEL_STP) + mod_timer(&br->hello_timer, + round_jiffies(jiffies + br->hello_time)); + } +diff --git a/net/core/dst.c b/net/core/dst.c +index a1656e3b8d72..d7ad628bf64e 100644 +--- a/net/core/dst.c ++++ b/net/core/dst.c +@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) + } + EXPORT_SYMBOL(dst_discard_out); + +-const u32 dst_default_metrics[RTAX_MAX + 1] = { ++const struct dst_metrics dst_default_metrics = { + /* This initializer is needed to force linker to place this variable + * into const section. Otherwise it might end into bss section. 
+ * We really want to avoid false sharing on this variable, and catch + * any writes on it. + */ +- [RTAX_MAX] = 0xdeadbeef, ++ .refcnt = ATOMIC_INIT(1), + }; + + void dst_init(struct dst_entry *dst, struct dst_ops *ops, +@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, + if (dev) + dev_hold(dev); + dst->ops = ops; +- dst_init_metrics(dst, dst_default_metrics, true); ++ dst_init_metrics(dst, dst_default_metrics.metrics, true); + dst->expires = 0UL; + dst->path = dst; + dst->from = NULL; +@@ -315,25 +315,30 @@ EXPORT_SYMBOL(dst_release); + + u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) + { +- u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); ++ struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC); + + if (p) { +- u32 *old_p = __DST_METRICS_PTR(old); ++ struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old); + unsigned long prev, new; + +- memcpy(p, old_p, sizeof(u32) * RTAX_MAX); ++ atomic_set(&p->refcnt, 1); ++ memcpy(p->metrics, old_p->metrics, sizeof(p->metrics)); + + new = (unsigned long) p; + prev = cmpxchg(&dst->_metrics, old, new); + + if (prev != old) { + kfree(p); +- p = __DST_METRICS_PTR(prev); ++ p = (struct dst_metrics *)__DST_METRICS_PTR(prev); + if (prev & DST_METRICS_READ_ONLY) + p = NULL; ++ } else if (prev & DST_METRICS_REFCOUNTED) { ++ if (atomic_dec_and_test(&old_p->refcnt)) ++ kfree(old_p); + } + } +- return p; ++ BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0); ++ return (u32 *)p; + } + EXPORT_SYMBOL(dst_cow_metrics_generic); + +@@ -342,7 +347,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) + { + unsigned long prev, new; + +- new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY; ++ new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY; + prev = cmpxchg(&dst->_metrics, old, new); + if (prev == old) + kfree(__DST_METRICS_PTR(old)); +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index fe38ef58997c..d43544ce7550 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1458,13 +1458,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) + cb->nlh->nlmsg_seq, 0, + NLM_F_MULTI, + ext_filter_mask); +- /* If we ran out of room on the first message, +- * we're in trouble +- */ +- WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); + +- if (err < 0) +- goto out; ++ if (err < 0) { ++ if (likely(skb->len)) ++ goto out; ++ ++ goto out_err; ++ } + + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); + cont: +@@ -1472,10 +1472,12 @@ cont: + } + } + out: ++ err = skb->len; ++out_err: + cb->args[1] = idx; + cb->args[0] = h; + +- return skb->len; ++ return err; + } + + int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len) +@@ -3127,8 +3129,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) + err = br_dev->netdev_ops->ndo_bridge_getlink( + skb, portid, seq, dev, + filter_mask, NLM_F_MULTI); +- if (err < 0 && err != -EOPNOTSUPP) +- break; ++ if (err < 0 && err != -EOPNOTSUPP) { ++ if (likely(skb->len)) ++ break; ++ ++ goto out_err; ++ } + } + idx++; + } +@@ -3139,16 +3145,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) + seq, dev, + filter_mask, + NLM_F_MULTI); +- if (err < 0 && err != -EOPNOTSUPP) +- break; ++ if (err < 0 && err != -EOPNOTSUPP) { ++ if (likely(skb->len)) ++ break; ++ ++ goto out_err; ++ } + } + idx++; + } + } ++ err = skb->len; ++out_err: + rcu_read_unlock(); + cb->args[0] = idx; + 
+- return skb->len; ++ return err; + } + + static inline size_t bridge_nlmsg_size(void) +diff --git a/net/core/sock.c b/net/core/sock.c +index 9c708a5fb751..bd2fad27891e 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1690,17 +1690,17 @@ EXPORT_SYMBOL(skb_set_owner_w); + + void skb_orphan_partial(struct sk_buff *skb) + { +- /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc, +- * so we do not completely orphan skb, but transfert all +- * accounted bytes but one, to avoid unexpected reorders. +- */ + if (skb->destructor == sock_wfree + #ifdef CONFIG_INET + || skb->destructor == tcp_wfree + #endif + ) { +- atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); +- skb->truesize = 1; ++ struct sock *sk = skb->sk; ++ ++ if (atomic_inc_not_zero(&sk->sk_refcnt)) { ++ atomic_sub(skb->truesize, &sk->sk_wmem_alloc); ++ skb->destructor = sock_efree; ++ } + } else { + skb_orphan(skb); + } +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 8113ad58fcb4..3470ad1843bb 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -422,6 +422,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, + newsk->sk_backlog_rcv = dccp_v4_do_rcv; + newnp->pktoptions = NULL; + newnp->opt = NULL; ++ newnp->ipv6_mc_list = NULL; ++ newnp->ipv6_ac_list = NULL; ++ newnp->ipv6_fl_list = NULL; + newnp->mcast_oif = inet6_iif(skb); + newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; + +@@ -486,6 +489,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, + /* Clone RX bits */ + newnp->rxopt.all = np->rxopt.all; + ++ newnp->ipv6_mc_list = NULL; ++ newnp->ipv6_ac_list = NULL; ++ newnp->ipv6_fl_list = NULL; + newnp->pktoptions = NULL; + newnp->opt = NULL; + newnp->mcast_oif = inet6_iif(skb); +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 1adba44f8fbc..66dcb529fd9c 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -757,7 +757,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) + unsigned int e = 0, s_e; + struct fib_table *tb; + struct hlist_head *head; +- int dumped = 0; ++ int dumped = 0, err; + + if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && + ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) +@@ -777,20 +777,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) + if (dumped) + memset(&cb->args[2], 0, sizeof(cb->args) - + 2 * sizeof(cb->args[0])); +- if (fib_table_dump(tb, skb, cb) < 0) +- goto out; ++ err = fib_table_dump(tb, skb, cb); ++ if (err < 0) { ++ if (likely(skb->len)) ++ goto out; ++ ++ goto out_err; ++ } + dumped = 1; + next: + e++; + } + } + out: ++ err = skb->len; ++out_err: + rcu_read_unlock(); + + cb->args[1] = e; + cb->args[0] = h; + +- return skb->len; ++ return err; + } + + /* Prepare and feed intra-kernel routing request. 
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 67d44aa9e09f..b2504712259f 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -204,6 +204,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp) + static void free_fib_info_rcu(struct rcu_head *head) + { + struct fib_info *fi = container_of(head, struct fib_info, rcu); ++ struct dst_metrics *m; + + change_nexthops(fi) { + if (nexthop_nh->nh_dev) +@@ -214,8 +215,9 @@ static void free_fib_info_rcu(struct rcu_head *head) + rt_fibinfo_free(&nexthop_nh->nh_rth_input); + } endfor_nexthops(fi); + +- if (fi->fib_metrics != (u32 *) dst_default_metrics) +- kfree(fi->fib_metrics); ++ m = fi->fib_metrics; ++ if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt)) ++ kfree(m); + kfree(fi); + } + +@@ -982,11 +984,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg) + val = 255; + if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) + return -EINVAL; +- fi->fib_metrics[type - 1] = val; ++ fi->fib_metrics->metrics[type - 1] = val; + } + + if (ecn_ca) +- fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; ++ fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; + + return 0; + } +@@ -1044,11 +1046,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg) + goto failure; + fib_info_cnt++; + if (cfg->fc_mx) { +- fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); ++ fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); + if (!fi->fib_metrics) + goto failure; ++ atomic_set(&fi->fib_metrics->refcnt, 1); + } else +- fi->fib_metrics = (u32 *) dst_default_metrics; ++ fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; + + fi->fib_net = net; + fi->fib_protocol = cfg->fc_protocol; +@@ -1251,7 +1254,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, + if (fi->fib_priority && + nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) + goto nla_put_failure; +- if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) ++ if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0) + goto nla_put_failure; + + if (fi->fib_prefsrc && +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index 7c52afb98c42..5c598f99a500 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -1906,6 +1906,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, + + /* rcu_read_lock is hold by caller */ + hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { ++ int err; ++ + if (i < s_i) { + i++; + continue; +@@ -1916,17 +1918,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, + continue; + } + +- if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid, +- cb->nlh->nlmsg_seq, +- RTM_NEWROUTE, +- tb->tb_id, +- fa->fa_type, +- xkey, +- KEYLENGTH - fa->fa_slen, +- fa->fa_tos, +- fa->fa_info, NLM_F_MULTI) < 0) { ++ err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid, ++ cb->nlh->nlmsg_seq, RTM_NEWROUTE, ++ tb->tb_id, fa->fa_type, ++ xkey, KEYLENGTH - fa->fa_slen, ++ fa->fa_tos, fa->fa_info, NLM_F_MULTI); ++ if (err < 0) { + cb->args[4] = i; +- return -1; ++ return err; + } + i++; + } +@@ -1948,10 +1947,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, + t_key key = cb->args[3]; + + while ((l = leaf_walk_rcu(&tp, key)) != NULL) { +- if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { ++ int err; ++ ++ err = fn_trie_dump_leaf(l, tb, skb, cb); ++ if (err < 0) { + cb->args[3] = key; + cb->args[2] = count; +- return -1; ++ return err; + } + + ++count; +diff --git 
a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index 64148914803a..45fa2aaa3d3f 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -669,6 +669,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, + inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num); + newsk->sk_write_space = sk_stream_write_space; + ++ inet_sk(newsk)->mc_list = NULL; ++ + newsk->sk_mark = inet_rsk(req)->ir_mark; + atomic64_set(&newsk->sk_cookie, + atomic64_read(&inet_rsk(req)->ir_cookie)); +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 375248b900ba..c295d882c6e0 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1356,8 +1356,12 @@ static void rt_add_uncached_list(struct rtable *rt) + + static void ipv4_dst_destroy(struct dst_entry *dst) + { ++ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); + struct rtable *rt = (struct rtable *) dst; + ++ if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt)) ++ kfree(p); ++ + if (!list_empty(&rt->rt_uncached)) { + struct uncached_list *ul = rt->rt_uncached_list; + +@@ -1409,7 +1413,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, + rt->rt_gateway = nh->nh_gw; + rt->rt_uses_gateway = 1; + } +- dst_init_metrics(&rt->dst, fi->fib_metrics, true); ++ dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true); ++ if (fi->fib_metrics != &dst_default_metrics) { ++ rt->dst._metrics |= DST_METRICS_REFCOUNTED; ++ atomic_inc(&fi->fib_metrics->refcnt); ++ } + #ifdef CONFIG_IP_ROUTE_CLASSID + rt->dst.tclassid = nh->nh_tclassid; + #endif +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index e1d51370977b..4bd8678329d6 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -1071,9 +1071,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, + int *copied, size_t size) + { + struct tcp_sock *tp = tcp_sk(sk); ++ struct sockaddr *uaddr = msg->msg_name; + int err, flags; + +- if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) ++ if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) || ++ (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && ++ uaddr->sa_family == AF_UNSPEC)) + return -EOPNOTSUPP; + if (tp->fastopen_req) + return -EALREADY; /* Another Fast Open is in progress */ +@@ -1086,7 +1089,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, + tp->fastopen_req->size = size; + + flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; +- err = __inet_stream_connect(sk->sk_socket, msg->msg_name, ++ err = __inet_stream_connect(sk->sk_socket, uaddr, + msg->msg_namelen, flags); + *copied = tp->fastopen_req->copied; + tcp_free_fastopen_req(tp); +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 818630cec54f..87791f803627 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -1134,13 +1134,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, + */ + if (pkt_len > mss) { + unsigned int new_len = (pkt_len / mss) * mss; +- if (!in_sack && new_len < pkt_len) { ++ if (!in_sack && new_len < pkt_len) + new_len += mss; +- if (new_len >= skb->len) +- return 0; +- } + pkt_len = new_len; + } ++ ++ if (pkt_len >= skb->len && !in_sack) ++ return 0; ++ + err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); + if (err < 0) + return err; +@@ -3219,7 +3220,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, + int delta; + + /* Non-retransmitted hole got filled? 
That's reordering */ +- if (reord < prior_fackets) ++ if (reord < prior_fackets && reord <= tp->fackets_out) + tcp_update_reordering(sk, tp->fackets_out - reord, 0); + + delta = tcp_is_fack(tp) ? pkts_acked : +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c +index 225f5f7f26ba..568bc0a52ca1 100644 +--- a/net/ipv6/ip6_offload.c ++++ b/net/ipv6/ip6_offload.c +@@ -62,7 +62,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, + const struct net_offload *ops; + int proto; + struct frag_hdr *fptr; +- unsigned int unfrag_ip6hlen; + u8 *prevhdr; + int offset = 0; + bool encap, udpfrag; +@@ -121,8 +120,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, + skb->network_header = (u8 *)ipv6h - skb->head; + + if (udpfrag) { +- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); +- fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); ++ int err = ip6_find_1stfragopt(skb, &prevhdr); ++ if (err < 0) ++ return ERR_PTR(err); ++ fptr = (struct frag_hdr *)((u8 *)ipv6h + err); + fptr->frag_off = htons(offset); + if (skb->next) + fptr->frag_off |= htons(IP6_MF); +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 8004532fa882..1db17efe36c1 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -571,7 +571,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + int ptr, offset = 0, err = 0; + u8 *prevhdr, nexthdr = 0; + +- hlen = ip6_find_1stfragopt(skb, &prevhdr); ++ err = ip6_find_1stfragopt(skb, &prevhdr); ++ if (err < 0) ++ goto fail; ++ hlen = err; + nexthdr = *prevhdr; + + mtu = ip6_skb_dst_mtu(skb); +@@ -1429,6 +1432,11 @@ alloc_new_skb: + */ + alloclen += sizeof(struct frag_hdr); + ++ copy = datalen - transhdrlen - fraggap; ++ if (copy < 0) { ++ err = -EINVAL; ++ goto error; ++ } + if (transhdrlen) { + skb = sock_alloc_send_skb(sk, + alloclen + hh_len, +@@ -1478,13 +1486,9 @@ alloc_new_skb: + data += fraggap; + pskb_trim_unique(skb_prev, maxfraglen); + } +- copy = datalen - transhdrlen - fraggap; +- +- if (copy < 0) { +- err = -EINVAL; +- kfree_skb(skb); +- goto error; +- } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { ++ if (copy > 0 && ++ getfrag(from, data + transhdrlen, offset, ++ copy, fraggap, skb) < 0) { + err = -EFAULT; + kfree_skb(skb); + goto error; +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c +index 1d184322a7b1..8b56c5240429 100644 +--- a/net/ipv6/output_core.c ++++ b/net/ipv6/output_core.c +@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident); + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + { + u16 offset = sizeof(struct ipv6hdr); +- struct ipv6_opt_hdr *exthdr = +- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); + unsigned int packet_len = skb_tail_pointer(skb) - + skb_network_header(skb); + int found_rhdr = 0; + *nexthdr = &ipv6_hdr(skb)->nexthdr; + +- while (offset + 1 <= packet_len) { ++ while (offset <= packet_len) { ++ struct ipv6_opt_hdr *exthdr; + + switch (**nexthdr) { + +@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + return offset; + } + +- offset += ipv6_optlen(exthdr); +- *nexthdr = &exthdr->nexthdr; ++ if (offset + sizeof(struct ipv6_opt_hdr) > packet_len) ++ return -EINVAL; ++ + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + + offset); ++ offset += ipv6_optlen(exthdr); ++ *nexthdr = &exthdr->nexthdr; + } + +- return offset; ++ return -EINVAL; + } + EXPORT_SYMBOL(ip6_find_1stfragopt); + +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 
1a63c4deef26..8e958fde6e4b 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -1033,6 +1033,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * + newtp->af_specific = &tcp_sock_ipv6_mapped_specific; + #endif + ++ newnp->ipv6_mc_list = NULL; + newnp->ipv6_ac_list = NULL; + newnp->ipv6_fl_list = NULL; + newnp->pktoptions = NULL; +@@ -1102,6 +1103,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * + First: no IPv4 options. + */ + newinet->inet_opt = NULL; ++ newnp->ipv6_mc_list = NULL; + newnp->ipv6_ac_list = NULL; + newnp->ipv6_fl_list = NULL; + +diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c +index 7441e1e63893..01582966ffa0 100644 +--- a/net/ipv6/udp_offload.c ++++ b/net/ipv6/udp_offload.c +@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + u8 frag_hdr_sz = sizeof(struct frag_hdr); + __wsum csum; + int tnl_hlen; ++ int err; + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(skb->len <= mss)) +@@ -97,7 +98,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + /* Find the unfragmentable header and shift it left by frag_hdr_sz + * bytes to insert fragment header. + */ +- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); ++ err = ip6_find_1stfragopt(skb, &prevhdr); ++ if (err < 0) ++ return ERR_PTR(err); ++ unfrag_ip6hlen = err; + nexthdr = *prevhdr; + *prevhdr = NEXTHDR_FRAGMENT; + unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + +diff --git a/net/sctp/input.c b/net/sctp/input.c +index b6493b3f11a9..2d7859c03fd2 100644 +--- a/net/sctp/input.c ++++ b/net/sctp/input.c +@@ -472,15 +472,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, + struct sctp_association **app, + struct sctp_transport **tpp) + { ++ struct sctp_init_chunk *chunkhdr, _chunkhdr; + union sctp_addr saddr; + union sctp_addr daddr; + struct sctp_af *af; + struct sock *sk = NULL; + struct sctp_association *asoc; + struct sctp_transport *transport = NULL; +- struct sctp_init_chunk *chunkhdr; + __u32 vtag = ntohl(sctphdr->vtag); +- int len = skb->len - ((void *)sctphdr - (void *)skb->data); + + *app = NULL; *tpp = NULL; + +@@ -515,13 +514,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, + * discard the packet. 
*/ + if (vtag == 0) { +- chunkhdr = (void *)sctphdr + sizeof(struct sctphdr); +- if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) +- + sizeof(__be32) || ++ /* chunk header + first 4 octets of init header */ ++ chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) + ++ sizeof(struct sctphdr), ++ sizeof(struct sctp_chunkhdr) + ++ sizeof(__be32), &_chunkhdr); ++ if (!chunkhdr || + chunkhdr->chunk_hdr.type != SCTP_CID_INIT || +- ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { ++ ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) + goto out; +- } ++ + } else if (vtag != asoc->c.peer_vtag) { + goto out; + } +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index ce46f1c7f133..7527c168e471 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -239,12 +239,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + struct sctp_bind_addr *bp; + struct ipv6_pinfo *np = inet6_sk(sk); + struct sctp_sockaddr_entry *laddr; +- union sctp_addr *baddr = NULL; + union sctp_addr *daddr = &t->ipaddr; + union sctp_addr dst_saddr; + struct in6_addr *final_p, final; + __u8 matchlen = 0; +- __u8 bmatchlen; + sctp_scope_t scope; + + memset(fl6, 0, sizeof(struct flowi6)); +@@ -311,23 +309,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + */ + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &bp->address_list, list) { +- if (!laddr->valid) ++ struct dst_entry *bdst; ++ __u8 bmatchlen; ++ ++ if (!laddr->valid || ++ laddr->state != SCTP_ADDR_SRC || ++ laddr->a.sa.sa_family != AF_INET6 || ++ scope > sctp_scope(&laddr->a)) + continue; +- if ((laddr->state == SCTP_ADDR_SRC) && +- (laddr->a.sa.sa_family == AF_INET6) && +- (scope <= sctp_scope(&laddr->a))) { +- bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); +- if (!baddr || (matchlen < bmatchlen)) { +- baddr = &laddr->a; +- matchlen = bmatchlen; +- } +- } +- } +- if (baddr) { +- fl6->saddr = baddr->v6.sin6_addr; +- fl6->fl6_sport = baddr->v6.sin6_port; ++ ++ fl6->saddr = laddr->a.v6.sin6_addr; ++ fl6->fl6_sport = laddr->a.v6.sin6_port; + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); +- dst = ip6_dst_lookup_flow(sk, fl6, final_p); ++ bdst = ip6_dst_lookup_flow(sk, fl6, final_p); ++ ++ if (!IS_ERR(bdst) && ++ ipv6_chk_addr(dev_net(bdst->dev), ++ &laddr->a.v6.sin6_addr, bdst->dev, 1)) { ++ if (!IS_ERR_OR_NULL(dst)) ++ dst_release(dst); ++ dst = bdst; ++ break; ++ } ++ ++ bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); ++ if (matchlen > bmatchlen) ++ continue; ++ ++ if (!IS_ERR_OR_NULL(dst)) ++ dst_release(dst); ++ dst = bdst; ++ matchlen = bmatchlen; + } + rcu_read_unlock(); + +@@ -662,6 +674,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, + newnp = inet6_sk(newsk); + + memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ++ newnp->ipv6_mc_list = NULL; ++ newnp->ipv6_ac_list = NULL; ++ newnp->ipv6_fl_list = NULL; + + rcu_read_lock(); + opt = rcu_dereference(np->opt); +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 37b70f8e878f..0abab7926dca 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -1537,6 +1537,8 @@ static const struct snd_pci_quirk stac9200_fixup_tbl[] = { + "Dell Inspiron 1501", STAC_9200_DELL_M26), + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6, + "unknown Dell", STAC_9200_DELL_M26), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201, ++ "Dell Latitude D430", STAC_9200_DELL_M22), + /* Panasonic */ + SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", 
STAC_9200_PANASONIC), + /* Gateway machines needs EAPD to be set on resume */ diff --git a/patch/kernel/mvebu64-default/remove_devel_version.patch b/patch/kernel/mvebu64-default/remove_devel_version.patch new file mode 100644 index 000000000..5dca1f01b --- /dev/null +++ b/patch/kernel/mvebu64-default/remove_devel_version.patch @@ -0,0 +1,7 @@ +diff --git a/localversion b/localversion +deleted file mode 100644 +index 649559a..0000000 +--- a/localversion ++++ /dev/null +@@ -1 +0,0 @@ +--devel-17.04.2