diff --git a/Documentation/dontdiff b/Documentation/dontdiff index afbf541de..1343357a2 100644 --- a/Documentation/dontdiff +++ b/Documentation/dontdiff @@ -7,6 +7,7 @@ *.cis *.cpio *.csp +*.dbg *.dsp *.dvi *.elf @@ -16,6 +17,7 @@ *.gcov *.gen.S *.gif +*.gmo *.grep *.grp *.gz @@ -52,14 +54,17 @@ *.tab.h *.tex *.ver +*.vim *.xml *.xz *_MODULES +*_reg_safe.h *_vga16.c *~ \#*# *.9 -.* +.[^g]* +.gen* .*.d .mm 53c700_d.h @@ -73,9 +78,11 @@ Image Module.markers Module.symvers PENDING +PERF* SCCS System.map* TAGS +TRACEEVENT-CFLAGS aconf af_names.h aic7*reg.h* @@ -84,6 +91,7 @@ aic7*seq.h* aicasm aicdb.h* altivec*.c +ashldi3.S asm-offsets.h asm_offsets.h autoconf.h* @@ -96,11 +104,14 @@ bounds.h bsetup btfixupprep build +builtin-policy.h bvmlinux bzImage* capability_names.h capflags.c classlist.h* +clut_vga16.c +common-cmds.h comp*.log compile.h* conf @@ -109,19 +120,23 @@ config-* config_data.h* config.mak config.mak.autogen +config.tmp conmakehash consolemap_deftbl.c* cpustr.h crc32table.h* cscope.* defkeymap.c +devicetable-offsets.h devlist.h* dnotify_test docproc dslm +dtc-lexer.lex.c elf2ecoff elfconfig.h* evergreen_reg_safe.h +exception_policy.conf fixdep flask.h fore200e_mkfirm @@ -129,12 +144,15 @@ fore200e_pca_fw.c* gconf gconf.glade.h gen-devlist +gen-kdb_cmds.c gen_crc32table gen_init_cpio generated genheaders genksyms *_gray256.c +hash +hid-example hpet_example hugepage-mmap hugepage-shm @@ -149,14 +167,14 @@ int32.c int4.c int8.c kallsyms -kconfig +kern_constants.h keywords.c ksym.c* ksym.h* kxgettext lex.c lex.*.c -linux +lib1funcs.S logo_*.c logo_*_clut224.c logo_*_mono.c @@ -167,12 +185,14 @@ machtypes.h map map_hugetlb mconf +mdp miboot* mk_elfconfig mkboot mkbugboot mkcpustr mkdep +mkpiggy mkprep mkregtable mktables @@ -188,6 +208,8 @@ oui.c* page-types parse.c parse.h +parse-events* +pasyms.h patches* /*(DEBLOBBED)*/ /*(DEBLOBBED)*/ @@ -197,6 +219,7 @@ perf-archive piggyback piggy.gzip piggy.S +pmu-* pnmtologo ppc_defs.h* pss_boot.h @@ -206,7 +229,12 @@ r200_reg_safe.h r300_reg_safe.h r420_reg_safe.h r600_reg_safe.h +randomize_layout_hash.h +randomize_layout_seed.h +realmode.lds +realmode.relocs recordmcount +regdb.c relocs rlim_names.h rn50_reg_safe.h @@ -216,8 +244,17 @@ series setup setup.bin setup.elf +signing_key* +aux.h +disable.h +e_fields.h +e_fns.h +e_fptrs.h +e_vars.h sImage +slabinfo sm_tbl* +sortextable split-include syscalltab.h tables.c @@ -227,6 +264,7 @@ tftpboot.img timeconst.h times.h* trix_boot.h +user_constants.h utsrelease.h* vdso-syms.lds vdso.lds @@ -238,13 +276,17 @@ vdso32.lds vdso32.so.dbg vdso64.lds vdso64.so.dbg +vdsox32.lds +vdsox32-syms.lds version.h* vmImage vmlinux vmlinux-* vmlinux.aout vmlinux.bin.all +vmlinux.bin.bz2 vmlinux.lds +vmlinux.relocs vmlinuz voffset.h vsyscall.lds @@ -252,9 +294,12 @@ vsyscall_32.lds wanxlfw.inc uImage unifdef +utsrelease.h wakeup.bin wakeup.elf wakeup.lds +x509* zImage* zconf.hash.c +zconf.lex.c zoffset.h diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index 9b9c4797f..5a635ff7a 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles. 
=== 4 Host Program support --- 4.1 Simple Host Program --- 4.2 Composite Host Programs - --- 4.3 Using C++ for host programs - --- 4.4 Controlling compiler options for host programs - --- 4.5 When host programs are actually built - --- 4.6 Using hostprogs-$(CONFIG_FOO) + --- 4.3 Defining shared libraries + --- 4.4 Using C++ for host programs + --- 4.5 Controlling compiler options for host programs + --- 4.6 When host programs are actually built + --- 4.7 Using hostprogs-$(CONFIG_FOO) === 5 Kbuild clean infrastructure @@ -645,7 +646,29 @@ Both possibilities are described in the following. Finally, the two .o files are linked to the executable, lxdialog. Note: The syntax -y is not permitted for host-programs. ---- 4.3 Using C++ for host programs +--- 4.3 Defining shared libraries + + Objects with extension .so are considered shared libraries, and + will be compiled as position independent objects. + Kbuild provides support for shared libraries, but the usage + shall be restricted. + In the following example the libkconfig.so shared library is used + to link the executable conf. + + Example: + #scripts/kconfig/Makefile + hostprogs-y := conf + conf-objs := conf.o libkconfig.so + libkconfig-objs := expr.o type.o + + Shared libraries always require a corresponding -objs line, and + in the example above the shared library libkconfig is composed by + the two objects expr.o and type.o. + expr.o and type.o will be built as position independent code and + linked as a shared library libkconfig.so. C++ is not supported for + shared libraries. + +--- 4.4 Using C++ for host programs kbuild offers support for host programs written in C++. This was introduced solely to support kconfig, and is not recommended @@ -668,7 +691,7 @@ Both possibilities are described in the following. qconf-cxxobjs := qconf.o qconf-objs := check.o ---- 4.4 Controlling compiler options for host programs +--- 4.5 Controlling compiler options for host programs When compiling host programs, it is possible to set specific flags. The programs will always be compiled utilising $(HOSTCC) passed @@ -696,7 +719,7 @@ Both possibilities are described in the following. When linking qconf, it will be passed the extra option "-L$(QTDIR)/lib". ---- 4.5 When host programs are actually built +--- 4.6 When host programs are actually built Kbuild will only build host-programs when they are referenced as a prerequisite. @@ -727,7 +750,7 @@ Both possibilities are described in the following. This will tell kbuild to build lxdialog even if not referenced in any rule. ---- 4.6 Using hostprogs-$(CONFIG_FOO) +--- 4.7 Using hostprogs-$(CONFIG_FOO) A typical pattern in a Kbuild file looks like this: diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 922dec8fa..a45d4a23b 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1422,6 +1422,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. [KNL] Should the hard-lockup detector generate backtraces on all cpus. Format: + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to + ignore grsecurity's /proc restrictions + + grsec_sysfs_restrict= Format: 0 | 1 + Default: 1 + Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config hashdist= [KNL,NUMA] Large hashes allocated during boot are distributed across NUMA nodes. Defaults on @@ -2651,6 +2657,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. 
noexec=on: enable non-executable mappings (default) noexec=off: disable non-executable mappings + nopcid [X86-64] + Disable PCID (Process-Context IDentifier) even if it + is supported by the processor. + nosmap [X86] Disable SMAP (Supervisor Mode Access Prevention) even if it is supported by processor. @@ -2959,6 +2969,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted. the specified number of seconds. This is to be used if your oopses keep scrolling off the screen. + pax_nouderef [X86] disables UDEREF. Most likely needed under certain + virtualization environments that don't cope well with the + expand down segment used by UDEREF on X86-32 or the frequent + page table updates on X86-64. + + pax_sanitize_slab= + Format: { 0 | 1 | off | fast | full } + Options '0' and '1' are only provided for backward + compatibility, 'off' or 'fast' should be used instead. + 0|off : disable slab object sanitization + 1|fast: enable slab object sanitization excluding + whitelisted slabs (default) + full : sanitize all slabs, even the whitelisted ones + + pax_softmode= 0/1 to disable/enable PaX softmode on boot already. + + pax_extra_latent_entropy + Enable a very simple form of latent entropy extraction + from the first 4GB of memory as the bootmem allocator + passes the memory pages to the buddy allocator. + + pax_size_overflow_report_only + Enables rate-limited logging of size_overflow plugin + violations while disabling killing of the violating + task. + + pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF + when the processor supports PCID. + pcbit= [HW,ISDN] pcd. [PARIDE] diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index ffab8b5ca..b8fcd6113 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -42,6 +42,7 @@ show up in /proc/sys/kernel: - kptr_restrict - kstack_depth_to_print [ X86 only ] - l2cr [ PPC only ] +- modify_ldt [ X86 only ] - modprobe ==> Documentation/debugging-modules.txt - modules_disabled - msg_next_id [ sysv ipc ] @@ -409,6 +410,20 @@ This flag controls the L2 cache of G3 processor boards. If ============================================================== +modify_ldt: (X86 only) + +Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT +(Local Descriptor Table) may be needed to run a 16-bit or segmented code +such as Dosemu or Wine. This is done via a system call which is not needed +to run portable applications, and which can sometimes be abused to exploit +some weaknesses of the architecture, opening new vulnerabilities. + +This sysctl allows one to increase the system's security by disabling the +system call, or to restore compatibility with specific applications when it +was already disabled. 
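For illustration only (not part of this patch): a minimal userspace sketch of flipping the modify_ldt sysctl documented above. It assumes the kernel exposes /proc/sys/kernel/modify_ldt as described in this hunk; the helper name is hypothetical, and writing the file requires root.

#include <stdio.h>

/* Hypothetical helper: toggle the modify_ldt sysctl added by this patch.
 * 0 disables the modify_ldt syscall, 1 re-enables it. */
static int set_modify_ldt(int enable)
{
	FILE *f = fopen("/proc/sys/kernel/modify_ldt", "w");

	if (!f)
		return -1;			/* sysctl absent or insufficient privilege */
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

As with any sysctl, the value set this way is not persistent across reboots unless it is also configured via sysctl.conf or an equivalent mechanism.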
+ +============================================================== + modules_disabled: A toggle value indicating if modules are allowed to be loaded diff --git a/Kbuild b/Kbuild index 3d0ae152a..84e541262 100644 --- a/Kbuild +++ b/Kbuild @@ -91,6 +91,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE always += missing-syscalls targets += missing-syscalls +GCC_PLUGINS_missing-syscalls := n quiet_cmd_syscalls = CALL $< cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags) diff --git a/Makefile b/Makefile index fb6a5b834..17bbabf82 100644 --- a/Makefile +++ b/Makefile @@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ HOSTCC = gcc HOSTCXX = g++ HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89 -HOSTCXXFLAGS = -O2 +HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks +HOSTCFLAGS += $(call cc-option, -Wno-empty-body) +HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1) HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \ @@ -731,7 +733,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g) else KBUILD_CFLAGS += -g endif -KBUILD_AFLAGS += -Wa,-gdwarf-2 +KBUILD_AFLAGS += -Wa,--gdwarf-2 endif ifdef CONFIG_DEBUG_INFO_DWARF4 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,) @@ -910,7 +912,7 @@ export mod_sign_cmd ifeq ($(KBUILD_EXTMOD),) -core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ +core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ @@ -1274,7 +1276,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ signing_key.pem signing_key.priv signing_key.x509 \ x509.genkey extra_certificates signing_key.x509.keyid \ - signing_key.x509.signer vmlinux-gdb.py + signing_key.x509.signer vmlinux-gdb.py \ + scripts/gcc-plugins/size_overflow_plugin/e_*.h \ + scripts/gcc-plugins/size_overflow_plugin/disable.h \ + scripts/gcc-plugins/randomize_layout_seed.h # clean - Delete most, but leave enough to build external modules # @@ -1314,7 +1319,7 @@ distclean: mrproper @find $(srctree) $(RCS_FIND_IGNORE) \ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ - -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \ + -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \ -type f -print | xargs rm -f diff --git a/arch/Kconfig b/arch/Kconfig index 659bdd079..417918130 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -164,6 +164,7 @@ config ARCH_USE_BUILTIN_BSWAP config KRETPROBES def_bool y depends on KPROBES && HAVE_KRETPROBES + depends on !PAX_RAP config USER_RETURN_NOTIFIER bool @@ -355,7 +356,7 @@ config HAVE_GCC_PLUGINS menuconfig GCC_PLUGINS bool "GCC plugins" depends on HAVE_GCC_PLUGINS - depends on !COMPILE_TEST + default y help GCC plugins are loadable modules that provide extra features to the compiler. They are useful for runtime instrumentation and static analysis. @@ -759,6 +760,7 @@ config VMAP_STACK default y bool "Use a virtually-mapped stack" depends on HAVE_ARCH_VMAP_STACK && !KASAN + depends on !GRKERNSEC_KSTACKOVERFLOW ---help--- Enable this if you want the use virtually-mapped kernel stacks with guard pages. 
This causes kernel stack overflows to be diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 498933a7d..78d2b22fa 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -308,4 +308,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) #define atomic_dec(v) atomic_sub(1,(v)) #define atomic64_dec(v) atomic64_sub(1,(v)) +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) + #endif /* _ALPHA_ATOMIC_H */ diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h index ad368a93a..fbe0f2514 100644 --- a/arch/alpha/include/asm/cache.h +++ b/arch/alpha/include/asm/cache.h @@ -4,19 +4,19 @@ #ifndef __ARCH_ALPHA_CACHE_H #define __ARCH_ALPHA_CACHE_H +#include /* Bytes per L1 (data) cache line. */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) -# define L1_CACHE_BYTES 64 # define L1_CACHE_SHIFT 6 #else /* Both EV4 and EV5 are write-through, read-allocate, direct-mapped, physical. */ -# define L1_CACHE_BYTES 32 # define L1_CACHE_SHIFT 5 #endif +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define SMP_CACHE_BYTES L1_CACHE_BYTES #endif diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h index 968d9991f..d36b2df54 100644 --- a/arch/alpha/include/asm/elf.h +++ b/arch/alpha/include/asm/elf.h @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) + +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19) +#endif + /* $0 is set by ld.so to a pointer to a function which might be registered using atexit. 
This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h index c2ebb6f36..93a0613a2 100644 --- a/arch/alpha/include/asm/pgalloc.h +++ b/arch/alpha/include/asm/pgalloc.h @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_set(pgd, pmd); } +static inline void +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) +{ + pgd_populate(mm, pgd, pmd); +} + extern pgd_t *pgd_alloc(struct mm_struct *mm); static inline void diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index a9a119592..e9b84174e 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -101,6 +101,17 @@ struct vm_area_struct; #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index 936bc8f89..bb1859fb6 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, /* The small sections were sorted to the end of the segment. The following should definitely cover them. */ - gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000; + gp = (u64)me->core_layout.base_rw + me->core_layout.size_rw - 0x8000; got = sechdrs[me->arch.gotsecindex].sh_addr; for (i = 0; i < n; i++) { diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index ffb93f499..ced8233e7 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1300,10 +1300,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) generic version except that we know how to honor ADDR_LIMIT_32BIT. */ static unsigned long -arch_get_unmapped_area_1(unsigned long addr, unsigned long len, - unsigned long limit) +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len, + unsigned long limit, unsigned long flags) { struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); info.flags = 0; info.length = len; @@ -1311,6 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len, info.high_limit = limit; info.align_mask = 0; info.align_offset = 0; + info.threadstack_offset = offset; return vm_unmapped_area(&info); } @@ -1343,20 +1345,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? 
*/ +#ifdef CONFIG_PAX_RANDMMAP + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags); if (addr != (unsigned long) -ENOMEM) return addr; } /* Next, try allocating at TASK_UNMAPPED_BASE. */ - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), - len, limit); + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags); + if (addr != (unsigned long) -ENOMEM) return addr; /* Finally, try allocating in low memory. */ - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags); return addr; } diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 83e9eee57..db02682e0 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm) __reload_thread(pcb); } +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int ldah, ldq, jmp; + + err = get_user(ldah, (unsigned int *)regs->pc); + err |= get_user(ldq, (unsigned int *)(regs->pc+4)); + err |= get_user(jmp, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((ldah & 0xFFFF0000U) == 0x277B0000U && + (ldq & 0xFFFF0000U) == 0xA77B0000U && + jmp == 0x6BFB0000U) + { + unsigned long r27, addr; + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; + + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); + err = get_user(r27, (unsigned long *)addr); + if (err) + break; + + regs->r27 = r27; + regs->pc = r27; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #2 */ + unsigned int ldah, lda, br; + + err = get_user(ldah, (unsigned int *)regs->pc); + err |= get_user(lda, (unsigned int *)(regs->pc+4)); + err |= get_user(br, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((ldah & 0xFFFF0000U) == 0x277B0000U && + (lda & 0xFFFF0000U) == 0xA77B0000U && + (br & 0xFFE00000U) == 0xC3E00000U) + { + unsigned long addr = br | 0xFFFFFFFFFFE00000UL; + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; + + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation */ + unsigned int br; + + err = get_user(br, (unsigned int *)regs->pc); + + if (!err && (br & 0xFFE00000U) == 0xC3800000U) { + unsigned int br2, ldq, nop, jmp; + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; + + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); + err = get_user(br2, (unsigned int *)addr); + err |= get_user(ldq, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + err |= get_user(jmp, (unsigned int *)(addr+12)); + err |= get_user(resolver, (unsigned long *)(addr+16)); + + if (err) + break; + + if (br2 == 0xC3600000U && + ldq == 0xA77B000CU && + nop == 0x47FF041FU && + jmp == 0x6B7B0000U) + { 
+ regs->r28 = regs->pc+4; + regs->r27 = addr+16; + regs->pc = resolver; + return 3; + } + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif /* * This routine handles page faults. It determines the address, @@ -132,8 +250,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr, good_area: si_code = SEGV_ACCERR; if (cause < 0) { - if (!(vma->vm_flags & VM_EXEC)) + if (!(vma->vm_flags & VM_EXEC)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) + goto bad_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); + do_group_exit(SIGKILL); +#else goto bad_area; +#endif + + } } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 42b05046f..60132211c 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -424,6 +424,7 @@ static void __used kretprobe_trampoline_holder(void) "kretprobe_trampoline:\n" "nop\n"); } +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -433,6 +434,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* Replace the return addr with trampoline addr */ regs->blink = (unsigned long)&kretprobe_trampoline; } +#endif static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) @@ -509,6 +511,7 @@ int __init arch_init_kprobes(void) return register_kprobe(&trampoline_p); } +#ifdef CONFIG_KRETPROBES int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline) @@ -516,6 +519,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) return 0; } +#endif void trap_is_kprobe(unsigned long address, struct pt_regs *regs) { diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b5d529fdf..0bb4d4fde 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1622,6 +1622,7 @@ config AEABI config OABI_COMPAT bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" depends on AEABI && !THUMB2_KERNEL + depends on !GRKERNSEC help This option preserves the old syscall interface along with the new (ARM EABI) one. It also provides a compatibility layer to @@ -1690,6 +1691,7 @@ config HIGHPTE config CPU_SW_DOMAIN_PAN bool "Enable use of CPU domains to implement privileged no-access" depends on MMU && !ARM_LPAE + depends on !PAX_KERNEXEC && !PAX_MEMORY_UDEREF default y help Increase kernel security by ensuring that normal kernel accesses @@ -1766,7 +1768,7 @@ config ALIGNMENT_TRAP config UACCESS_WITH_MEMCPY bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()" - depends on MMU + depends on MMU && !PAX_MEMORY_UDEREF default y if CPU_FEROCEON help Implement faster copy_to_user and clear_user methods for CPU @@ -2021,6 +2023,7 @@ config KEXEC depends on (!SMP || PM_SLEEP_SMP) depends on !CPU_V7M select KEXEC_CORE + depends on !GRKERNSEC_KMEM help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot @@ -2065,7 +2068,7 @@ config EFI_STUB config EFI bool "UEFI runtime support" - depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL + depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL && !PAX_KERNEXEC select UCS2_STRING select EFI_PARAMS_FROM_FDT select EFI_STUB diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index d83f7c369..a6aba4c83 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -7,6 +7,7 @@ config ARM_PTDUMP depends on DEBUG_KERNEL depends on MMU select DEBUG_FS + depends on !GRKERNSEC_KMEM ---help--- Say Y here if you want to show the kernel pagetable layout in a debugfs file. This information is only useful for kernel developers diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index d50430c40..39509a662 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -24,6 +24,8 @@ endif GCOV_PROFILE := n +GCC_PLUGINS := n + # # Architecture dependencies # diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c index 6fc73bf87..d0af3c7b6 100644 --- a/arch/arm/crypto/sha1_glue.c +++ b/arch/arm/crypto/sha1_glue.c @@ -27,8 +27,8 @@ #include "sha1.h" -asmlinkage void sha1_block_data_order(u32 *digest, - const unsigned char *data, unsigned int rounds); +asmlinkage void sha1_block_data_order(struct sha1_state *digest, + const u8 *data, int rounds); int sha1_update_arm(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -36,22 +36,20 @@ int sha1_update_arm(struct shash_desc *desc, const u8 *data, /* make sure casting to sha1_block_fn() is safe */ BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0); - return sha1_base_do_update(desc, data, len, - (sha1_block_fn *)sha1_block_data_order); + return sha1_base_do_update(desc, data, len, sha1_block_data_order); } EXPORT_SYMBOL_GPL(sha1_update_arm); static int sha1_final(struct shash_desc *desc, u8 *out) { - sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_block_data_order); + sha1_base_do_finalize(desc, sha1_block_data_order); return sha1_base_finish(desc, out); } int sha1_finup_arm(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha1_base_do_update(desc, data, len, - (sha1_block_fn *)sha1_block_data_order); + sha1_base_do_update(desc, data, len, sha1_block_data_order); return sha1_final(desc, out); } EXPORT_SYMBOL_GPL(sha1_finup_arm); diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c index 4e22f122f..49902aa8d 100644 --- a/arch/arm/crypto/sha1_neon_glue.c +++ b/arch/arm/crypto/sha1_neon_glue.c @@ -31,8 +31,8 @@ #include "sha1.h" -asmlinkage void sha1_transform_neon(void *state_h, const char *data, - unsigned int rounds); +asmlinkage void sha1_transform_neon(struct sha1_state *state_h, const u8 *data, + int rounds); static int sha1_neon_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -45,7 +45,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data, kernel_neon_begin(); sha1_base_do_update(desc, data, len, - (sha1_block_fn *)sha1_transform_neon); + sha1_transform_neon); kernel_neon_end(); return 0; @@ -60,8 +60,8 @@ static int sha1_neon_finup(struct shash_desc *desc, const u8 *data, kernel_neon_begin(); if (len) sha1_base_do_update(desc, data, len, - (sha1_block_fn *)sha1_transform_neon); - sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon); + sha1_transform_neon); + sha1_base_do_finalize(desc, sha1_transform_neon); kernel_neon_end(); return 
sha1_base_finish(desc, out); diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c index a84e869ef..53a0c6107 100644 --- a/arch/arm/crypto/sha256_glue.c +++ b/arch/arm/crypto/sha256_glue.c @@ -30,8 +30,8 @@ #include "sha256_glue.h" -asmlinkage void sha256_block_data_order(u32 *digest, const void *data, - unsigned int num_blks); +asmlinkage void sha256_block_data_order(struct sha256_state *digest, const u8 *data, + int num_blks); int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -39,23 +39,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, /* make sure casting to sha256_block_fn() is safe */ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); - return sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order); + return sha256_base_do_update(desc, data, len, sha256_block_data_order); } EXPORT_SYMBOL(crypto_sha256_arm_update); static int sha256_final(struct shash_desc *desc, u8 *out) { - sha256_base_do_finalize(desc, - (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_finalize(desc, sha256_block_data_order); return sha256_base_finish(desc, out); } int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_update(desc, data, len, sha256_block_data_order); return sha256_final(desc, out); } EXPORT_SYMBOL(crypto_sha256_arm_finup); diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c index 39ccd6588..f9511cbdf 100644 --- a/arch/arm/crypto/sha256_neon_glue.c +++ b/arch/arm/crypto/sha256_neon_glue.c @@ -26,8 +26,8 @@ #include "sha256_glue.h" -asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data, - unsigned int num_blks); +asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest, const u8 *data, + int num_blks); static int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -39,8 +39,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data, return crypto_sha256_arm_update(desc, data, len); kernel_neon_begin(); - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order_neon); + sha256_base_do_update(desc, data, len, sha256_block_data_order_neon); kernel_neon_end(); return 0; @@ -54,10 +53,8 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data, kernel_neon_begin(); if (len) - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order_neon); - sha256_base_do_finalize(desc, - (sha256_block_fn *)sha256_block_data_order_neon); + sha256_base_do_update(desc, data, len, sha256_block_data_order_neon); + sha256_base_do_finalize(desc, sha256_block_data_order_neon); kernel_neon_end(); return sha256_base_finish(desc, out); diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c index 269a394e4..c7a91f1be 100644 --- a/arch/arm/crypto/sha512-glue.c +++ b/arch/arm/crypto/sha512-glue.c @@ -28,27 +28,24 @@ MODULE_ALIAS_CRYPTO("sha512"); MODULE_ALIAS_CRYPTO("sha384-arm"); MODULE_ALIAS_CRYPTO("sha512-arm"); -asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks); +asmlinkage void sha512_block_data_order(struct sha512_state *state, u8 const *src, int blocks); int sha512_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order); + 
return sha512_base_do_update(desc, data, len, sha512_block_data_order); } int sha512_arm_final(struct shash_desc *desc, u8 *out) { - sha512_base_do_finalize(desc, - (sha512_block_fn *)sha512_block_data_order); + sha512_base_do_finalize(desc, sha512_block_data_order); return sha512_base_finish(desc, out); } int sha512_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order); + sha512_base_do_update(desc, data, len, sha512_block_data_order); return sha512_arm_final(desc, out); } diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c index 32693684a..9fcbc0079 100644 --- a/arch/arm/crypto/sha512-neon-glue.c +++ b/arch/arm/crypto/sha512-neon-glue.c @@ -22,7 +22,7 @@ MODULE_ALIAS_CRYPTO("sha384-neon"); MODULE_ALIAS_CRYPTO("sha512-neon"); -asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src, +asmlinkage void sha512_block_data_order_neon(struct sha512_state *state, u8 const *src, int blocks); static int sha512_neon_update(struct shash_desc *desc, const u8 *data, @@ -35,8 +35,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data, return sha512_arm_update(desc, data, len); kernel_neon_begin(); - sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order_neon); + sha512_base_do_update(desc, data, len, sha512_block_data_order_neon); kernel_neon_end(); return 0; @@ -50,10 +49,8 @@ static int sha512_neon_finup(struct shash_desc *desc, const u8 *data, kernel_neon_begin(); if (len) - sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order_neon); - sha512_base_do_finalize(desc, - (sha512_block_fn *)sha512_block_data_order_neon); + sha512_base_do_update(desc, data, len, sha512_block_data_order_neon); + sha512_base_do_finalize(desc, sha512_block_data_order_neon); kernel_neon_end(); return sha512_base_finish(desc, out); diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 66d0e215a..8fa3237d4 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -18,17 +18,41 @@ #include #include +#ifdef CONFIG_GENERIC_ATOMIC64 +#include +#endif + #define ATOMIC_INIT(i) { (i) } #ifdef __KERNEL__ +#ifdef CONFIG_THUMB2_KERNEL +#define REFCOUNT_TRAP_INSN "bkpt 0xf1" +#else +#define REFCOUNT_TRAP_INSN "bkpt 0xf103" +#endif + +#define _ASM_EXTABLE(from, to) \ +" .pushsection __ex_table,\"a\"\n"\ +" .align 3\n" \ +" .long " #from ", " #to"\n" \ +" .popsection" + /* * On ARM, ordinary assignment (str instruction) doesn't clear the local * strex/ldrex monitor on some implementations. The reason we can use it for * atomic_set() is the clrex or dummy strex done on every exception return. */ #define atomic_read(v) READ_ONCE((v)->counter) +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return READ_ONCE(v->counter); +} #define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i)) +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + WRITE_ONCE(v->counter, i); +} #if __LINUX_ARM_ARCH__ >= 6 @@ -38,45 +62,74 @@ * to ensure that the update happens. 
*/ -#define ATOMIC_OP(op, c_op, asm_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +#ifdef CONFIG_PAX_REFCOUNT +#define __OVERFLOW_POST \ + " bvc 3f\n" \ + "2: " REFCOUNT_TRAP_INSN "\n"\ + "3:\n" +#define __OVERFLOW_POST_RETURN \ + " bvc 3f\n" \ + " mov %1, %0\n" \ + "2: " REFCOUNT_TRAP_INSN "\n"\ + "3:\n" +#define __OVERFLOW_EXTABLE \ + "4:\n" \ + _ASM_EXTABLE(2b, 4b) +#else +#define __OVERFLOW_POST +#define __OVERFLOW_POST_RETURN +#define __OVERFLOW_EXTABLE +#endif + +#define __ATOMIC_OP(op, suffix, c_op, asm_op) \ +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \ { \ unsigned long tmp; \ int result; \ \ prefetchw(&v->counter); \ - __asm__ __volatile__("@ atomic_" #op "\n" \ + __asm__ __volatile__("@ atomic_" #op #suffix "\n" \ "1: ldrex %0, [%3]\n" \ " " #asm_op " %0, %0, %4\n" \ + __OVERFLOW_POST \ " strex %1, %0, [%3]\n" \ " teq %1, #0\n" \ -" bne 1b" \ +" bne 1b\n" \ + __OVERFLOW_EXTABLE \ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ : "r" (&v->counter), "Ir" (i) \ : "cc"); \ } \ -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op)\ + __ATOMIC_OP(op, , c_op, asm_op##s) + +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \ +static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t *v)\ { \ - unsigned long tmp; \ + int tmp; \ int result; \ \ prefetchw(&v->counter); \ \ - __asm__ __volatile__("@ atomic_" #op "_return\n" \ + __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \ "1: ldrex %0, [%3]\n" \ -" " #asm_op " %0, %0, %4\n" \ -" strex %1, %0, [%3]\n" \ -" teq %1, #0\n" \ -" bne 1b" \ - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ +" " #asm_op " %1, %0, %4\n" \ + __OVERFLOW_POST_RETURN \ +" strex %0, %1, [%3]\n" \ +" teq %0, #0\n" \ +" bne 1b\n" \ + __OVERFLOW_EXTABLE \ + : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter) \ : "r" (&v->counter), "Ir" (i) \ : "cc"); \ \ return result; \ } +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)\ + __ATOMIC_OP_RETURN(op, , c_op, asm_op##s) + #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ { \ @@ -99,6 +152,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ } #define atomic_add_return_relaxed atomic_add_return_relaxed +#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed #define atomic_sub_return_relaxed atomic_sub_return_relaxed #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed @@ -141,12 +195,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) __asm__ __volatile__ ("@ atomic_add_unless\n" "1: ldrex %0, [%4]\n" " teq %0, %5\n" -" beq 2f\n" -" add %1, %0, %6\n" +" beq 4f\n" +" adds %1, %0, %6\n" + + __OVERFLOW_POST + " strex %2, %1, [%4]\n" " teq %2, #0\n" " bne 1b\n" -"2:" + + __OVERFLOW_EXTABLE + : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (u), "r" (a) : "cc"); @@ -157,14 +216,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return oldval; } +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new) +{ + unsigned long oldval, res; + + smp_mb(); + + do { + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n" + "ldrex %1, [%3]\n" + "mov %0, #0\n" + "teq %1, %4\n" + "strexeq %0, %5, [%3]\n" 
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) + : "r" (&ptr->counter), "Ir" (old), "r" (new) + : "cc"); + } while (res); + + smp_mb(); + + return oldval; +} + #else /* ARM_ARCH_6 */ #ifdef CONFIG_SMP #error SMP not supported on pre-ARMv6 CPUs #endif -#define ATOMIC_OP(op, c_op, asm_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +#define __ATOMIC_OP(op, suffix, c_op, asm_op) \ +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \ { \ unsigned long flags; \ \ @@ -173,8 +254,11 @@ static inline void atomic_##op(int i, atomic_t *v) \ raw_local_irq_restore(flags); \ } \ -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \ + __ATOMIC_OP(op, _unchecked, c_op, asm_op) + +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \ +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\ { \ unsigned long flags; \ int val; \ @@ -201,6 +285,9 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ return val; \ } +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\ + __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op) + static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -215,6 +302,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) +{ + return atomic_cmpxchg((atomic_t *)v, old, new); +} + static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; @@ -250,16 +342,29 @@ ATOMIC_OPS(xor, ^=, eor) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN +#undef __ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef __ATOMIC_OP #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +#define atomic_xchg_unchecked(v, new) (xchg_unchecked(&((v)->counter), new)) #define atomic_inc(v) atomic_add(1, v) +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + atomic_add_unchecked(1, v); +} #define atomic_dec(v) atomic_sub(1, v) +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + atomic_sub_unchecked(1, v); +} #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) +#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, v) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) #define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v)) +#define atomic_inc_return_unchecked_relaxed(v) (atomic_add_return_unchecked_relaxed(1, v)) #define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v)) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) @@ -270,6 +375,14 @@ typedef struct { long long counter; } atomic64_t; +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + long long counter; +} atomic64_unchecked_t; +#else +typedef atomic64_t atomic64_unchecked_t; +#endif + #define ATOMIC64_INIT(i) { (i) } #ifdef CONFIG_ARM_LPAE @@ -286,6 +399,19 @@ static inline long long atomic64_read(const atomic64_t *v) return result; } +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + long long result; + + __asm__ __volatile__("@ atomic64_read_unchecked\n" +" ldrd %0, %H0, [%1]" + : "=&r" (result) + : "r" (&v->counter), "Qo" (v->counter) + ); + + return result; +} + static inline void atomic64_set(atomic64_t *v, long long i) { __asm__ __volatile__("@ atomic64_set\n" @@ -294,6 +420,15 @@ static inline void atomic64_set(atomic64_t *v, long 
long i) : "r" (&v->counter), "r" (i) ); } + +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) +{ + __asm__ __volatile__("@ atomic64_set_unchecked\n" +" strd %2, %H2, [%1]" + : "=Qo" (v->counter) + : "r" (&v->counter), "r" (i) + ); +} #else static inline long long atomic64_read(const atomic64_t *v) { @@ -308,6 +443,19 @@ static inline long long atomic64_read(const atomic64_t *v) return result; } +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + long long result; + + __asm__ __volatile__("@ atomic64_read_unchecked\n" +" ldrexd %0, %H0, [%1]" + : "=&r" (result) + : "r" (&v->counter), "Qo" (v->counter) + ); + + return result; +} + static inline void atomic64_set(atomic64_t *v, long long i) { long long tmp; @@ -322,50 +470,82 @@ static inline void atomic64_set(atomic64_t *v, long long i) : "r" (&v->counter), "r" (i) : "cc"); } + +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) +{ + long long tmp; + + prefetchw(&v->counter); + __asm__ __volatile__("@ atomic64_set_unchecked\n" +"1: ldrexd %0, %H0, [%2]\n" +" strexd %0, %3, %H3, [%2]\n" +" teq %0, #0\n" +" bne 1b" + : "=&r" (tmp), "=Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : "cc"); +} #endif -#define ATOMIC64_OP(op, op1, op2) \ -static inline void atomic64_##op(long long i, atomic64_t *v) \ +#define __OVERFLOW_POST_RETURN64 \ + " bvc 3f\n" \ +" mov %Q1, %Q0\n" \ +" mov %R1, %R0\n" \ + "2: " REFCOUNT_TRAP_INSN "\n"\ + "3:\n" + +#define __ATOMIC64_OP(op, suffix, op1, op2) \ +static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\ { \ long long result; \ unsigned long tmp; \ \ prefetchw(&v->counter); \ - __asm__ __volatile__("@ atomic64_" #op "\n" \ + __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \ "1: ldrexd %0, %H0, [%3]\n" \ " " #op1 " %Q0, %Q0, %Q4\n" \ " " #op2 " %R0, %R0, %R4\n" \ + __OVERFLOW_POST \ " strexd %1, %0, %H0, [%3]\n" \ " teq %1, #0\n" \ -" bne 1b" \ +" bne 1b\n" \ + __OVERFLOW_EXTABLE \ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ : "r" (&v->counter), "r" (i) \ : "cc"); \ } \ -#define ATOMIC64_OP_RETURN(op, op1, op2) \ +#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2) \ + __ATOMIC64_OP(op, , op1, op2##s) + +#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2) \ static inline long long \ -atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \ +atomic64_##op##_return##suffix##_relaxed(long long i, atomic64##suffix##_t *v) \ { \ long long result; \ - unsigned long tmp; \ + long long tmp; \ \ prefetchw(&v->counter); \ \ - __asm__ __volatile__("@ atomic64_" #op "_return\n" \ + __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \ "1: ldrexd %0, %H0, [%3]\n" \ -" " #op1 " %Q0, %Q0, %Q4\n" \ -" " #op2 " %R0, %R0, %R4\n" \ -" strexd %1, %0, %H0, [%3]\n" \ -" teq %1, #0\n" \ -" bne 1b" \ - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ +" " #op1 " %Q1, %Q0, %Q4\n" \ +" " #op2 " %R1, %R0, %R4\n" \ + __OVERFLOW_POST_RETURN64 \ +" strexd %0, %1, %H1, [%3]\n" \ +" teq %0, #0\n" \ +" bne 1b\n" \ + __OVERFLOW_EXTABLE \ + : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter) \ : "r" (&v->counter), "r" (i) \ : "cc"); \ \ return result; \ } +#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2) \ + __ATOMIC64_OP_RETURN(op, , op1, op2##s) + #define ATOMIC64_FETCH_OP(op, op1, op2) \ static inline long long \ atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \ @@ -398,6 +578,7 @@ ATOMIC64_OPS(add, adds, adc) ATOMIC64_OPS(sub, 
subs, sbc) #define atomic64_add_return_relaxed atomic64_add_return_relaxed +#define atomic64_add_return_unchecked_relaxed atomic64_add_return_unchecked_relaxed #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed @@ -422,7 +603,10 @@ ATOMIC64_OPS(xor, eor, eor) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN +#undef __ATOMIC64_OP_RETURN #undef ATOMIC64_OP +#undef __ATOMIC64_OP +#undef __OVERFLOW_POST_RETURN static inline long long atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new) @@ -448,6 +632,13 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new) } #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed +static inline long long +atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old, long long new) +{ + return atomic64_cmpxchg_relaxed((atomic64_t *)ptr, old, new); +} +#define atomic64_cmpxchg_unchecked_relaxed atomic64_cmpxchg_unchecked_relaxed + static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new) { long long result; @@ -468,25 +659,36 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new) } #define atomic64_xchg_relaxed atomic64_xchg_relaxed +static inline long long atomic64_xchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long new) +{ + return atomic64_xchg_relaxed((atomic64_t *)ptr, new); +} +#define atomic64_xchg_unchecked_relaxed atomic64_xchg_unchecked_relaxed + static inline long long atomic64_dec_if_positive(atomic64_t *v) { long long result; - unsigned long tmp; + u64 tmp; smp_mb(); prefetchw(&v->counter); __asm__ __volatile__("@ atomic64_dec_if_positive\n" "1: ldrexd %0, %H0, [%3]\n" -" subs %Q0, %Q0, #1\n" -" sbc %R0, %R0, #0\n" -" teq %R0, #0\n" -" bmi 2f\n" -" strexd %1, %0, %H0, [%3]\n" -" teq %1, #0\n" +" subs %Q1, %Q0, #1\n" +" sbcs %R1, %R0, #0\n" + + __OVERFLOW_POST_RETURN64 + +" teq %R1, #0\n" +" bmi 4f\n" +" strexd %0, %1, %H1, [%3]\n" +" teq %0, #0\n" " bne 1b\n" -"2:" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + + __OVERFLOW_EXTABLE + + : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter) : "r" (&v->counter) : "cc"); @@ -509,13 +711,18 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) " teq %0, %5\n" " teqeq %H0, %H5\n" " moveq %1, #0\n" -" beq 2f\n" +" beq 4f\n" " adds %Q0, %Q0, %Q6\n" -" adc %R0, %R0, %R6\n" +" adcs %R0, %R0, %R6\n" + + __OVERFLOW_POST + " strexd %2, %0, %H0, [%4]\n" " teq %2, #0\n" " bne 1b\n" -"2:" + + __OVERFLOW_EXTABLE + : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (u), "r" (a) : "cc"); @@ -526,12 +733,19 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) return ret; } +#undef __OVERFLOW_EXTABLE +#undef __OVERFLOW_POST_RETURN64 +#undef __OVERFLOW_POST + #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) #define atomic64_inc(v) atomic64_add(1LL, (v)) +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v)) #define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v)) +#define atomic64_inc_return_unchecked_relaxed(v) atomic64_add_return_unchecked_relaxed(1LL, (v)) #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) #define atomic64_dec(v) atomic64_sub(1LL, (v)) +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v)) 
#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v)) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h index 75fe66bc0..2255c8669 100644 --- a/arch/arm/include/asm/cache.h +++ b/arch/arm/include/asm/cache.h @@ -4,8 +4,10 @@ #ifndef __ASMARM_CACHE_H #define __ASMARM_CACHE_H +#include + #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* * Memory returned by kmalloc() may be used for DMA, so we must make diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index bdd283bc5..e66fb8377 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -116,7 +116,7 @@ struct cpu_cache_fns { void (*dma_unmap_area)(const void *, size_t, int); void (*dma_flush_range)(const void *, const void *); -}; +} __no_const __no_randomize_layout; /* * Select the calling method diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h index 524692f4a..a8871ec56 100644 --- a/arch/arm/include/asm/checksum.h +++ b/arch/arm/include/asm/checksum.h @@ -37,7 +37,19 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); __wsum -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); + +static inline __wsum +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) +{ + __wsum ret; + pax_open_userland(); + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr); + pax_close_userland(); + return ret; +} + + /* * Fold a partial checksum without adding pseudo headers diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h index 97882f9ba..ff9d6ac47 100644 --- a/arch/arm/include/asm/cmpxchg.h +++ b/arch/arm/include/asm/cmpxchg.h @@ -117,6 +117,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \ sizeof(*(ptr))); \ }) +#define xchg_unchecked_relaxed(ptr, x) ({ \ + (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \ + sizeof(*(ptr))); \ +}) #include @@ -128,6 +132,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size #endif #define xchg xchg_relaxed +#define xchg_unchecked xchg_unchecked_relaxed /* * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. 
Always make diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h index baefe1d51..29cb35ac0 100644 --- a/arch/arm/include/asm/cpuidle.h +++ b/arch/arm/include/asm/cpuidle.h @@ -32,7 +32,7 @@ struct device_node; struct cpuidle_ops { int (*suspend)(unsigned long arg); int (*init)(struct device_node *, int cpu); -}; +} __no_const; struct of_cpuidle_method { const char *method; diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index 99d9f630d..ec44cb50b 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -42,7 +42,6 @@ #define DOMAIN_USER 1 #define DOMAIN_IO 0 #endif -#define DOMAIN_VECTORS 3 /* * Domain types @@ -51,10 +50,29 @@ #define DOMAIN_CLIENT 1 #ifdef CONFIG_CPU_USE_DOMAINS #define DOMAIN_MANAGER 3 +#define DOMAIN_VECTORS 3 +#define DOMAIN_USERCLIENT DOMAIN_CLIENT +#else + +#ifdef CONFIG_PAX_KERNEXEC +#define DOMAIN_MANAGER 1 +#define DOMAIN_KERNEXEC 3 #else #define DOMAIN_MANAGER 1 #endif +#ifdef CONFIG_PAX_MEMORY_UDEREF +#define DOMAIN_USERCLIENT 0 +#define DOMAIN_UDEREF 1 +#define DOMAIN_VECTORS DOMAIN_KERNEL +#else +#define DOMAIN_USERCLIENT 1 +#define DOMAIN_VECTORS DOMAIN_USER +#endif + +#endif +#define DOMAIN_KERNELCLIENT 1 + #define domain_mask(dom) ((3) << (2 * (dom))) #define domain_val(dom,type) ((type) << (2 * (dom))) @@ -62,13 +80,19 @@ #define DACR_INIT \ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) +#elif defined(CONFIG_PAX_MEMORY_UDEREF) + /* DOMAIN_VECTORS is defined to DOMAIN_KERNEL */ +#define DACR_INIT \ + (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT)) #else #define DACR_INIT \ - (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ + (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) #endif @@ -124,6 +148,17 @@ static inline void set_domain(unsigned val) set_domain(domain); \ } while (0) +#elif defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) +#define modify_domain(dom,type) \ + do { \ + struct thread_info *thread = current_thread_info(); \ + unsigned int domain = get_domain(); \ + domain &= ~domain_mask(dom); \ + domain = domain | domain_val(dom, type); \ + thread->cpu_domain = domain; \ + set_domain(domain); \ + } while (0) + #else static inline void modify_domain(unsigned dom, unsigned type) { } #endif diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index d2315ffd8..f60b47ba9 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x00008000UL + +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 
16 : 10) +#endif /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h index de5354746..52b9a289f 100644 --- a/arch/arm/include/asm/fncpy.h +++ b/arch/arm/include/asm/fncpy.h @@ -81,7 +81,9 @@ BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \ (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \ \ + pax_open_kernel(); \ memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \ + pax_close_kernel(); \ flush_icache_range((unsigned long)(dest_buf), \ (unsigned long)(dest_buf) + (size)); \ \ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 6795368ad..6c4d74984 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; preempt_disable(); + __ua_flags = uaccess_save_and_enable(); __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: " TUSER(ldr) " %1, [%4]\n" diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h index 83eb2f772..ed77159d4 100644 --- a/arch/arm/include/asm/kmap_types.h +++ b/arch/arm/include/asm/kmap_types.h @@ -4,6 +4,6 @@ /* * This is the "bare minimum". AIO seems to require this. */ -#define KM_TYPE_NR 16 +#define KM_TYPE_NR 17 #endif diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h index 9e614a18e..3302cca0c 100644 --- a/arch/arm/include/asm/mach/dma.h +++ b/arch/arm/include/asm/mach/dma.h @@ -22,7 +22,7 @@ struct dma_ops { int (*residue)(unsigned int, dma_t *); /* optional */ int (*setspeed)(unsigned int, dma_t *, int); /* optional */ const char *type; -}; +} __do_const; struct dma_struct { void *addr; /* single DMA address */ diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 9b7c328fb..2dfe68bd2 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -23,17 +23,19 @@ struct map_desc { /* types 0-3 are defined in asm/io.h */ enum { - MT_UNCACHED = 4, - MT_CACHECLEAN, - MT_MINICLEAN, + MT_UNCACHED_RW = 4, + MT_CACHECLEAN_RO, + MT_MINICLEAN_RO, MT_LOW_VECTORS, MT_HIGH_VECTORS, - MT_MEMORY_RWX, + __MT_MEMORY_RWX, MT_MEMORY_RW, - MT_ROM, - MT_MEMORY_RWX_NONCACHED, + MT_MEMORY_RX, + MT_ROM_RX, + MT_MEMORY_RW_NONCACHED, + MT_MEMORY_RX_NONCACHED, MT_MEMORY_RW_DTCM, - MT_MEMORY_RWX_ITCM, + MT_MEMORY_RX_ITCM, MT_MEMORY_RW_SO, MT_MEMORY_DMA_READY, }; diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index c2bf24f40..69e437cf2 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -39,7 +39,7 @@ struct outer_cache_fns { /* This is an ARM L2C thing */ void (*write_sec)(unsigned long, unsigned); void (*configure)(const struct l2x0_regs *); -}; +} __no_const; extern struct outer_cache_fns outer_cache; diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 4355f0ec4..cd9168e6b 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -23,6 +23,7 @@ #else +#include #include /* @@ -114,7 +115,7 @@ struct cpu_user_fns { void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); void (*cpu_copy_user_highpage)(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); -}; +} __no_const; #ifdef MULTI_USER extern struct cpu_user_fns cpu_user; diff --git a/arch/arm/include/asm/pgalloc.h 
b/arch/arm/include/asm/pgalloc.h index b2902a5cd..da11e4d1d 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -17,6 +17,7 @@ #include #include #include +#include #define check_pgt_cache() do { } while (0) @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); } +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_populate(mm, pud, pmd); +} + #else /* !CONFIG_ARM_LPAE */ /* @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(mm, pmd) do { } while (0) #define pud_populate(mm,pmd,pte) BUG() +#define pud_populate_kernel(mm,pmd,pte) BUG() #endif /* CONFIG_ARM_LPAE */ @@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) __free_page(pte); } +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot) +{ +#ifdef CONFIG_ARM_LPAE + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot); +#else + if (addr & SECTION_SIZE) + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot); + else + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot); +#endif + flush_pmd_entry(pmdp); +} + static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, pmdval_t prot) { diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h index 3f82e9da7..2a85e8b56 100644 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -28,7 +28,7 @@ /* * - section */ -#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ @@ -40,6 +40,7 @@ #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */ #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */ #define PMD_SECT_AF (_AT(pmdval_t, 0)) +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0)) #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0)) #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) @@ -70,6 +71,7 @@ * - extended small page/tiny page */ #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */ +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */ #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4) #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 92fd2c8a9..061dae1f1 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -127,6 +127,9 @@ #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ #define L_PTE_NONE (_AT(pteval_t, 1) << 11) +/* Two-level page tables only have PXN in the PGD, not in the PTE. 
*/ +#define L_PTE_PXN (_AT(pteval_t, 0)) + /* * These are the memory types, defined to be compatible with * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 2a029bcea..a0524c76e 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -80,6 +80,7 @@ #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */ +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */ #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) @@ -90,10 +91,12 @@ #define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57) #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58) +#define PMD_SECT_RDONLY PMD_SECT_AP2 /* * To be used in assembly code with the upper page attributes. */ +#define L_PTE_PXN_HIGH (1 << (53 - 32)) #define L_PTE_XN_HIGH (1 << (54 - 32)) #define L_PTE_DIRTY_HIGH (1 << (55 - 32)) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index a8d656d9a..2febb8aee 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -33,6 +33,9 @@ #include #endif +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + /* * Just any arbitrary offset to the start of the vmalloc VM area: the * current 8MB value just means that there will be a 8MB "hole" after the @@ -48,6 +51,9 @@ #define LIBRARY_TEXT_START 0x0c000000 #ifndef __ASSEMBLY__ +extern pteval_t __supported_pte_mask; +extern pmdval_t __supported_pmd_mask; + extern void __pte_error(const char *file, int line, pte_t); extern void __pmd_error(const char *file, int line, pmd_t); extern void __pgd_error(const char *file, int line, pgd_t); @@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) +#define __HAVE_ARCH_PAX_OPEN_KERNEL +#define __HAVE_ARCH_PAX_CLOSE_KERNEL + +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) +#include +#include +#include + +static inline int test_domain(int domain, int domaintype) +{ + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype); +} +#endif + +#ifdef CONFIG_PAX_KERNEXEC +static inline unsigned long pax_open_kernel(void) { +#ifdef CONFIG_ARM_LPAE + /* TODO */ +#else + preempt_disable(); + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC)); + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC); +#endif + return 0; +} + +static inline unsigned long pax_close_kernel(void) { +#ifdef CONFIG_ARM_LPAE + /* TODO */ +#else + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER)); + /* DOMAIN_MANAGER = "client" under KERNEXEC */ + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER); + preempt_enable_no_resched(); +#endif + return 0; +} +#else +static inline unsigned long pax_open_kernel(void) { return 0; } +static inline unsigned long pax_close_kernel(void) { return 0; } +#endif + /* * This is the lowest virtual address we can permit any user space * mapping to be mapped at. 
This is particularly important for @@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t); /* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, - * as well as any architecture dependent bits like global/ASID and SMP - * shared mapping bits. + * as well as any architecture dependent bits like global/ASID, PXN, + * and SMP shared mapping bits. */ #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG @@ -308,7 +356,7 @@ static inline pte_t pte_mknexec(pte_t pte) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | - L_PTE_NONE | L_PTE_VALID; + L_PTE_NONE | L_PTE_VALID | __supported_pte_mask; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 3d6dc8b46..1262ad315 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -108,7 +108,7 @@ struct smp_operations { int (*cpu_disable)(unsigned int cpu); #endif #endif -}; +} __no_const; struct of_cpu_method { const char *method; diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h index cf4f3aad0..8f2f2d9a6 100644 --- a/arch/arm/include/asm/string.h +++ b/arch/arm/include/asm/string.h @@ -7,19 +7,19 @@ */ #define __HAVE_ARCH_STRRCHR -extern char * strrchr(const char * s, int c); +extern char * strrchr(const char * s, int c) __nocapture(-1); #define __HAVE_ARCH_STRCHR -extern char * strchr(const char * s, int c); +extern char * strchr(const char * s, int c) __nocapture(-1); #define __HAVE_ARCH_MEMCPY -extern void * memcpy(void *, const void *, __kernel_size_t); +extern void * memcpy(void *, const void *, __kernel_size_t) __nocapture(2); #define __HAVE_ARCH_MEMMOVE -extern void * memmove(void *, const void *, __kernel_size_t); +extern void * memmove(void *, const void *, __kernel_size_t) __nocapture(2); #define __HAVE_ARCH_MEMCHR -extern void * memchr(const void *, int, __kernel_size_t); +extern void * memchr(const void *, int, __kernel_size_t) __nocapture(-1); #define __HAVE_ARCH_MEMSET extern void * memset(void *, int, __kernel_size_t); diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 776757d16..a552c1d75 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -73,6 +73,9 @@ struct thread_info { .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \ + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \ } #define init_thread_info (init_thread_union.thread_info) @@ -143,6 +146,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ #define TIF_SECCOMP 7 /* seccomp syscall filtering active */ +/* within 8 bits of TIF_SYSCALL_TRACE + * to meet flexible second operand requirements + */ +#define TIF_GRSEC_SETXID 8 #define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 @@ -158,10 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) /* Checks for 
any syscall work in entry-common.S */ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID) /* * Change these and you break ASM code in entry-common.S diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h index f6fcc67ef..5895d62b9 100644 --- a/arch/arm/include/asm/timex.h +++ b/arch/arm/include/asm/timex.h @@ -13,6 +13,7 @@ #define _ASMARM_TIMEX_H typedef unsigned long cycles_t; +extern int read_current_timer(unsigned long *timer_val); #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; }) #endif diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 5f833f7ad..76e664486 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -3,6 +3,7 @@ #include #include +#include #ifdef __ASSEMBLY__ #include @@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val) * at 0xffff0fe0 must be used instead. (see * entry-armv.S for details) */ + pax_open_kernel(); *((unsigned int *)0xffff0ff0) = val; + pax_close_kernel(); #endif } diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 1f59ea051..81245f0b0 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -18,6 +18,7 @@ #include #include #include +#include #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #include @@ -50,6 +51,59 @@ struct exception_table_entry extern int fixup_exception(struct pt_regs *regs); /* + * These two are intentionally not defined anywhere - if the kernel + * code generates any references to them, that's a bug. + */ +extern int __get_user_bad(void); +extern int __put_user_bad(void); + +/* + * Note that this is actually 0x1,0000,0000 + */ +#define KERNEL_DS 0x00000000 +#define get_ds() (KERNEL_DS) + +#ifdef CONFIG_MMU + +#define USER_DS TASK_SIZE +#define get_fs() (current_thread_info()->addr_limit) + +static inline void set_fs(mm_segment_t fs) +{ + current_thread_info()->addr_limit = fs; + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER); +} + +#define segment_eq(a, b) ((a) == (b)) + +#define __HAVE_ARCH_PAX_OPEN_USERLAND +#define __HAVE_ARCH_PAX_CLOSE_USERLAND + +static inline void pax_open_userland(void) +{ + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (segment_eq(get_fs(), USER_DS)) { + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF)); + modify_domain(DOMAIN_USER, DOMAIN_UDEREF); + } +#endif + +} + +static inline void pax_close_userland(void) +{ + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (segment_eq(get_fs(), USER_DS)) { + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS)); + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS); + } +#endif + +} + +/* * These two functions allow hooking accesses to userspace to increase * system integrity by ensuring that the kernel can not inadvertantly * perform such accesses (eg, via list poison values) which could then @@ -66,6 +120,7 @@ static inline unsigned int uaccess_save_and_enable(void) return old_domain; #else + pax_open_userland(); return 0; #endif } @@ -75,35 +130,11 @@ static inline void uaccess_restore(unsigned int flags) #ifdef CONFIG_CPU_SW_DOMAIN_PAN /* Restore the user access mask */ set_domain(flags); +#else + pax_close_userland(); #endif } -/* - * These two are intentionally not defined anywhere - if the kernel - * code generates any references to them, that's a bug. 
- */ -extern int __get_user_bad(void); -extern int __put_user_bad(void); - -/* - * Note that this is actually 0x1,0000,0000 - */ -#define KERNEL_DS 0x00000000 -#define get_ds() (KERNEL_DS) - -#ifdef CONFIG_MMU - -#define USER_DS TASK_SIZE -#define get_fs() (current_thread_info()->addr_limit) - -static inline void set_fs(mm_segment_t fs) -{ - current_thread_info()->addr_limit = fs; - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); -} - -#define segment_eq(a, b) ((a) == (b)) - /* We use 33-bit arithmetic here... */ #define __range_ok(addr, size) ({ \ unsigned long flag, roksum; \ @@ -268,6 +299,7 @@ static inline void set_fs(mm_segment_t fs) #endif /* CONFIG_MMU */ +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) #define access_ok(type, addr, size) (__range_ok(addr, size) == 0) #define user_addr_max() \ @@ -474,10 +506,10 @@ do { \ #ifdef CONFIG_MMU -extern unsigned long __must_check +extern unsigned long __must_check __size_overflow(3) arm_copy_from_user(void *to, const void __user *from, unsigned long n); -static inline unsigned long __must_check +static inline unsigned long __must_check __size_overflow(3) __copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned int __ua_flags; @@ -489,9 +521,9 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) return n; } -extern unsigned long __must_check +extern unsigned long __must_check __size_overflow(3) arm_copy_to_user(void __user *to, const void *from, unsigned long n); -extern unsigned long __must_check +extern unsigned long __must_check __size_overflow(3) __copy_to_user_std(void __user *to, const void *from, unsigned long n); static inline unsigned long __must_check @@ -511,9 +543,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n) #endif } -extern unsigned long __must_check +extern unsigned long __must_check __size_overflow(2) arm_clear_user(void __user *addr, unsigned long n); -extern unsigned long __must_check +extern unsigned long __must_check __size_overflow(2) __clear_user_std(void __user *addr, unsigned long n); static inline unsigned long __must_check @@ -534,6 +566,10 @@ __clear_user(void __user *addr, unsigned long n) static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; + + if ((long)n < 0) + return n; + if (likely(access_ok(VERIFY_READ, from, n))) res = __copy_from_user(to, from, n); if (unlikely(res)) @@ -543,6 +579,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); return n; diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h index 5af0ed1b8..cea838833 100644 --- a/arch/arm/include/uapi/asm/ptrace.h +++ b/arch/arm/include/uapi/asm/ptrace.h @@ -92,7 +92,7 @@ * ARMv7 groups of PSR bits */ #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 7e45f69a0..2c047db42 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ 
-59,7 +59,7 @@ EXPORT_SYMBOL(arm_delay_ops); /* networking */ EXPORT_SYMBOL(csum_partial); -EXPORT_SYMBOL(csum_partial_copy_from_user); +EXPORT_SYMBOL(__csum_partial_copy_from_user); EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(__csum_ipv6_magic); diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c index 9f43ba012..1cee4757e 100644 --- a/arch/arm/kernel/efi.c +++ b/arch/arm/kernel/efi.c @@ -60,9 +60,9 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) * preference. */ if (md->attribute & EFI_MEMORY_WB) - desc.type = MT_MEMORY_RWX; + desc.type = __MT_MEMORY_RWX; else if (md->attribute & EFI_MEMORY_WT) - desc.type = MT_MEMORY_RWX_NONCACHED; + desc.type = MT_MEMORY_RW_NONCACHED; else if (md->attribute & EFI_MEMORY_WC) desc.type = MT_DEVICE_WC; else diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 9f157e7c5..8e3f85763 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -50,6 +50,87 @@ 9997: .endm + .macro pax_enter_kernel +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + @ make aligned space for saved DACR + sub sp, sp, #8 + @ save regs + stmdb sp!, {r1, r2} + @ read DACR from cpu_domain into r1 + mov r2, sp + @ assume 8K pages, since we have to split the immediate in two + bic r2, r2, #(0x1fc0) + bic r2, r2, #(0x3f) + ldr r1, [r2, #TI_CPU_DOMAIN] + @ store old DACR on stack + str r1, [sp, #8] +#ifdef CONFIG_PAX_KERNEXEC + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) +#endif +#ifdef CONFIG_PAX_MEMORY_UDEREF + @ set current DOMAIN_USER to DOMAIN_NOACCESS + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) +#endif + @ write r1 to current_thread_info()->cpu_domain + str r1, [r2, #TI_CPU_DOMAIN] + @ write r1 to DACR + mcr p15, 0, r1, c3, c0, 0 + @ instruction sync + instr_sync + @ restore regs + ldmia sp!, {r1, r2} +#endif + .endm + + .macro pax_open_userland +#ifdef CONFIG_PAX_MEMORY_UDEREF + @ save regs + stmdb sp!, {r0, r1} + @ read DACR from cpu_domain into r1 + mov r0, sp + @ assume 8K pages, since we have to split the immediate in two + bic r0, r0, #(0x1fc0) + bic r0, r0, #(0x3f) + ldr r1, [r0, #TI_CPU_DOMAIN] + @ set current DOMAIN_USER to DOMAIN_CLIENT + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) + @ write r1 to current_thread_info()->cpu_domain + str r1, [r0, #TI_CPU_DOMAIN] + @ write r1 to DACR + mcr p15, 0, r1, c3, c0, 0 + @ instruction sync + instr_sync + @ restore regs + ldmia sp!, {r0, r1} +#endif + .endm + + .macro pax_close_userland +#ifdef CONFIG_PAX_MEMORY_UDEREF + @ save regs + stmdb sp!, {r0, r1} + @ read DACR from cpu_domain into r1 + mov r0, sp + @ assume 8K pages, since we have to split the immediate in two + bic r0, r0, #(0x1fc0) + bic r0, r0, #(0x3f) + ldr r1, [r0, #TI_CPU_DOMAIN] + @ set current DOMAIN_USER to DOMAIN_NOACCESS + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) + @ write r1 to current_thread_info()->cpu_domain + str r1, [r0, #TI_CPU_DOMAIN] + @ write r1 to DACR + mcr p15, 0, r1, c3, c0, 0 + @ instruction sync + instr_sync + @ restore regs + ldmia sp!, {r0, r1} +#endif + .endm + .macro pabt_helper @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 #ifdef MULTI_PABORT @@ -92,11 +173,15 @@ * Invalid mode handlers */ .macro inv_entry, reason + + pax_enter_kernel + sub sp, sp, #PT_REGS_SIZE ARM( stmib sp, {r1 - lr} ) THUMB( stmia sp, {r0 - r12} ) THUMB( str sp, 
[sp, #S_SP] ) THUMB( str lr, [sp, #S_LR] ) + mov r1, #\reason .endm @@ -152,6 +237,9 @@ ENDPROC(__und_invalid) .macro svc_entry, stack_hole=0, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.save {r0 - pc} ) + + pax_enter_kernel + sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4) #ifdef CONFIG_THUMB2_KERNEL SPFIX( str r0, [sp] ) @ temporarily saved @@ -167,7 +255,12 @@ ENDPROC(__und_invalid) ldmia r0, {r3 - r5} add r7, sp, #S_SP - 4 @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + @ offset sp by 8 as done in pax_enter_kernel + add r2, sp, #(SVC_REGS_SIZE + \stack_hole + 4) +#else add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4) +#endif SPFIX( addeq r2, r2, #4 ) str r3, [sp, #-4]! @ save the "real" r0 copied @ from the exception stack @@ -382,6 +475,9 @@ ENDPROC(__fiq_abt) .macro usr_entry, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.cantunwind ) @ don't unwind the user space + + pax_enter_kernel_user + sub sp, sp, #PT_REGS_SIZE ARM( stmib sp, {r1 - r12} ) THUMB( stmia sp, {r0 - r12} ) @@ -495,7 +591,9 @@ __und_usr: tst r3, #PSR_T_BIT @ Thumb mode? bne __und_usr_thumb sub r4, r2, #4 @ ARM instr at LR - 4 + pax_open_userland 1: ldrt r0, [r4] + pax_close_userland ARM_BE8(rev r0, r0) @ little endian instruction uaccess_disable ip @@ -531,11 +629,15 @@ __und_usr_thumb: */ .arch armv6t2 #endif + pax_open_userland 2: ldrht r5, [r4] + pax_close_userland ARM_BE8(rev16 r5, r5) @ little endian instruction cmp r5, #0xe800 @ 32bit instruction if xx != 0 blo __und_usr_fault_16_pan @ 16bit undefined instruction + pax_open_userland 3: ldrht r0, [r2] + pax_close_userland ARM_BE8(rev16 r0, r0) @ little endian instruction uaccess_disable ip add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 @@ -566,7 +668,8 @@ ENDPROC(__und_usr) */ .pushsection .text.fixup, "ax" .align 2 -4: str r4, [sp, #S_PC] @ retry current instruction +4: pax_close_userland + str r4, [sp, #S_PC] @ retry current instruction ret r9 .popsection .pushsection __ex_table,"a" @@ -788,7 +891,7 @@ ENTRY(__switch_to) THUMB( str lr, [ip], #4 ) ldr r4, [r2, #TI_TP_VALUE] ldr r5, [r2, #TI_TP_VALUE + 4] -#ifdef CONFIG_CPU_USE_DOMAINS +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) mrc p15, 0, r6, c3, c0, 0 @ Get domain register str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register ldr r6, [r2, #TI_CPU_DOMAIN] @@ -799,7 +902,7 @@ ENTRY(__switch_to) ldr r8, =__stack_chk_guard ldr r7, [r7, #TSK_STACK_CANARY] #endif -#ifdef CONFIG_CPU_USE_DOMAINS +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) mcr p15, 0, r6, c3, c0, 0 @ Set domain register #endif mov r5, r0 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 10c3283d6..c47cdf5d9 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -11,18 +11,46 @@ #include #include #include +#include #include +#include "entry-header.S" + #ifdef CONFIG_NEED_RET_TO_USER #include #else .macro arch_ret_to_user, tmp1, tmp2 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + @ save regs + stmdb sp!, {r1, r2} + @ read DACR from cpu_domain into r1 + mov r2, sp + @ assume 8K pages, since we have to split the immediate in two + bic r2, r2, #(0x1fc0) + bic r2, r2, #(0x3f) + ldr r1, [r2, #TI_CPU_DOMAIN] +#ifdef CONFIG_PAX_KERNEXEC + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) + orr r1, r1, 
#(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) +#endif +#ifdef CONFIG_PAX_MEMORY_UDEREF + @ set current DOMAIN_USER to DOMAIN_UDEREF + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) +#endif + @ write r1 to current_thread_info()->cpu_domain + str r1, [r2, #TI_CPU_DOMAIN] + @ write r1 to DACR + mcr p15, 0, r1, c3, c0, 0 + @ instruction sync + instr_sync + @ restore regs + ldmia sp!, {r1, r2} +#endif .endm #endif -#include "entry-header.S" - - .align 5 #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING)) /* @@ -36,7 +64,9 @@ ret_fast_syscall: UNWIND(.cantunwind ) disable_irq_notrace @ disable interrupts ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK + bne fast_work_pending + tst r1, #_TIF_WORK_MASK bne fast_work_pending /* perform architecture specific actions before user return */ @@ -62,7 +92,9 @@ ret_fast_syscall: str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 disable_irq_notrace @ disable interrupts ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK + bne __sys_trace_return_nosave + tst r1, #_TIF_WORK_MASK beq no_work_pending UNWIND(.fnend ) ENDPROC(ret_fast_syscall) @@ -199,6 +231,12 @@ ENTRY(vector_swi) uaccess_disable tbl + /* + * do this here to avoid a performance hit of wrapping the code above + * that directly dereferences userland to parse the SWI instruction + */ + pax_enter_kernel_user + adr tbl, sys_call_table @ load syscall table pointer #if defined(CONFIG_OABI_COMPAT) diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 6391728c8..6bf90b858 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -196,6 +196,59 @@ msr cpsr_c, \rtemp @ switch back to the SVC mode .endm + .macro pax_enter_kernel_user +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + @ save regs + stmdb sp!, {r0, r1} + @ read DACR from cpu_domain into r1 + mov r0, sp + @ assume 8K pages, since we have to split the immediate in two + bic r0, r0, #(0x1fc0) + bic r0, r0, #(0x3f) + ldr r1, [r0, #TI_CPU_DOMAIN] +#ifdef CONFIG_PAX_MEMORY_UDEREF + @ set current DOMAIN_USER to DOMAIN_NOACCESS + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) +#endif +#ifdef CONFIG_PAX_KERNEXEC + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) +#endif + @ write r1 to current_thread_info()->cpu_domain + str r1, [r0, #TI_CPU_DOMAIN] + @ write r1 to DACR + mcr p15, 0, r1, c3, c0, 0 + @ instruction sync + instr_sync + @ restore regs + ldmia sp!, {r0, r1} +#endif + .endm + + .macro pax_exit_kernel +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + @ save regs + stmdb sp!, {r0, r1} + @ read old DACR from stack into r1 + ldr r1, [sp, #(8 + S_SP)] + sub r1, r1, #8 + ldr r1, [r1] + + @ write r1 to current_thread_info()->cpu_domain + mov r0, sp + @ assume 8K pages, since we have to split the immediate in two + bic r0, r0, #(0x1fc0) + bic r0, r0, #(0x3f) + str r1, [r0, #TI_CPU_DOMAIN] + @ write r1 to DACR + mcr p15, 0, r1, c3, c0, 0 + @ instruction sync + instr_sync + @ restore regs + ldmia sp!, {r0, r1} +#endif + .endm .macro svc_exit, rpsr, irq = 0 .if \irq != 0 @@ -219,6 +272,8 @@ uaccess_restore str r1, [tsk, #TI_ADDR_LIMIT] + pax_exit_kernel + #ifndef CONFIG_THUMB2_KERNEL @ ARM mode SVC restore 
 	msr spsr_cxsf, \rpsr
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 059c3da0f..8e45cfc02 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
 	void *base = vectors_page;
 	unsigned offset = FIQ_OFFSET;
 
+	pax_open_kernel();
 	memcpy(base + offset, start, length);
+	pax_close_kernel();
+
 	if (!cache_is_vipt_nonaliasing())
 		flush_icache_range((unsigned long)base + offset,
 				   offset + length);
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 4f14b5ce6..91ff2610e 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -38,17 +38,47 @@
 #endif
 
 #ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
 {
-	void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+	void *p;
+
+	if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
+		return NULL;
+
+	p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, prot, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 	if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
 		return p;
 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				GFP_KERNEL, prot, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
+
+void *module_alloc(unsigned long size)
+{
+
+#ifdef CONFIG_PAX_KERNEXEC
+	return __module_alloc(size, PAGE_KERNEL);
+#else
+	return __module_alloc(size, PAGE_KERNEL_EXEC);
+#endif
+
+}
+
+#ifdef CONFIG_PAX_KERNEXEC
+void module_memfree_exec(void *module_region)
+{
+	module_memfree(module_region);
+}
+EXPORT_SYMBOL(module_memfree_exec);
+
+void *module_alloc_exec(unsigned long size)
+{
+	return __module_alloc(size, PAGE_KERNEL_EXEC);
+}
+EXPORT_SYMBOL(module_alloc_exec);
+#endif
 #endif
 
 int
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 69bda1a57..755113ad7 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
 	else
 		__acquire(&patch_lock);
 
+	pax_open_kernel();
 	if (thumb2 && __opcode_is_thumb16(insn)) {
 		*(u16 *)waddr = __opcode_to_mem_thumb16(insn);
 		size = sizeof(u16);
@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
 		*(u32 *)waddr = insn;
 		size = sizeof(u32);
 	}
+	pax_close_kernel();
 
 	if (waddr != addr) {
 		flush_kernel_vmap_range(waddr, twopage ?
size / 2 : size); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 91d2d5b01..042c26e5f 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -118,8 +118,8 @@ void __show_regs(struct pt_regs *regs) show_regs_print_info(KERN_DEFAULT); - print_symbol("PC is at %s\n", instruction_pointer(regs)); - print_symbol("LR is at %s\n", regs->ARM_lr); + printk("PC is at %pA\n", (void *)instruction_pointer(regs)); + printk("LR is at %pA\n", (void *)regs->ARM_lr); printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" "sp : %08lx ip : %08lx fp : %08lx\n", regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, @@ -233,7 +233,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); -#ifdef CONFIG_CPU_USE_DOMAINS +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) /* * Copy the initial value of the domain access control register * from the current thread: thread->addr_limit will have been @@ -336,7 +336,7 @@ static struct vm_area_struct gate_vma = { static int __init gate_vma_init(void) { - gate_vma.vm_page_prot = PAGE_READONLY_EXEC; + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); return 0; } arch_initcall(gate_vma_init); @@ -365,92 +365,14 @@ const char *arch_vma_name(struct vm_area_struct *vma) return is_gate_vma(vma) ? "[vectors]" : NULL; } -/* If possible, provide a placement hint at a random offset from the - * stack for the sigpage and vdso pages. - */ -static unsigned long sigpage_addr(const struct mm_struct *mm, - unsigned int npages) -{ - unsigned long offset; - unsigned long first; - unsigned long last; - unsigned long addr; - unsigned int slots; - - first = PAGE_ALIGN(mm->start_stack); - - last = TASK_SIZE - (npages << PAGE_SHIFT); - - /* No room after stack? */ - if (first > last) - return 0; - - /* Just enough room? */ - if (first == last) - return first; - - slots = ((last - first) >> PAGE_SHIFT) + 1; - - offset = get_random_int() % slots; - - addr = first + (offset << PAGE_SHIFT); - - return addr; -} - -static struct page *signal_page; -extern struct page *get_signal_page(void); - -static const struct vm_special_mapping sigpage_mapping = { - .name = "[sigpage]", - .pages = &signal_page, -}; - int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long npages; - unsigned long addr; - unsigned long hint; - int ret = 0; - - if (!signal_page) - signal_page = get_signal_page(); - if (!signal_page) - return -ENOMEM; - - npages = 1; /* for sigpage */ - npages += vdso_total_pages; if (down_write_killable(&mm->mmap_sem)) return -EINTR; - hint = sigpage_addr(mm, npages); - addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); - if (IS_ERR_VALUE(addr)) { - ret = addr; - goto up_fail; - } - - vma = _install_special_mapping(mm, addr, PAGE_SIZE, - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - &sigpage_mapping); - - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto up_fail; - } - - mm->context.sigpage = addr; - - /* Unlike the sigpage, failure to install the vdso is unlikely - * to be fatal to the process, so no error check needed - * here. 
- */ - arm_install_vdso(mm, addr + PAGE_SIZE); - - up_fail: + mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC; up_write(&mm->mmap_sem); - return ret; + return 0; } #endif diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ae738a631..ee4d46fe3 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs, regs->ARM_ip = ip; } +#ifdef CONFIG_GRKERNSEC_SETXID +extern void gr_delayed_cred_worker(void); +#endif + asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) { current_thread_info()->syscall = scno; +#ifdef CONFIG_GRKERNSEC_SETXID + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) + gr_delayed_cred_worker(); +#endif + if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c index 3fa867a2a..d6106070b 100644 --- a/arch/arm/kernel/reboot.c +++ b/arch/arm/kernel/reboot.c @@ -120,6 +120,7 @@ void machine_power_off(void) if (pm_power_off) pm_power_off(); + while (1); } /* diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 34e3f3c45..3d2dada38 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -112,6 +112,8 @@ EXPORT_SYMBOL(elf_hwcap); unsigned int elf_hwcap2 __read_mostly; EXPORT_SYMBOL(elf_hwcap2); +pteval_t __supported_pte_mask __read_only; +pmdval_t __supported_pmd_mask __read_only; #ifdef MULTI_CPU struct processor processor __ro_after_init; @@ -257,9 +259,13 @@ static int __get_cpu_architecture(void) * Register 0 and check for VMSAv7 or PMSAv7 */ unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0); if ((mmfr0 & 0x0000000f) >= 0x00000003 || - (mmfr0 & 0x000000f0) >= 0x00000030) + (mmfr0 & 0x000000f0) >= 0x00000030) { cpu_arch = CPU_ARCH_ARMv7; - else if ((mmfr0 & 0x0000000f) == 0x00000002 || + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) { + __supported_pte_mask |= L_PTE_PXN; + __supported_pmd_mask |= PMD_PXNTABLE; + } + } else if ((mmfr0 & 0x0000000f) == 0x00000002 || (mmfr0 & 0x000000f0) == 0x00000020) cpu_arch = CPU_ARCH_ARMv6; else diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 7b8f21414..ece8e2863 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -24,8 +24,6 @@ extern const unsigned long sigreturn_codes[7]; -static unsigned long signal_return_offset; - #ifdef CONFIG_CRUNCH static int preserve_crunch_context(struct crunch_sigframe __user *frame) { @@ -388,8 +386,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, * except when the MPU has protected the vectors * page from PL0 */ - retcode = mm->context.sigpage + signal_return_offset + - (idx << 2) + thumb; + retcode = mm->context.sigpage + (idx << 2) + thumb; } else #endif { @@ -601,33 +598,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } while (thread_flags & _TIF_WORK_MASK); return 0; } - -struct page *get_signal_page(void) -{ - unsigned long ptr; - unsigned offset; - struct page *page; - void *addr; - - page = alloc_pages(GFP_KERNEL, 0); - - if (!page) - return NULL; - - addr = page_address(page); - - /* Give the signal return code some randomness */ - offset = 0x200 + (get_random_int() & 0x7fc); - signal_return_offset = offset; - - /* - * Copy signal return handlers into the vector page, and - * set sigreturn to be a pointer to these. 
-	 */
-	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
-
-	ptr = (unsigned long)addr + offset;
-	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
-
-	return page;
-}
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index b10e13607..cb5edf9c7 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -64,7 +64,7 @@ static struct map_desc itcm_iomap[] __initdata = {
 		.virtual = ITCM_OFFSET,
 		.pfn = __phys_to_pfn(ITCM_OFFSET),
 		.length = 0,
-		.type = MT_MEMORY_RWX_ITCM,
+		.type = MT_MEMORY_RX_ITCM,
 	}
 };
 
@@ -362,7 +362,9 @@ void __init tcm_init(void)
 		start = &__sitcm_text;
 		end = &__eitcm_text;
 		ram = &__itcm_start;
+		pax_open_kernel();
 		memcpy(start, ram, itcm_code_sz);
+		pax_close_kernel();
 		pr_debug("CPU ITCM: copied code from %p - %p\n",
 			 start, end);
 		itcm_present = true;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 9688ec0c6..dd072c025 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-	printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+	printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
 #else
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
@@ -287,6 +287,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
+extern void gr_handle_kernel_exploit(void);
+
 static unsigned long oops_begin(void)
 {
 	int cpu;
@@ -329,6 +331,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
+
+	gr_handle_kernel_exploit();
+
 	if (signr)
 		do_exit(signr);
 }
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index f7f55df0b..49c9f9e69 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -44,7 +44,8 @@
 #endif
 
 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
-	defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
+	defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL) || \
+	defined(CONFIG_PAX_REFCOUNT)
 #define ARM_EXIT_KEEP(x) x
 #define ARM_EXIT_DISCARD(x)
 #else
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 19b5f5c1c..9aa8e585c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -59,7 +59,7 @@ static unsigned long hyp_default_vectors;
 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 
 /* The VMID used in the VTTBR */
-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
@@ -423,7 +423,7 @@ void force_vm_exit(const cpumask_t *mask)
  */
 static bool need_new_vmid_gen(struct kvm *kvm)
 {
-	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+	return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
 }
 
 /**
@@ -456,7 +456,7 @@ static void update_vttbr(struct kvm *kvm)
 	/* First user of a new VMID generation?
*/ if (unlikely(kvm_next_vmid == 0)) { - atomic64_inc(&kvm_vmid_gen); + atomic64_inc_unchecked(&kvm_vmid_gen); kvm_next_vmid = 1; /* @@ -473,7 +473,7 @@ static void update_vttbr(struct kvm *kvm) kvm_call_hyp(__kvm_flush_vm_context); } - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); + kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen); kvm->arch.vmid = kvm_next_vmid; kvm_next_vmid++; kvm_next_vmid &= (1 << kvm_vmid_bits) - 1; diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S index 6ee2f6706..d1cce7642 100644 --- a/arch/arm/lib/copy_page.S +++ b/arch/arm/lib/copy_page.S @@ -10,6 +10,7 @@ * ASM optimised string functions */ #include +#include #include #include #include diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 1712f132b..a3165dc56 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -71,8 +71,8 @@ * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT */ -#define FN_ENTRY ENTRY(csum_partial_copy_from_user) -#define FN_EXIT ENDPROC(csum_partial_copy_from_user) +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user) +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user) #include "csumpartialcopygeneric.S" diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 6bd1089b0..e9994008b 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -84,7 +84,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp) return 1; } -static unsigned long noinline +static unsigned long noinline __size_overflow(3) __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) { unsigned long ua_flags; @@ -157,7 +157,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) return n; } -static unsigned long noinline +static unsigned long noinline __size_overflow(2) __clear_user_memset(void __user *addr, unsigned long n) { unsigned long ua_flags; diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 06332f626..1fa0c7132 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -724,8 +724,10 @@ void __init exynos_pm_init(void) tmp |= pm_data->wake_disable_mask; pmu_raw_writel(tmp, S5P_WAKEUP_MASK); - exynos_pm_syscore_ops.suspend = pm_data->pm_suspend; - exynos_pm_syscore_ops.resume = pm_data->pm_resume; + pax_open_kernel(); + const_cast(exynos_pm_syscore_ops.suspend) = pm_data->pm_suspend; + const_cast(exynos_pm_syscore_ops.resume) = pm_data->pm_resume; + pax_close_kernel(); register_syscore_ops(&exynos_pm_syscore_ops); suspend_set_ops(&exynos_suspend_ops); diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c index afba5460c..9e5403d90 100644 --- a/arch/arm/mach-mmp/mmp2.c +++ b/arch/arm/mach-mmp/mmp2.c @@ -98,7 +98,9 @@ void __init mmp2_init_irq(void) { mmp2_init_icu(); #ifdef CONFIG_PM - icu_irq_chip.irq_set_wake = mmp2_set_wake; + pax_open_kernel(); + const_cast(icu_irq_chip.irq_set_wake) = mmp2_set_wake; + pax_close_kernel(); #endif } diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c index 1ccbba9ac..7a95c292f 100644 --- a/arch/arm/mach-mmp/pxa910.c +++ b/arch/arm/mach-mmp/pxa910.c @@ -84,7 +84,9 @@ void __init pxa910_init_irq(void) { icu_init_irq(); #ifdef CONFIG_PM - icu_irq_chip.irq_set_wake = pxa910_set_wake; + pax_open_kernel(); + const_cast(icu_irq_chip.irq_set_wake) = pxa910_set_wake; + pax_close_kernel(); #endif } diff --git a/arch/arm/mach-mvebu/coherency.c 
b/arch/arm/mach-mvebu/coherency.c index ae2a018b9..297ad08ab 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -156,7 +156,7 @@ static void __init armada_370_coherency_init(struct device_node *np) /* * This ioremap hook is used on Armada 375/38x to ensure that all MMIO - * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is + * areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This is * needed for the HW I/O coherency mechanism to work properly without * deadlock. */ @@ -164,7 +164,7 @@ static void __iomem * armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size, unsigned int mtype, void *caller) { - mtype = MT_UNCACHED; + mtype = MT_UNCACHED_RW; return __arm_ioremap_caller(phys_addr, size, mtype, caller); } @@ -174,7 +174,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np) coherency_cpu_base = of_iomap(np, 0); arch_ioremap_caller = armada_wa_ioremap_caller; - pci_ioremap_set_mem_type(MT_UNCACHED); + pci_ioremap_set_mem_type(MT_UNCACHED_RW); /* * We should switch the PL310 to I/O coherency mode only if diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index f39bd51bc..866c78042 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c @@ -93,7 +93,7 @@ #define ARMADA_370_CRYPT0_ENG_ATTR 0x1 extern void ll_disable_coherency(void); -extern void ll_enable_coherency(void); +extern int ll_enable_coherency(void); extern void armada_370_xp_cpu_resume(void); extern void armada_38x_cpu_resume(void); diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index 6b6fda65f..232df1e03 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c @@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev) } #endif -struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = { +struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = { .late_init = n8x0_menelaus_late_init, }; diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 7d62ad48c..97774b1fc 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -89,7 +89,7 @@ struct cpu_pm_ops { void (*resume)(void); void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state); void (*hotplug_restart)(void); -}; +} __no_const; static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); static struct powerdomain *mpuss_pd; @@ -107,7 +107,7 @@ static void dummy_cpu_resume(void) static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state) {} -static struct cpu_pm_ops omap_pm_ops = { +static struct cpu_pm_ops omap_pm_ops __read_only = { .finish_suspend = default_finish_suspend, .resume = dummy_cpu_resume, .scu_prepare = dummy_scu_prepare, diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index b4de3da6d..e02739337 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index e920dd83e..ef999171b 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -530,7 +530,7 @@ void omap_device_delete(struct omap_device *od) struct platform_device __init *omap_device_build(const char *pdev_name, int pdev_id, struct omap_hwmod *oh, - void *pdata, int pdata_len) + const void *pdata, int pdata_len) { struct omap_hwmod 
*ohs[] = { oh }; @@ -558,7 +558,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, struct platform_device __init *omap_device_build_ss(const char *pdev_name, int pdev_id, struct omap_hwmod **ohs, - int oh_cnt, void *pdata, + int oh_cnt, const void *pdata, int pdata_len) { int ret = -ENOMEM; diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h index 78c02b355..c94109a2f 100644 --- a/arch/arm/mach-omap2/omap_device.h +++ b/arch/arm/mach-omap2/omap_device.h @@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev); /* Core code interface */ struct platform_device *omap_device_build(const char *pdev_name, int pdev_id, - struct omap_hwmod *oh, void *pdata, + struct omap_hwmod *oh, const void *pdata, int pdata_len); struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id, struct omap_hwmod **oh, int oh_cnt, - void *pdata, int pdata_len); + const void *pdata, int pdata_len); struct omap_device *omap_device_alloc(struct platform_device *pdev, struct omap_hwmod **ohs, int oh_cnt); diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 1052b2969..54669b02d 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -206,10 +206,10 @@ struct omap_hwmod_soc_ops { void (*update_context_lost)(struct omap_hwmod *oh); int (*get_context_lost)(struct omap_hwmod *oh); int (*disable_direct_prcm)(struct omap_hwmod *oh); -}; +} __no_const; /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */ -static struct omap_hwmod_soc_ops soc_ops; +static struct omap_hwmod_soc_ops soc_ops __read_only; /* omap_hwmod_list contains all registered struct omap_hwmods */ static LIST_HEAD(omap_hwmod_list); diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c index 95fee54c3..b5dd79d96 100644 --- a/arch/arm/mach-omap2/powerdomains43xx_data.c +++ b/arch/arm/mach-omap2/powerdomains43xx_data.c @@ -10,6 +10,7 @@ #include #include +#include #include "powerdomain.h" @@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void) void __init am43xx_powerdomains_init(void) { - omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp; + pax_open_kernel(); + const_cast(omap4_pwrdm_operations.pwrdm_has_voltdm) = am43xx_check_vcvp; + pax_close_kernel(); pwrdm_register_platform_funcs(&omap4_pwrdm_operations); pwrdm_register_pwrdms(powerdomains_am43xx); pwrdm_complete_init(); diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c index ff0a68cf7..b312aa004 100644 --- a/arch/arm/mach-omap2/wd_timer.c +++ b/arch/arm/mach-omap2/wd_timer.c @@ -110,7 +110,9 @@ static int __init omap_init_wdt(void) struct omap_hwmod *oh; char *oh_name = "wd_timer2"; char *dev_name = "omap_wdt"; - struct omap_wd_timer_platform_data pdata; + static struct omap_wd_timer_platform_data pdata = { + .read_reset_sources = prm_read_reset_sources + }; if (!cpu_class_is_omap2() || of_have_populated_dt()) return 0; @@ -121,8 +123,6 @@ static int __init omap_init_wdt(void) return -EINVAL; } - pdata.read_reset_sources = prm_read_reset_sources; - pdev = omap_device_build(dev_name, id, oh, &pdata, sizeof(struct omap_wd_timer_platform_data)); WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n", diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index 92ec8c3b4..3b094723b 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -240,7 +240,7 @@ static struct 
platform_device smdk6410_b_pwr_5v = { }; #endif -static struct s3c_ide_platdata smdk6410_ide_pdata __initdata = { +static const struct s3c_ide_platdata smdk6410_ide_pdata __initconst = { .setup_gpio = s3c64xx_ide_setup_gpio, }; diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c index 0c6bb458b..0f18d70ab 100644 --- a/arch/arm/mach-shmobile/platsmp-apmu.c +++ b/arch/arm/mach-shmobile/platsmp-apmu.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "common.h" #include "platsmp-apmu.h" #include "rcar-gen2.h" @@ -316,6 +317,8 @@ static int shmobile_smp_apmu_enter_suspend(suspend_state_t state) void __init shmobile_smp_apmu_suspend_init(void) { - shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend; + pax_open_kernel(); + const_cast(shmobile_suspend_ops.enter) = shmobile_smp_apmu_enter_suspend; + pax_close_kernel(); } #endif diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c index afcee04f2..63e52ac6c 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c @@ -178,7 +178,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, bool entered_lp2 = false; if (tegra_pending_sgi()) - ACCESS_ONCE(abort_flag) = true; + ACCESS_ONCE_RW(abort_flag) = true; cpuidle_coupled_parallel_barrier(dev, &abort_barrier); diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c index a69b22d37..8523a039e 100644 --- a/arch/arm/mach-tegra/irq.c +++ b/arch/arm/mach-tegra/irq.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c index a970e7fcb..6f2bf9af1 100644 --- a/arch/arm/mach-ux500/pm.c +++ b/arch/arm/mach-ux500/pm.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c index 7cd9865bd..a00b6abdc 100644 --- a/arch/arm/mach-zynq/platsmp.c +++ b/arch/arm/mach-zynq/platsmp.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include "common.h" diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index c1799dd1d..9111dcc93 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -446,6 +446,7 @@ config CPU_32v5 config CPU_32v6 bool + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF select TLS_REG_EMUL if !CPU_32v6K && !MMU config CPU_32v6K @@ -603,6 +604,7 @@ config CPU_CP15_MPU config CPU_USE_DOMAINS bool + depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF help This option enables or disables the use of domain switching via the set_fs() function. @@ -813,7 +815,7 @@ config NEED_KUSER_HELPERS config KUSER_HELPERS bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS - depends on MMU + depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND) default y help Warning: disabling this option may break user programs. @@ -827,7 +829,7 @@ config KUSER_HELPERS See Documentation/arm/kernel_user_helpers.txt for details. However, the fixed address nature of these helpers can be used - by ROP (return orientated programming) authors when creating + by ROP (Return Oriented Programming) authors when creating exploits. 
If all of the binaries and libraries which run on your platform @@ -842,7 +844,7 @@ config KUSER_HELPERS config VDSO bool "Enable VDSO for acceleration of some system calls" - depends on AEABI && MMU && CPU_V7 + depends on AEABI && MMU && CPU_V7 && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF default y if ARM_ARCH_TIMER select GENERIC_TIME_VSYSCALL help diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 7d5f4c736..c6a081602 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -778,6 +778,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) u16 tinstr = 0; int isize = 4; int thumb2_32b = 0; + bool is_user_mode = user_mode(regs); if (interrupts_enabled(regs)) local_irq_enable(); @@ -786,14 +787,24 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (thumb_mode(regs)) { u16 *ptr = (u16 *)(instrptr & ~1); - fault = probe_kernel_address(ptr, tinstr); + if (is_user_mode) { + pax_open_userland(); + fault = probe_kernel_address(ptr, tinstr); + pax_close_userland(); + } else + fault = probe_kernel_address(ptr, tinstr); tinstr = __mem_to_opcode_thumb16(tinstr); if (!fault) { if (cpu_architecture() >= CPU_ARCH_ARMv7 && IS_T32(tinstr)) { /* Thumb-2 32-bit */ u16 tinst2 = 0; - fault = probe_kernel_address(ptr + 1, tinst2); + if (is_user_mode) { + pax_open_userland(); + fault = probe_kernel_address(ptr + 1, tinst2); + pax_close_userland(); + } else + fault = probe_kernel_address(ptr + 1, tinst2); tinst2 = __mem_to_opcode_thumb16(tinst2); instr = __opcode_thumb32_compose(tinstr, tinst2); thumb2_32b = 1; @@ -803,7 +814,12 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } } } else { - fault = probe_kernel_address((void *)instrptr, instr); + if (is_user_mode) { + pax_open_userland(); + fault = probe_kernel_address((void *)instrptr, instr); + pax_close_userland(); + } else + fault = probe_kernel_address((void *)instrptr, instr); instr = __mem_to_opcode_arm(instr); } @@ -812,7 +828,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) goto bad_or_fault; } - if (user_mode(regs)) + if (is_user_mode) goto user; ai_sys += 1; diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index d1870c777..36d500fd4 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -44,7 +44,7 @@ struct l2c_init_data { void (*configure)(void __iomem *); void (*unlock)(void __iomem *, unsigned); struct outer_cache_fns outer_cache; -}; +} __do_const; #define CACHE_LINE_SIZE 32 diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index c8c8b9ed0..c55cc79e7 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -43,7 +43,7 @@ #define NUM_USER_ASIDS ASID_FIRST_VERSION static DEFINE_RAW_SPINLOCK(cpu_asid_lock); -static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); +static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); static DEFINE_PER_CPU(atomic64_t, active_asids); @@ -193,7 +193,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) { static u32 cur_idx = 1; u64 asid = atomic64_read(&mm->context.id); - u64 generation = atomic64_read(&asid_generation); + u64 generation = atomic64_read_unchecked(&asid_generation); if (asid != 0) { u64 newasid = generation | (asid & ~ASID_MASK); @@ -225,7 +225,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) */ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); if (asid == NUM_USER_ASIDS) { - generation 
= atomic64_add_return(ASID_FIRST_VERSION, + generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION, &asid_generation); flush_context(cpu); asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); @@ -254,14 +254,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) cpu_set_reserved_ttbr0(); asid = atomic64_read(&mm->context.id); - if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) + if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) goto switch_mm_fastpath; raw_spin_lock_irqsave(&cpu_asid_lock, flags); /* Check that our ASID belongs to the current generation. */ asid = atomic64_read(&mm->context.id); - if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) { + if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) { asid = new_context(mm, cpu); atomic64_set(&mm->context.id, asid); } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 0122ad1a6..1aae1cbe7 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "fault.h" @@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, if (fixup_exception(regs)) return; +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (addr < TASK_SIZE) { + if (current->signal->curr_ip) + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); + else + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); + } +#endif + +#ifdef CONFIG_PAX_KERNEXEC + if ((fsr & FSR_WRITE) && + (((unsigned long)_stext <= addr && addr < init_mm.end_code) || + (MODULES_VADDR <= addr && addr < MODULES_END))) + { + if (current->signal->curr_ip) + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); + else + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); + } +#endif + /* * No handler, we'll have to terminate things with extreme prejudice. */ @@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, } #endif +#ifdef CONFIG_PAX_PAGEEXEC + if ((tsk->mm->pax_flags & MF_PAX_PAGEEXEC) && (fsr & FSR_LNX_PF)) { + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); + do_group_exit(SIGKILL); + } +#endif + tsk->thread.address = addr; tsk->thread.error_code = fsr; tsk->thread.trap_no = 14; @@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } #endif /* CONFIG_MMU */ +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (__force unsigned char __user *)pc+i)) + printk(KERN_CONT "?? 
"); + else + printk(KERN_CONT "%02x ", c); + } + printk("\n"); + + printk(KERN_ERR "PAX: bytes at SP-4: "); + for (i = -1; i < 20; i++) { + unsigned long c; + if (get_user(c, (__force unsigned long __user *)sp+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08lx ", c); + } + printk("\n"); +} +#endif + /* * First Level Translation Fault Handler * @@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) const struct fsr_info *inf = fsr_info + fsr_fs(fsr); struct siginfo info; +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (addr < TASK_SIZE && is_domain_fault(fsr)) { + if (current->signal->curr_ip) + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); + else + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); + goto die; + } +#endif + if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) return; +die: pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); show_pte(current->mm, addr); @@ -574,15 +647,118 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs * ifsr_info[nr].name = name; } +asmlinkage int sys_sigreturn(struct pt_regs *regs); +asmlinkage int sys_rt_sigreturn(struct pt_regs *regs); + asmlinkage void __exception do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) { const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); struct siginfo info; + unsigned long pc = instruction_pointer(regs); + + if (user_mode(regs)) { + unsigned long sigpage = current->mm->context.sigpage; + + if (sigpage <= pc && pc < sigpage + 7*4) { + if (pc < sigpage + 3*4) + sys_sigreturn(regs); + else + sys_rt_sigreturn(regs); + return; + } + if (pc == 0xffff0f60UL) { + /* + * PaX: __kuser_cmpxchg64 emulation + */ + // TODO + //regs->ARM_pc = regs->ARM_lr; + //return; + } + if (pc == 0xffff0fa0UL) { + /* + * PaX: __kuser_memory_barrier emulation + */ + // dmb(); implied by the exception + regs->ARM_pc = regs->ARM_lr; +#ifdef CONFIG_ARM_THUMB + if (regs->ARM_lr & 1) { + regs->ARM_cpsr |= PSR_T_BIT; + regs->ARM_pc &= ~0x1U; + } else + regs->ARM_cpsr &= ~PSR_T_BIT; +#endif + return; + } + if (pc == 0xffff0fc0UL) { + /* + * PaX: __kuser_cmpxchg emulation + */ + // TODO + //long new; + //int op; + + //op = FUTEX_OP_SET << 28; + //new = futex_atomic_op_inuser(op, regs->ARM_r2); + //regs->ARM_r0 = old != new; + //regs->ARM_pc = regs->ARM_lr; + //return; + } + if (pc == 0xffff0fe0UL) { + /* + * PaX: __kuser_get_tls emulation + */ + regs->ARM_r0 = current_thread_info()->tp_value[0]; + regs->ARM_pc = regs->ARM_lr; +#ifdef CONFIG_ARM_THUMB + if (regs->ARM_lr & 1) { + regs->ARM_cpsr |= PSR_T_BIT; + regs->ARM_pc &= ~0x1U; + } else + regs->ARM_cpsr &= ~PSR_T_BIT; +#endif + return; + } + } + +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) { + if (current->signal->curr_ip) + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), + pc >= 
TASK_SIZE ? "non-executable kernel" : "userland", pc); + else + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc); + goto die; + } +#endif + +#ifdef CONFIG_PAX_REFCOUNT + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) { +#ifdef CONFIG_THUMB2_KERNEL + unsigned short bkpt; + + if (!probe_kernel_address((const unsigned short *)pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) { +#else + unsigned int bkpt; + + if (!probe_kernel_address((const unsigned int *)pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) { +#endif + current->thread.error_code = ifsr; + current->thread.trap_no = 0; + pax_report_refcount_error(regs, NULL); + fixup_exception(regs); + return; + } + } +#endif if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) return; +die: pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", inf->name, ifsr, addr); diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index afc1f84e7..b1daab579 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h @@ -3,6 +3,7 @@ /* * Fault status register encodings. We steal bit 31 for our own purposes. + * Set when the FSR value is from an instruction fault. */ #define FSR_LNX_PF (1 << 31) #define FSR_WRITE (1 << 11) @@ -26,6 +27,17 @@ static inline int fsr_fs(unsigned int fsr) } #endif +/* valid for LPAE and !LPAE */ +static inline int is_xn_fault(unsigned int fsr) +{ + return ((fsr_fs(fsr) & 0x3c) == 0xc); +} + +static inline int is_domain_fault(unsigned int fsr) +{ + return ((fsr_fs(fsr) & 0xD) == 0x9); +} + void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); void early_abt_enable(void); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 370581aeb..b985cc165 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -747,7 +747,46 @@ void free_tcmmem(void) { #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; +#endif +#ifdef CONFIG_PAX_KERNEXEC + unsigned long addr; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + int cpu_arch = cpu_architecture(); + unsigned int cr = get_cr(); + + if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { + /* make pages tables, etc before .text NX */ + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + __section_update(pmd, addr, PMD_SECT_XN); + } + /* make init NX */ + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + __section_update(pmd, addr, PMD_SECT_XN); + } + /* make kernel code/rodata RX */ + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); +#ifdef CONFIG_ARM_LPAE + __section_update(pmd, addr, PMD_SECT_RDONLY); +#else + __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE); +#endif + } + } +#endif + +#ifdef CONFIG_HAVE_TCM poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); #endif diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index ff0eed23d..f17f1c920 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -411,9 +411,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool 
cached) unsigned int mtype; if (cached) - mtype = MT_MEMORY_RWX; + mtype = MT_MEMORY_RX; else - mtype = MT_MEMORY_RWX_NONCACHED; + mtype = MT_MEMORY_RX_NONCACHED; return __arm_ioremap_caller(phys_addr, size, mtype, __builtin_return_address(0)); diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 66353caa3..8aad9f865 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, struct vm_area_struct *vma; int do_align = 0; int aliasing = cache_is_vipt_aliasing(); + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); struct vm_unmapped_area_info info; /* @@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (len > TASK_SIZE) return -ENOMEM; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } @@ -99,19 +103,21 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.high_limit = TASK_SIZE; info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; info.align_offset = pgoff << PAGE_SHIFT; + info.threadstack_offset = offset; return vm_unmapped_area(&info); } unsigned long -arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, + unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_align = 0; int aliasing = cache_is_vipt_aliasing(); + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); struct vm_unmapped_area_info info; /* @@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return addr; } +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + /* requesting a specific address */ if (addr) { if (do_align) @@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, else addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } @@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.high_limit = mm->mmap_base; info.align_mask = do_align ? 
(PAGE_MASK & (SHMLBA - 1)) : 0; info.align_offset = pgoff << PAGE_SHIFT; + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); /* @@ -182,14 +192,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; } else { mm->mmap_base = mmap_base(random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4001dd158..c6dce7b51 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -243,6 +243,14 @@ __setup("noalign", noalign_setup); #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE +#ifdef CONFIG_PAX_KERNEXEC +#define L_PTE_KERNEXEC L_PTE_RDONLY +#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY +#else +#define L_PTE_KERNEXEC L_PTE_DIRTY +#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE +#endif + static struct mem_type mem_types[] __ro_after_init = { [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | @@ -272,19 +280,19 @@ static struct mem_type mem_types[] __ro_after_init = { .prot_sect = PROT_SECT_DEVICE, .domain = DOMAIN_IO, }, - [MT_UNCACHED] = { + [MT_UNCACHED_RW] = { .prot_pte = PROT_PTE_DEVICE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_IO, }, - [MT_CACHECLEAN] = { - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, + [MT_CACHECLEAN_RO] = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY, .domain = DOMAIN_KERNEL, }, #ifndef CONFIG_ARM_LPAE - [MT_MINICLEAN] = { - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, + [MT_MINICLEAN_RO] = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY, .domain = DOMAIN_KERNEL, }, #endif @@ -300,7 +308,7 @@ static struct mem_type mem_types[] __ro_after_init = { .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_VECTORS, }, - [MT_MEMORY_RWX] = { + [__MT_MEMORY_RWX] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, @@ -313,17 +321,30 @@ static struct mem_type mem_types[] __ro_after_init = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, - [MT_ROM] = { - .prot_sect = PMD_TYPE_SECT, + [MT_MEMORY_RX] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, + .domain = DOMAIN_KERNEL, + }, + [MT_ROM_RX] = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY, .domain = DOMAIN_KERNEL, }, - [MT_MEMORY_RWX_NONCACHED] = { + [MT_MEMORY_RW_NONCACHED] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_MT_BUFFERABLE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, + [MT_MEMORY_RX_NONCACHED] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC | + L_PTE_MT_BUFFERABLE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, + .domain = DOMAIN_KERNEL, + }, [MT_MEMORY_RW_DTCM] 
= { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN, @@ -331,9 +352,10 @@ static struct mem_type mem_types[] __ro_after_init = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, }, - [MT_MEMORY_RWX_ITCM] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, + [MT_MEMORY_RX_ITCM] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, .domain = DOMAIN_KERNEL, }, [MT_MEMORY_RW_SO] = { @@ -586,9 +608,14 @@ static void __init build_mem_type_table(void) * Mark cache clean areas and XIP ROM read only * from SVC mode and no access from userspace. */ - mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; - mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; +#ifdef CONFIG_PAX_KERNEXEC + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; +#endif + mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; #endif /* @@ -605,13 +632,17 @@ static void __init build_mem_type_table(void) mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; - mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; + mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; + mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; - mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED; } } @@ -622,15 +653,20 @@ static void __init build_mem_type_table(void) if (cpu_arch >= CPU_ARCH_ARMv6) { if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { /* Non-cacheable Normal is XCB = 001 */ - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= + PMD_SECT_BUFFERED; + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERED; } else { /* For both ARMv6 and non-TEX-remapping ARMv7 */ - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= + PMD_SECT_TEX(1); + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_TEX(1); } } else { - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; } #ifdef CONFIG_ARM_LPAE @@ -651,6 +687,8 @@ static void __init build_mem_type_table(void) user_pgprot |= PTE_EXT_PXN; #endif + user_pgprot |= __supported_pte_mask; + for (i = 0; i < 16; i++) { pteval_t v = pgprot_val(protection_map[i]); protection_map[i] = 
__pgprot(v | user_pgprot); @@ -668,21 +706,24 @@ static void __init build_mem_type_table(void) mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; - mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; - mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; + mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; + mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; - mem_types[MT_ROM].prot_sect |= cp->pmd; + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask; + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask; + mem_types[MT_ROM_RX].prot_sect |= cp->pmd; switch (cp->pmd) { case PMD_SECT_WT: - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT; break; case PMD_SECT_WB: case PMD_SECT_WBWA: - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB; + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB; break; } pr_info("Memory policy: %sData cache %s\n", @@ -959,7 +1000,7 @@ static void __init create_mapping(struct map_desc *md) return; } - if ((md->type == MT_DEVICE || md->type == MT_ROM) && + if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) && md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n", @@ -1320,18 +1361,15 @@ void __init arm_mm_memblock_reserve(void) * Any other function or debugging method which may touch any device _will_ * crash the kernel. */ + +static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE); + static void __init devicemaps_init(const struct machine_desc *mdesc) { struct map_desc map; unsigned long addr; - void *vectors; - /* - * Allocate the vector page early. - */ - vectors = early_alloc(PAGE_SIZE * 2); - - early_trap_init(vectors); + early_trap_init(&vectors); /* * Clear page table except top pmd used by early fixmaps @@ -1347,7 +1385,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); map.virtual = MODULES_VADDR; map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; - map.type = MT_ROM; + map.type = MT_ROM_RX; create_mapping(&map); #endif @@ -1358,14 +1396,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); map.virtual = FLUSH_BASE; map.length = SZ_1M; - map.type = MT_CACHECLEAN; + map.type = MT_CACHECLEAN_RO; create_mapping(&map); #endif #ifdef FLUSH_BASE_MINICACHE map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); map.virtual = FLUSH_BASE_MINICACHE; map.length = SZ_1M; - map.type = MT_MINICLEAN; + map.type = MT_MINICLEAN_RO; create_mapping(&map); #endif @@ -1374,7 +1412,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) * location (0xffff0000). If we aren't using high-vectors, also * create a mapping at the low-vectors virtual address. 
*/ - map.pfn = __phys_to_pfn(virt_to_phys(vectors)); + map.pfn = __phys_to_pfn(virt_to_phys(&vectors)); map.virtual = 0xffff0000; map.length = PAGE_SIZE; #ifdef CONFIG_KUSER_HELPERS @@ -1437,12 +1475,14 @@ static void __init kmap_init(void) static void __init map_lowmem(void) { struct memblock_region *reg; +#ifndef CONFIG_PAX_KERNEXEC #ifdef CONFIG_XIP_KERNEL phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE); #else phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); #endif phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); +#endif /* Map all the lowmem memory banks. */ for_each_memblock(memory, reg) { @@ -1458,11 +1498,48 @@ static void __init map_lowmem(void) if (start >= end) break; +#ifdef CONFIG_PAX_KERNEXEC + map.pfn = __phys_to_pfn(start); + map.virtual = __phys_to_virt(start); + map.length = end - start; + + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) { + struct map_desc kernel; + struct map_desc initmap; + + /* when freeing initmem we will make this RW */ + initmap.pfn = __phys_to_pfn(__pa(__init_begin)); + initmap.virtual = (unsigned long)__init_begin; + initmap.length = _sdata - __init_begin; + initmap.type = __MT_MEMORY_RWX; + create_mapping(&initmap); + + /* when freeing initmem we will make this RX */ + kernel.pfn = __phys_to_pfn(__pa(_stext)); + kernel.virtual = (unsigned long)_stext; + kernel.length = __init_begin - _stext; + kernel.type = __MT_MEMORY_RWX; + create_mapping(&kernel); + + if (map.virtual < (unsigned long)_stext) { + map.length = (unsigned long)_stext - map.virtual; + map.type = __MT_MEMORY_RWX; + create_mapping(&map); + } + + map.pfn = __phys_to_pfn(__pa(_sdata)); + map.virtual = (unsigned long)_sdata; + map.length = end - __pa(_sdata); + } + + map.type = MT_MEMORY_RW; + create_mapping(&map); +#else if (end < kernel_x_start) { map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); map.length = end - start; - map.type = MT_MEMORY_RWX; + map.type = __MT_MEMORY_RWX; create_mapping(&map); } else if (start >= kernel_x_end) { @@ -1486,7 +1563,7 @@ static void __init map_lowmem(void) map.pfn = __phys_to_pfn(kernel_x_start); map.virtual = __phys_to_virt(kernel_x_start); map.length = kernel_x_end - kernel_x_start; - map.type = MT_MEMORY_RWX; + map.type = __MT_MEMORY_RWX; create_mapping(&map); @@ -1499,6 +1576,7 @@ static void __init map_lowmem(void) create_mapping(&map); } } +#endif } } diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 93d0b6d0b..2db6d9995 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "bpf_jit_32.h" @@ -72,54 +73,38 @@ struct jit_ctx { #endif }; +#ifdef CONFIG_GRKERNSEC_BPF_HARDEN +int bpf_jit_enable __read_only; +#else int bpf_jit_enable __read_mostly; +#endif -static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, - unsigned int size) -{ - void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); - - if (!ptr) - return -EFAULT; - memcpy(ret, ptr, size); - return 0; -} - -static u64 jit_get_skb_b(struct sk_buff *skb, int offset) +static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) { u8 ret; int err; - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 1); - else - err = skb_copy_bits(skb, offset, &ret, 1); + err = skb_copy_bits(skb, offset, &ret, 1); return (u64)err << 32 | ret; } -static u64 jit_get_skb_h(struct sk_buff *skb, int offset) +static u64 jit_get_skb_h(struct 
sk_buff *skb, unsigned offset) { u16 ret; int err; - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 2); - else - err = skb_copy_bits(skb, offset, &ret, 2); + err = skb_copy_bits(skb, offset, &ret, 2); return (u64)err << 32 | ntohs(ret); } -static u64 jit_get_skb_w(struct sk_buff *skb, int offset) +static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) { u32 ret; int err; - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 4); - else - err = skb_copy_bits(skb, offset, &ret, 4); + err = skb_copy_bits(skb, offset, &ret, 4); return (u64)err << 32 | ntohl(ret); } @@ -191,8 +176,10 @@ static void jit_fill_hole(void *area, unsigned int size) { u32 *ptr; /* We are guaranteed to have aligned memory. */ + pax_open_kernel(); for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); + pax_close_kernel(); } static void build_prologue(struct jit_ctx *ctx) @@ -554,6 +541,9 @@ static int build_body(struct jit_ctx *ctx) case BPF_LD | BPF_B | BPF_ABS: load_order = 0; load: + /* the interpreter will deal with the negative K */ + if ((int)k < 0) + return -ENOTSUPP; emit_mov_i(r_off, k, ctx); load_common: ctx->seen |= SEEN_DATA | SEEN_CALL; @@ -568,18 +558,6 @@ static int build_body(struct jit_ctx *ctx) condt = ARM_COND_HI; } - /* - * test for negative offset, only if we are - * currently scheduled to take the fast - * path. this will update the flags so that - * the slowpath instruction are ignored if the - * offset is negative. - * - * for loard_order == 0 the HI condition will - * make loads at offset 0 take the slow path too. - */ - _emit(condt, ARM_CMP_I(r_off, 0), ctx); - _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), ctx); diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c index 8151bde99..9be301f92 100644 --- a/arch/arm/plat-iop/setup.c +++ b/arch/arm/plat-iop/setup.c @@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = { .virtual = IOP3XX_PERIPHERAL_VIRT_BASE, .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE), .length = IOP3XX_PERIPHERAL_SIZE, - .type = MT_UNCACHED, + .type = MT_UNCACHED_RW, }, }; diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index a5bc92d7e..0bb473095 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size, * Looks like we need to preserve some bootloader code at the * beginning of SRAM for jumping to flash for reboot to work... */ + pax_open_kernel(); memset_io(omap_sram_base + omap_sram_skip, 0, omap_sram_size - omap_sram_skip); + pax_close_kernel(); } diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index a4ec240ee..96faf9bee 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -485,6 +485,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) return (void *)orig_ret_address; } +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -493,6 +494,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* Replace the return addr with trampoline addr. 
*/ regs->ARM_lr = (unsigned long)&kretprobe_trampoline; } +#endif int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { @@ -605,10 +607,12 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } +#ifdef CONFIG_KRETPROBES int __kprobes arch_trampoline_kprobe(struct kprobe *p) { return 0; } +#endif #ifdef CONFIG_THUMB2_KERNEL diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 969ef880d..305b8563e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -896,6 +896,7 @@ config RELOCATABLE config RANDOMIZE_BASE bool "Randomize the address of the kernel image" + depends on BROKEN_SECURITY select ARM64_MODULE_PLTS if MODULES select RELOCATABLE help diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index b661fe742..6d124fc07 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -6,6 +6,7 @@ config ARM64_PTDUMP bool "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL select DEBUG_FS + depends on !GRKERNSEC_KMEM help Say Y here if you want to show the kernel pagetable layout in a debugfs file. This information is only useful for kernel developers diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index aefda9868..29378748a 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -29,7 +29,7 @@ struct sha1_ce_state { u32 finalize; }; -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src, int blocks); static int sha1_ce_update(struct shash_desc *desc, const u8 *data, @@ -39,8 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data, sctx->finalize = 0; kernel_neon_begin_partial(16); - sha1_base_do_update(desc, data, len, - (sha1_block_fn *)sha1_ce_transform); + sha1_base_do_update(desc, data, len, sha1_ce_transform); kernel_neon_end(); return 0; @@ -64,10 +63,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, sctx->finalize = finalize; kernel_neon_begin_partial(16); - sha1_base_do_update(desc, data, len, - (sha1_block_fn *)sha1_ce_transform); + sha1_base_do_update(desc, data, len, sha1_ce_transform); if (!finalize) - sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); + sha1_base_do_finalize(desc, sha1_ce_transform); kernel_neon_end(); return sha1_base_finish(desc, out); } @@ -78,7 +76,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out) sctx->finalize = 0; kernel_neon_begin_partial(16); - sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); + sha1_base_do_finalize(desc, sha1_ce_transform); kernel_neon_end(); return sha1_base_finish(desc, out); } diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index c0235e0ff..86eb6849c 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -57,11 +57,13 @@ #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #define atomic_add_return_relaxed atomic_add_return_relaxed +#define atomic_add_return_unchecked_relaxed atomic_add_return_relaxed #define atomic_add_return_acquire atomic_add_return_acquire #define atomic_add_return_release atomic_add_return_release #define atomic_add_return atomic_add_return #define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) +#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_relaxed(1, (v)) #define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) #define 
atomic_inc_return_release(v) atomic_add_return_release(1, (v)) #define atomic_inc_return(v) atomic_add_return(1, (v)) @@ -128,6 +130,8 @@ #define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,) #define atomic_andnot atomic_andnot +#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_relaxed(1, (v)) + /* * 64-bit atomic operations. */ @@ -206,5 +210,16 @@ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) +#define atomic64_xchg_unchecked(v, n) atomic64_xchg((v), (n)) + #endif #endif diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 5082b30bc..9ef38c255 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -16,10 +16,14 @@ #ifndef __ASM_CACHE_H #define __ASM_CACHE_H +#include + #include +#include + #define L1_CACHE_SHIFT 7 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* * Memory returned by kmalloc() may be used for DMA, so we must make diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 5394c8405..05e5a9575 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -123,16 +123,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size) { switch (size) { case 1: - ACCESS_ONCE(*(u8 *)ptr) = (u8)val; + ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val; break; case 2: - ACCESS_ONCE(*(u16 *)ptr) = (u16)val; + ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val; break; case 4: - ACCESS_ONCE(*(u32 *)ptr) = (u32)val; + ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val; break; case 8: - ACCESS_ONCE(*(u64 *)ptr) = (u64)val; + ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val; break; default: BUILD_BUG(); diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index d25f4f137..61d52da44 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -51,6 +51,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE); } + +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_populate(mm, pud, pmd); +} #else static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot) { @@ -80,6 +85,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) { __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE); } + +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + pgd_populate(mm, pgd, pud); +} #else static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot) { diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index ffbb9a520..d8b49ffdb 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -23,6 +23,9 @@ #include #include +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + /* * VMALLOC range. 
* @@ -728,6 +731,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #define kc_vaddr_to_offset(v) ((v) & ~VA_START) #define kc_offset_to_vaddr(o) ((o) | VA_START) +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PGTABLE_H */ diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index 2eb714c46..3a1047127 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -17,40 +17,40 @@ #define __ASM_STRING_H #define __HAVE_ARCH_STRRCHR -extern char *strrchr(const char *, int c); +extern char *strrchr(const char *, int c) __nocapture(-1); #define __HAVE_ARCH_STRCHR -extern char *strchr(const char *, int c); +extern char *strchr(const char *, int c) __nocapture(-1); #define __HAVE_ARCH_STRCMP -extern int strcmp(const char *, const char *); +extern int strcmp(const char *, const char *) __nocapture(); #define __HAVE_ARCH_STRNCMP -extern int strncmp(const char *, const char *, __kernel_size_t); +extern int strncmp(const char *, const char *, __kernel_size_t) __nocapture(1, 2); #define __HAVE_ARCH_STRLEN -extern __kernel_size_t strlen(const char *); +extern __kernel_size_t strlen(const char *) __nocapture(1); #define __HAVE_ARCH_STRNLEN -extern __kernel_size_t strnlen(const char *, __kernel_size_t); +extern __kernel_size_t strnlen(const char *, __kernel_size_t) __nocapture(1); #define __HAVE_ARCH_MEMCPY -extern void *memcpy(void *, const void *, __kernel_size_t); -extern void *__memcpy(void *, const void *, __kernel_size_t); +extern void *memcpy(void *, const void *, __kernel_size_t) __nocapture(2); +extern void *__memcpy(void *, const void *, __kernel_size_t) __nocapture(2); #define __HAVE_ARCH_MEMMOVE -extern void *memmove(void *, const void *, __kernel_size_t); -extern void *__memmove(void *, const void *, __kernel_size_t); +extern void *memmove(void *, const void *, __kernel_size_t) __nocapture(2); +extern void *__memmove(void *, const void *, __kernel_size_t) __nocapture(2); #define __HAVE_ARCH_MEMCHR -extern void *memchr(const void *, int, __kernel_size_t); +extern void *memchr(const void *, int, __kernel_size_t) __nocapture(-1); #define __HAVE_ARCH_MEMSET extern void *memset(void *, int, __kernel_size_t); extern void *__memset(void *, int, __kernel_size_t); #define __HAVE_ARCH_MEMCMP -extern int memcmp(const void *, const void *, size_t); +extern int memcmp(const void *, const void *, size_t) __nocapture(1, 2); #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 55d0adbf6..b986918c2 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -110,6 +110,7 @@ static inline void set_fs(mm_segment_t fs) */ #define untagged_addr(addr) sign_extend64(addr, 55) +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) #define access_ok(type, addr, size) __range_ok(addr, size) #define user_addr_max get_fs @@ -279,6 +280,9 @@ static inline unsigned long __must_check __copy_from_user(void *to, const void _ static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + kasan_check_read(from, n); check_object_size(from, n, true); return __arch_copy_to_user(to, from, n); @@ -287,6 +291,10 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v static inline unsigned long __must_check copy_from_user(void *to, const void __user 
*from, unsigned long n) { unsigned long res = n; + + if ((long)n < 0) + return n; + kasan_check_write(to, n); if (access_ok(VERIFY_READ, from, n)) { @@ -300,6 +308,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + kasan_check_read(from, n); if (access_ok(VERIFY_WRITE, to, n)) { diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index d55a7b099..d8dbd8a2a 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -198,7 +198,7 @@ EXPORT_SYMBOL(arch_hibernation_header_restore); static int create_safe_exec_page(void *src_start, size_t length, unsigned long dst_addr, phys_addr_t *phys_dst_addr, - void *(*allocator)(gfp_t mask), + unsigned long (*allocator)(gfp_t mask), gfp_t mask) { int rc = 0; @@ -206,7 +206,7 @@ static int create_safe_exec_page(void *src_start, size_t length, pud_t *pud; pmd_t *pmd; pte_t *pte; - unsigned long dst = (unsigned long)allocator(mask); + unsigned long dst = allocator(mask); if (!dst) { rc = -ENOMEM; @@ -216,9 +216,9 @@ static int create_safe_exec_page(void *src_start, size_t length, memcpy((void *)dst, src_start, length); flush_icache_range(dst, dst + length); - pgd = pgd_offset_raw(allocator(mask), dst_addr); + pgd = pgd_offset_raw((pgd_t *)allocator(mask), dst_addr); if (pgd_none(*pgd)) { - pud = allocator(mask); + pud = (pud_t *)allocator(mask); if (!pud) { rc = -ENOMEM; goto out; @@ -228,7 +228,7 @@ static int create_safe_exec_page(void *src_start, size_t length, pud = pud_offset(pgd, dst_addr); if (pud_none(*pud)) { - pmd = allocator(mask); + pmd = (pmd_t *)allocator(mask); if (!pmd) { rc = -ENOMEM; goto out; @@ -238,7 +238,7 @@ static int create_safe_exec_page(void *src_start, size_t length, pmd = pmd_offset(pud, dst_addr); if (pmd_none(*pmd)) { - pte = allocator(mask); + pte = (pte_t *)allocator(mask); if (!pte) { rc = -ENOMEM; goto out; @@ -510,7 +510,7 @@ int swsusp_arch_resume(void) rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size, (unsigned long)hibernate_exit, &phys_hibernate_exit, - (void *)get_safe_page, GFP_ATOMIC); + get_safe_page, GFP_ATOMIC); if (rc) { pr_err("Failed to create safe executable page for hibernate_exit code."); goto out; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index f5077ea7a..46b4664fc 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -639,6 +639,7 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) return (void *)orig_ret_address; } +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -652,6 +653,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) { return 0; } +#endif int __init arch_init_kprobes(void) { diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 01753cd7d..b65d17a19 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -64,7 +64,7 @@ EXPORT_SYMBOL(__stack_chk_guard); /* * Function pointers to optional machine specific functions */ -void (*pm_power_off)(void); +void (* pm_power_off)(void); EXPORT_SYMBOL_GPL(pm_power_off); void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); @@ -110,7 +110,7 @@ void machine_shutdown(void) * activity (executing tasks, handling interrupts). smp_send_stop() * achieves this. 
*/ -void machine_halt(void) +void __noreturn machine_halt(void) { local_irq_disable(); smp_send_stop(); @@ -123,12 +123,13 @@ void machine_halt(void) * achieves this. When the system power is turned off, it will take all CPUs * with it. */ -void machine_power_off(void) +void __noreturn machine_power_off(void) { local_irq_disable(); smp_send_stop(); if (pm_power_off) pm_power_off(); + while(1); } /* @@ -140,7 +141,7 @@ void machine_power_off(void) * executing pre-reset code, and using RAM that the primary CPU's code wishes * to use. Implementing such co-ordination would be essentially impossible. */ -void machine_restart(char *cmd) +void __noreturn machine_restart(char *cmd) { /* Disable interrupts first */ local_irq_disable(); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index c2efddfca..c58e0a2c6 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -95,8 +95,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) struct pt_regs *irq_args; unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr); - if (object_is_on_stack((void *)orig_sp) && - object_is_on_stack((void *)frame->fp)) { + if (object_starts_on_stack((void *)orig_sp) && + object_starts_on_stack((void *)frame->fp)) { frame->sp = orig_sp; /* orig_sp is the saved pt_regs, find the elr */ diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 11e5eae08..d8cdfa7a4 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -547,7 +547,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) __show_regs(regs); } - return sys_ni_syscall(); + return -ENOSYS; } static const char *esr_class_str[] = { diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h index c3a58a189..78fbf54a9 100644 --- a/arch/avr32/include/asm/cache.h +++ b/arch/avr32/include/asm/cache.h @@ -1,8 +1,10 @@ #ifndef __ASM_AVR32_CACHE_H #define __ASM_AVR32_CACHE_H +#include + #define L1_CACHE_SHIFT 5 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* * Memory returned by kmalloc() may be used for DMA, so we must make diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h index 0388ece75..87c8df102 100644 --- a/arch/avr32/include/asm/elf.h +++ b/arch/avr32/include/asm/elf.h @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t; the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x00001000UL + +#define PAX_DELTA_MMAP_LEN 15 +#define PAX_DELTA_STACK_LEN 15 +#endif /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
This could be done in user space, diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h index 479330b89..53717a870 100644 --- a/arch/avr32/include/asm/kmap_types.h +++ b/arch/avr32/include/asm/kmap_types.h @@ -2,9 +2,9 @@ #define __ASM_AVR32_KMAP_TYPES_H #ifdef CONFIG_DEBUG_HIGHMEM -# define KM_TYPE_NR 29 +# define KM_TYPE_NR 30 #else -# define KM_TYPE_NR 14 +# define KM_TYPE_NR 15 #endif #endif /* __ASM_AVR32_KMAP_TYPES_H */ diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index b3977e920..4230c51a6 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) int exception_trace = 1; +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (unsigned char *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%02x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. @@ -178,6 +195,16 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) up_read(&mm->mmap_sem); if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); + do_group_exit(SIGKILL); + } + } +#endif + if (exception_trace && printk_ratelimit()) printk("%s%s[%d]: segfault at %08lx pc %08lx " "sp %08lx ecr %lu\n", diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug index f3337ee03..15b6f8d7b 100644 --- a/arch/blackfin/Kconfig.debug +++ b/arch/blackfin/Kconfig.debug @@ -18,6 +18,7 @@ config DEBUG_VERBOSE config DEBUG_MMRS tristate "Generate Blackfin MMR tree" select DEBUG_FS + depends on !GRKERNSEC_KMEM help Create a tree of Blackfin MMRs via the debugfs tree. If you enable this, you will find all MMRs laid out in the diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h index 568885a2c..f8008df2e 100644 --- a/arch/blackfin/include/asm/cache.h +++ b/arch/blackfin/include/asm/cache.h @@ -7,6 +7,7 @@ #ifndef __ARCH_BLACKFIN_CACHE_H #define __ARCH_BLACKFIN_CACHE_H +#include #include /* for asmlinkage */ /* @@ -14,7 +15,7 @@ * Blackfin loads 32 bytes for cache */ #define L1_CACHE_SHIFT 5 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define SMP_CACHE_BYTES L1_CACHE_BYTES #define ARCH_DMA_MINALIGN L1_CACHE_BYTES diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h index aea27184d..3639a60b7 100644 --- a/arch/cris/include/arch-v10/arch/cache.h +++ b/arch/cris/include/arch-v10/arch/cache.h @@ -1,8 +1,9 @@ #ifndef _ASM_ARCH_CACHE_H #define _ASM_ARCH_CACHE_H +#include /* Etrax 100LX have 32-byte cache-lines. */ -#define L1_CACHE_BYTES 32 #define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #endif /* _ASM_ARCH_CACHE_H */ diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h index 7caf25d58..ee65ac530 100644 --- a/arch/cris/include/arch-v32/arch/cache.h +++ b/arch/cris/include/arch-v32/arch/cache.h @@ -1,11 +1,12 @@ #ifndef _ASM_CRIS_ARCH_CACHE_H #define _ASM_CRIS_ARCH_CACHE_H +#include #include /* A cache-line is 32 bytes. 
*/ -#define L1_CACHE_BYTES 32 #define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define __read_mostly __attribute__((__section__(".data..read_mostly"))) diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 1c2a5e264..2579e5f45 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -146,6 +146,16 @@ static inline void atomic64_dec(atomic64_t *v) #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter)) +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) + static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h index 2797163b8..c2a401df9 100644 --- a/arch/frv/include/asm/cache.h +++ b/arch/frv/include/asm/cache.h @@ -12,10 +12,11 @@ #ifndef __ASM_CACHE_H #define __ASM_CACHE_H +#include /* bytes per L1 cache line */ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h index 43901f220..0d8b86566 100644 --- a/arch/frv/include/asm/kmap_types.h +++ b/arch/frv/include/asm/kmap_types.h @@ -2,6 +2,6 @@ #ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H -#define KM_TYPE_NR 17 +#define KM_TYPE_NR 18 #endif diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index 836f14707..4cf23f597 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi { struct vm_area_struct *vma; struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); if (len > TASK_SIZE) return -ENOMEM; @@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) goto success; } @@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi info.high_limit = (current->mm->start_stack - 0x00200000); info.align_mask = 0; info.align_offset = 0; + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); if (!(addr & ~PAGE_MASK)) goto success; diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h index 69952c184..4fa2908f2 100644 --- a/arch/hexagon/include/asm/cache.h +++ b/arch/hexagon/include/asm/cache.h @@ -21,9 +21,11 @@ #ifndef __ASM_CACHE_H #define __ASM_CACHE_H +#include + /* Bytes per L1 cache line */ -#define L1_CACHE_SHIFT (5) -#define L1_CACHE_BYTES (1 << 
L1_CACHE_SHIFT) +#define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define ARCH_DMA_MINALIGN L1_CACHE_BYTES diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 18ca6a9ce..77b0e0d0b 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -519,6 +519,7 @@ config KEXEC bool "kexec system call" depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) select KEXEC_CORE + depends on !GRKERNSEC_KMEM help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index c100d780f..c44d46de0 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -98,5 +98,6 @@ endef archprepare: make_nr_irqs_h PHONY += make_nr_irqs_h +GCC_PLUGINS_make_nr_irqs_h := n make_nr_irqs_h: $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 65d4bb2b6..8b2e661c2 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -323,4 +323,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v) #define atomic64_inc(v) atomic64_add(1, (v)) #define atomic64_dec(v) atomic64_sub(1, (v)) +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) + #endif /* _ASM_IA64_ATOMIC_H */ diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h index 988254a7d..e1ee8855a 100644 --- a/arch/ia64/include/asm/cache.h +++ b/arch/ia64/include/asm/cache.h @@ -1,6 +1,7 @@ #ifndef _ASM_IA64_CACHE_H #define _ASM_IA64_CACHE_H +#include /* * Copyright (C) 1998-2000 Hewlett-Packard Co @@ -9,7 +10,7 @@ /* Bytes per L1 (data) cache line. */ #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #ifdef CONFIG_SMP # define SMP_CACHE_SHIFT L1_CACHE_SHIFT diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index 5a83c5cc3..4d7f553aa 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -42,6 +42,13 @@ */ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) + +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 
16 : 3*PAGE_SHIFT - 13) +#endif + #define PT_IA_64_UNWIND 0x70000001 /* IA-64 relocations: */ diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h index f5e70e961..624fad58c 100644 --- a/arch/ia64/include/asm/pgalloc.h +++ b/arch/ia64/include/asm/pgalloc.h @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) pgd_val(*pgd_entry) = __pa(pud); } +static inline void +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) +{ + pgd_populate(mm, pgd_entry, pud); +} + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { return quicklist_alloc(0, GFP_KERNEL, NULL); @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) pud_val(*pud_entry) = __pa(pmd); } +static inline void +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) +{ + pud_populate(mm, pud_entry, pmd); +} + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { return quicklist_alloc(0, GFP_KERNEL, NULL); diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 9f3ed9ee8..c99b418c5 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -12,7 +12,7 @@ * David Mosberger-Tang */ - +#include #include #include #include @@ -139,6 +139,17 @@ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_READONLY_NOEXEC PAGE_READONLY +# define PAGE_COPY_NOEXEC PAGE_COPY +#endif + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index ca9e76149..40dffaf15 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -73,7 +73,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); - ACCESS_ONCE(*p) = (tmp + 2) & ~1; + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1; } static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index bfe13196f..da0014bdd 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -70,6 +70,7 @@ && ((segment).seg == KERNEL_DS.seg \ || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \ }) +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) /* @@ -241,17 +242,23 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use static inline unsigned long __copy_to_user (void __user *to, const void *from, unsigned long count) { + if (count > INT_MAX) + return count; + check_object_size(from, count, true); - return __copy_user(to, (__force void __user *) from, 
count); + return __copy_user(to, (void __force_user *) from, count); } static inline unsigned long __copy_from_user (void *to, const void __user *from, unsigned long count) { + if (count > INT_MAX) + return count; + check_object_size(to, count, false); - return __copy_user((__force void __user *) to, from, count); + return __copy_user((void __force_user *) to, from, count); } #define __copy_to_user_inatomic __copy_to_user @@ -260,11 +267,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) ({ \ void __user *__cu_to = (to); \ const void *__cu_from = (from); \ - long __cu_len = (n); \ + unsigned long __cu_len = (n); \ \ - if (__access_ok(__cu_to, __cu_len, get_fs())) { \ - check_object_size(__cu_from, __cu_len, true); \ - __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \ + check_object_size(__cu_from, __cu_len, true); \ + __cu_len = __copy_user(__cu_to, (void __force_user *) __cu_from, __cu_len); \ } \ __cu_len; \ }) @@ -272,10 +279,10 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { - check_object_size(to, n, false); - if (likely(__access_ok(from, n, get_fs()))) - n = __copy_user((__force void __user *) to, from, n); - else + if (likely(__access_ok(from, n, get_fs()))) { + check_object_size(to, n, false); + n = __copy_user((void __force_user *) to, from, n); + } else if ((long)n > 0) memset(to, 0, n); return n; } diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index c7c51445c..7e31461d6 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -499,6 +499,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) return 1; } +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -507,6 +508,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* Replace the return addr with trampoline addr */ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; } +#endif /* Check the instruction in the slot is break */ static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot) @@ -1119,6 +1121,7 @@ int __init arch_init_kprobes(void) return register_kprobe(&trampoline_p); } +#ifdef CONFIG_KRETPROBES int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == @@ -1127,3 +1130,4 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) return 0; } +#endif diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 6ab0ae7d6..88f1b6018 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, static inline int in_init (const struct module *mod, uint64_t addr) { - return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size; + return within_module_init(addr, mod); } static inline int in_core (const struct module *mod, uint64_t addr) { - return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size; + return within_module_core(addr, mod); } static inline int @@ -676,6 +676,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, case RV_BDREL: val -= (uint64_t) (in_init(mod, val) ? 
mod->init_layout.base : mod->core_layout.base); + if (within_module_rx(val, &mod->init_layout)) + val -= mod->init_layout.base_rx; + else if (within_module_rw(val, &mod->init_layout)) + val -= mod->init_layout.base_rw; + else if (within_module_rx(val, &mod->core_layout)) + val -= mod->core_layout.base_rx; + else if (within_module_rw(val, &mod->core_layout)) + val -= mod->core_layout.base_rw; break; case RV_LTV: @@ -810,15 +818,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind * addresses have been selected... */ uint64_t gp; - if (mod->core_layout.size > MAX_LTOFF) + if (mod->core_layout.size_rx + mod->core_layout.size_rw > MAX_LTOFF) /* * This takes advantage of fact that SHF_ARCH_SMALL gets allocated * at the end of the module. */ - gp = mod->core_layout.size - MAX_LTOFF / 2; + gp = mod->core_layout.size_rx + mod->core_layout.size_rw - MAX_LTOFF / 2; else - gp = mod->core_layout.size / 2; - gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8); + gp = (mod->core_layout.size_rx + mod->core_layout.size_rw) / 2; + gp = (uint64_t) mod->core_layout.base_rx + ((gp + 7) & -8); mod->arch.gp = gp; DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); } diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index c39c3cd3a..3c77738fc 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block __refdata palinfo_cpu_notifier = +static struct notifier_block palinfo_cpu_notifier = { .notifier_call = palinfo_cpu_callback, .priority = 0, diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 41e33f84c..65180b2a2 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len unsigned long align_mask = 0; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); if (len > RGN_MAP_LIMIT) return -ENOMEM; @@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; #endif + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + addr = mm->free_area_cache; + else +#endif + if (!addr) addr = TASK_UNMAPPED_BASE; @@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len info.high_limit = TASK_SIZE; info.align_mask = align_mask; info.align_offset = 0; + info.threadstack_offset = offset; return vm_unmapped_area(&info); } diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index f89d20c97..410a1b175 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -172,7 +172,7 @@ SECTIONS { /* Per-cpu data: */ . 
= ALIGN(PERCPU_PAGE_SIZE); PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) - __phys_per_cpu_start = __per_cpu_load; + __phys_per_cpu_start = per_cpu_load; /* * ensure percpu data fits * into percpu page size diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index fa6ad95e9..b46bd89b9 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address) return pte_present(pte); } +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + # define VM_READ_BIT 0 # define VM_WRITE_BIT 1 # define VM_EXEC_BIT 2 @@ -151,8 +168,21 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) goto bad_area; - if ((vma->vm_flags & mask) != mask) + if ((vma->vm_flags & mask) != mask) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) + goto bad_area; + + up_read(&mm->mmap_sem); + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); + do_group_exit(SIGKILL); + } +#endif + goto bad_area; + } /* * If for any reason at all we couldn't handle the fault, make diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 85de86d36..db7f6b877 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -138,6 +138,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u unsigned long pgoff, unsigned long flags) { struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags); if (len > RGN_MAP_LIMIT) return -ENOMEM; @@ -161,6 +162,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT; info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1); info.align_offset = 0; + info.threadstack_offset = offset; return vm_unmapped_area(&info); } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 1841ef691..74d833027 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -119,6 +119,19 @@ ia64_init_addr_space (void) vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; + +#ifdef CONFIG_PAX_PAGEEXEC + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { + vma->vm_flags &= ~VM_EXEC; + +#ifdef CONFIG_PAX_MPROTECT + if (current->mm->pax_flags & MF_PAX_MPROTECT) + vma->vm_flags &= ~VM_MAYEXEC; +#endif + + } +#endif + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); down_write(¤t->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { @@ -279,7 +292,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); return 0; } diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h index 40b3ee981..8c2c112b9 100644 --- a/arch/m32r/include/asm/cache.h +++ b/arch/m32r/include/asm/cache.h @@ -1,8 +1,10 @@ #ifndef _ASM_M32R_CACHE_H 
#define _ASM_M32R_CACHE_H +#include <linux/const.h> + /* L1 cache line size */ #define L1_CACHE_SHIFT 4 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #endif /* _ASM_M32R_CACHE_H */ diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c index 82abd159d..d95ae5db0 100644 --- a/arch/m32r/lib/usercopy.c +++ b/arch/m32r/lib/usercopy.c @@ -14,6 +14,9 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + prefetch(from); if (access_ok(VERIFY_WRITE, to, n)) __copy_user(to,from,n); @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + prefetchw(to); if (access_ok(VERIFY_READ, from, n)) __copy_user_zeroing(to,from,n); diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h index 0395c51e4..5f2603124 100644 --- a/arch/m68k/include/asm/cache.h +++ b/arch/m68k/include/asm/cache.h @@ -4,9 +4,11 @@ #ifndef __ARCH_M68K_CACHE_H #define __ARCH_M68K_CACHE_H +#include <linux/const.h> + /* bytes per L1 cache line */ #define L1_CACHE_SHIFT 4 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define ARCH_DMA_MINALIGN L1_CACHE_BYTES diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 4e5aa2f4f..172c46953 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -107,6 +107,7 @@ static int rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) switch (cmd) { case RTC_PLL_GET: + memset(&pll, 0, sizeof(pll)); if (!mach_get_rtc_pll || mach_get_rtc_pll(&pll)) return -EINVAL; return copy_to_user(argp, &pll, sizeof pll) ?
-EFAULT : 0; diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c index db1b7da91..8e1368481 100644 --- a/arch/metag/mm/hugetlbpage.c +++ b/arch/metag/mm/hugetlbpage.c @@ -189,6 +189,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len) info.high_limit = TASK_SIZE; info.align_mask = PAGE_MASK & HUGEPT_MASK; info.align_offset = 0; + info.threadstack_offset = 0; return vm_unmapped_area(&info); } diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h index 4efe96a03..60e869982 100644 --- a/arch/microblaze/include/asm/cache.h +++ b/arch/microblaze/include/asm/cache.h @@ -13,11 +13,12 @@ #ifndef _ASM_MICROBLAZE_CACHE_H #define _ASM_MICROBLAZE_CACHE_H +#include #include #define L1_CACHE_SHIFT 5 /* word-granular cache in microblaze */ -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define SMP_CACHE_BYTES L1_CACHE_BYTES diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild index 5c3f688a5..f8cc1b3f3 100644 --- a/arch/mips/Kbuild +++ b/arch/mips/Kbuild @@ -1,7 +1,7 @@ # Fail on warnings - also for files referenced in subdirs # -Werror can be disabled for specific files using: # CFLAGS_ := -Wno-error -subdir-ccflags-y := -Werror +# subdir-ccflags-y := -Werror # platform specific definitions include arch/mips/Kbuild.platforms diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index b3c5bde43..d6b51047a 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -49,6 +49,7 @@ config MIPS select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select VIRT_TO_BUS + select HAVE_GCC_PLUGINS select MODULES_USE_ELF_REL if MODULES select MODULES_USE_ELF_RELA if MODULES && 64BIT select CLONE_BACKWARDS @@ -2595,7 +2596,7 @@ config RELOCATION_TABLE_SIZE config RANDOMIZE_BASE bool "Randomize the address of the kernel image" - depends on RELOCATABLE + depends on RELOCATABLE && BROKEN_SECURITY ---help--- Randomizes the physical and virtual address at which the kernel image is loaded, as a security feature that @@ -2811,6 +2812,7 @@ source "kernel/Kconfig.preempt" config KEXEC bool "Kexec system call" select KEXEC_CORE + depends on !GRKERNSEC_KMEM help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 0ab176bdb..c4469a461 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -22,15 +22,39 @@ #include #include +#ifdef CONFIG_GENERIC_ATOMIC64 +#include +#endif + #define ATOMIC_INIT(i) { (i) } +#ifdef CONFIG_64BIT +#define _ASM_EXTABLE(from, to) \ +" .section __ex_table,\"a\"\n" \ +" .dword " #from ", " #to"\n" \ +" .previous\n" +#else +#define _ASM_EXTABLE(from, to) \ +" .section __ex_table,\"a\"\n" \ +" .word " #from ", " #to"\n" \ +" .previous\n" +#endif + /* * atomic_read - read atomic variable * @v: pointer of type atomic_t * * Atomically reads the value of @v. */ -#define atomic_read(v) READ_ONCE((v)->counter) +static inline int atomic_read(const atomic_t *v) +{ + return READ_ONCE(v->counter); +} + +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return READ_ONCE(v->counter); +} /* * atomic_set - set atomic variable @@ -39,47 +63,77 @@ * * Atomically sets the value of @v to @i. 
*/ -#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) +static inline void atomic_set(atomic_t *v, int i) +{ + WRITE_ONCE(v->counter, i); +} + +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + WRITE_ONCE(v->counter, i); +} -#define ATOMIC_OP(op, c_op, asm_op) \ -static __inline__ void atomic_##op(int i, atomic_t * v) \ +#ifdef CONFIG_PAX_REFCOUNT +#define __OVERFLOW_POST \ + " b 4f \n" \ + " .set noreorder \n" \ + "3: b 5f \n" \ + " move %0, %1 \n" \ + " .set reorder \n" +#define __OVERFLOW_EXTABLE \ + "3:\n" \ + _ASM_EXTABLE(2b, 3b) +#else +#define __OVERFLOW_POST +#define __OVERFLOW_EXTABLE +#endif + +#define __ATOMIC_OP(op, suffix, asm_op, extable) \ +static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \ { \ if (kernel_uses_llsc && R10000_LLSC_WAR) { \ int temp; \ \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - "1: ll %0, %1 # atomic_" #op " \n" \ - " " #asm_op " %0, %2 \n" \ + " .set mips3 \n" \ + "1: ll %0, %1 # atomic_" #op #suffix "\n" \ + "2: " #asm_op " %0, %2 \n" \ " sc %0, %1 \n" \ " beqzl %0, 1b \n" \ + extable \ " .set mips0 \n" \ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ int temp; \ \ - do { \ - __asm__ __volatile__( \ - " .set "MIPS_ISA_LEVEL" \n" \ - " ll %0, %1 # atomic_" #op "\n" \ - " " #asm_op " %0, %2 \n" \ - " sc %0, %1 \n" \ - " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ - : "Ir" (i)); \ - } while (unlikely(!temp)); \ + __asm__ __volatile__( \ + " .set "MIPS_ISA_LEVEL" \n" \ + "1: ll %0, %1 # atomic_" #op #suffix "\n" \ + "2: " #asm_op " %0, %2 \n" \ + " sc %0, %1 \n" \ + " beqz %0, 1b \n" \ + extable \ + " .set mips0 \n" \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i)); \ } else { \ unsigned long flags; \ \ raw_local_irq_save(flags); \ - v->counter c_op i; \ + __asm__ __volatile__( \ + "2: " #asm_op " %0, %1 \n" \ + extable \ + : "+r" (v->counter) : "Ir" (i)); \ raw_local_irq_restore(flags); \ } \ } -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ +#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \ + __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE) + +#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \ +static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t * v) \ { \ int result; \ \ @@ -87,12 +141,15 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ int temp; \ \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - "1: ll %1, %2 # atomic_" #op "_return \n" \ - " " #asm_op " %0, %1, %3 \n" \ + " .set mips3 \n" \ + "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \ + "2: " #asm_op " %0, %1, %3 \n" \ " sc %0, %2 \n" \ " beqzl %0, 1b \n" \ - " " #asm_op " %0, %1, %3 \n" \ + post_op \ + extable \ + "4: " #asm_op " %0, %1, %3 \n" \ + "5: \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ "+" GCC_OFF_SMALL_ASM() (v->counter) \ @@ -100,32 +157,40 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ } else if (kernel_uses_llsc) { \ int temp; \ \ - do { \ - __asm__ __volatile__( \ - " .set "MIPS_ISA_LEVEL" \n" \ - " ll %1, %2 # atomic_" #op "_return \n" \ - " " #asm_op " %0, %1, %3 \n" \ - " sc %0, %2 \n" \ - " .set mips0 \n" \ - : "=&r" (result), "=&r" (temp), \ - "+" GCC_OFF_SMALL_ASM() (v->counter) \ - : "Ir" (i)); \ - } while (unlikely(!result)); \ - \ - result = temp; result c_op i; \ + __asm__ __volatile__( \ + 
" .set "MIPS_ISA_LEVEL" \n" \ + "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \ + "2: " #asm_op " %0, %1, %3 \n" \ + " sc %0, %2 \n" \ + post_op \ + extable \ + "4: " #asm_op " %0, %1, %3 \n" \ + "5: \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i)); \ } else { \ unsigned long flags; \ \ raw_local_irq_save(flags); \ - result = v->counter; \ - result c_op i; \ - v->counter = result; \ + __asm__ __volatile__( \ + " lw %0, %1 \n" \ + "2: " #asm_op " %0, %1, %2 \n" \ + " sw %0, %1 \n" \ + "3: \n" \ + extable \ + : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i)); \ raw_local_irq_restore(flags); \ } \ \ return result; \ } +#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \ + __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) + #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ { \ @@ -173,13 +238,13 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ return result; \ } -#define ATOMIC_OPS(op, c_op, asm_op) \ - ATOMIC_OP(op, c_op, asm_op) \ - ATOMIC_OP_RETURN(op, c_op, asm_op) \ - ATOMIC_FETCH_OP(op, c_op, asm_op) +#define ATOMIC_OPS(op, asm_op) \ + ATOMIC_OP(op, asm_op) \ + ATOMIC_OP_RETURN(op, asm_op) \ + ATOMIC_FETCH_OP(op, asm_op) -ATOMIC_OPS(add, +=, addu) -ATOMIC_OPS(sub, -=, subu) +ATOMIC_OPS(add, addu) +ATOMIC_OPS(sub, subu) #define atomic_add_return_relaxed atomic_add_return_relaxed #define atomic_sub_return_relaxed atomic_sub_return_relaxed @@ -187,13 +252,13 @@ ATOMIC_OPS(sub, -=, subu) #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed #undef ATOMIC_OPS -#define ATOMIC_OPS(op, c_op, asm_op) \ - ATOMIC_OP(op, c_op, asm_op) \ - ATOMIC_FETCH_OP(op, c_op, asm_op) +#define ATOMIC_OPS(op, asm_op) \ + ATOMIC_OP(op, asm_op) \ + ATOMIC_FETCH_OP(op, asm_op) -ATOMIC_OPS(and, &=, and) -ATOMIC_OPS(or, |=, or) -ATOMIC_OPS(xor, ^=, xor) +ATOMIC_OPS(and, and) +ATOMIC_OPS(or, or) +ATOMIC_OPS(xor, xor) #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed @@ -202,7 +267,9 @@ ATOMIC_OPS(xor, ^=, xor) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN +#undef __ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef __ATOMIC_OP /* * atomic_sub_if_positive - conditionally subtract integer from atomic variable @@ -212,7 +279,7 @@ ATOMIC_OPS(xor, ^=, xor) * Atomically test @v and subtract @i if @v is greater or equal than @i. * The function returns the old value of @v minus @i. 
*/ -static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) +static __inline__ int atomic_sub_if_positive(int i, atomic_t *v) { int result; @@ -222,7 +289,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) int temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_LEVEL" \n" "1: ll %1, %2 # atomic_sub_if_positive\n" " subu %0, %1, %3 \n" " bltz %0, 1f \n" @@ -271,8 +338,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) return result; } -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) -#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + return cmpxchg(&v->counter, old, new); +} + +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, + int new) +{ + return cmpxchg(&(v->counter), old, new); +} + +static inline int atomic_xchg(atomic_t *v, int new) +{ + return xchg(&v->counter, new); +} + +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) +{ + return xchg(&(v->counter), new); +} /** * __atomic_add_unless - add unless the number is a given value @@ -300,6 +385,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_dec_return(v) atomic_sub_return(1, (v)) #define atomic_inc_return(v) atomic_add_return(1, (v)) +static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_return_unchecked(1, v); +} /* * atomic_sub_and_test - subtract value from variable and test result @@ -321,6 +410,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) * other cases. */ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) +static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_return_unchecked(1, v) == 0; +} /* * atomic_dec_and_test - decrement by 1 and test @@ -345,6 +438,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) * Atomically increments @v by 1. */ #define atomic_inc(v) atomic_add(1, (v)) +static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + atomic_add_unchecked(1, v); +} /* * atomic_dec - decrement and test @@ -353,6 +450,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) * Atomically decrements @v by 1. 
*/ #define atomic_dec(v) atomic_sub(1, (v)) +static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + atomic_sub_unchecked(1, v); +} /* * atomic_add_negative - add and test if negative @@ -374,54 +475,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) * @v: pointer of type atomic64_t * */ -#define atomic64_read(v) READ_ONCE((v)->counter) +static inline long atomic64_read(const atomic64_t *v) +{ + return READ_ONCE(v->counter); +} + +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return READ_ONCE(v->counter); +} /* * atomic64_set - set atomic variable * @v: pointer of type atomic64_t * @i: required value */ -#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) +static inline void atomic64_set(atomic64_t *v, long i) +{ + WRITE_ONCE(v->counter, i); +} -#define ATOMIC64_OP(op, c_op, asm_op) \ -static __inline__ void atomic64_##op(long i, atomic64_t * v) \ +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + WRITE_ONCE(v->counter, i); +} + +#define __ATOMIC64_OP(op, suffix, asm_op, extable) \ +static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \ { \ if (kernel_uses_llsc && R10000_LLSC_WAR) { \ long temp; \ \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - "1: lld %0, %1 # atomic64_" #op " \n" \ - " " #asm_op " %0, %2 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ + "1: lld %0, %1 # atomic64_" #op #suffix "\n" \ + "2: " #asm_op " %0, %2 \n" \ " scd %0, %1 \n" \ " beqzl %0, 1b \n" \ + extable \ " .set mips0 \n" \ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ long temp; \ \ - do { \ - __asm__ __volatile__( \ - " .set "MIPS_ISA_LEVEL" \n" \ - " lld %0, %1 # atomic64_" #op "\n" \ - " " #asm_op " %0, %2 \n" \ - " scd %0, %1 \n" \ - " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ - : "Ir" (i)); \ - } while (unlikely(!temp)); \ + __asm__ __volatile__( \ + " .set "MIPS_ISA_LEVEL" \n" \ + "1: lld %0, %1 # atomic64_" #op #suffix "\n" \ + "2: " #asm_op " %0, %2 \n" \ + " scd %0, %1 \n" \ + " beqz %0, 1b \n" \ + extable \ + " .set mips0 \n" \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i)); \ } else { \ unsigned long flags; \ \ raw_local_irq_save(flags); \ - v->counter c_op i; \ + __asm__ __volatile__( \ + "2: " #asm_op " %0, %1 \n" \ + extable \ + : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \ raw_local_irq_restore(flags); \ } \ } -#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ -static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ +#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \ + __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE) + +#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \ +static inline long atomic64_##op##_return##suffix##_relaxed(long i, atomic64##suffix##_t * v)\ { \ long result; \ \ @@ -429,12 +553,15 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ long temp; \ \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set mips3 \n" \ "1: lld %1, %2 # atomic64_" #op "_return\n" \ - " " #asm_op " %0, %1, %3 \n" \ + "2: " #asm_op " %0, %1, %3 \n" \ " scd %0, %2 \n" \ " beqzl %0, 1b \n" \ - " " #asm_op " %0, %1, %3 \n" \ + post_op \ + extable \ + "4: " #asm_op " %0, %1, %3 \n" \ + "5: \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ "+" GCC_OFF_SMALL_ASM() (v->counter) \ @@ -442,33 +569,42 @@ static __inline__ long atomic64_##op##_return_relaxed(long 
i, atomic64_t * v) \ } else if (kernel_uses_llsc) { \ long temp; \ \ - do { \ - __asm__ __volatile__( \ - " .set "MIPS_ISA_LEVEL" \n" \ - " lld %1, %2 # atomic64_" #op "_return\n" \ - " " #asm_op " %0, %1, %3 \n" \ - " scd %0, %2 \n" \ - " .set mips0 \n" \ - : "=&r" (result), "=&r" (temp), \ - "=" GCC_OFF_SMALL_ASM() (v->counter) \ - : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ - : "memory"); \ - } while (unlikely(!result)); \ - \ - result = temp; result c_op i; \ + __asm__ __volatile__( \ + " .set "MIPS_ISA_LEVEL" \n" \ + "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\ + "2: " #asm_op " %0, %1, %3 \n" \ + " scd %0, %2 \n" \ + " beqz %0, 1b \n" \ + post_op \ + extable \ + "4: " #asm_op " %0, %1, %3 \n" \ + "5: \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "=" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ + : "memory"); \ } else { \ unsigned long flags; \ \ raw_local_irq_save(flags); \ - result = v->counter; \ - result c_op i; \ - v->counter = result; \ + __asm__ __volatile__( \ + " ld %0, %1 \n" \ + "2: " #asm_op " %0, %1, %2 \n" \ + " sd %0, %1 \n" \ + "3: \n" \ + extable \ + : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i)); \ raw_local_irq_restore(flags); \ } \ \ return result; \ } +#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \ + __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) + #define ATOMIC64_FETCH_OP(op, c_op, asm_op) \ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ { \ @@ -517,13 +653,13 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ return result; \ } -#define ATOMIC64_OPS(op, c_op, asm_op) \ - ATOMIC64_OP(op, c_op, asm_op) \ - ATOMIC64_OP_RETURN(op, c_op, asm_op) \ - ATOMIC64_FETCH_OP(op, c_op, asm_op) +#define ATOMIC64_OPS(op, asm_op) \ + ATOMIC64_OP(op, asm_op) \ + ATOMIC64_OP_RETURN(op, asm_op) \ + ATOMIC64_FETCH_OP(op, asm_op) -ATOMIC64_OPS(add, +=, daddu) -ATOMIC64_OPS(sub, -=, dsubu) +ATOMIC64_OPS(add, daddu) +ATOMIC64_OPS(sub, dsubu) #define atomic64_add_return_relaxed atomic64_add_return_relaxed #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed @@ -531,13 +667,13 @@ ATOMIC64_OPS(sub, -=, dsubu) #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed #undef ATOMIC64_OPS -#define ATOMIC64_OPS(op, c_op, asm_op) \ - ATOMIC64_OP(op, c_op, asm_op) \ - ATOMIC64_FETCH_OP(op, c_op, asm_op) +#define ATOMIC64_OPS(op, asm_op) \ + ATOMIC64_OP(op, asm_op) \ + ATOMIC64_FETCH_OP(op, asm_op) -ATOMIC64_OPS(and, &=, and) -ATOMIC64_OPS(or, |=, or) -ATOMIC64_OPS(xor, ^=, xor) +ATOMIC64_OPS(and, and) +ATOMIC64_OPS(or, or) +ATOMIC64_OPS(xor, xor) #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed @@ -546,7 +682,11 @@ ATOMIC64_OPS(xor, ^=, xor) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN +#undef __ATOMIC64_OP_RETURN #undef ATOMIC64_OP +#undef __ATOMIC64_OP +#undef __OVERFLOW_EXTABLE +#undef __OVERFLOW_POST /* * atomic64_sub_if_positive - conditionally subtract integer from atomic @@ -557,7 +697,7 @@ ATOMIC64_OPS(xor, ^=, xor) * Atomically test @v and subtract @i if @v is greater or equal than @i. * The function returns the old value of @v minus @i. 
*/ -static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) +static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v) { long result; @@ -567,7 +707,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) long temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_LEVEL" \n" "1: lld %1, %2 # atomic64_sub_if_positive\n" " dsubu %0, %1, %3 \n" " bltz %0, 1f \n" @@ -616,9 +756,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) return result; } -#define atomic64_cmpxchg(v, o, n) \ - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) -#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new))) +static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) +{ + return cmpxchg(&v->counter, old, new); +} + +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, + long new) +{ + return cmpxchg(&(v->counter), old, new); +} + +static inline long atomic64_xchg(atomic64_t *v, long new) +{ + return xchg(&v->counter, new); +} + +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) +{ + return xchg(&(v->counter), new); +} /** * atomic64_add_unless - add unless the number is a given value @@ -648,6 +805,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) #define atomic64_inc_return(v) atomic64_add_return(1, (v)) +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v)) /* * atomic64_sub_and_test - subtract value from variable and test result @@ -669,6 +827,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) * other cases. */ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) +#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0) /* * atomic64_dec_and_test - decrement by 1 and test @@ -693,6 +852,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) * Atomically increments @v by 1. */ #define atomic64_inc(v) atomic64_add(1, (v)) +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v)) /* * atomic64_dec - decrement and test @@ -701,6 +861,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) * Atomically decrements @v by 1. */ #define atomic64_dec(v) atomic64_sub(1, (v)) +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v)) /* * atomic64_add_negative - add and test if negative diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h index b4db69fbc..8f3b09336 100644 --- a/arch/mips/include/asm/cache.h +++ b/arch/mips/include/asm/cache.h @@ -9,10 +9,11 @@ #ifndef _ASM_CACHE_H #define _ASM_CACHE_H +#include #include #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define SMP_CACHE_SHIFT L1_CACHE_SHIFT #define SMP_CACHE_BYTES L1_CACHE_BYTES diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index 2b3dc2973..1f7bdc44f 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -458,6 +458,13 @@ extern const char *__elf_platform; #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) #endif +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ #define ARCH_DLINFO \ do { \ diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h index c1f6afa4b..38cc6e9a3 100644 --- a/arch/mips/include/asm/exec.h +++ b/arch/mips/include/asm/exec.h @@ -12,6 +12,6 @@ #ifndef _ASM_EXEC_H #define _ASM_EXEC_H -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) #endif /* _ASM_EXEC_H */ diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h index 9e8ef5994..1139d6bbd 100644 --- a/arch/mips/include/asm/hw_irq.h +++ b/arch/mips/include/asm/hw_irq.h @@ -10,7 +10,7 @@ #include -extern atomic_t irq_err_count; +extern atomic_unchecked_t irq_err_count; /* * interrupt-retrigger: NOP for now. This may not be appropriate for all diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 6bf10e796..3c0b52fd9 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -11,7 +11,6 @@ #include #include -#include #include diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h index 8feaed62a..1bd8a64b9 100644 --- a/arch/mips/include/asm/local.h +++ b/arch/mips/include/asm/local.h @@ -13,15 +13,25 @@ typedef struct atomic_long_t a; } local_t; +typedef struct { + atomic_long_unchecked_t a; +} local_unchecked_t; + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } #define local_read(l) atomic_long_read(&(l)->a) +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) #define local_set(l, i) atomic_long_set(&(l)->a, (i)) +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i)) #define local_add(i, l) atomic_long_add((i), (&(l)->a)) +#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a)) #define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) +#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a)) #define local_inc(l) atomic_long_inc(&(l)->a) +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) #define local_dec(l) atomic_long_dec(&(l)->a) +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) /* * Same as above, but return the result value @@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l) return result; } +static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l) +{ + unsigned long result; + + if (kernel_uses_llsc && R10000_LLSC_WAR) { + unsigned long temp; + + __asm__ __volatile__( + " .set mips3 \n" + "1:" __LL "%1, %2 # local_add_return \n" + " addu %0, %1, %3 \n" + __SC "%0, %2 \n" + " beqzl %0, 1b \n" + " addu %0, %1, %3 \n" + " .set mips0 \n" + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) + : "Ir" (i), "m" (l->a.counter) + : "memory"); + } else if (kernel_uses_llsc) { + unsigned long temp; + + __asm__ __volatile__( + " .set mips3 \n" + "1:" __LL "%1, %2 # local_add_return \n" + " addu %0, %1, %3 \n" + __SC "%0, %2 \n" + " beqz %0, 1b \n" + " addu %0, %1, %3 \n" + " .set mips0 \n" + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) + : "Ir" (i), "m" (l->a.counter) + : "memory"); + } else { + unsigned long flags; + + local_irq_save(flags); + result = l->a.counter; + result += i; + l->a.counter = result; + local_irq_restore(flags); + } + + return result; +} + static __inline__ long local_sub_return(long i, local_t * l) { unsigned long result; @@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l) #define local_cmpxchg(l, o, n) \ 
((long)cmpxchg_local(&((l)->a.counter), (o), (n))) +#define local_cmpxchg_unchecked(l, o, n) \ + ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n))) /** diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 5f9875980..a3a7cb2c5 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, #ifdef CONFIG_CPU_MIPS32 typedef struct { unsigned long pte_low, pte_high; } pte_t; #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) #else typedef struct { unsigned long long pte; } pte_t; #define pte_val(x) ((x).pte) diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index a03e86969..e1928f512 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { set_pud(pud, __pud((unsigned long)pmd)); } + +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_populate(mm, pud, pmd); +} #endif /* diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 9e9e94415..43354f53d 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -20,6 +20,9 @@ #include #include +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + struct mm_struct; struct vm_area_struct; diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index e309d8fcb..20eefecfa 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -101,6 +101,9 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ #define TIF_UPROBE 6 /* breakpointed or singlestepping */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ +/* li takes a 32bit immediate */ +#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */ + #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_NOHZ 19 /* in adaptive nohz mode */ @@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_USEDMSA (1< #include #include diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 1ab34322d..0e6687994 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -41,6 +41,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include #include diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c index 44a1f792e..2bd6aa3ad 100644 --- a/arch/mips/kernel/irq-gt641xx.c +++ b/arch/mips/kernel/irq-gt641xx.c @@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void) } } - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); } void __init gt641xx_irq_init(void) diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index f25f7eab7..19e1c628b 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -34,17 +34,17 @@ void ack_bad_irq(unsigned int irq) printk("unexpected IRQ # %d\n", irq); } -atomic_t irq_err_count; +atomic_unchecked_t irq_err_count; int arch_show_interrupts(struct seq_file *p, int prec) { - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); return 0; } asmlinkage void spurious_interrupt(void) { - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); } void __init init_IRQ(void) @@ -61,6 +61,8 @@ void __init init_IRQ(void) } #ifdef CONFIG_DEBUG_STACKOVERFLOW + +extern void gr_handle_kernel_exploit(void); static inline void check_stack_overflow(void) { unsigned long sp; @@ -76,6 +78,7 @@ static inline void check_stack_overflow(void) printk("do_IRQ: stack overflow: %ld\n", sp - sizeof(struct thread_info)); dump_stack(); + gr_handle_kernel_exploit(); } } #else diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index f5c8bce70..b06a56098 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -535,6 +535,7 @@ static void __used kretprobe_trampoline_holder(void) void kretprobe_trampoline(void); +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -543,6 +544,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* Replace the return addr with trampoline addr */ regs->regs[31] = (unsigned long)kretprobe_trampoline; } +#endif /* * Called when the probe at kretprobe trampoline is hit @@ -611,6 +613,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, return 1; } +#ifdef CONFIG_KRETPROBES int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) @@ -618,6 +621,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) return 0; } +#endif static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *)kretprobe_trampoline, diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index 7cf653e21..7df52f647 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -168,7 +168,7 @@ int cps_pm_enter_state(enum cps_pm_state state) nc_core_ready_count = nc_addr; /* Ensure ready_count is zero-initialised before the assembly runs */ - ACCESS_ONCE(*nc_core_ready_count) = 0; + ACCESS_ONCE_RW(*nc_core_ready_count) = 0; coupled_barrier(&per_cpu(pm_barrier, core), online); /* Run the generated entry code */ diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 9514e5f22..a3fc55023 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -545,18 +545,6 @@ unsigned long get_wchan(struct task_struct *task) return pc; } -/* - * Don't forget that the stack pointer must be aligned on a 8 bytes - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. 
- */ -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() & ~PAGE_MASK; - - return sp & ALMASK; -} - static void arch_dump_stack(void *info) { struct pt_regs *regs; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index a92994d60..e389b11a8 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -882,6 +882,10 @@ long arch_ptrace(struct task_struct *child, long request, return ret; } +#ifdef CONFIG_GRKERNSEC_SETXID +extern void gr_delayed_cred_worker(void); +#endif + /* * Notification of system call entry/exit * - triggered by current->work.syscall_trace @@ -899,6 +903,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) if (secure_computing(NULL) == -1) return -1; +#ifdef CONFIG_GRKERNSEC_SETXID + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) + gr_delayed_cred_worker(); +#endif + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->regs[2]); diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index 4472a7f98..c5905e682 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -18,8 +18,8 @@ #include static unsigned int initcount = 0; -static atomic_t count_count_start = ATOMIC_INIT(0); -static atomic_t count_count_stop = ATOMIC_INIT(0); +static atomic_unchecked_t count_count_start = ATOMIC_INIT(0); +static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0); #define COUNTON 100 #define NR_LOOPS 3 @@ -46,13 +46,13 @@ void synchronise_count_master(int cpu) for (i = 0; i < NR_LOOPS; i++) { /* slaves loop on '!= 2' */ - while (atomic_read(&count_count_start) != 1) + while (atomic_read_unchecked(&count_count_start) != 1) mb(); - atomic_set(&count_count_stop, 0); + atomic_set_unchecked(&count_count_stop, 0); smp_wmb(); /* Let the slave writes its count register */ - atomic_inc(&count_count_start); + atomic_inc_unchecked(&count_count_start); /* Count will be initialised to current timer */ if (i == 1) @@ -67,11 +67,11 @@ void synchronise_count_master(int cpu) /* * Wait for slave to leave the synchronization point: */ - while (atomic_read(&count_count_stop) != 1) + while (atomic_read_unchecked(&count_count_stop) != 1) mb(); - atomic_set(&count_count_start, 0); + atomic_set_unchecked(&count_count_start, 0); smp_wmb(); - atomic_inc(&count_count_stop); + atomic_inc_unchecked(&count_count_stop); } /* Arrange for an interrupt in a short while */ write_c0_compare(read_c0_count() + COUNTON); @@ -96,8 +96,8 @@ void synchronise_count_slave(int cpu) */ for (i = 0; i < NR_LOOPS; i++) { - atomic_inc(&count_count_start); - while (atomic_read(&count_count_start) != 2) + atomic_inc_unchecked(&count_count_start); + while (atomic_read_unchecked(&count_count_start) != 2) mb(); /* @@ -106,8 +106,8 @@ void synchronise_count_slave(int cpu) if (i == NR_LOOPS-1) write_c0_count(initcount); - atomic_inc(&count_count_stop); - while (atomic_read(&count_count_stop) != 2) + atomic_inc_unchecked(&count_count_stop); + while (atomic_read_unchecked(&count_count_stop) != 2) mb(); } /* Arrange for an interrupt in a short while */ diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 3905003df..7c0cc88bf 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -702,7 +702,18 @@ asmlinkage void do_ov(struct pt_regs *regs) }; prev_state = exception_enter(); - die_if_kernel("Integer overflow", regs); + if (unlikely(!user_mode(regs))) { + +#ifdef 
CONFIG_PAX_REFCOUNT + if (fixup_exception(regs)) { + pax_report_refcount_error(regs, NULL); + exception_exit(prev_state); + return; + } +#endif + + die("Integer overflow", regs); + } force_sig_info(SIGFPE, &info, current); exception_exit(prev_state); diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c index c3e22053d..b4302f878 100644 --- a/arch/mips/lib/ashldi3.c +++ b/arch/mips/lib/ashldi3.c @@ -2,7 +2,11 @@ #include "libgcc.h" -long long notrace __ashldi3(long long u, word_type b) +#ifdef CONFIG_64BIT +DWtype notrace __ashlti3(DWtype u, word_type b) +#else +DWtype notrace __ashldi3(DWtype u, word_type b) +#endif { DWunion uu, w; word_type bm; @@ -11,19 +15,22 @@ long long notrace __ashldi3(long long u, word_type b) return u; uu.ll = u; - bm = 32 - b; + bm = BITS_PER_LONG - b; if (bm <= 0) { w.s.low = 0; - w.s.high = (unsigned int) uu.s.low << -bm; + w.s.high = (unsigned long) uu.s.low << -bm; } else { - const unsigned int carries = (unsigned int) uu.s.low >> bm; + const unsigned long carries = (unsigned long) uu.s.low >> bm; - w.s.low = (unsigned int) uu.s.low << b; - w.s.high = ((unsigned int) uu.s.high << b) | carries; + w.s.low = (unsigned long) uu.s.low << b; + w.s.high = ((unsigned long) uu.s.high << b) | carries; } return w.ll; } - +#ifdef CONFIG_64BIT +EXPORT_SYMBOL(__ashlti3); +#else EXPORT_SYMBOL(__ashldi3); +#endif diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c index 174560248..d20aabfd9 100644 --- a/arch/mips/lib/ashrdi3.c +++ b/arch/mips/lib/ashrdi3.c @@ -2,7 +2,11 @@ #include "libgcc.h" -long long notrace __ashrdi3(long long u, word_type b) +#ifdef CONFIG_64BIT +DWtype notrace __ashrti3(DWtype u, word_type b) +#else +DWtype notrace __ashrdi3(DWtype u, word_type b) +#endif { DWunion uu, w; word_type bm; @@ -11,21 +15,24 @@ long long notrace __ashrdi3(long long u, word_type b) return u; uu.ll = u; - bm = 32 - b; + bm = BITS_PER_LONG - b; if (bm <= 0) { /* w.s.high = 1..1 or 0..0 */ w.s.high = - uu.s.high >> 31; + uu.s.high >> (BITS_PER_LONG - 1); w.s.low = uu.s.high >> -bm; } else { - const unsigned int carries = (unsigned int) uu.s.high << bm; + const unsigned long carries = (unsigned long) uu.s.high << bm; w.s.high = uu.s.high >> b; - w.s.low = ((unsigned int) uu.s.low >> b) | carries; + w.s.low = ((unsigned long) uu.s.low >> b) | carries; } return w.ll; } - +#ifdef CONFIG_64BIT +EXPORT_SYMBOL(__ashrti3); +#else EXPORT_SYMBOL(__ashrdi3); +#endif diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h index 05909d58e..b03284bf8 100644 --- a/arch/mips/lib/libgcc.h +++ b/arch/mips/lib/libgcc.h @@ -5,13 +5,19 @@ typedef int word_type __attribute__ ((mode (__word__))); +#ifdef CONFIG_64BIT +typedef int DWtype __attribute__((mode(TI))); +#else +typedef long long DWtype; +#endif + #ifdef __BIG_ENDIAN struct DWstruct { - int high, low; + long high, low; }; #elif defined(__LITTLE_ENDIAN) struct DWstruct { - int low, high; + long low, high; }; #else #error I feel sick. 
@@ -19,7 +25,7 @@ struct DWstruct { typedef union { struct DWstruct s; - long long ll; + DWtype ll; } DWunion; #endif /* __ASM_LIBGCC_H */ diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 3bef306cd..fcec1330f 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -30,6 +30,23 @@ int show_unhandled_signals = 1; +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -204,6 +221,14 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) { + pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs)); + do_group_exit(SIGKILL); + } +#endif + tsk->thread.cp0_badvaddr = address; tsk->thread.error_code = write; if (show_unhandled_signals && diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index e86ebcf5c..7a78a070c 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -474,10 +474,10 @@ void __init mem_init(void) #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) - /* The -4 is a hack so that user tools don't have to handle + /* The -0x2000-4 is a hack so that user tools don't have to handle the overflow. */ kclist_add(&kcore_kseg0, (void *) CKSEG0, - 0x80000000 - 4, KCORE_TEXT); + 0x80000000 - 0x2000 - 4, KCORE_TEXT); #endif } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index d08ea3ff0..66bb13d7a 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, struct vm_area_struct *vma; unsigned long addr = addr0; int do_color_align; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); struct vm_unmapped_area_info info; if (unlikely(len > TASK_SIZE)) @@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, do_color_align = 1; /* requesting a specific address */ + +#ifdef CONFIG_PAX_RANDMMAP + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } info.length = len; info.align_mask = do_color_align ? 
(PAGE_MASK & shm_align_mask) : 0; info.align_offset = pgoff << PAGE_SHIFT; + info.threadstack_offset = offset; if (dir == DOWN) { info.flags = VM_UNMAPPED_AREA_TOPDOWN; @@ -160,14 +166,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; } else { mm->mmap_base = mmap_base(random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c index cfceaea92..65deeb41e 100644 --- a/arch/mips/sgi-ip27/ip27-nmi.c +++ b/arch/mips/sgi-ip27/ip27-nmi.c @@ -187,9 +187,9 @@ void cont_nmi_dump(void) { #ifndef REAL_NMI_SIGNAL - static atomic_t nmied_cpus = ATOMIC_INIT(0); + static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0); - atomic_inc(&nmied_cpus); + atomic_inc_unchecked(&nmied_cpus); #endif /* * Only allow 1 cpu to proceed @@ -233,7 +233,7 @@ cont_nmi_dump(void) udelay(10000); } #else - while (atomic_read(&nmied_cpus) != num_online_cpus()); + while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus()); #endif /* diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c index 160b88000..3b53fdc63 100644 --- a/arch/mips/sni/rm200.c +++ b/arch/mips/sni/rm200.c @@ -270,7 +270,7 @@ void sni_rm200_mask_and_ack_8259A(struct irq_data *d) "spurious RM200 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); /* * Theoretically we do not have to handle this IRQ, * but in Linux this does not cause problems and is diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 41e873bc8..34d33a769 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c @@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq) printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2); - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); return -1; } diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c index ae0e4ee6c..e8f069236 100644 --- a/arch/mips/vr41xx/common/irq.c +++ b/arch/mips/vr41xx/common/irq.c @@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq) irq_cascade_t *cascade; if (irq >= NR_IRQS) { - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); return; } @@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq) ret = cascade->get_irq(irq); irq = ret; if (ret < 0) - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); else irq_dispatch(irq); if (!irqd_irq_disabled(idata) && chip->irq_unmask) diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h index 967d144f3..db12197e6 100644 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h @@ -11,12 +11,14 @@ #ifndef _ASM_PROC_CACHE_H #define _ASM_PROC_CACHE_H +#include + /* L1 cache */ #define L1_CACHE_NWAYS 4 /* number of ways in caches */ #define L1_CACHE_NENTRIES 256 /* number of entries in each way */ -#define 
L1_CACHE_BYTES 16 /* bytes per entry */ #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */ +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h index bcb5df2d8..84fabd26c 100644 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h @@ -16,13 +16,15 @@ #ifndef _ASM_PROC_CACHE_H #define _ASM_PROC_CACHE_H +#include + /* * L1 cache */ #define L1_CACHE_NWAYS 4 /* number of ways in caches */ #define L1_CACHE_NENTRIES 128 /* number of entries in each way */ -#define L1_CACHE_BYTES 32 /* bytes per entry */ #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */ +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h index 5f55da9cb..7ce943743 100644 --- a/arch/openrisc/include/asm/cache.h +++ b/arch/openrisc/include/asm/cache.h @@ -19,13 +19,15 @@ #ifndef __ASM_OPENRISC_CACHE_H #define __ASM_OPENRISC_CACHE_H +#include + /* FIXME: How can we replace these with values from the CPU... * they shouldn't be hard-coded! */ #define __ro_after_init __read_mostly -#define L1_CACHE_BYTES 16 #define L1_CACHE_SHIFT 4 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #endif /* __ASM_OPENRISC_CACHE_H */ diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 5394b9c5f..e77a306e4 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -327,6 +327,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) return dec; } +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) + #endif /* !CONFIG_64BIT */ diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index df0f52bd1..810699b6d 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -5,6 +5,7 @@ #ifndef __ARCH_PARISC_CACHE_H #define __ARCH_PARISC_CACHE_H +#include /* * PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors @@ -14,6 +15,8 @@ #define L1_CACHE_BYTES 16 #define L1_CACHE_SHIFT 4 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #ifndef __ASSEMBLY__ #define SMP_CACHE_BYTES L1_CACHE_BYTES diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 78c9fd32c..42fa66a33 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... 
*/ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. */ diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index f08dda3f0..ea6aa1b4d 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)); } +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) +{ + pgd_populate(mm, pgd, pmd); +} + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER); @@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(mm, x) do { } while (0) #define pgd_populate(mm, pmd, pte) BUG() +#define pgd_populate_kernel(mm, pmd, pte) BUG() #endif diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 3a4ed9f91..29b7218a9 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -236,6 +236,17 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) #define PAGE_COPY PAGE_EXECREAD #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 9a2aee1b9..a8e588f5d 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -223,10 +223,10 @@ static inline void copy_user_overflow(int size, unsigned long count) static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - int sz = __compiletime_object_size(to); + size_t sz = __compiletime_object_size(to); unsigned long ret = n; - if (likely(sz < 0 || sz >= n)) { + if (likely(sz == (size_t)-1 || sz >= n)) { check_object_size(to, n, false); ret = __copy_from_user(to, from, n); } else if (!__builtin_constant_p(n)) @@ -234,7 +234,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n) else __bad_copy_user(); - if (unlikely(ret)) + if (unlikely(ret && (long)ret > 0)) memset(to + (n - ret), 0, ret); return ret; @@ -243,9 +243,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline unsigned long __must_check copy_to_user(void __user *to, const void *from, 
unsigned long n) { - int sz = __compiletime_object_size(from); + size_t sz = __compiletime_object_size(from); - if (likely(sz < 0 || sz >= n)) { + if (likely(sz == (size_t)-1 || sz >= n)) { check_object_size(from, n, true); n = __copy_to_user(to, from, n); } else if (!__builtin_constant_p(n)) diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index a0ecdb4ab..71d2069cc 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -100,14 +100,12 @@ * or init pieces the location is */ static inline int in_init(struct module *me, void *loc) { - return (loc >= me->init_layout.base && - loc <= (me->init_layout.base + me->init_layout.size)); + return within_module_init((unsigned long)loc, me); } static inline int in_core(struct module *me, void *loc) { - return (loc >= me->core_layout.base && - loc <= (me->core_layout.base + me->core_layout.size)); + return within_module_core((unsigned long)loc, me); } static inline int in_local(struct module *me, void *loc) @@ -367,13 +365,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, } /* align things a bit */ - me->core_layout.size = ALIGN(me->core_layout.size, 16); - me->arch.got_offset = me->core_layout.size; - me->core_layout.size += gots * sizeof(struct got_entry); + me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 16); + me->arch.got_offset = me->core_layout.size_rw; + me->core_layout.size_rw += gots * sizeof(struct got_entry); - me->core_layout.size = ALIGN(me->core_layout.size, 16); - me->arch.fdesc_offset = me->core_layout.size; - me->core_layout.size += fdescs * sizeof(Elf_Fdesc); + me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 16); + me->arch.fdesc_offset = me->core_layout.size_rw; + me->core_layout.size_rw += fdescs * sizeof(Elf_Fdesc); me->arch.got_max = gots; me->arch.fdesc_max = fdescs; @@ -391,7 +389,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) BUG_ON(value == 0); - got = me->core_layout.base + me->arch.got_offset; + got = me->core_layout.base_rw + me->arch.got_offset; for (i = 0; got[i].addr; i++) if (got[i].addr == value) goto out; @@ -409,7 +407,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) #ifdef CONFIG_64BIT static Elf_Addr get_fdesc(struct module *me, unsigned long value) { - Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset; + Elf_Fdesc *fdesc = me->core_layout.base_rw + me->arch.fdesc_offset; if (!value) { printk(KERN_ERR "%s: zero OPD requested!\n", me->name); @@ -427,7 +425,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) /* Create new one */ fdesc->addr = value; - fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; + fdesc->gp = (Elf_Addr)me->core_layout.base_rw + me->arch.got_offset; return (Elf_Addr)fdesc; } #endif /* CONFIG_64BIT */ @@ -847,7 +845,7 @@ register_unwind_table(struct module *me, table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; end = table + sechdrs[me->arch.unwind_section].sh_size; - gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; + gp = (Elf_Addr)me->core_layout.base_rw + me->arch.got_offset; DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", me->arch.unwind_section, table, end, gp); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 0a393a04e..5b3199e0e 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -92,6 +92,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long task_size =
TASK_SIZE; int do_color_align, last_mmap; struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); if (len > task_size) return -ENOMEM; @@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, goto found_addr; } +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_color_align && last_mmap) addr = COLOR_ALIGN(addr, last_mmap, pgoff); @@ -127,6 +132,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, info.high_limit = mmap_upper_limit(); info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; info.align_offset = shared_align_offset(last_mmap, pgoff); + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); found_addr: @@ -146,6 +152,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, unsigned long addr = addr0; int do_color_align, last_mmap; struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); #ifdef CONFIG_64BIT /* This should only ever run for 32-bit processes. */ @@ -170,6 +177,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, } /* requesting a specific address */ +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_color_align && last_mmap) addr = COLOR_ALIGN(addr, last_mmap, pgoff); @@ -187,6 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.high_limit = mm->mmap_base; info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; info.align_offset = shared_align_offset(last_mmap, pgoff); + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); if (!(addr & ~PAGE_MASK)) goto found_addr; @@ -252,6 +264,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->mmap_legacy_base = mmap_legacy_base(); mm->mmap_base = mmap_upper_limit(); +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) { + mm->mmap_legacy_base += mm->delta_mmap; + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; + } +#endif + if (mmap_is_legacy()) { mm->mmap_base = mm->mmap_legacy_base; mm->get_unmapped_area = arch_get_unmapped_area; diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 378df9207..9b2ab51b7 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -719,9 +719,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm,regs->iaoq[0]); - if (vma && (regs->iaoq[0] >= vma->vm_start) - && (vma->vm_flags & VM_EXEC)) { - + if (vma && (regs->iaoq[0] >= vma->vm_start)) { fault_address = regs->iaoq[0]; fault_space = regs->iasq[0]; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 1a0b4f63f..f9d326db6 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -50,7 +51,7 @@ int show_unhandled_signals = 1; static unsigned long parisc_acctyp(unsigned long code, unsigned int inst) { - if (code == 6 || code == 16) + if (code == 6 || code == 7 || code == 16) return VM_EXEC; switch (inst & 0xf0000000) { @@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst) } #endif +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) + * + * returns 1 when task should be killed + * 2 when rt_sigreturn trampoline was detected + * 3 when 
unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: unpatched PLT emulation */ + unsigned int bl, depwi; + + err = get_user(bl, (unsigned int *)instruction_pointer(regs)); + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); + + if (err) + break; + + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; + + err = get_user(ldw, (unsigned int *)addr); + err |= get_user(bv, (unsigned int *)(addr+4)); + err |= get_user(ldw2, (unsigned int *)(addr+8)); + + if (err) + break; + + if (ldw == 0x0E801096U && + bv == 0xEAC0C000U && + ldw2 == 0x0E881095U) + { + unsigned int resolver, map; + + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); + if (err) + break; + + regs->gr[20] = instruction_pointer(regs)+8; + regs->gr[21] = map; + regs->gr[22] = resolver; + regs->iaoq[0] = resolver | 3UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + return 3; + } + } + } while (0); +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + +#ifndef CONFIG_PAX_EMUSIGRT + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) + return 1; +#endif + + do { /* PaX: rt_sigreturn emulation */ + unsigned int ldi1, ldi2, bel, nop; + + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); + + if (err) + break; + + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && + ldi2 == 0x3414015AU && + bel == 0xE4008200U && + nop == 0x08000240U) + { + regs->gr[25] = (ldi1 & 2) >> 1; + regs->gr[20] = __NR_rt_sigreturn; + regs->gr[31] = regs->iaoq[1] + 16; + regs->sr[0] = regs->iasq[1]; + regs->iaoq[0] = 0x100UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + regs->iasq[0] = regs->sr[2]; + regs->iasq[1] = regs->sr[2]; + return 2; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? 
"); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fix; @@ -281,8 +392,33 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, good_area: - if ((vma->vm_flags & acc_type) != acc_type) + if ((vma->vm_flags & acc_type) != acc_type) { + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && + (address & ~3UL) == instruction_pointer(regs)) + { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 3: + return; +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + case 2: + return; +#endif + + } + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); + do_group_exit(SIGKILL); + } +#endif + goto bad_area; + } /* * If for any reason at all we couldn't handle the fault, make diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 65fba4c34..3cfec12d0 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -140,6 +140,7 @@ config PPC select ARCH_USE_BUILTIN_BSWAP select OLD_SIGSUSPEND select OLD_SIGACTION if PPC32 + select HAVE_GCC_PLUGINS select HAVE_DEBUG_STACKOVERFLOW select HAVE_IRQ_EXIT_ON_IRQ_STACK select ARCH_USE_CMPXCHG_LOCKREF if PPC64 @@ -441,6 +442,7 @@ config KEXEC bool "kexec system call" depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E select KEXEC_CORE + depends on !GRKERNSEC_KMEM help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 2b9033519..5e1a3d6a3 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -12,6 +12,11 @@ #define ATOMIC_INIT(i) { (i) } +#define _ASM_EXTABLE(from, to) \ +" .section __ex_table,\"a\"\n" \ + PPC_LONG" " #from ", " #to"\n" \ +" .previous\n" + /* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with * a "bne-" instruction at the end, so an isync is enough as a acquire barrier @@ -39,38 +44,79 @@ static __inline__ int atomic_read(const atomic_t *v) return t; } +static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + int t; + + __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); + + return t; +} + static __inline__ void atomic_set(atomic_t *v, int i) { __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); } -#define ATOMIC_OP(op, asm_op) \ -static __inline__ void atomic_##op(int a, atomic_t *v) \ +static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); +} + +#ifdef CONFIG_PAX_REFCOUNT +#define __REFCOUNT_OP(op) op##o. +#define __OVERFLOW_PRE \ + " mcrxr cr0\n" +#define __OVERFLOW_POST \ + " bf 4*cr0+so, 3f\n" \ + "2: .long 0x00c00b00\n" \ + "3:\n" +#define __OVERFLOW_EXTABLE \ + "\n4:\n" \ + _ASM_EXTABLE(2b, 4b) +#else +#define __REFCOUNT_OP(op) op +#define __OVERFLOW_PRE +#define __OVERFLOW_POST +#define __OVERFLOW_EXTABLE +#endif + +#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \ +static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \ { \ int t; \ \ __asm__ __volatile__( \ -"1: lwarx %0,0,%3 # atomic_" #op "\n" \ +"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \ + pre_op \ #asm_op " %0,%2,%0\n" \ + post_op \ PPC405_ERR77(0,%3) \ " stwcx. 
%0,0,%3 \n" \ " bne- 1b\n" \ + extable \ : "=&r" (t), "+m" (v->counter) \ : "r" (a), "r" (&v->counter) \ : "cc"); \ } \ -#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \ -static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \ +#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \ + __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE) + +#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\ +static inline int atomic_##op##_return##suffix##_relaxed(int a, atomic##suffix##_t *v)\ { \ int t; \ \ __asm__ __volatile__( \ -"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \ +"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "_relaxed\n"\ + pre_op \ #asm_op " %0,%2,%0\n" \ + post_op \ PPC405_ERR77(0, %3) \ " stwcx. %0,0,%3\n" \ " bne- 1b\n" \ + extable \ : "=&r" (t), "+m" (v->counter) \ : "r" (a), "r" (&v->counter) \ : "cc"); \ @@ -78,6 +124,9 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \ return t; \ } +#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\ + __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE) + #define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \ { \ @@ -105,6 +154,7 @@ ATOMIC_OPS(add, add) ATOMIC_OPS(sub, subf) #define atomic_add_return_relaxed atomic_add_return_relaxed +#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed #define atomic_sub_return_relaxed atomic_sub_return_relaxed #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed @@ -126,41 +176,22 @@ ATOMIC_OPS(xor, xor) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP_RELAXED #undef ATOMIC_OP_RETURN_RELAXED +#undef __ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef __ATOMIC_OP #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) -static __inline__ void atomic_inc(atomic_t *v) -{ - int t; - - __asm__ __volatile__( -"1: lwarx %0,0,%2 # atomic_inc\n\ - addic %0,%0,1\n" - PPC405_ERR77(0,%2) -" stwcx. %0,0,%2 \n\ - bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (&v->counter) - : "cc", "xer"); -} - -static __inline__ int atomic_inc_return_relaxed(atomic_t *v) -{ - int t; - - __asm__ __volatile__( -"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n" -" addic %0,%0,1\n" - PPC405_ERR77(0, %2) -" stwcx. %0,0,%2\n" -" bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (&v->counter) - : "cc", "xer"); - - return t; -} +/* + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Automatically increments @v by 1 + */ +#define atomic_inc(v) atomic_add(1, (v)) +#define atomic_inc_unchecked(v) atomic_add_unchecked(1, (v)) +#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) +#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_unchecked_relaxed(1, (v)) /* * atomic_inc_and_test - increment and test @@ -171,37 +202,20 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v) * other cases. */ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0) -static __inline__ void atomic_dec(atomic_t *v) -{ - int t; - - __asm__ __volatile__( -"1: lwarx %0,0,%2 # atomic_dec\n\ - addic %0,%0,-1\n" - PPC405_ERR77(0,%2)\ -" stwcx. 
%0,0,%2\n\ - bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (&v->counter) - : "cc", "xer"); -} +/* + * atomic_dec - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 + */ +#define atomic_dec(v) atomic_sub(1, (v)) +#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) -static __inline__ int atomic_dec_return_relaxed(atomic_t *v) +static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v) { - int t; - - __asm__ __volatile__( -"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n" -" addic %0,%0,-1\n" - PPC405_ERR77(0, %2) -" stwcx. %0,0,%2\n" -" bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (&v->counter) - : "cc", "xer"); - - return t; + atomic_sub_unchecked(1, v); } #define atomic_inc_return_relaxed atomic_inc_return_relaxed @@ -216,6 +230,16 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) +{ + return cmpxchg(&(v->counter), old, new); +} + +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) +{ + return xchg(&(v->counter), new); +} + /** * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t @@ -233,14 +257,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) PPC_ATOMIC_ENTRY_BARRIER "1: lwarx %0,0,%1 # __atomic_add_unless\n\ cmpw 0,%0,%3 \n\ - beq 2f \n\ - add %0,%2,%0 \n" + beq 5f \n" + + __OVERFLOW_PRE + __REFCOUNT_OP(add) " %0,%2,%0 \n" + __OVERFLOW_POST + PPC405_ERR77(0,%2) " stwcx. %0,0,%1 \n\ bne- 1b \n" + + __OVERFLOW_EXTABLE + PPC_ATOMIC_EXIT_BARRIER " subf %0,%2,%0 \n\ -2:" +5:" : "=&r" (t) : "r" (&v->counter), "r" (a), "r" (u) : "cc", "memory"); @@ -323,37 +354,59 @@ static __inline__ long atomic64_read(const atomic64_t *v) return t; } +static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + long t; + + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); + + return t; +} + static __inline__ void atomic64_set(atomic64_t *v, long i) { __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); } -#define ATOMIC64_OP(op, asm_op) \ -static __inline__ void atomic64_##op(long a, atomic64_t *v) \ +static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); +} + +#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \ +static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\ { \ long t; \ \ __asm__ __volatile__( \ "1: ldarx %0,0,%3 # atomic64_" #op "\n" \ + pre_op \ #asm_op " %0,%2,%0\n" \ + post_op \ " stdcx. 
%0,0,%3 \n" \ " bne- 1b\n" \ + extable \ : "=&r" (t), "+m" (v->counter) \ : "r" (a), "r" (&v->counter) \ : "cc"); \ } -#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \ -static inline long \ -atomic64_##op##_return_relaxed(long a, atomic64_t *v) \ +#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \ + __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE) + +#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\ +static inline long atomic64_##op##_return##suffix##_relaxed(long a, atomic64##suffix##_t *v)\ { \ long t; \ \ __asm__ __volatile__( \ "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \ + pre_op \ #asm_op " %0,%2,%0\n" \ + post_op \ " stdcx. %0,0,%3\n" \ " bne- 1b\n" \ + extable \ : "=&r" (t), "+m" (v->counter) \ : "r" (a), "r" (&v->counter) \ : "cc"); \ @@ -361,6 +414,9 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v) \ return t; \ } +#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\ + __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE) + #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \ static inline long \ atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \ @@ -409,38 +465,33 @@ ATOMIC64_OPS(xor, xor) #undef ATOPIC64_OPS #undef ATOMIC64_FETCH_OP_RELAXED #undef ATOMIC64_OP_RETURN_RELAXED +#undef __ATOMIC64_OP_RETURN #undef ATOMIC64_OP +#undef __ATOMIC64_OP +#undef __OVERFLOW_EXTABLE +#undef __OVERFLOW_POST +#undef __OVERFLOW_PRE +#undef __REFCOUNT_OP #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) -static __inline__ void atomic64_inc(atomic64_t *v) -{ - long t; +/* + * atomic64_inc - increment atomic variable + * @v: pointer of type atomic64_t + * + * Automatically increments @v by 1 + */ +#define atomic64_inc(v) atomic64_add(1, (v)) +#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) - __asm__ __volatile__( -"1: ldarx %0,0,%2 # atomic64_inc\n\ - addic %0,%0,1\n\ - stdcx. %0,0,%2 \n\ - bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (&v->counter) - : "cc", "xer"); +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ + atomic64_add_unchecked(1, v); } -static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v) +static inline long atomic64_inc_return_unchecked_relaxed(atomic64_unchecked_t *v) { - long t; - - __asm__ __volatile__( -"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n" -" addic %0,%0,1\n" -" stdcx. %0,0,%2\n" -" bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (&v->counter) - : "cc", "xer"); - - return t; + return atomic64_add_return_unchecked_relaxed(1, v); } /* @@ -453,34 +504,18 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v) */ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -static __inline__ void atomic64_dec(atomic64_t *v) -{ - long t; - - __asm__ __volatile__( -"1: ldarx %0,0,%2 # atomic64_dec\n\ - addic %0,%0,-1\n\ - stdcx. 
%0,0,%2\n\ - bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (&v->counter) - : "cc", "xer"); -} +/* + * atomic64_dec - decrement atomic variable + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 + */ +#define atomic64_dec(v) atomic64_sub(1, (v)) +#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) -static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v) +static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v) { - long t; - - __asm__ __volatile__( -"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n" -" addic %0,%0,-1\n" -" stdcx. %0,0,%2\n" -" bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (&v->counter) - : "cc", "xer"); - - return t; + atomic64_sub_unchecked(1, v); } #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed @@ -522,6 +557,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) +{ + return cmpxchg(&(v->counter), old, new); +} + +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) +{ + return xchg(&(v->counter), new); +} + /** * atomic64_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t @@ -537,15 +582,22 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) __asm__ __volatile__ ( PPC_ATOMIC_ENTRY_BARRIER -"1: ldarx %0,0,%1 # __atomic_add_unless\n\ +"1: ldarx %0,0,%1 # atomic64_add_unless\n\ cmpd 0,%0,%3 \n\ - beq 2f \n\ - add %0,%2,%0 \n" + beq 5f \n" + + __OVERFLOW_PRE + __REFCOUNT_OP(add) " %0,%2,%0 \n" + __OVERFLOW_POST + " stdcx. %0,0,%1 \n\ bne- 1b \n" PPC_ATOMIC_EXIT_BARRIER + + __OVERFLOW_EXTABLE + " subf %0,%2,%0 \n\ -2:" +5:" : "=&r" (t) : "r" (&v->counter), "r" (a), "r" (u) : "cc", "memory"); diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h index 880db13a2..bb4ed4add 100644 --- a/arch/powerpc/include/asm/book3s/32/hash.h +++ b/arch/powerpc/include/asm/book3s/32/hash.h @@ -20,6 +20,7 @@ #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */ #define _PAGE_USER 0x004 /* usermode access allowed */ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ +#define _PAGE_NX _PAGE_GUARDED #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 6b8b2d57f..cf17a29a6 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -227,7 +227,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, pte_t *ptep, pte_t entry) { unsigned long set = pte_val(entry) & - (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC | _PAGE_NX); unsigned long clr = ~pte_val(entry) & _PAGE_RO; pte_update(ptep, clr, set); diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index cd5e7aa8c..77090615a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -91,6 +91,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) pgd_set(pgd, 
__pgtable_ptr_val(pud) | PGD_VAL_BITS); } +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + pgd_populate(mm, pgd, pud); +} + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL); @@ -106,6 +111,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS); } +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_populate(mm, pud, pmd); +} + static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, unsigned long address) { diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index ffbafbf76..71d037fb0 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -3,6 +3,8 @@ #ifdef __KERNEL__ +#include +#include /* bytes per L1 cache line */ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) @@ -22,7 +24,7 @@ #define L1_CACHE_SHIFT 7 #endif -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define SMP_CACHE_BYTES L1_CACHE_BYTES diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index ee46ffef6..b36c98cf5 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -30,6 +30,18 @@ #define ELF_ET_DYN_BASE 0x20000000 +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (0x10000000UL) + +#ifdef __powerpc64__ +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28) +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28) +#else +#define PAX_DELTA_MMAP_LEN 15 +#define PAX_DELTA_STACK_LEN 15 +#endif +#endif + #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) /* diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h index 8196e9c7d..d83a9f35f 100644 --- a/arch/powerpc/include/asm/exec.h +++ b/arch/powerpc/include/asm/exec.h @@ -4,6 +4,6 @@ #ifndef _ASM_POWERPC_EXEC_H #define _ASM_POWERPC_EXEC_H -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) #endif /* _ASM_POWERPC_EXEC_H */ diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h index 5acabbd7a..7ea14fa61 100644 --- a/arch/powerpc/include/asm/kmap_types.h +++ b/arch/powerpc/include/asm/kmap_types.h @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version.
*/ -#define KM_TYPE_NR 16 +#define KM_TYPE_NR 17 #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_KMAP_TYPES_H */ diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h index b8da91363..c02b593cc 100644 --- a/arch/powerpc/include/asm/local.h +++ b/arch/powerpc/include/asm/local.h @@ -9,21 +9,65 @@ typedef struct atomic_long_t a; } local_t; +typedef struct +{ + atomic_long_unchecked_t a; +} local_unchecked_t; + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } #define local_read(l) atomic_long_read(&(l)->a) +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) #define local_set(l,i) atomic_long_set(&(l)->a, (i)) +#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i)) #define local_add(i,l) atomic_long_add((i),(&(l)->a)) +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a)) #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a)) #define local_inc(l) atomic_long_inc(&(l)->a) +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) #define local_dec(l) atomic_long_dec(&(l)->a) +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) static __inline__ long local_add_return(long a, local_t *l) { long t; __asm__ __volatile__( +"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n" + +#ifdef CONFIG_PAX_REFCOUNT +" mcrxr cr0\n" +" addo. %0,%1,%0\n" +" bf 4*cr0+so, 3f\n" +"2:.long " "0x00c00b00""\n" +#else +" add %0,%1,%0\n" +#endif + +"3:\n" + PPC405_ERR77(0,%2) + PPC_STLCX "%0,0,%2 \n\ + bne- 1b" + +#ifdef CONFIG_PAX_REFCOUNT +"\n4:\n" + _ASM_EXTABLE(2b, 4b) +#endif + + : "=&r" (t) + : "r" (a), "r" (&(l->a.counter)) + : "cc", "memory"); + + return t; +} + +static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l) +{ + long t; + + __asm__ __volatile__( "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\ add %0,%1,%0\n" PPC405_ERR77(0,%2) @@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l) #define local_cmpxchg(l, o, n) \ (cmpxchg_local(&((l)->a.counter), (o), (n))) +#define local_cmpxchg_unchecked(l, o, n) \ + (cmpxchg_local(&((l)->a.counter), (o), (n))) #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) /** diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 30922f699..0bb237c59 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -26,7 +26,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags) { return (vm_flags & VM_SAO) ? 
__pgprot(_PAGE_SAO) : __pgprot(0); } diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h index 897d2e1c8..399f34fda 100644 --- a/arch/powerpc/include/asm/nohash/64/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h @@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) #ifndef CONFIG_PPC_64K_PAGES #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD) +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD)) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { @@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_set(pud, (unsigned long)pmd); } +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_populate(mm, pud, pmd); +} + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { @@ -139,6 +145,7 @@ extern void __tlb_remove_table(void *_table); #endif #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 56398e7e6..287a77203 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -230,8 +230,9 @@ extern long long virt_phys_offset; * and needs to be executable. This means the whole heap ends * up being executable. */ -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS32 \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) @@ -259,6 +260,9 @@ extern long long virt_phys_offset; #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) #endif +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #ifndef CONFIG_PPC_BOOK3S_64 /* * Use the top bit of the higher-level page table entries to indicate whether diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index dd5f0712a..047071863 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -169,15 +169,18 @@ do { \ * stack by default, so in the absence of a PT_GNU_STACK program header * we turn execute permission off. */ -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_STACK_DEFAULT_FLAGS32 \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifndef CONFIG_PAX_PAGEEXEC #define VM_STACK_DEFAULT_FLAGS \ (is_32bit_task() ? 
\ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) +#endif #include diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 9bd87f269..f600e6d02 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_PGTABLE_H #define _ASM_POWERPC_PGTABLE_H +#include #ifndef __ASSEMBLY__ #include #include diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 4ba26dd25..2d1137dcc 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -16,6 +16,9 @@ #ifndef _PAGE_EXEC #define _PAGE_EXEC 0 #endif +#ifndef _PAGE_NX +#define _PAGE_NX 0 +#endif #ifndef _PAGE_ENDIAN #define _PAGE_ENDIAN 0 #endif @@ -53,13 +56,13 @@ #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() #endif #ifndef _PAGE_KERNEL_RO -#define _PAGE_KERNEL_RO (_PAGE_RO) +#define _PAGE_KERNEL_RO (_PAGE_RO | _PAGE_NX) #endif #ifndef _PAGE_KERNEL_ROX #define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_RO) #endif #ifndef _PAGE_KERNEL_RW -#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_NX) #endif #ifndef _PAGE_KERNEL_RWX #define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC) @@ -142,15 +145,12 @@ static inline bool pte_user(pte_t pte) * Note due to the way vm flags are laid out, the bits are XWR */ #define PAGE_NONE __pgprot(_PAGE_BASE) -#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \ - _PAGE_EXEC) -#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \ - _PAGE_EXEC) -#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \ - _PAGE_EXEC) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_NX) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_NX) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_NX) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_EXEC) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY @@ -171,11 +171,9 @@ static inline bool pte_user(pte_t pte) #define __S111 PAGE_SHARED_X /* Permission masks used for kernel mappings */ -#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) -#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NO_CACHE) -#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_NX) +#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED) #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 13f5fad21..6ec27c38f 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -270,6 +270,7 @@ #define SPRN_DBCR 0x136 /* e300 Data 
Breakpoint Control Reg */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ #define DSISR_NOHPTE 0x40000000 /* no translation found */ +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ #define DSISR_PROTFAULT 0x08000000 /* protection fault */ #define DSISR_ISSTORE 0x02000000 /* access was a store */ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 0d02c11dc..33a8f086e 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -51,7 +51,7 @@ struct smp_ops_t { int (*cpu_disable)(void); void (*cpu_die)(unsigned int nr); int (*cpu_bootable)(unsigned int nr); -}; +} __no_const; extern void smp_send_debugger_break(void); extern void start_secondary_resume(void); diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index fa37fe93b..867d3cff6 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -27,6 +27,7 @@ #include #include #include +#include #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ @@ -228,13 +229,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw) __asm__ __volatile__( "1: " PPC_LWARX(%0,0,%1,1) "\n" __DO_SIGN_EXTEND -" addic. %0,%0,1\n\ - ble- 2f\n" + +#ifdef CONFIG_PAX_REFCOUNT +" mcrxr cr0\n" +" addico. %0,%0,1\n" +" bf 4*cr0+so, 3f\n" +"2:.long " "0x00c00b00""\n" +#else +" addic. %0,%0,1\n" +#endif + +"3:\n" + "ble- 4f\n" PPC405_ERR77(0,%1) " stwcx. %0,0,%1\n\ bne- 1b\n" PPC_ACQUIRE_BARRIER -"2:" : "=&r" (tmp) +"4:" + +#ifdef CONFIG_PAX_REFCOUNT + _ASM_EXTABLE(2b,4b) +#endif + + : "=&r" (tmp) : "r" (&rw->lock) : "cr0", "xer", "memory"); @@ -310,11 +327,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) __asm__ __volatile__( "# read_unlock\n\t" PPC_RELEASE_BARRIER -"1: lwarx %0,0,%1\n\ - addic %0,%0,-1\n" +"1: lwarx %0,0,%1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" mcrxr cr0\n" +" addico. %0,%0,-1\n" +" bf 4*cr0+so, 3f\n" +"2:.long " "0x00c00b00""\n" +#else +" addic. %0,%0,-1\n" +#endif + +"3:\n" PPC405_ERR77(0,%1) " stwcx. 
%0,0,%1\n\ bne- 1b" + +#ifdef CONFIG_PAX_REFCOUNT +"\n4:\n" + _ASM_EXTABLE(2b, 4b) +#endif + : "=&r"(tmp) : "r"(&rw->lock) : "cr0", "xer", "memory"); diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index da3cdffca..c774844ed 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -11,17 +11,17 @@ #define __HAVE_ARCH_MEMCMP #define __HAVE_ARCH_MEMCHR -extern char * strcpy(char *,const char *); -extern char * strncpy(char *,const char *, __kernel_size_t); -extern __kernel_size_t strlen(const char *); -extern int strcmp(const char *,const char *); -extern int strncmp(const char *, const char *, __kernel_size_t); -extern char * strcat(char *, const char *); +extern char * strcpy(char *,const char *) __nocapture(2); +extern char * strncpy(char *,const char *, __kernel_size_t) __nocapture(2); +extern __kernel_size_t strlen(const char *) __nocapture(1); +extern int strcmp(const char *,const char *) __nocapture(); +extern int strncmp(const char *, const char *, __kernel_size_t) __nocapture(1, 2); +extern char * strcat(char *, const char *) __nocapture(2); extern void * memset(void *,int,__kernel_size_t); -extern void * memcpy(void *,const void *,__kernel_size_t); -extern void * memmove(void *,const void *,__kernel_size_t); -extern int memcmp(const void *,const void *,__kernel_size_t); -extern void * memchr(const void *,int,__kernel_size_t); +extern void * memcpy(void *,const void *,__kernel_size_t) __nocapture(2); +extern void * memmove(void *,const void *,__kernel_size_t) __nocapture(2); +extern int memcmp(const void *,const void *,__kernel_size_t) __nocapture(1, 2); +extern void * memchr(const void *,int,__kernel_size_t) __nocapture(1); #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 87e4b2d8d..c36239071 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void) #if defined(CONFIG_PPC64) #define TIF_ELF2ABI 18 /* function descriptors must die! 
*/ #endif +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */ +#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< INT_MAX) + return n; + + if (likely(access_ok(VERIFY_READ, from, n))) { + check_object_size(to, n, false); + n = __copy_from_user(to, from, n); + } else + memset(to, 0, n); + return n; +} + +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if ((long)n < 0 || n > INT_MAX) + return n; + + if (likely(access_ok(VERIFY_WRITE, to, n))) { + check_object_size(from, n, true); + n = __copy_to_user(to, from, n); + } + return n; +} + +extern unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n); + +#endif /* __powerpc64__ */ + extern unsigned long __clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 1925341db..a1841acd5 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -15,7 +15,7 @@ CFLAGS_btext.o += -fPIC endif CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) +CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) @@ -31,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_time.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) endif +CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS) + obj-y := cputable.o ptrace.o syscalls.o \ irq.o align.o signal_32.o pmc.o vdso.o \ process.o systbl.o idle.o \ diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 38a1f9643..ed94e428d 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -1010,6 +1010,7 @@ storage_fault_common: std r14,_DAR(r1) std r15,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD + bl save_nvgprs mr r4,r14 mr r5,r15 ld r14,PACA_EXGEN+EX_R14(r13) @@ -1018,8 +1019,7 @@ storage_fault_common: cmpdi r3,0 bne- 1f b ret_from_except_lite -1: bl save_nvgprs - mr r5,r3 +1: mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) bl bad_page_fault diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1ba82ea90..f78bd7009 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1445,10 +1445,10 @@ handle_page_fault: 11: ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD + bl save_nvgprs bl do_page_fault cmpdi r3,0 beq+ 12f - bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD lwz r4,_DAR(r1) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 3c05c311e..a8e688833 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -482,6 +482,8 @@ void migrate_irqs(void) } #endif +extern void gr_handle_kernel_exploit(void); + static inline void check_stack_overflow(void) { #ifdef CONFIG_DEBUG_STACKOVERFLOW @@ -494,6 +496,7 @@ static inline void check_stack_overflow(void) pr_err("do_IRQ: stack overflow: %ld\n", sp - sizeof(struct thread_info)); dump_stack(); + gr_handle_kernel_exploit(); } #endif } diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index e785cc9e1..514488c4b 100644 --- a/arch/powerpc/kernel/kprobes.c +++ 
b/arch/powerpc/kernel/kprobes.c @@ -131,6 +131,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, kcb->kprobe_saved_msr = regs->msr; } +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -139,6 +140,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* Replace the return addr with trampoline addr */ regs->link = (unsigned long)kretprobe_trampoline; } +#endif static int __kprobes kprobe_handler(struct pt_regs *regs) { @@ -547,6 +549,7 @@ int __init arch_init_kprobes(void) return register_kprobe(&trampoline_p); } +#ifdef CONFIG_KRETPROBES int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) @@ -554,3 +557,4 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) return 0; } +#endif diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 5a7a78f12..c0e4207d1 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, me->arch.core_plt_section = i; } if (!me->arch.core_plt_section || !me->arch.init_plt_section) { - pr_err("Module doesn't contain .plt or .init.plt sections.\n"); + pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); return -ENOEXEC; } @@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location, pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); /* Init, or core PLT? */ - if (location >= mod->core_layout.base - && location < mod->core_layout.base + mod->core_layout.size) + if ((location >= mod->core_layout.base_rx && location < mod->core_layout.base_rx + mod->core_layout.size_rx) || + (location >= mod->core_layout.base_rw && location < mod->core_layout.base_rw + mod->core_layout.size_rw)) entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; - else + else if ((location >= mod->init_layout.base_rx && location < mod->init_layout.base_rx + mod->init_layout.size_rx) || + (location >= mod->init_layout.base_rw && location < mod->init_layout.base_rw + mod->init_layout.size_rw)) entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; + else { + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); + return ~0UL; + } /* Find this entry, or if that fails, the next avail.
entry */ while (entry->jump[0]) { @@ -301,7 +306,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, #ifdef CONFIG_DYNAMIC_FTRACE int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs) { - module->arch.tramp = do_plt_call(module->core_layout.base, + module->arch.tramp = do_plt_call(module->core_layout.base_rx, (unsigned long)ftrace_caller, sechdrs, module); if (!module->arch.tramp) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 49a680d5a..2514bbcba 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1375,8 +1375,8 @@ void show_regs(struct pt_regs * regs) * Lookup NIP late so we have the best change of getting the * above info out without failing */ - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip); + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link); #endif show_stack(current, (unsigned long *) regs->gpr[1]); if (!user_mode(regs)) @@ -1897,10 +1897,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) newsp = stack[0]; ip = stack[STACK_FRAME_LR_SAVE]; if (!firstframe || ip != lr) { - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((ip == rth) && curr_frame >= 0) { - pr_cont(" (%pS)", + pr_cont(" (%pA)", (void *)current->ret_stack[curr_frame].ret); curr_frame--; } @@ -1920,7 +1920,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); lr = regs->link; - printk("--- interrupt: %lx at %pS\n LR = %pS\n", + printk("--- interrupt: %lx at %pA\n LR = %pA\n", regs->trap, (void *)regs->nip, (void *)lr); firstframe = 1; } @@ -1957,13 +1957,6 @@ void notrace __ppc64_runlatch_off(void) } #endif /* CONFIG_PPC64 */ -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() & ~PAGE_MASK; - return sp & ~0xf; -} - static inline unsigned long brk_rnd(void) { unsigned long rnd = 0; diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 5c8f12fe9..98047fb5c 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -3151,7 +3151,7 @@ static int do_seccomp(struct pt_regs *regs) * have already loaded -ENOSYS into r3, or seccomp has put * something else in r3 (via SECCOMP_RET_ERRNO/TRACE). */ - if (__secure_computing(NULL)) + if (secure_computing(NULL)) return -1; /* @@ -3169,6 +3169,10 @@ static int do_seccomp(struct pt_regs *regs) static inline int do_seccomp(struct pt_regs *regs) { return 0; } #endif /* CONFIG_SECCOMP */ +#ifdef CONFIG_GRKERNSEC_SETXID +extern void gr_delayed_cred_worker(void); +#endif + /** * do_syscall_trace_enter() - Do syscall tracing on kernel entry. * @regs: the pt_regs of the task to trace (current) @@ -3192,6 +3196,11 @@ long do_syscall_trace_enter(struct pt_regs *regs) { user_exit(); +#ifdef CONFIG_GRKERNSEC_SETXID + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) + gr_delayed_cred_worker(); +#endif + /* * The tracer may decide to abort the syscall, if so tracehook * will return !0. 
Note that the tracer may also just change @@ -3210,6 +3219,7 @@ long do_syscall_trace_enter(struct pt_regs *regs) if (regs->gpr[0] >= NR_syscalls) goto skip; + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->gpr[0]); @@ -3241,6 +3251,11 @@ void do_syscall_trace_leave(struct pt_regs *regs) { int step; +#ifdef CONFIG_GRKERNSEC_SETXID + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) + gr_delayed_cred_worker(); +#endif + audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 27aa913ac..dc0d9f53c 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -1006,7 +1006,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; addr = frame; - if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) { + if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base != ~0UL) { sigret = 0; tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp; } else { diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 96698fdf9..fe57485e7 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -791,7 +791,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, tsk->thread.fp_state.fpscr = 0; /* Set up to return from userspace. */ - if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) { + if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base != ~0UL) { regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp; } else { err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 023a46272..9d940854d 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -146,6 +147,8 @@ static unsigned long oops_begin(struct pt_regs *regs) } NOKPROBE_SYMBOL(oops_begin); +extern void gr_handle_kernel_exploit(void); + static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { @@ -195,6 +198,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); + + gr_handle_kernel_exploit(); + do_exit(signr); } NOKPROBE_SYMBOL(oops_end); @@ -1162,6 +1168,26 @@ void program_check_exception(struct pt_regs *regs) enum ctx_state prev_state = exception_enter(); unsigned int reason = get_reason(regs); +#ifdef CONFIG_PAX_REFCOUNT + unsigned int bkpt; + const struct exception_table_entry *entry; + + if (reason & REASON_ILLEGAL) { + /* Check if PaX bad instruction */ + if (!probe_kernel_address((const void *)regs->nip, bkpt) && bkpt == 0xc00b00) { + current->thread.trap_nr = 0; + pax_report_refcount_error(regs, NULL); + /* fixup_exception() for PowerPC does not exist, simulate its job */ + if ((entry = search_exception_tables(regs->nip)) != NULL) { + regs->nip = entry->fixup; + return; + } + /* fixup_exception() could not handle */ + goto bail; + } + } +#endif + /* We can now get here via a FP Unavailable exception if the core * has no FPU, in that case the reason flags will be 0 */ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 4111d30ba..fa5e7be35 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -35,6 +35,7 @@ #include #include #include +#include #undef DEBUG @@ -180,7 +181,7 @@ int 
arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_base = VDSO32_MBASE; #endif - current->mm->context.vdso_base = 0; + current->mm->context.vdso_base = ~0UL; /* vDSO has a problem and was disabled, just don't "enable" it for the * process @@ -201,7 +202,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_base = get_unmapped_area(NULL, vdso_base, (vdso_pages << PAGE_SHIFT) + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), - 0, 0); + 0, MAP_PRIVATE | MAP_EXECUTABLE); if (IS_ERR_VALUE(vdso_base)) { rc = vdso_base; goto fail_mmapsem; diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c index 5eea6f3c1..5d1039679 100644 --- a/arch/powerpc/lib/usercopy_64.c +++ b/arch/powerpc/lib/usercopy_64.c @@ -9,22 +9,6 @@ #include #include -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_READ, from, n))) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; -} - -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_WRITE, to, n))) - n = __copy_to_user(to, from, n); - return n; -} - unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long n) { @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, return n; } -EXPORT_SYMBOL(copy_from_user); -EXPORT_SYMBOL(copy_to_user); EXPORT_SYMBOL(copy_in_user); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d0b137d96..af92bde11 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -34,6 +34,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs) } #endif +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->nip = fault address) + * + * returns 1 when task should be killed + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + return 1; +} + +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int __user *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + /* * Check whether the instruction at regs->nip is a store using * an update addressing form which will update r1. @@ -227,7 +258,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, * indicate errors in DSISR but can validly be set in SRR1. */ if (trap == 0x400) - error_code &= 0x48200000; + error_code &= 0x58200000; else is_write = error_code & DSISR_ISSTORE; #else @@ -384,12 +415,16 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, * "undefined". Of those that can be set, this is the only * one which seems bad. */ - if (error_code & 0x10000000) + if (error_code & DSISR_GUARDED) /* Guarded storage error. */ goto bad_area; #endif /* CONFIG_8xx */ if (is_exec) { +#ifdef CONFIG_PPC_STD_MMU + if (error_code & DSISR_GUARDED) + goto bad_area; +#endif /* * Allow execution from readable areas if the MMU does not * provide separate controls over reading and executing. 
@@ -484,6 +519,23 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, bad_area_nosemaphore: /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { +#ifdef CONFIG_PPC_STD_MMU + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) { +#else + if (is_exec && regs->nip == address) { +#endif + switch (pax_handle_fetch_fault(regs)) { + } + + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]); + do_group_exit(SIGKILL); + } + } +#endif + _exception(SIGSEGV, regs, code, address); goto bail; } diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 2f1e44362..de888bf31 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -194,6 +194,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); @@ -205,9 +209,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm) */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; } else { mm->mmap_base = mmap_base(random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 2b2745890..7c7c59b1b 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return check_heap_stack_gap(vma, addr, len, 0); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) @@ -276,6 +276,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm, info.align_offset = 0; addr = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + addr += mm->delta_mmap; +#endif + while (addr < TASK_SIZE) { info.low_limit = addr; if (!slice_scan_available(addr, available, 1, &addr)) @@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (fixed && addr > (mm->task_size - len)) return -ENOMEM; +#ifdef CONFIG_PAX_RANDMMAP + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP)) + addr = 0; +#endif + /* If hint, make sure it matches our alignment restrictions */ if (!fixed && addr) { addr = _ALIGN_UP(addr, 1ul << pshift); @@ -555,10 +566,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, } unsigned long arch_get_unmapped_area_topdown(struct file *filp, - const unsigned long addr0, - const unsigned long len, - const unsigned long pgoff, - const unsigned long flags) + unsigned long addr0, + unsigned long len, + unsigned long pgoff, + unsigned long flags) { return slice_get_unmapped_area(addr0, len, flags, current->mm->context.user_psize, 1); diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 06254467e..139a0aa77 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -263,9 +263,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return 
VM_FAULT_NOPAGE; } -static int spufs_mem_mmap_access(struct vm_area_struct *vma, +static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma, unsigned long address, - void *buf, int len, int write) + void *buf, size_t len, int write) { struct spu_context *ctx = vma->vm_file->private_data; unsigned long offset = address - vma->vm_start; diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index 26c5d5beb..a308c28f4 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug @@ -9,6 +9,7 @@ config S390_PTDUMP bool "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL select DEBUG_FS + depends on !GRKERNSEC_KMEM ---help--- Say Y here if you want to show the kernel pagetable layout in a debugfs file. This information is only useful for kernel developers diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index d28cc2f5b..a93731242 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -342,4 +342,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) + #endif /* __ARCH_S390_ATOMIC__ */ diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 05219a5e0..032f5f046 100644 --- a/arch/s390/include/asm/cache.h +++ b/arch/s390/include/asm/cache.h @@ -9,8 +9,10 @@ #ifndef __ARCH_S390_CACHE_H #define __ARCH_S390_CACHE_H -#define L1_CACHE_BYTES 256 +#include + #define L1_CACHE_SHIFT 8 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define NET_SKB_PAD 32 #define __read_mostly __section(.data..read_mostly) diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 1736c7d3c..261351cfd 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -167,6 +167,13 @@ extern unsigned int vdso_enabled; (STACK_TOP / 3 * 2) : \ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26) +#endif + /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
*/ diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h index c4a93d632..4d2a9b416 100644 --- a/arch/s390/include/asm/exec.h +++ b/arch/s390/include/asm/exec.h @@ -7,6 +7,6 @@ #ifndef __ASM_EXEC_H #define __ASM_EXEC_H -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) #endif /* __ASM_EXEC_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 52d7c8709..577d292e6 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size) __range_ok((unsigned long)(addr), (size)); \ }) +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) #define access_ok(type, addr, size) __access_ok(addr, size) /* @@ -337,6 +338,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); + + if ((long)n < 0) + return n; + return __copy_to_user(to, from, n); } @@ -360,10 +365,14 @@ copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - unsigned int sz = __compiletime_object_size(to); + size_t sz = __compiletime_object_size(to); might_fault(); - if (unlikely(sz != -1 && sz < n)) { + + if ((long)n < 0) + return n; + + if (unlikely(sz != (size_t)-1 && sz < n)) { if (!__builtin_constant_p(n)) copy_user_overflow(sz, n); else diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index fdb40424a..b72ae722d 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -269,6 +269,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb) } NOKPROBE_SYMBOL(pop_kprobe); +#ifdef CONFIG_KRETPROBES void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; @@ -277,6 +278,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) regs->gprs[14] = (unsigned long) &kretprobe_trampoline; } NOKPROBE_SYMBOL(arch_prepare_kretprobe); +#endif static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p) { @@ -740,8 +742,10 @@ int __init arch_init_kprobes(void) return register_kprobe(&trampoline); } +#ifdef CONFIG_KRETPROBES int arch_trampoline_kprobe(struct kprobe *p) { return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline; } NOKPROBE_SYMBOL(arch_trampoline_kprobe); +#endif diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index fbc07891f..e7962a199 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -163,11 +163,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, /* Increase core size by size of got & plt and set start offsets for got and plt. 
*/ - me->core_layout.size = ALIGN(me->core_layout.size, 4); - me->arch.got_offset = me->core_layout.size; - me->core_layout.size += me->arch.got_size; - me->arch.plt_offset = me->core_layout.size; - me->core_layout.size += me->arch.plt_size; + me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 4); + me->arch.got_offset = me->core_layout.size_rw; + me->core_layout.size_rw += me->arch.got_size; + me->arch.plt_offset = me->core_layout.size_rx; + me->core_layout.size_rx += me->arch.plt_size; return 0; } @@ -283,7 +283,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, if (info->got_initialized == 0) { Elf_Addr *gotent; - gotent = me->core_layout.base + me->arch.got_offset + + gotent = me->core_layout.base_rw + me->arch.got_offset + info->got_offset; *gotent = val; info->got_initialized = 1; @@ -306,7 +306,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, rc = apply_rela_bits(loc, val, 0, 64, 0); else if (r_type == R_390_GOTENT || r_type == R_390_GOTPLTENT) { - val += (Elf_Addr) me->core_layout.base - loc; + val += (Elf_Addr) me->core_layout.base_rw - loc; rc = apply_rela_bits(loc, val, 1, 32, 1); } break; @@ -319,7 +319,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ if (info->plt_initialized == 0) { unsigned int *ip; - ip = me->core_layout.base + me->arch.plt_offset + + ip = me->core_layout.base_rx + me->arch.plt_offset + info->plt_offset; ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ ip[1] = 0x100a0004; @@ -338,7 +338,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val - loc + 0xffffUL < 0x1ffffeUL) || (r_type == R_390_PLT32DBL && val - loc + 0xffffffffULL < 0x1fffffffeULL))) - val = (Elf_Addr) me->core_layout.base + + val = (Elf_Addr) me->core_layout.base_rx + me->arch.plt_offset + info->plt_offset; val += rela->r_addend - loc; @@ -360,7 +360,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_GOTOFF32: /* 32 bit offset to GOT. */ case R_390_GOTOFF64: /* 64 bit offset to GOT. */ val = val + rela->r_addend - - ((Elf_Addr) me->core_layout.base + me->arch.got_offset); + ((Elf_Addr) me->core_layout.base_rw + me->arch.got_offset); if (r_type == R_390_GOTOFF16) rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_GOTOFF32) @@ -370,7 +370,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
*/ - val = (Elf_Addr) me->core_layout.base + me->arch.got_offset + + val = (Elf_Addr) me->core_layout.base_rw + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) rc = apply_rela_bits(loc, val, 1, 32, 0); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index bba4fa74b..9c32b3ca1 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -217,13 +217,6 @@ unsigned long get_wchan(struct task_struct *p) return 0; } -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() & ~PAGE_MASK; - return sp & ~0xf; -} - static inline unsigned long brk_rnd(void) { return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT; diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index eb9df2822..7b686bab6 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -201,9 +201,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr, } static unsigned long -s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) +s390_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) { struct mm_struct *mm = current->mm; unsigned long area; @@ -230,6 +230,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); @@ -239,9 +243,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm) */ if (mmap_is_legacy()) { mm->mmap_base = mmap_base_legacy(random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = s390_get_unmapped_area; } else { mm->mmap_base = mmap_base(random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = s390_get_unmapped_area_topdown; } } diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h index ae3d59f2d..f65f0751c 100644 --- a/arch/score/include/asm/cache.h +++ b/arch/score/include/asm/cache.h @@ -1,7 +1,9 @@ #ifndef _ASM_SCORE_CACHE_H #define _ASM_SCORE_CACHE_H +#include + #define L1_CACHE_SHIFT 4 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #endif /* _ASM_SCORE_CACHE_H */ diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h index f9f3cd59c..58ff438bd 100644 --- a/arch/score/include/asm/exec.h +++ b/arch/score/include/asm/exec.h @@ -1,6 +1,6 @@ #ifndef _ASM_SCORE_EXEC_H #define _ASM_SCORE_EXEC_H -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) (x) #endif /* _ASM_SCORE_EXEC_H */ diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index aae948070..93e40a460 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -114,8 +114,3 @@ unsigned long get_wchan(struct task_struct *task) return task_pt_regs(task)->cp0_epc; } - -unsigned long arch_align_stack(unsigned long sp) -{ - return sp; -} diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h index ef9e555aa..331bd29e7 100644 --- a/arch/sh/include/asm/cache.h +++ b/arch/sh/include/asm/cache.h @@ -9,10 +9,11 @@ #define __ASM_SH_CACHE_H #ifdef __KERNEL__ 
+#include #include #include -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define __read_mostly __attribute__((__section__(".data..read_mostly"))) diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 83acbf3f6..fa67491b6 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -72,6 +72,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) (unsigned long)p->addr + sizeof(kprobe_opcode_t)); } +#ifdef CONFIG_KRETPROBES int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (*p->addr == BREAKPOINT_INSTRUCTION) @@ -79,6 +80,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) return 0; } +#endif /** * If an illegal slot instruction exception occurs for an address @@ -203,6 +205,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) } /* Called with kretprobe_lock held */ +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -211,6 +214,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* Replace the return addr with trampoline addr */ regs->pr = (unsigned long)kretprobe_trampoline; } +#endif static int __kprobes kprobe_handler(struct pt_regs *regs) { diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 677717780..d44b59233 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int do_colour_align; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); struct vm_unmapped_area_info info; if (flags & MAP_FIXED) { @@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, if (filp || (flags & MAP_SHARED)) do_colour_align = 1; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_colour_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } info.flags = 0; info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; + info.low_limit = mm->mmap_base; info.high_limit = TASK_SIZE; info.align_mask = do_colour_align ? 
(PAGE_MASK & shm_align_mask) : 0; info.align_offset = pgoff << PAGE_SHIFT; @@ -77,14 +81,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, } unsigned long -arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, + unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_colour_align; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); struct vm_unmapped_area_info info; if (flags & MAP_FIXED) { @@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (filp || (flags & MAP_SHARED)) do_colour_align = 1; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + /* requesting a specific address */ if (addr) { if (do_colour_align) @@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } @@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + info.low_limit += mm->delta_mmap; +#endif + info.high_limit = TASK_SIZE; addr = vm_unmapped_area(&info); } diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 165ecdd24..2bac5bf64 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -38,6 +38,7 @@ config SPARC select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select MODULES_USE_ELF_RELA + select HAVE_GCC_PLUGINS select ODD_RT_SIGACTION select OLD_SIGSUSPEND select ARCH_HAS_SG_CHAIN diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 24827a3f7..5dd45ac40 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -15,18 +15,38 @@ #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return READ_ONCE(v->counter); +} #define atomic64_read(v) READ_ONCE((v)->counter) +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return READ_ONCE(v->counter); +} #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + WRITE_ONCE(v->counter, i); +} #define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i)) +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + WRITE_ONCE(v->counter, i); +} + +#define __ATOMIC_OP(op, suffix) \ +void atomic_##op##suffix(int, atomic##suffix##_t *); \ +void atomic64_##op##suffix(long, atomic64##suffix##_t *); -#define ATOMIC_OP(op) \ -void atomic_##op(int, atomic_t *); \ -void atomic64_##op(long, atomic64_t *); +#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked) -#define ATOMIC_OP_RETURN(op) \ -int atomic_##op##_return(int, atomic_t *); \ -long atomic64_##op##_return(long, atomic64_t *); +#define __ATOMIC_OP_RETURN(op, suffix) \ +int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \ +long 
atomic64_##op##_return##suffix(long, atomic64##suffix##_t *); + +#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked) #define ATOMIC_FETCH_OP(op) \ int atomic_fetch_##op(int, atomic_t *); \ @@ -47,13 +67,23 @@ ATOMIC_OPS(xor) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN +#undef __ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef __ATOMIC_OP #define atomic_dec_return(v) atomic_sub_return(1, v) #define atomic64_dec_return(v) atomic64_sub_return(1, v) #define atomic_inc_return(v) atomic_add_return(1, v) +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_return_unchecked(1, v); +} #define atomic64_inc_return(v) atomic64_add_return(1, v) +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + return atomic64_add_return_unchecked(1, v); +} /* * atomic_inc_and_test - increment and test @@ -64,6 +94,10 @@ ATOMIC_OPS(xor) * other cases. */ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) +{ + return atomic_inc_return_unchecked(v) == 0; +} #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) @@ -73,25 +107,60 @@ ATOMIC_OPS(xor) #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0) #define atomic_inc(v) atomic_add(1, v) +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + atomic_add_unchecked(1, v); +} #define atomic64_inc(v) atomic64_add(1, v) +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ + atomic64_add_unchecked(1, v); +} #define atomic_dec(v) atomic_sub(1, v) +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + atomic_sub_unchecked(1, v); +} #define atomic64_dec(v) atomic64_sub(1, v) +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) +{ + atomic64_sub_unchecked(1, v); +} #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0) #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) +{ + return cmpxchg(&v->counter, old, new); +} #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) +{ + return xchg(&v->counter, new); +} static inline int __atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addcc %2, %0, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "tvs %%icc, 6\n" +#endif + + : "=r" (new) + : "0" (c), "ir" (a) + : "cc"); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; @@ -101,21 +170,42 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic64_cmpxchg(v, o, n) \ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, + long new) +{ + return cmpxchg(&(v->counter), old, new); +} + #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) +{ + return xchg(&v->counter, new); +} static inline long atomic64_add_unless(atomic64_t *v, long a, long u) { - long c, old; + long c, old, new; c = atomic64_read(v); for (;;) { - if 
(unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic64_cmpxchg((v), c, c + (a)); + + asm volatile("addcc %2, %0, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "tvs %%xcc, 6\n" +#endif + + : "=r" (new) + : "0" (c), "ir" (a) + : "cc"); + + old = atomic64_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h index 5bb6991b4..5c2132e96 100644 --- a/arch/sparc/include/asm/cache.h +++ b/arch/sparc/include/asm/cache.h @@ -7,10 +7,12 @@ #ifndef _SPARC_CACHE_H #define _SPARC_CACHE_H +#include + #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #define L1_CACHE_SHIFT 5 -#define L1_CACHE_BYTES 32 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #ifdef CONFIG_SPARC32 #define SMP_CACHE_BYTES_SHIFT 5 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h index a24e41fcd..47677ff75 100644 --- a/arch/sparc/include/asm/elf_32.h +++ b/arch/sparc/include/asm/elf_32.h @@ -114,6 +114,13 @@ typedef struct { #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. This can NOT be done in userspace on Sparc. */ diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index 3f2d40387..4385ed999 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -190,6 +190,13 @@ typedef struct { #define ELF_ET_DYN_BASE 0x0000010000000000UL #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 
15 : 29) +#endif + extern unsigned long sparc64_elf_hwcap; #define ELF_HWCAP sparc64_elf_hwcap diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h index 0346c7e62..c5c25b906 100644 --- a/arch/sparc/include/asm/pgalloc_32.h +++ b/arch/sparc/include/asm/pgalloc_32.h @@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) } #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD)) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h index 3529f1378..d98a28c42 100644 --- a/arch/sparc/include/asm/pgalloc_64.h +++ b/arch/sparc/include/asm/pgalloc_64.h @@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud) } #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD) +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD)) static inline pgd_t *pgd_alloc(struct mm_struct *mm) { @@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd) } #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD) +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD)) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h index 59ba6f620..451812819 100644 --- a/arch/sparc/include/asm/pgtable.h +++ b/arch/sparc/include/asm/pgtable.h @@ -5,4 +5,8 @@ #else #include #endif + +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #endif diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index ce6f56980..593b043b9 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail); #define PAGE_SHARED SRMMU_PAGE_SHARED #define PAGE_COPY SRMMU_PAGE_COPY #define PAGE_READONLY SRMMU_PAGE_RDONLY +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC #define PAGE_KERNEL SRMMU_PAGE_KERNEL /* Top-level page directory - dummy used by init-mm. 
@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd; /* xwr */ #define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY +#define __P001 PAGE_READONLY_NOEXEC +#define __P010 PAGE_COPY_NOEXEC +#define __P011 PAGE_COPY_NOEXEC #define __P100 PAGE_READONLY #define __P101 PAGE_READONLY #define __P110 PAGE_COPY #define __P111 PAGE_COPY #define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED +#define __S001 PAGE_READONLY_NOEXEC +#define __S010 PAGE_SHARED_NOEXEC +#define __S011 PAGE_SHARED_NOEXEC #define __S100 PAGE_READONLY #define __S101 PAGE_READONLY #define __S110 PAGE_SHARED diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index ae51a111a..eadfd03fc 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h +++ b/arch/sparc/include/asm/pgtsrmmu.h @@ -111,6 +111,11 @@ SRMMU_EXEC | SRMMU_REF) #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ SRMMU_EXEC | SRMMU_REF) + +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF) +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ SRMMU_DIRTY | SRMMU_REF) diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h index 29d64b175..4272fe8d2 100644 --- a/arch/sparc/include/asm/setup.h +++ b/arch/sparc/include/asm/setup.h @@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs); void handle_ld_nf(u32 insn, struct pt_regs *regs); /* init_64.c */ -extern atomic_t dcpage_flushes; -extern atomic_t dcpage_flushes_xcall; +extern atomic_unchecked_t dcpage_flushes; +extern atomic_unchecked_t dcpage_flushes_xcall; extern int sysctl_tsb_ratio; #endif diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 07c9f2e9b..352fff04f 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -103,7 +103,12 @@ static inline void arch_read_lock(arch_rwlock_t *lock) __asm__ __volatile__ ( "1: ldsw [%2], %0\n" " brlz,pn %0, 2f\n" -"4: add %0, 1, %1\n" +"4: addcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%icc, 1b\n" @@ -116,7 +121,7 @@ static inline void arch_read_lock(arch_rwlock_t *lock) " .previous" : "=&r" (tmp1), "=&r" (tmp2) : "r" (lock) - : "memory"); + : "memory", "cc"); } static inline int arch_read_trylock(arch_rwlock_t *lock) @@ -127,7 +132,12 @@ static inline int arch_read_trylock(arch_rwlock_t *lock) "1: ldsw [%2], %0\n" " brlz,a,pn %0, 2f\n" " mov 0, %0\n" -" add %0, 1, %1\n" +" addcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%icc, 1b\n" @@ -146,7 +156,12 @@ static inline void arch_read_unlock(arch_rwlock_t *lock) __asm__ __volatile__( "1: lduw [%2], %0\n" -" sub %0, 1, %1\n" +" subcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%xcc, 1b\n" diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index 229475f0d..2fca91633 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -48,6 +48,7 @@ struct thread_info { struct reg_window32 reg_window[NSWINS]; /* align for ldd! 
*/ unsigned long rwbuf_stkptrs[NSWINS]; unsigned long w_saved; + unsigned long lowest_stack; }; /* diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 3d7b925f6..493ce8222 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -59,6 +59,8 @@ struct thread_info { struct pt_regs *kern_una_regs; unsigned int kern_una_insn; + unsigned long lowest_stack; + unsigned long fpregs[(7 * 256) / sizeof(unsigned long)] __attribute__ ((aligned(64))); }; @@ -180,12 +182,13 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ /* flag bit 4 is available */ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ -/* flag bit 6 is available */ +#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */ #define TIF_32BIT 7 /* 32-bit binary */ #define TIF_NOHZ 8 /* in adaptive nohz mode */ #define TIF_SECCOMP 9 /* secure computing */ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ + /* NOTE: Thread flags >= 12 should be ones we have no interest * in using in assembly, else we can't use the mask as * an immediate value in instructions such as andcc. @@ -205,12 +208,17 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_SYSCALL_AUDIT (1< #else diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index ea55f86d7..dbf15cfb5 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -47,6 +47,7 @@ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) #define access_ok(type, addr, size) \ ({ (void)(type); __access_ok((unsigned long)(addr), size); }) @@ -248,6 +249,9 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + if (n && __access_ok((unsigned long) to, n)) { check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); @@ -257,12 +261,18 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); } static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + if (n && __access_ok((unsigned long) from, n)) { check_object_size(to, n, false); return __copy_user((__force void __user *) to, from, n); @@ -274,6 +284,9 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + return __copy_user((__force void __user *) to, from, n); } diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 5373136c4..c528f7e8a 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -10,6 +10,7 @@ #include #include #include 
+#include #include #include #include @@ -77,6 +78,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size) return 1; } +static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size) +{ + return 1; +} + static inline int access_ok(int type, const void __user * addr, unsigned long size) { return 1; @@ -191,6 +197,9 @@ unsigned long __must_check ___copy_from_user(void *to, static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long size) { + if ((long)size < 0 || size > INT_MAX) + return size; + check_object_size(to, size, false); return ___copy_from_user(to, from, size); @@ -203,6 +212,9 @@ unsigned long __must_check ___copy_to_user(void __user *to, static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long size) { + if ((long)size < 0 || size > INT_MAX) + return size; + check_object_size(from, size, true); return ___copy_to_user(to, from, size); diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index fa3c02d41..c9a6309fb 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -4,7 +4,7 @@ # asflags-y := -ansi -ccflags-y := -Werror +#ccflags-y := -Werror extra-y := head_$(BITS).o diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index b0377db12..1da3b5367 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -499,6 +499,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) * value kept in ri->ret_addr so we don't need to keep adjusting it * back and forth. */ +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -508,6 +509,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, regs->u_regs[UREG_RETPC] = ((unsigned long)kretprobe_trampoline) - 8; } +#endif /* * Called when the probe at kretprobe trampoline is hit diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index b7780a5be..28315f023 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r) printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", r->psr, r->pc, r->npc, r->y, print_tainted()); - printk("PC: <%pS>\n", (void *) r->pc); + printk("PC: <%pA>\n", (void *) r->pc); printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); - printk("RPC: <%pS>\n", (void *) r->u_regs[15]); + printk("RPC: <%pA>\n", (void *) r->u_regs[15]); printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], @@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) rw = (struct reg_window32 *) fp; pc = rw->ins[7]; printk("[%08lx : ", pc); - printk("%pS ] ", (void *) pc); + printk("%pA ] ", (void *) pc); fp = rw->ins[6]; } while (++count < 16); printk("\n"); diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 47ff5588e..2333c8a4c 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs) printk("i4: %016lx i5: 
%016lx i6: %016lx i7: %016lx\n", rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); if (regs->tstate & TSTATE_PRIV) - printk("I7: <%pS>\n", (void *) rwk->ins[7]); + printk("I7: <%pA>\n", (void *) rwk->ins[7]); } void show_regs(struct pt_regs *regs) @@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs) printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, regs->tpc, regs->tnpc, regs->y, print_tainted()); - printk("TPC: <%pS>\n", (void *) regs->tpc); + printk("TPC: <%pA>\n", (void *) regs->tpc); printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", regs->u_regs[0], regs->u_regs[1], regs->u_regs[2], regs->u_regs[3]); @@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs) printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n", regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], regs->u_regs[15]); - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]); show_regwindow(regs); show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); } @@ -278,7 +278,7 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) ((tp && tp->task) ? tp->task->pid : -1)); if (gp->tstate & TSTATE_PRIV) { - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n", (void *) gp->tpc, (void *) gp->o7, (void *) gp->i7, diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 79cc0d1a4..46d6233bb 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c @@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf) unsigned int prom_early_allocated __initdata; -static struct of_pdt_ops prom_sparc_ops __initdata = { +static const struct of_pdt_ops prom_sparc_ops __initconst = { .nextprop = prom_common_nextprop, .getproplen = prom_getproplen, .getproperty = prom_getproperty, diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index ac082dd8c..71709420f 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -1068,6 +1068,10 @@ long arch_ptrace(struct task_struct *child, long request, return ret; } +#ifdef CONFIG_GRKERNSEC_SETXID +extern void gr_delayed_cred_worker(void); +#endif + asmlinkage int syscall_trace_enter(struct pt_regs *regs) { int ret = 0; @@ -1078,6 +1082,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) if (test_thread_flag(TIF_NOHZ)) user_exit(); +#ifdef CONFIG_GRKERNSEC_SETXID + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) + gr_delayed_cred_worker(); +#endif + if (test_thread_flag(TIF_SYSCALL_TRACE)) ret = tracehook_report_syscall_entry(regs); @@ -1096,6 +1105,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) if (test_thread_flag(TIF_NOHZ)) user_exit(); +#ifdef CONFIG_GRKERNSEC_SETXID + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) + gr_delayed_cred_worker(); +#endif + audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 8182f7caf..a5ab37f5a 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) return; #ifdef CONFIG_DEBUG_DCFLUSH - atomic_inc(&dcpage_flushes); + atomic_inc_unchecked(&dcpage_flushes); #endif this_cpu = get_cpu(); @@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) xcall_deliver(data0, __pa(pg_addr), (u64) pg_addr, cpumask_of(cpu)); 
#ifdef CONFIG_DEBUG_DCFLUSH - atomic_inc(&dcpage_flushes_xcall); + atomic_inc_unchecked(&dcpage_flushes_xcall); #endif } } @@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) preempt_disable(); #ifdef CONFIG_DEBUG_DCFLUSH - atomic_inc(&dcpage_flushes); + atomic_inc_unchecked(&dcpage_flushes); #endif data0 = 0; pg_addr = page_address(page); @@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) xcall_deliver(data0, __pa(pg_addr), (u64) pg_addr, cpu_online_mask); #ifdef CONFIG_DEBUG_DCFLUSH - atomic_inc(&dcpage_flushes_xcall); + atomic_inc_unchecked(&dcpage_flushes_xcall); #endif } __local_flush_dcache_page(page); diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 646988d4c..b88905f08 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi if (len > TASK_SIZE - PAGE_SIZE) return -ENOMEM; if (!addr) - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; info.flags = 0; info.length = len; diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index fe8b8ee8e..3f17a966f 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi struct vm_area_struct * vma; unsigned long task_size = TASK_SIZE; int do_color_align; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); struct vm_unmapped_area_info info; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ - if ((flags & MAP_SHARED) && + if ((filp || (flags & MAP_SHARED)) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi if (filp || (flags & MAP_SHARED)) do_color_align = 1; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_color_align) addr = COLOR_ALIGN(addr, pgoff); @@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } info.flags = 0; info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; + info.low_limit = mm->mmap_base; info.high_limit = min(task_size, VA_EXCLUDE_START); info.align_mask = do_color_align ? 
(PAGE_MASK & (SHMLBA - 1)) : 0; info.align_offset = pgoff << PAGE_SHIFT; + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { VM_BUG_ON(addr != -ENOMEM); info.low_limit = VA_EXCLUDE_END; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + info.low_limit += mm->delta_mmap; +#endif + info.high_limit = task_size; addr = vm_unmapped_area(&info); } @@ -141,15 +152,16 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi } unsigned long -arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, + unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long task_size = STACK_TOP32; unsigned long addr = addr0; int do_color_align; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); struct vm_unmapped_area_info info; /* This should only ever run for 32-bit processes. */ @@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ - if ((flags & MAP_SHARED) && + if ((filp || (flags & MAP_SHARED)) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (filp || (flags & MAP_SHARED)) do_color_align = 1; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + /* requesting a specific address */ if (addr) { if (do_color_align) @@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } @@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.high_limit = mm->mmap_base; info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; info.align_offset = pgoff << PAGE_SHIFT; + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); /* @@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + info.low_limit += mm->delta_mmap; +#endif + info.high_limit = STACK_TOP32; addr = vm_unmapped_area(&info); } @@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u EXPORT_SYMBOL(get_fb_unmapped_area); /* Essentially the same as PowerPC. 
*/ -static unsigned long mmap_rnd(void) +static unsigned long mmap_rnd(struct mm_struct *mm) { unsigned long rnd = 0UL; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (current->flags & PF_RANDOMIZE) { unsigned long val = get_random_long(); if (test_thread_flag(TIF_32BIT)) @@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void) void arch_pick_mmap_layout(struct mm_struct *mm) { - unsigned long random_factor = mmap_rnd(); + unsigned long random_factor = mmap_rnd(mm); unsigned long gap; /* @@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) gap == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; } else { /* We know it's 32-bit */ @@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index c4a1b5c40..c5e0ef330 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -62,7 +62,7 @@ sys32_rt_sigreturn: #endif .align 32 1: ldx [%g6 + TI_FLAGS], %l5 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + andcc %l5, _TIF_WORK_SYSCALL, %g0 be,pt %icc, rtrap nop call syscall_trace_leave @@ -230,7 +230,7 @@ linux_sparc_syscall32: srl %i3, 0, %o3 ! IEU0 srl %i2, 0, %o2 ! IEU0 Group - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + andcc %l0, _TIF_WORK_SYSCALL, %g0 bne,pn %icc, linux_syscall_trace32 ! CTI mov %i0, %l5 ! IEU1 5: call %l7 ! CTI Group brk forced @@ -254,7 +254,7 @@ linux_sparc_syscall: mov %i3, %o3 ! IEU1 mov %i4, %o4 ! IEU0 Group - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + andcc %l0, _TIF_WORK_SYSCALL, %g0 bne,pn %icc, linux_syscall_trace ! CTI Group mov %i0, %l5 ! IEU0 2: call %l7 ! CTI Group brk forced @@ -269,7 +269,7 @@ ret_sys_call: cmp %o0, -ERESTART_RESTARTBLOCK bgeu,pn %xcc, 1f - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + andcc %l0, _TIF_WORK_SYSCALL, %g0 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! 
pc = npc 2: diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 4f21df7d4..0a374da5c 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc) #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") +extern void gr_handle_kernel_exploit(void); + void __noreturn die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; @@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) count++ < 30 && (((unsigned long) rw) >= PAGE_OFFSET) && !(((unsigned long) rw) & 0x7)) { - printk("Caller[%08lx]: %pS\n", rw->ins[7], + printk("Caller[%08lx]: %pA\n", rw->ins[7], (void *) rw->ins[7]); rw = (struct reg_window32 *)rw->ins[6]; } } printk("Instruction DUMP:"); instruction_dump ((unsigned long *) regs->pc); - if(regs->psr & PSR_PS) + if(regs->psr & PSR_PS) { + gr_handle_kernel_exploit(); do_exit(SIGKILL); + } do_exit(SIGSEGV); } diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 4094a51b1..4a360da34 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) i + 1, p->trapstack[i].tstate, p->trapstack[i].tpc, p->trapstack[i].tnpc, p->trapstack[i].tt); - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc); } } @@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl) lvl -= 0x100; if (regs->tstate & TSTATE_PRIV) { + +#ifdef CONFIG_PAX_REFCOUNT + if (lvl == 6) + pax_report_refcount_error(regs, NULL); +#endif + sprintf(buffer, "Kernel bad sw trap %lx", lvl); die_if_kernel(buffer, regs); } @@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl) void bad_trap_tl1(struct pt_regs *regs, long lvl) { char buffer[32]; - + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; +#ifdef CONFIG_PAX_REFCOUNT + if (lvl == 6) + pax_report_refcount_error(regs, NULL); +#endif + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sprintf (buffer, "Bad trap %lx at tl>0", lvl); @@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate); printk("%s" "ERROR(%d): ", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id()); - printk("TPC<%pS>\n", (void *) regs->tpc); + printk("TPC<%pA>\n", (void *) regs->tpc); printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT, @@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) smp_processor_id(), (type & 0x1) ? 'I' : 'D', regs->tpc); - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc); panic("Irrecoverable Cheetah+ parity error."); } @@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) smp_processor_id(), (type & 0x1) ? 
'I' : 'D', regs->tpc); - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc); } struct sun4v_error_entry { @@ -1839,8 +1850,8 @@ struct sun4v_error_entry { /*0x38*/u64 reserved_5; }; -static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); -static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); +static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); +static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); static const char *sun4v_err_type_to_str(u8 type) { @@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs) } static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, - int cpu, const char *pfx, atomic_t *ocnt) + int cpu, const char *pfx, atomic_unchecked_t *ocnt) { u64 *raw_ptr = (u64 *) ent; u32 attrs; @@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, show_regs(regs); - if ((cnt = atomic_read(ocnt)) != 0) { - atomic_set(ocnt, 0); + if ((cnt = atomic_read_unchecked(ocnt)) != 0) { + atomic_set_unchecked(ocnt, 0); wmb(); printk("%s: Queue overflowed %d times.\n", pfx, cnt); @@ -2048,7 +2059,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) */ void sun4v_resum_overflow(struct pt_regs *regs) { - atomic_inc(&sun4v_resum_oflow_cnt); + atomic_inc_unchecked(&sun4v_resum_oflow_cnt); } /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. @@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs) /* XXX Actually even this can make not that much sense. Perhaps * XXX we should just pull the plug and panic directly from here? */ - atomic_inc(&sun4v_nonresum_oflow_cnt); + atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt); } static void sun4v_tlb_error(struct pt_regs *regs) @@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc); printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n", (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", @@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc); printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n", (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", @@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) fp = (unsigned long)sf->fp + STACK_BIAS; } - printk(" [%016lx] %pS\n", pc, (void *) pc); + printk(" [%016lx] %pA\n", pc, (void *) pc); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((pc + 8UL) == (unsigned long) &return_to_handler) { int index = tsk->curr_ret_stack; if (tsk->ret_stack && index >= graph) { pc = tsk->ret_stack[index - graph].ret; - printk(" [%016lx] %pS\n", pc, (void *) pc); + printk(" [%016lx] %pA\n", pc, (void *) pc); graph++; } } @@ -2386,6 +2397,8 @@ static inline struct reg_window 
*kernel_stack_up(struct reg_window *rw) return (struct reg_window *) (fp + STACK_BIAS); } +extern void gr_handle_kernel_exploit(void); + void __noreturn die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; @@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) while (rw && count++ < 30 && kstack_valid(tp, (unsigned long) rw)) { - printk("Caller[%016lx]: %pS\n", rw->ins[7], + printk("Caller[%016lx]: %pA\n", rw->ins[7], (void *) rw->ins[7]); rw = kernel_stack_up(rw); @@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } if (panic_on_oops) panic("Fatal exception"); - if (regs->tstate & TSTATE_PRIV) + if (regs->tstate & TSTATE_PRIV) { + gr_handle_kernel_exploit(); do_exit(SIGKILL); + } do_exit(SIGSEGV); } EXPORT_SYMBOL(die_if_kernel); diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 52c00d90d..6f8aa4e3a 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs) static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); if (__ratelimit(&ratelimit)) { - printk("Kernel unaligned access at TPC[%lx] %pS\n", + printk("Kernel unaligned access at TPC[%lx] %pA\n", regs->tpc, (void *) regs->tpc); } } diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 69912d2f8..6c0c227e1 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -2,7 +2,7 @@ # asflags-y := -ansi -DST_DIV0=0x02 -ccflags-y := -Werror +#ccflags-y := -Werror lib-$(CONFIG_SPARC32) += ashrdi3.o lib-$(CONFIG_SPARC32) += memcpy.o memset.o diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index 1c6a1bde5..93e969872 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S @@ -17,11 +17,22 @@ * barriers. 
*/ -#define ATOMIC_OP(op) \ -ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ +#ifdef CONFIG_PAX_REFCOUNT +#define __REFCOUNT_OP(op) op##cc +#define __OVERFLOW_IOP tvs %icc, 6; +#define __OVERFLOW_XOP tvs %xcc, 6; +#else +#define __REFCOUNT_OP(op) op +#define __OVERFLOW_IOP +#define __OVERFLOW_XOP +#endif + +#define __ATOMIC_OP(op, suffix, asm_op, post_op) \ +ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: lduw [%o1], %g1; \ - op %g1, %o0, %g7; \ + asm_op %g1, %o0, %g7; \ + post_op \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ @@ -29,14 +40,18 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic_##op); \ -EXPORT_SYMBOL(atomic_##op); +ENDPROC(atomic_##op##suffix); \ +EXPORT_SYMBOL(atomic_##op##suffix); + +#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \ + __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP) -#define ATOMIC_OP_RETURN(op) \ -ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ +#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \ +ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\ BACKOFF_SETUP(%o2); \ 1: lduw [%o1], %g1; \ - op %g1, %o0, %g7; \ + asm_op %g1, %o0, %g7; \ + post_op \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ @@ -44,8 +59,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic_##op##_return); \ -EXPORT_SYMBOL(atomic_##op##_return); +ENDPROC(atomic_##op##_return##suffix); \ +EXPORT_SYMBOL(atomic_##op##_return##suffix) + +#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \ + __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP) #define ATOMIC_FETCH_OP(op) \ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -77,13 +95,16 @@ ATOMIC_OPS(xor) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN +#undef __ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef __ATOMIC_OP -#define ATOMIC64_OP(op) \ -ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ +#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \ +ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ - op %g1, %o0, %g7; \ + asm_op %g1, %o0, %g7; \ + post_op \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ @@ -91,14 +112,18 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic64_##op); \ -EXPORT_SYMBOL(atomic64_##op); +ENDPROC(atomic64_##op##suffix); \ +EXPORT_SYMBOL(atomic64_##op##suffix); + +#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \ + __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP) -#define ATOMIC64_OP_RETURN(op) \ -ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ +#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \ +ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\ BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ - op %g1, %o0, %g7; \ + asm_op %g1, %o0, %g7; \ + post_op \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ @@ -106,8 +131,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ op %g1, %o0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic64_##op##_return); \ 
-EXPORT_SYMBOL(atomic64_##op##_return); +ENDPROC(atomic64_##op##_return##suffix); \ +EXPORT_SYMBOL(atomic64_##op##_return##suffix); + +#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \ + __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP) #define ATOMIC64_FETCH_OP(op) \ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -139,7 +167,12 @@ ATOMIC64_OPS(xor) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN +#undef __ATOMIC64_OP_RETURN #undef ATOMIC64_OP +#undef __ATOMIC64_OP +#undef __OVERFLOW_XOP +#undef __OVERFLOW_IOP +#undef __REFCOUNT_OP ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ BACKOFF_SETUP(%o2) diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index 30c3eccfd..736f015d4 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -2,7 +2,7 @@ # asflags-y := -ansi -ccflags-y := -Werror +#ccflags-y := -Werror obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o obj-y += fault_$(BITS).o diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 4714061d6..bad7f9ada 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -22,6 +22,9 @@ #include #include #include +#include +#include +#include #include #include @@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) return safe_compute_effective_address(regs, insn); } +#ifdef CONFIG_PAX_PAGEEXEC +#ifdef CONFIG_PAX_DLRESOLVE +static void pax_emuplt_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + unsigned int *kaddr; + + vmf->page = alloc_page(GFP_HIGHUSER); + if (!vmf->page) + return VM_FAULT_OOM; + + kaddr = kmap(vmf->page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(vmf->page); + kunmap(vmf->page); + return VM_FAULT_MAJOR; +} + +static const struct vm_operations_struct pax_vm_ops = { + .close = pax_emuplt_close, + .fault = pax_emuplt_fault +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + INIT_LIST_HEAD(&vma->anon_vma_chain); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + ++current->mm->total_vm; + return 0; +} +#endif + +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int *)regs->pc); + err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); + err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned int addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT 
emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int *)regs->pc); + + if (err) + break; + + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) { + unsigned int addr; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + else + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, bajmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->pc); + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4)); + err |= get_user(nop, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) && + nop == 0x01000000U) + { + unsigned int addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U) + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + else + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->pc); + err |= get_user(ba, (unsigned int *)(regs->pc+4)); + err |= get_user(nop, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned int addr, save, call; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + else + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); + + err = get_user(save, (unsigned int *)addr); + err |= get_user(call, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + if (err) + break; + +#ifdef CONFIG_PAX_DLRESOLVE + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(&current->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(&current->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + + down_write(&current->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(&current->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + up_write(&current->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_dl_resolve)) { + up_write(&current->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_dl_resolve = call_dl_resolve; + up_write(&current->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->pc = call_dl_resolve; + regs->npc = addr+4; + return 3; + } +#endif + + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ + if ((save & 0xFFC00000U) == 0x05000000U && + (call & 0xFFFFE000U) == 0x85C0A000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi &
0x003FFFFFU) << 10; + regs->u_regs[UREG_G2] = addr + 4; + addr = (save & 0x003FFFFFU) << 10; + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 3; + } + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int *)(regs->pc-4)); + err |= get_user(call, (unsigned int *)regs->pc); + err |= get_user(nop, (unsigned int *)(regs->pc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); + + regs->u_regs[UREG_RETPC] = regs->pc; + regs->pc = dl_resolve; + regs->npc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, int text_fault) { @@ -226,6 +500,24 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, if (!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); + do_group_exit(SIGKILL); + } +#endif + /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 643c149a3..845c11351 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -23,6 +23,9 @@ #include #include #include +#include +#include +#include #include #include @@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", regs->tpc); printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]); printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); dump_stack(); unhandled_fault(regs->tpc, current, regs); @@ -276,6 +279,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) show_regs(regs); } +#ifdef CONFIG_PAX_PAGEEXEC +#ifdef CONFIG_PAX_DLRESOLVE +static void pax_emuplt_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + unsigned int *kaddr; + + vmf->page = alloc_page(GFP_HIGHUSER); + if (!vmf->page) + return VM_FAULT_OOM; + + kaddr = kmap(vmf->page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(vmf->page); + kunmap(vmf->page); + return VM_FAULT_MAJOR; +} + +static const struct vm_operations_struct pax_vm_ops = { + .close = pax_emuplt_close, + .fault = pax_emuplt_fault +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + INIT_LIST_HEAD(&vma->anon_vma_chain); + vma->vm_mm = current->mm; + 
vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + ++current->mm->total_vm; + return 0; +} +#endif + +/* + * PaX: decide what to do with offenders (regs->tpc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int *)regs->tpc); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int *)regs->tpc); + + if (err) + break; + + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) { + unsigned long addr; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + else + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, bajmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U) + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + else + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #4 */ + unsigned int sethi, mov1, call, mov2; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); + err |= get_user(call, (unsigned int *)(regs->tpc+8)); + err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + mov1 == 0x8210000FU && + (call & 0xC0000000U) == 0x40000000U && + mov2 == 0x9E100001U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) 
+ addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #5 */ + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); + err |= get_user(or1, (unsigned int *)(regs->tpc+12)); + err |= get_user(or2, (unsigned int *)(regs->tpc+16)); + err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); + err |= get_user(nop, (unsigned int *)(regs->tpc+28)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + (or1 & 0xFFFFE000U) == 0x82106000U && + (or2 & 0xFFFFE000U) == 0x8A116000U && + sllx == 0x83287020U && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #6 */ + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); + err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); + err |= get_user(or, (unsigned int *)(regs->tpc+16)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); + err |= get_user(nop, (unsigned int *)(regs->tpc+24)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + sllx == 0x83287020U && + (or & 0xFFFFE000U) == 0x8A116000U && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(ba, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned long addr; + unsigned int save, call; + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + else + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + err = get_user(save, (unsigned int *)addr); + err |= get_user(call, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + if (err) + break; + +#ifdef CONFIG_PAX_DLRESOLVE + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct 
*vma; + unsigned long call_dl_resolve; + + down_read(&current->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(&current->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + + down_write(&current->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(&current->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + up_write(&current->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_dl_resolve)) { + up_write(&current->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_dl_resolve = call_dl_resolve; + up_write(&current->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->tpc = call_dl_resolve; + regs->tnpc = addr+4; + return 3; + } +#endif + + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ + if ((save & 0xFFC00000U) == 0x05000000U && + (call & 0xFFFFE000U) == 0x85C0A000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G2] = addr + 4; + addr = (save & 0x003FFFFFU) << 10; + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 3; + } + + /* PaX: 64-bit PLT stub */ + err = get_user(sethi1, (unsigned int *)addr); + err |= get_user(sethi2, (unsigned int *)(addr+4)); + err |= get_user(or1, (unsigned int *)(addr+8)); + err |= get_user(or2, (unsigned int *)(addr+12)); + err |= get_user(sllx, (unsigned int *)(addr+16)); + err |= get_user(add, (unsigned int *)(addr+20)); + err |= get_user(jmpl, (unsigned int *)(addr+24)); + err |= get_user(nop, (unsigned int *)(addr+28)); + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x09000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + (or1 & 0xFFFFE000U) == 0x88112000U && + (or2 & 0xFFFFE000U) == 0x8A116000U && + sllx == 0x89293020U && + add == 0x8A010005U && + jmpl == 0x89C14000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); + regs->u_regs[UREG_G4] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; + regs->u_regs[UREG_G4] = addr + 24; + addr = regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 3; + } + } + } while (0); + +#ifdef CONFIG_PAX_DLRESOLVE + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int *)(regs->tpc-4)); + err |= get_user(call, (unsigned int *)regs->tpc); + err |= get_user(nop, (unsigned int *)(regs->tpc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + dl_resolve &= 0xFFFFFFFFUL; + + regs->u_regs[UREG_RETPC] = regs->tpc; + regs->tpc = dl_resolve; + regs->tnpc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ + unsigned
int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(ba, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (ba & 0xFFF00000U) == 0x30600000U && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + +#endif + + return 1; +} + +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); @@ -350,6 +813,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) if (!vma) goto bad_area; +#ifdef CONFIG_PAX_PAGEEXEC + /* PaX: detect ITLB misses on non-exec pages */ + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) + { + if (address != regs->tpc) + goto good_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); + do_group_exit(SIGKILL); + } +#endif + /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. 
So in such a case we (carefully) read the diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 988acc8b1..f26345c45 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -26,8 +26,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, + unsigned long offset) { + struct mm_struct *mm = current->mm; unsigned long task_size = TASK_SIZE; struct vm_unmapped_area_info info; @@ -36,15 +38,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, info.flags = 0; info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; + info.low_limit = mm->mmap_base; info.high_limit = min(task_size, VA_EXCLUDE_START); info.align_mask = PAGE_MASK & ~HPAGE_MASK; info.align_offset = 0; + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { VM_BUG_ON(addr != -ENOMEM); info.low_limit = VA_EXCLUDE_END; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + info.low_limit += mm->delta_mmap; +#endif + info.high_limit = task_size; addr = vm_unmapped_area(&info); } @@ -53,10 +62,11 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, } static unsigned long -hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, - const unsigned long pgoff, - const unsigned long flags) +hugetlb_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, + unsigned long len, + unsigned long pgoff, + unsigned long flags, + unsigned long offset) { struct mm_struct *mm = current->mm; unsigned long addr = addr0; @@ -71,6 +81,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.high_limit = mm->mmap_base; info.align_mask = PAGE_MASK & ~HPAGE_MASK; info.align_offset = 0; + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); /* @@ -83,6 +94,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + info.low_limit += mm->delta_mmap; +#endif + info.high_limit = STACK_TOP32; addr = vm_unmapped_area(&info); } @@ -97,6 +114,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long task_size = TASK_SIZE; + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags); if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; @@ -112,19 +130,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return addr; } +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) return hugetlb_get_unmapped_area_bottomup(file, addr, len, - pgoff, flags); + pgoff, flags, offset); else return hugetlb_get_unmapped_area_topdown(file, addr, len, - pgoff, flags); + pgoff, flags, offset); } pte_t *huge_pte_alloc(struct mm_struct *mm, diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 37aa537b3..06b756c14 100644 --- a/arch/sparc/mm/init_64.c +++ 
b/arch/sparc/mm/init_64.c @@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly; int num_kernel_image_mappings; #ifdef CONFIG_DEBUG_DCFLUSH -atomic_t dcpage_flushes = ATOMIC_INIT(0); +atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0); #ifdef CONFIG_SMP -atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); +atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0); #endif #endif @@ -199,7 +199,7 @@ inline void flush_dcache_page_impl(struct page *page) { BUG_ON(tlb_type == hypervisor); #ifdef CONFIG_DEBUG_DCFLUSH - atomic_inc(&dcpage_flushes); + atomic_inc_unchecked(&dcpage_flushes); #endif #ifdef DCACHE_ALIASING_POSSIBLE @@ -462,10 +462,10 @@ void mmu_info(struct seq_file *m) #ifdef CONFIG_DEBUG_DCFLUSH seq_printf(m, "DCPageFlushes\t: %d\n", - atomic_read(&dcpage_flushes)); + atomic_read_unchecked(&dcpage_flushes)); #ifdef CONFIG_SMP seq_printf(m, "DCPageFlushesXC\t: %d\n", - atomic_read(&dcpage_flushes_xcall)); + atomic_read_unchecked(&dcpage_flushes_xcall)); #endif /* CONFIG_SMP */ #endif /* CONFIG_DEBUG_DCFLUSH */ } diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 4583c0320..5e074bb8a 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -192,6 +192,7 @@ source "kernel/Kconfig.hz" config KEXEC bool "kexec system call" select KEXEC_CORE + depends on !GRKERNSEC_KMEM ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index 4cefa0c9f..98d8b8365 100644 --- a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h @@ -195,6 +195,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_TILE_ATOMIC_64_H */ diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h index 4810e48db..08b733bd6 100644 --- a/arch/tile/include/asm/cache.h +++ b/arch/tile/include/asm/cache.h @@ -15,11 +15,12 @@ #ifndef _ASM_TILE_CACHE_H #define _ASM_TILE_CACHE_H +#include #include /* bytes per L1 data cache line */ #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per L2 cache line */ #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index a77369e91..7ba6ecd51 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h @@ -428,9 +428,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - int sz = __compiletime_object_size(to); + size_t sz = __compiletime_object_size(to); - if (likely(sz == -1 || sz >= n)) + if (likely(sz == (size_t)-1 || sz >= n)) n = _copy_from_user(to, from, n); else if (!__builtin_constant_p(n)) copy_user_overflow(sz, n); diff --git 
a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c index c68694bb1..12bf0cb7b 100644 --- a/arch/tile/kernel/kprobes.c +++ b/arch/tile/kernel/kprobes.c @@ -430,6 +430,7 @@ static void __used kretprobe_trampoline_holder(void) void kretprobe_trampoline(void); +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -438,6 +439,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* Replace the return addr with trampoline addr */ regs->lr = (unsigned long)kretprobe_trampoline; } +#endif /* * Called when the probe at kretprobe trampoline is hit. @@ -507,6 +509,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, return 1; } +#ifdef CONFIG_KRETPROBES int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) @@ -514,6 +517,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) return 0; } +#endif static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *)kretprobe_trampoline, diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 77ceaa343..3630dea00 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -174,6 +174,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, info.high_limit = TASK_SIZE; info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; + info.threadstack_offset = 0; return vm_unmapped_area(&info); } @@ -191,6 +192,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, info.high_limit = current->mm->mmap_base; info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; + info.threadstack_offset = 0; addr = vm_unmapped_area(&info); /* diff --git a/arch/um/Makefile b/arch/um/Makefile index 0ca46eded..8d7fd38ec 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -73,6 +73,8 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \ -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \ -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__ +USER_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(USER_CFLAGS)) + #This will adjust *FLAGS accordingly to the platform. include $(ARCH_DIR)/Makefile-os-$(OS) diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 62087028a..00292c812 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -377,7 +377,7 @@ int setup_one_line(struct line *lines, int n, char *init, struct tty_driver *driver = line->driver->driver; int err = -EINVAL; - if (line->port.count) { + if (atomic_read(&line->port.count)) { *error_out = "Device is already open"; goto out; } diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h index 19e1bdd67..3665b77a6 100644 --- a/arch/um/include/asm/cache.h +++ b/arch/um/include/asm/cache.h @@ -1,6 +1,7 @@ #ifndef __UM_CACHE_H #define __UM_CACHE_H +#include #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT) # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) @@ -12,6 +13,6 @@ # define L1_CACHE_SHIFT 5 #endif -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #endif diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h index 2e0a6b1d8..a64d0f53a 100644 --- a/arch/um/include/asm/kmap_types.h +++ b/arch/um/include/asm/kmap_types.h @@ -8,6 +8,6 @@ /* No more #include "asm/arch/kmap_types.h" ! 
*/ -#define KM_TYPE_NR 14 +#define KM_TYPE_NR 15 #endif diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h index f878bec23..ca0930054 100644 --- a/arch/um/include/asm/page.h +++ b/arch/um/include/asm/page.h @@ -14,6 +14,9 @@ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #ifndef __ASSEMBLY__ struct page; diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index bae8523a1..ba9484b8f 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h @@ -58,6 +58,7 @@ #define pud_present(x) (pud_val(x) & _PAGE_PRESENT) #define pud_populate(mm, pud, pmd) \ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) #ifdef CONFIG_64BIT #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval)) diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 034b42c7a..5c186ce90 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -343,22 +343,6 @@ int singlestepping(void * t) return 2; } -/* - * Only x86 and x86_64 have an arch_align_stack(). - * All other arches have "#define arch_align_stack(x) (x)" - * in their asm/exec.h - * As this is included in UML from asm-um/system-generic.h, - * we can use it to behave as the subarch does. - */ -#ifndef arch_align_stack -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() % 8192; - return sp & ~0xf; -} -#endif - unsigned long get_wchan(struct task_struct *p) { unsigned long stack_page, sp, ip; diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h index ad8f795d8..2c7eec635 100644 --- a/arch/unicore32/include/asm/cache.h +++ b/arch/unicore32/include/asm/cache.h @@ -12,8 +12,10 @@ #ifndef __UNICORE_CACHE_H__ #define __UNICORE_CACHE_H__ -#define L1_CACHE_SHIFT (5) -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#include + +#define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* * Memory returned by kmalloc() may be used for DMA, so we must make diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index bada636d1..1775eac0a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -39,14 +39,13 @@ config X86 select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT - select ARCH_SUPPORTS_INT128 if X86_64 + select ARCH_SUPPORTS_INT128 if X86_64 && !PAX_SIZE_OVERFLOW_EXTRA && !PAX_SIZE_OVERFLOW select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if X86_64 select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP - select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_IPC_PARSE_VERSION if X86_32 select BUILDTIME_EXTABLE_SORT @@ -94,7 +93,7 @@ config X86 select HAVE_ARCH_WITHIN_STACK_FRAMES select HAVE_EBPF_JIT if X86_64 select HAVE_ARCH_VMAP_STACK if X86_64 - select HAVE_CC_STACKPROTECTOR + select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING if X86_64 @@ -136,6 +135,7 @@ config X86 select HAVE_NMI select HAVE_OPROFILE select HAVE_OPTPROBES + select HAVE_PAX_INITIFY_INIT_EXIT if GCC_PLUGINS select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS select 
HAVE_PERF_EVENTS_NMI @@ -190,11 +190,13 @@ config MMU def_bool y config ARCH_MMAP_RND_BITS_MIN - default 28 if 64BIT + default 28 if 64BIT && !PAX_PER_CPU_PGD + default 27 if 64BIT && PAX_PER_CPU_PGD default 8 config ARCH_MMAP_RND_BITS_MAX - default 32 if 64BIT + default 32 if 64BIT && !PAX_PER_CPU_PGD + default 27 if 64BIT && PAX_PER_CPU_PGD default 16 config ARCH_MMAP_RND_COMPAT_BITS_MIN @@ -296,7 +298,7 @@ config X86_64_SMP config X86_32_LAZY_GS def_bool y - depends on X86_32 && !CC_STACKPROTECTOR + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF config ARCH_SUPPORTS_UPROBES def_bool y @@ -690,6 +692,7 @@ config SCHED_OMIT_FRAME_POINTER menuconfig HYPERVISOR_GUEST bool "Linux guest support" + depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN) ---help--- Say Y here to enable options for running Linux under various hyper- visors. This option enables basic hypervisor detection and platform @@ -1090,6 +1093,7 @@ config VM86 config X86_16BIT bool "Enable support for 16-bit segments" if EXPERT + depends on !GRKERNSEC default y depends on MODIFY_LDT_SYSCALL ---help--- @@ -1244,6 +1248,7 @@ choice config NOHIGHMEM bool "off" + depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE) ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 @@ -1280,6 +1285,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" + depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE) ---help--- Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. @@ -1332,7 +1338,7 @@ config PAGE_OFFSET hex default 0xB0000000 if VMSPLIT_3G_OPT default 0x80000000 if VMSPLIT_2G - default 0x78000000 if VMSPLIT_2G_OPT + default 0x70000000 if VMSPLIT_2G_OPT default 0x40000000 if VMSPLIT_1G default 0xC0000000 depends on X86_32 @@ -1353,7 +1359,6 @@ config X86_PAE config ARCH_PHYS_ADDR_T_64BIT def_bool y - depends on X86_64 || X86_PAE config ARCH_DMA_ADDR_T_64BIT def_bool y @@ -1484,7 +1489,7 @@ config ARCH_PROC_KCORE_TEXT config ILLEGAL_POINTER_VALUE hex - default 0 if X86_32 + default 0xfffff000 if X86_32 default 0xdead000000000000 if X86_64 source "mm/Kconfig" @@ -1807,6 +1812,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" select KEXEC_CORE + depends on !GRKERNSEC_KMEM ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot @@ -1934,7 +1940,7 @@ config RELOCATABLE config RANDOMIZE_BASE bool "Randomize the address of the kernel image (KASLR)" - depends on RELOCATABLE + depends on RELOCATABLE && BROKEN_SECURITY default n ---help--- In support of Kernel Address Space Layout Randomization (KASLR), @@ -1978,7 +1984,9 @@ config X86_NEED_RELOCS config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" - default "0x200000" + default "0x1000000" + range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE + range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE range 0x2000 0x1000000 if X86_32 range 0x200000 0x1000000 if X86_64 ---help--- @@ -2093,6 +2101,7 @@ config COMPAT_VDSO def_bool n prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)" depends on X86_32 || IA32_EMULATION + depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF ---help--- Certain buggy versions of glibc will crash if they are presented with a 32-bit vDSO that is not mapped at the address @@ -2133,15 +2142,6 @@ choice If unsure, select "Emulate". - config LEGACY_VSYSCALL_NATIVE - bool "Native" - help - Actual executable code is located in the fixed vsyscall - address mapping, implementing time() efficiently. Since - this makes the mapping executable, it can be used during - security vulnerability exploitation (traditionally as - ROP gadgets). This configuration is not recommended. - config LEGACY_VSYSCALL_EMULATE bool "Emulate" help @@ -2222,6 +2222,22 @@ config MODIFY_LDT_SYSCALL Saying 'N' here may make sense for embedded or server kernels. +config DEFAULT_MODIFY_LDT_SYSCALL + bool "Allow userspace to modify the LDT by default" + default y + + ---help--- + Modifying the LDT (Local Descriptor Table) may be needed to run a + 16-bit or segmented code such as Dosemu or Wine. This is done via + a system call which is not needed to run portable applications, + and which can sometimes be abused to exploit some weaknesses of + the architecture, opening new vulnerabilities. + + For this reason this option allows one to enable or disable the + feature at runtime. It is recommended to say 'N' here to leave + the system protected, and to enable it at runtime only if needed + by setting the sys.kernel.modify_ldt sysctl. + source "kernel/livepatch/Kconfig" endmenu diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 3ba5ff2f2..44bdacc79 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -329,7 +329,7 @@ config X86_PPRO_FENCE config X86_F00F_BUG def_bool y - depends on M586MMX || M586TSC || M586 || M486 + depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC config X86_INVD_BUG def_bool y @@ -337,7 +337,7 @@ config X86_INVD_BUG config X86_ALIGNMENT_16 def_bool y - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 config X86_INTEL_USERCOPY def_bool y @@ -379,7 +379,7 @@ config X86_CMPXCHG64 # generates cmov. 
config X86_CMOV def_bool y - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) config X86_MINIMUM_CPU_FAMILY int diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 67eec5509..1a5c1ab1d 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -55,6 +55,7 @@ config X86_PTDUMP tristate "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL select DEBUG_FS + depends on !GRKERNSEC_KMEM select X86_PTDUMP_CORE ---help--- Say Y here if you want to show the kernel pagetable layout in a @@ -84,6 +85,7 @@ config DEBUG_RODATA_TEST config DEBUG_WX bool "Warn on W+X mappings at boot" + depends on BROKEN select X86_PTDUMP_CORE ---help--- Generate a warning if any W+X mappings are found at boot. @@ -111,7 +113,7 @@ config DEBUG_WX config DEBUG_SET_MODULE_RONX bool "Set loadable kernel module data as NX and text as RO" - depends on MODULES + depends on MODULES && BROKEN ---help--- This option helps catch unintended modifications to loadable kernel module's text and read-only data. It also prevents execution @@ -353,6 +355,7 @@ config X86_DEBUG_FPU config PUNIT_ATOM_DEBUG tristate "ATOM Punit debug driver" select DEBUG_FS + depends on !GRKERNSEC_KMEM select IOSF_MBI ---help--- This is a debug driver, which gets the power states diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 2d449337a..86ecceb84 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -75,9 +75,6 @@ ifeq ($(CONFIG_X86_32),y) # CPU-specific tuning. Anything which can be shared with UML should go here. include arch/x86/Makefile_32.cpu KBUILD_CFLAGS += $(cflags-y) - - # temporary until string.h is fixed - KBUILD_CFLAGS += -ffreestanding else BITS := 64 UTS_MACHINE := x86_64 @@ -126,6 +123,9 @@ else KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args) endif +# temporary until string.h is fixed +KBUILD_CFLAGS += -ffreestanding + ifdef CONFIG_X86_X32 x32_ld_ok := $(call try-run,\ /bin/echo -e '1: .quad 1b' | \ @@ -192,6 +192,7 @@ archheaders: $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all archprepare: + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) ifeq ($(CONFIG_KEXEC_FILE),y) $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c endif @@ -278,3 +279,9 @@ define archhelp echo ' FDARGS="..." arguments for the booted kernel' echo ' FDINITRD=file initrd for the booted kernel' endef + +define OLD_LD + +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. +*** Please upgrade your binutils to 2.18 or newer +endef diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 12ea8f838..46969be22 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -11,6 +11,7 @@ KASAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +GCC_PLUGINS := n # Kernel does not boot with kcov instrumentation here. 
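One note on the arch/x86/Makefile hunk earlier in this chunk: the temporary -ffreestanding workaround moves out of the 32-bit-only branch so it now applies to every build "until string.h is fixed". As a hedged reminder of what the flag changes (illustrative C, not kernel code): in a hosted build GCC treats functions such as memcpy() as builtins and may fold or rewrite calls to them, while -ffreestanding drops those assumptions so the kernel's own string routines are called exactly as written.

    /* Illustrative only.  Build with and without -ffreestanding and compare:
     * hosted GCC may replace this call using its builtin knowledge of
     * memcpy(); -ffreestanding keeps the call as written so the kernel's
     * own implementation is used. */
    #include <stddef.h>

    void *memcpy(void *dst, const void *src, size_t n);     /* kernel-provided prototype */

    void set_magic(unsigned int *dst)
    {
            static const unsigned int magic = 0xdeadbeef;

            memcpy(dst, &magic, sizeof(magic));
    }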
# One of the problems observed was insertion of __sanitizer_cov_trace_pc() diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h index 0d41d6813..2d6120cf3 100644 --- a/arch/x86/boot/bitops.h +++ b/arch/x86/boot/bitops.h @@ -28,7 +28,7 @@ static inline bool variable_test_bit(int nr, const void *addr) bool v; const u32 *p = (const u32 *)addr; - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); return v; } @@ -39,7 +39,7 @@ static inline bool variable_test_bit(int nr, const void *addr) static inline void set_bit(int nr, void *addr) { - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); } #endif /* BOOT_BITOPS_H */ diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index e5612f3e3..e755d051b 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -84,7 +84,7 @@ static inline void io_delay(void) static inline u16 ds(void) { u16 seg; - asm("movw %%ds,%0" : "=rm" (seg)); + asm volatile("movw %%ds,%0" : "=rm" (seg)); return seg; } diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 34d9e1585..c26e0471b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -18,6 +18,7 @@ KASAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +GCC_PLUGINS := n # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. KCOV_INSTRUMENT := n @@ -35,6 +36,23 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse KBUILD_CFLAGS += $(call cc-option,-ffreestanding) KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) +ifdef CONFIG_DEBUG_INFO +ifdef CONFIG_DEBUG_INFO_SPLIT +KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g) +else +KBUILD_CFLAGS += -g +endif +KBUILD_AFLAGS += -Wa,--gdwarf-2 +endif +ifdef CONFIG_DEBUG_INFO_DWARF4 +KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,) +endif + +ifdef CONFIG_DEBUG_INFO_REDUCED +KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \ + $(call cc-option,-fno-var-tracking) +endif + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n UBSAN_SANITIZE :=n diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S index a53440e81..c3dbf1ed7 100644 --- a/arch/x86/boot/compressed/efi_stub_32.S +++ b/arch/x86/boot/compressed/efi_stub_32.S @@ -46,16 +46,13 @@ ENTRY(efi_call_phys) * parameter 2, ..., param n. To make things easy, we save the return * address of efi_call_phys in a global variable. */ - popl %ecx - movl %ecx, saved_return_addr(%edx) - /* get the function pointer into ECX*/ - popl %ecx - movl %ecx, efi_rt_function_ptr(%edx) + popl saved_return_addr(%edx) + popl efi_rt_function_ptr(%edx) /* * 3. Call the physical function. */ - call *%ecx + call *efi_rt_function_ptr(%edx) /* * 4. Balance the stack. And because EAX contain the return value, @@ -67,15 +64,12 @@ ENTRY(efi_call_phys) 1: popl %edx subl $1b, %edx - movl efi_rt_function_ptr(%edx), %ecx - pushl %ecx + pushl efi_rt_function_ptr(%edx) /* * 10. Push the saved return address onto the stack and return. 
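The efi_thunk_64.S and head_64.S GDT hunks just below change the __KERNEL_CS/__KERNEL_DS descriptors from 0x..9a../0x..92.. to 0x..9b../0x..93.., i.e. they pre-set the "accessed" bit in the access byte, presumably so the CPU never has to write that bit back into a descriptor table that KERNEXEC may keep read-only. A small decoding aid (plain C, not kernel code) makes the one-bit difference visible:

    /* Decode the access byte (bits 40..47) of a GDT descriptor to show that
     * 0x9a -> 0x9b and 0x92 -> 0x93 differ only in bit 0, the accessed flag. */
    #include <stdio.h>

    static void decode_access(unsigned long long desc)
    {
            unsigned int access = (desc >> 40) & 0xff;

            printf("desc=%#018llx access=%#04x present=%u dpl=%u exec=%u accessed=%u\n",
                   desc, access,
                   (access >> 7) & 1, (access >> 5) & 3,
                   (access >> 3) & 1, access & 1);
    }

    int main(void)
    {
            decode_access(0x00af9a000000ffffULL);   /* old __KERNEL_CS */
            decode_access(0x00af9b000000ffffULL);   /* new __KERNEL_CS, accessed pre-set */
            decode_access(0x00cf92000000ffffULL);   /* old __KERNEL_DS */
            decode_access(0x00cf93000000ffffULL);   /* new __KERNEL_DS, accessed pre-set */
            return 0;
    }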
*/ - movl saved_return_addr(%edx), %ecx - pushl %ecx - ret + jmpl *saved_return_addr(%edx) ENDPROC(efi_call_phys) .previous diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S index 99494dff2..7fa59bf5a 100644 --- a/arch/x86/boot/compressed/efi_stub_64.S +++ b/arch/x86/boot/compressed/efi_stub_64.S @@ -2,4 +2,5 @@ #include #include +#define efi_call efi_call_early #include "../../platform/efi/efi_stub_64.S" diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S index 630384a4c..278e7884d 100644 --- a/arch/x86/boot/compressed/efi_thunk_64.S +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -189,8 +189,8 @@ efi_gdt64: .long 0 /* Filled out by user */ .word 0 .quad 0x0000000000000000 /* NULL descriptor */ - .quad 0x00af9a000000ffff /* __KERNEL_CS */ - .quad 0x00cf92000000ffff /* __KERNEL_DS */ + .quad 0x00af9b000000ffff /* __KERNEL_CS */ + .quad 0x00cf93000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ efi_gdt64_end: diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index fd0b6a272..720686443 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -169,10 +169,10 @@ preferred_addr: addl %eax, %ebx notl %eax andl %eax, %ebx - cmpl $LOAD_PHYSICAL_ADDR, %ebx + cmpl $____LOAD_PHYSICAL_ADDR, %ebx jge 1f #endif - movl $LOAD_PHYSICAL_ADDR, %ebx + movl $____LOAD_PHYSICAL_ADDR, %ebx 1: /* Target address to relocate to for decompression */ diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index efdfba21a..af6d96264 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -103,10 +103,10 @@ ENTRY(startup_32) addl %eax, %ebx notl %eax andl %eax, %ebx - cmpl $LOAD_PHYSICAL_ADDR, %ebx + cmpl $____LOAD_PHYSICAL_ADDR, %ebx jge 1f #endif - movl $LOAD_PHYSICAL_ADDR, %ebx + movl $____LOAD_PHYSICAL_ADDR, %ebx 1: /* Target address to relocate to for decompression */ @@ -333,10 +333,10 @@ preferred_addr: addq %rax, %rbp notq %rax andq %rax, %rbp - cmpq $LOAD_PHYSICAL_ADDR, %rbp + cmpq $____LOAD_PHYSICAL_ADDR, %rbp jge 1f #endif - movq $LOAD_PHYSICAL_ADDR, %rbp + movq $____LOAD_PHYSICAL_ADDR, %rbp 1: /* Target address to relocate to for decompression */ @@ -444,8 +444,8 @@ gdt: .long gdt .word 0 .quad 0x0000000000000000 /* NULL descriptor */ - .quad 0x00af9a000000ffff /* __KERNEL_CS */ - .quad 0x00cf92000000ffff /* __KERNEL_DS */ + .quad 0x00af9b000000ffff /* __KERNEL_CS */ + .quad 0x00cf93000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ gdt_end: @@ -465,7 +465,7 @@ efi32_config: .global efi64_config efi64_config: .fill 4,8,0 - .quad efi_call + .quad efi_call_early .byte 1 #endif /* CONFIG_EFI_STUB */ diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index b3c5a5f03..596115e47 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -176,13 +176,17 @@ static void handle_relocations(void *output, unsigned long output_len, int *reloc; unsigned long delta, map, ptr; unsigned long min_addr = (unsigned long)output; +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + unsigned long max_addr = min_addr + (VO___bss_start - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR); +#else unsigned long max_addr = min_addr + (VO___bss_start - VO__text); +#endif /* * Calculate the delta between where vmlinux was 
linked to load * and where it was actually loaded. */ - delta = min_addr - LOAD_PHYSICAL_ADDR; + delta = min_addr - ____LOAD_PHYSICAL_ADDR; /* * The kernel contains a table of relocation addresses. Those @@ -199,7 +203,7 @@ static void handle_relocations(void *output, unsigned long output_len, * from __START_KERNEL_map. */ if (IS_ENABLED(CONFIG_X86_64)) - delta = virt_addr - LOAD_PHYSICAL_ADDR; + delta = virt_addr - ____LOAD_PHYSICAL_ADDR; if (!delta) { debug_putstr("No relocation needed... "); @@ -274,7 +278,7 @@ static void parse_elf(void *output) Elf32_Ehdr ehdr; Elf32_Phdr *phdrs, *phdr; #endif - void *dest; + void *dest, *prev; int i; memcpy(&ehdr, output, sizeof(ehdr)); @@ -301,11 +305,14 @@ static void parse_elf(void *output) case PT_LOAD: #ifdef CONFIG_RELOCATABLE dest = output; - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); #else dest = (void *)(phdr->p_paddr); #endif memmove(dest, output + phdr->p_offset, phdr->p_filesz); + if (i) + memset(prev, 0xff, dest - prev); + prev = dest + phdr->p_filesz; break; default: /* Ignore other PT_* */ break; } @@ -337,7 +344,11 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, unsigned char *output, unsigned long output_len) { +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + const unsigned long kernel_total_size = VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR; +#else const unsigned long kernel_total_size = VO__end - VO__text; +#endif unsigned long virt_addr = (unsigned long)output; /* Retain x86 boot parameters pointer passed from startup_32/64. */ @@ -395,7 +406,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, error("Destination address too large"); #endif #ifndef CONFIG_RELOCATABLE - if ((unsigned long)output != LOAD_PHYSICAL_ADDR) + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) error("Destination address does not match LOAD_PHYSICAL_ADDR"); if ((unsigned long)output != virt_addr) error("Destination virtual address changed when not relocatable"); diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c index 56589d0a8..f2085bef3 100644 --- a/arch/x86/boot/compressed/pagetable.c +++ b/arch/x86/boot/compressed/pagetable.c @@ -14,6 +14,7 @@ */ #define __pa(x) ((unsigned long)(x)) #define __va(x) ((void *)((unsigned long)(x))) +#undef CONFIG_PAX_KERNEXEC #include "misc.h" diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 4ad7d70e8..c70396355 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c @@ -126,9 +126,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) u32 ecx = MSR_K7_HWCR; u32 eax, edx; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); eax &= ~(1 << 15); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); get_cpuflags(); /* Make sure it really did something */ err = check_cpuflags(); @@ -141,9 +141,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) u32 ecx = MSR_VIA_FCR; u32 eax, edx; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); eax |= (1<<1)|(1<<7); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); set_bit(X86_FEATURE_CX8, cpu.flags); err = check_cpuflags(); @@ -154,12 +154,12 @@ int check_cpu(int *cpu_level_ptr, int 
*req_level_ptr, u32 **err_flags_ptr) u32 eax, edx; u32 level = 1; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); - asm("cpuid" + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); + asm volatile("cpuid" : "+a" (level), "=d" (cpu.flags[0]) : : "ecx", "ebx"); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); err = check_cpuflags(); } else if (err == 0x01 && diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 3dd5be33a..16720a237 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -438,7 +438,7 @@ setup_data: .quad 0 # 64-bit physical pointer to # single linked list of # struct setup_data -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr # # Getting to provably safe in-place decompression is hard. Worst case @@ -543,7 +543,12 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_min_extract_offset) +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR) +#else #define VO_INIT_SIZE (VO__end - VO__text) +#endif + #if ZO_INIT_SIZE > VO_INIT_SIZE # define INIT_SIZE ZO_INIT_SIZE #else diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index db75d07c3..8e6d0afad 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c @@ -19,7 +19,7 @@ static int detect_memory_e820(void) { - int count = 0; + unsigned int count = 0; struct biosregs ireg, oreg; struct e820entry *desc = boot_params.e820_map; static struct e820entry buf; /* static so it is zeroed */ diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index ba3e10065..6501b8fa6 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -201,6 +201,7 @@ static void vesa_store_pm_info(void) boot_params.screen_info.vesapm_seg = oreg.es; boot_params.screen_info.vesapm_off = oreg.di; + boot_params.screen_info.vesapm_size = oreg.cx; } /* diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 77780e386..86be0cbe1 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -100,7 +100,7 @@ static void store_mode_params(void) static unsigned int get_entry(void) { char entry_buf[4]; - int i, len = 0; + unsigned int i, len = 0; int key; unsigned int v; diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S index 2849dbc59..d7ff39cf2 100644 --- a/arch/x86/crypto/aes-i586-asm_32.S +++ b/arch/x86/crypto/aes-i586-asm_32.S @@ -38,6 +38,7 @@ #include #include +#include #define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words) @@ -286,7 +287,7 @@ ENTRY(aes_enc_blk) pop %ebx mov %r0,(%ebp) pop %ebp - ret + pax_ret aes_enc_blk ENDPROC(aes_enc_blk) // AES (Rijndael) Decryption Subroutine @@ -358,5 +359,5 @@ ENTRY(aes_dec_blk) pop %ebx mov %r0,(%ebp) pop %ebp - ret + pax_ret aes_dec_blk ENDPROC(aes_dec_blk) diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S index 910565547..cf8174710 100644 --- a/arch/x86/crypto/aes-x86_64-asm_64.S +++ b/arch/x86/crypto/aes-x86_64-asm_64.S @@ -8,6 +8,8 @@ * including this sentence is retained in full. 
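Several boot-time helpers in this stretch of the patch (boot/bitops.h, boot/boot.h, boot/cpucheck.c) gain volatile on their inline asm. The reason, in short: without volatile GCC is free to delete an asm whose outputs appear unused and to reorder it relative to surrounding code, which is fatal for statements such as the wrmsr above that exist only for their side effects. A minimal illustration (ordinary C, not taken from the kernel):

    /* Minimal sketch of the asm vs. asm volatile distinction.  Without
     * volatile, a caller that ignores the return value lets the compiler
     * drop the rdtsc entirely; with volatile the instruction is always
     * emitted and kept in program order. */
    static inline unsigned long long rdtsc_sketch(void)
    {
            unsigned int lo, hi;

            asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
            return ((unsigned long long)hi << 32) | lo;
    }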
*/ +#include + .extern crypto_ft_tab .extern crypto_it_tab .extern crypto_fl_tab @@ -77,7 +79,7 @@ movl r6 ## E,4(r9); \ movl r7 ## E,8(r9); \ movl r8 ## E,12(r9); \ - ret; \ + pax_ret FUNC; \ ENDPROC(FUNC); #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \ diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index a916c4a61..7e7b7cf34 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -64,6 +64,7 @@ #include #include +#include #define CONCAT(a,b) a##b #define VMOVDQ vmovdqu @@ -436,7 +437,7 @@ ddq_add_8: /* main body of aes ctr load */ -.macro do_aes_ctrmain key_len +.macro do_aes_ctrmain func key_len cmp $16, num_bytes jb .Ldo_return2\key_len @@ -537,7 +538,7 @@ ddq_add_8: /* return updated IV */ vpshufb xbyteswap, xcounter, xcounter vmovdqu xcounter, (p_iv) - ret + pax_ret \func .endm /* @@ -549,7 +550,7 @@ ddq_add_8: */ ENTRY(aes_ctr_enc_128_avx_by8) /* call the aes main loop */ - do_aes_ctrmain KEY_128 + do_aes_ctrmain aes_ctr_enc_128_avx_by8 KEY_128 ENDPROC(aes_ctr_enc_128_avx_by8) @@ -562,7 +563,7 @@ ENDPROC(aes_ctr_enc_128_avx_by8) */ ENTRY(aes_ctr_enc_192_avx_by8) /* call the aes main loop */ - do_aes_ctrmain KEY_192 + do_aes_ctrmain aes_ctr_enc_192_avx_by8 KEY_192 ENDPROC(aes_ctr_enc_192_avx_by8) @@ -575,6 +576,6 @@ ENDPROC(aes_ctr_enc_192_avx_by8) */ ENTRY(aes_ctr_enc_256_avx_by8) /* call the aes main loop */ - do_aes_ctrmain KEY_256 + do_aes_ctrmain aes_ctr_enc_256_avx_by8 KEY_256 ENDPROC(aes_ctr_enc_256_avx_by8) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 383a6f84a..dc7f45daf 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -32,6 +32,7 @@ #include #include #include +#include /* * The following macros are used to move an (un)aligned 16 byte value to/from @@ -218,7 +219,7 @@ enc: .octa 0x2 * num_initial_blocks = b mod 4 * encrypt the initial num_initial_blocks blocks and apply ghash on * the ciphertext -* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers +* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers * are clobbered * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified */ @@ -228,8 +229,8 @@ enc: .octa 0x2 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 mov arg7, %r10 # %r10 = AAD - mov arg8, %r12 # %r12 = aadLen - mov %r12, %r11 + mov arg8, %r15 # %r15 = aadLen + mov %r15, %r11 pxor %xmm\i, %xmm\i _get_AAD_loop\num_initial_blocks\operation: @@ -238,17 +239,17 @@ _get_AAD_loop\num_initial_blocks\operation: psrldq $4, %xmm\i pxor \TMP1, %xmm\i add $4, %r10 - sub $4, %r12 + sub $4, %r15 jne _get_AAD_loop\num_initial_blocks\operation cmp $16, %r11 je _get_AAD_loop2_done\num_initial_blocks\operation - mov $16, %r12 + mov $16, %r15 _get_AAD_loop2\num_initial_blocks\operation: psrldq $4, %xmm\i - sub $4, %r12 - cmp %r11, %r12 + sub $4, %r15 + cmp %r11, %r15 jne _get_AAD_loop2\num_initial_blocks\operation _get_AAD_loop2_done\num_initial_blocks\operation: @@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation: * num_initial_blocks = b mod 4 * encrypt the initial num_initial_blocks blocks and apply ghash on * the ciphertext -* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers +* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers * are clobbered * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified */ @@ -453,8 +454,8 @@ 
_initial_blocks_done\num_initial_blocks\operation: XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 mov arg7, %r10 # %r10 = AAD - mov arg8, %r12 # %r12 = aadLen - mov %r12, %r11 + mov arg8, %r15 # %r15 = aadLen + mov %r15, %r11 pxor %xmm\i, %xmm\i _get_AAD_loop\num_initial_blocks\operation: movd (%r10), \TMP1 @@ -462,15 +463,15 @@ _get_AAD_loop\num_initial_blocks\operation: psrldq $4, %xmm\i pxor \TMP1, %xmm\i add $4, %r10 - sub $4, %r12 + sub $4, %r15 jne _get_AAD_loop\num_initial_blocks\operation cmp $16, %r11 je _get_AAD_loop2_done\num_initial_blocks\operation - mov $16, %r12 + mov $16, %r15 _get_AAD_loop2\num_initial_blocks\operation: psrldq $4, %xmm\i - sub $4, %r12 - cmp %r11, %r12 + sub $4, %r15 + cmp %r11, %r15 jne _get_AAD_loop2\num_initial_blocks\operation _get_AAD_loop2_done\num_initial_blocks\operation: PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data @@ -1280,8 +1281,8 @@ _esb_loop_\@: * poly = x^128 + x^127 + x^126 + x^121 + 1 * *****************************************************************************/ -ENTRY(aesni_gcm_dec) - push %r12 +RAP_ENTRY(aesni_gcm_dec) + push %r15 push %r13 push %r14 mov %rsp, %r14 @@ -1291,8 +1292,8 @@ ENTRY(aesni_gcm_dec) */ sub $VARIABLE_OFFSET, %rsp and $~63, %rsp # align rsp to 64 bytes - mov %arg6, %r12 - movdqu (%r12), %xmm13 # %xmm13 = HashKey + mov %arg6, %r15 + movdqu (%r15), %xmm13 # %xmm13 = HashKey movdqa SHUF_MASK(%rip), %xmm2 PSHUFB_XMM %xmm2, %xmm13 @@ -1320,10 +1321,10 @@ ENTRY(aesni_gcm_dec) movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly) mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext and $-16, %r13 # %r13 = %r13 - (%r13 mod 16) - mov %r13, %r12 - and $(3<<4), %r12 + mov %r13, %r15 + and $(3<<4), %r15 jz _initial_num_blocks_is_0_decrypt - cmp $(2<<4), %r12 + cmp $(2<<4), %r15 jb _initial_num_blocks_is_1_decrypt je _initial_num_blocks_is_2_decrypt _initial_num_blocks_is_3_decrypt: @@ -1373,16 +1374,16 @@ _zero_cipher_left_decrypt: sub $16, %r11 add %r13, %r11 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 + lea SHIFT_MASK+16(%rip), %r15 + sub %r13, %r15 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes # (%r13 is the number of bytes in plaintext mod 16) - movdqu (%r12), %xmm2 # get the appropriate shuffle mask + movdqu (%r15), %xmm2 # get the appropriate shuffle mask PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes movdqa %xmm1, %xmm2 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + movdqu ALL_F-SHIFT_MASK(%r15), %xmm1 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 pand %xmm1, %xmm2 @@ -1411,9 +1412,9 @@ _less_than_8_bytes_left_decrypt: sub $1, %r13 jne _less_than_8_bytes_left_decrypt _multiple_of_16_bytes_decrypt: - mov arg8, %r12 # %r13 = aadLen (number of bytes) - shl $3, %r12 # convert into number of bits - movd %r12d, %xmm15 # len(A) in %xmm15 + mov arg8, %r15 # %r13 = aadLen (number of bytes) + shl $3, %r15 # convert into number of bits + movd %r15d, %xmm15 # len(A) in %xmm15 shl $3, %arg4 # len(C) in bits (*128) MOVQ_R64_XMM %arg4, %xmm1 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 @@ -1452,8 +1453,8 @@ _return_T_done_decrypt: mov %r14, %rsp pop %r14 pop %r13 - pop %r12 - ret + pop %r15 + pax_ret aesni_gcm_dec ENDPROC(aesni_gcm_dec) @@ -1540,8 +1541,8 @@ ENDPROC(aesni_gcm_dec) * * poly = x^128 + x^127 + x^126 + x^121 + 1 
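From the aesni-intel_asm.S hunks above onwards, plain call/ret sites become pax_direct_call/pax_ret, ENTRY becomes RAP_ENTRY, and scratch uses of %r12 are rewritten to %r15. These macros come from PaX's RAP instrumentation (the header providing them is elided in this flattened diff), and the register move suggests %r12 is reserved by that instrumentation. Purely as a hedged mental model, not the macros' real expansion, the scheme pairs each control transfer with a per-function-type hash check:

    /* Hedged, conceptual model of RAP-style checking, not the actual macro
     * expansion.  Each function prototype maps to a hash; the hash sits next
     * to the function (modelled here as a struct field, the real scheme
     * emits it in front of the code) and is verified before an indirect call. */
    typedef void (*cipher_fn)(void *ctx, unsigned char *dst, const unsigned char *src);

    struct rap_fn {
            unsigned long type_hash;    /* hash of the callee's prototype */
            cipher_fn body;
    };

    static void rap_indirect_call(const struct rap_fn *f, unsigned long expected,
                                  void *ctx, unsigned char *dst, const unsigned char *src)
    {
            if (f->type_hash != expected)
                    __builtin_trap();   /* prototype mismatch: abort the transfer */
            f->body(ctx, dst, src);
    }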
***************************************************************************/ -ENTRY(aesni_gcm_enc) - push %r12 +RAP_ENTRY(aesni_gcm_enc) + push %r15 push %r13 push %r14 mov %rsp, %r14 @@ -1551,8 +1552,8 @@ ENTRY(aesni_gcm_enc) # sub $VARIABLE_OFFSET, %rsp and $~63, %rsp - mov %arg6, %r12 - movdqu (%r12), %xmm13 + mov %arg6, %r15 + movdqu (%r15), %xmm13 movdqa SHUF_MASK(%rip), %xmm2 PSHUFB_XMM %xmm2, %xmm13 @@ -1576,13 +1577,13 @@ ENTRY(aesni_gcm_enc) movdqa %xmm13, HashKey(%rsp) mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly) and $-16, %r13 - mov %r13, %r12 + mov %r13, %r15 # Encrypt first few blocks - and $(3<<4), %r12 + and $(3<<4), %r15 jz _initial_num_blocks_is_0_encrypt - cmp $(2<<4), %r12 + cmp $(2<<4), %r15 jb _initial_num_blocks_is_1_encrypt je _initial_num_blocks_is_2_encrypt _initial_num_blocks_is_3_encrypt: @@ -1635,14 +1636,14 @@ _zero_cipher_left_encrypt: sub $16, %r11 add %r13, %r11 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 + lea SHIFT_MASK+16(%rip), %r15 + sub %r13, %r15 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes # (%r13 is the number of bytes in plaintext mod 16) - movdqu (%r12), %xmm2 # get the appropriate shuffle mask + movdqu (%r15), %xmm2 # get the appropriate shuffle mask PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + movdqu ALL_F-SHIFT_MASK(%r15), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm0 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 movdqa SHUF_MASK(%rip), %xmm10 @@ -1675,9 +1676,9 @@ _less_than_8_bytes_left_encrypt: sub $1, %r13 jne _less_than_8_bytes_left_encrypt _multiple_of_16_bytes_encrypt: - mov arg8, %r12 # %r12 = addLen (number of bytes) - shl $3, %r12 - movd %r12d, %xmm15 # len(A) in %xmm15 + mov arg8, %r15 # %r15 = addLen (number of bytes) + shl $3, %r15 + movd %r15d, %xmm15 # len(A) in %xmm15 shl $3, %arg4 # len(C) in bits (*128) MOVQ_R64_XMM %arg4, %xmm1 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 @@ -1716,8 +1717,8 @@ _return_T_done_encrypt: mov %r14, %rsp pop %r14 pop %r13 - pop %r12 - ret + pop %r15 + pax_ret aesni_gcm_enc ENDPROC(aesni_gcm_enc) #endif @@ -1734,7 +1735,7 @@ _key_expansion_256a: pxor %xmm1, %xmm0 movaps %xmm0, (TKEYP) add $0x10, TKEYP - ret + pax_ret _key_expansion_128 ENDPROC(_key_expansion_128) ENDPROC(_key_expansion_256a) @@ -1760,7 +1761,7 @@ _key_expansion_192a: shufps $0b01001110, %xmm2, %xmm1 movaps %xmm1, 0x10(TKEYP) add $0x20, TKEYP - ret + pax_ret _key_expansion_192a ENDPROC(_key_expansion_192a) .align 4 @@ -1780,7 +1781,7 @@ _key_expansion_192b: movaps %xmm0, (TKEYP) add $0x10, TKEYP - ret + pax_ret _key_expansion_192b ENDPROC(_key_expansion_192b) .align 4 @@ -1793,7 +1794,7 @@ _key_expansion_256b: pxor %xmm1, %xmm2 movaps %xmm2, (TKEYP) add $0x10, TKEYP - ret + pax_ret _key_expansion_256b ENDPROC(_key_expansion_256b) /* @@ -1820,72 +1821,72 @@ ENTRY(aesni_set_key) movaps %xmm2, (TKEYP) add $0x10, TKEYP AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 - call _key_expansion_256a + pax_direct_call _key_expansion_256a AESKEYGENASSIST 0x1 %xmm0 %xmm1 - call _key_expansion_256b + pax_direct_call _key_expansion_256b AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 - call _key_expansion_256a + pax_direct_call _key_expansion_256a AESKEYGENASSIST 0x2 %xmm0 %xmm1 - call _key_expansion_256b + pax_direct_call _key_expansion_256b AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3 - call _key_expansion_256a + pax_direct_call 
_key_expansion_256a AESKEYGENASSIST 0x4 %xmm0 %xmm1 - call _key_expansion_256b + pax_direct_call _key_expansion_256b AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4 - call _key_expansion_256a + pax_direct_call _key_expansion_256a AESKEYGENASSIST 0x8 %xmm0 %xmm1 - call _key_expansion_256b + pax_direct_call _key_expansion_256b AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5 - call _key_expansion_256a + pax_direct_call _key_expansion_256a AESKEYGENASSIST 0x10 %xmm0 %xmm1 - call _key_expansion_256b + pax_direct_call _key_expansion_256b AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6 - call _key_expansion_256a + pax_direct_call _key_expansion_256a AESKEYGENASSIST 0x20 %xmm0 %xmm1 - call _key_expansion_256b + pax_direct_call _key_expansion_256b AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7 - call _key_expansion_256a + pax_direct_call _key_expansion_256a jmp .Ldec_key .Lenc_key192: movq 0x10(UKEYP), %xmm2 # other user key AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 - call _key_expansion_192a + pax_direct_call _key_expansion_192a AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 - call _key_expansion_192b + pax_direct_call _key_expansion_192b AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3 - call _key_expansion_192a + pax_direct_call _key_expansion_192a AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4 - call _key_expansion_192b + pax_direct_call _key_expansion_192b AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5 - call _key_expansion_192a + pax_direct_call _key_expansion_192a AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6 - call _key_expansion_192b + pax_direct_call _key_expansion_192b AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7 - call _key_expansion_192a + pax_direct_call _key_expansion_192a AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8 - call _key_expansion_192b + pax_direct_call _key_expansion_192b jmp .Ldec_key .Lenc_key128: AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x10 %xmm0 %xmm1 # round 5 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9 - call _key_expansion_128 + pax_direct_call _key_expansion_128 AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10 - call _key_expansion_128 + pax_direct_call _key_expansion_128 .Ldec_key: sub $0x10, TKEYP movaps (KEYP), %xmm0 @@ -1908,13 +1909,13 @@ ENTRY(aesni_set_key) popl KEYP #endif FRAME_END - ret + pax_ret aesni_set_key ENDPROC(aesni_set_key) /* * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ -ENTRY(aesni_enc) +RAP_ENTRY(aesni_enc) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP @@ -1925,14 +1926,14 @@ ENTRY(aesni_enc) #endif movl 480(KEYP), KLEN # key length movups (INP), STATE # input - call _aesni_enc1 + pax_direct_call _aesni_enc1 movups STATE, (OUTP) # output #ifndef __x86_64__ popl KLEN popl KEYP #endif FRAME_END - ret + pax_ret aesni_enc ENDPROC(aesni_enc) /* @@ -1990,7 +1991,7 @@ _aesni_enc1: AESENC KEY STATE movaps 0x70(TKEYP), KEY 
AESENCLAST KEY STATE - ret + pax_ret _aesni_enc1 ENDPROC(_aesni_enc1) /* @@ -2099,13 +2100,13 @@ _aesni_enc4: AESENCLAST KEY STATE2 AESENCLAST KEY STATE3 AESENCLAST KEY STATE4 - ret + pax_ret _aesni_enc4 ENDPROC(_aesni_enc4) /* * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ -ENTRY(aesni_dec) +RAP_ENTRY(aesni_dec) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP @@ -2117,14 +2118,14 @@ ENTRY(aesni_dec) mov 480(KEYP), KLEN # key length add $240, KEYP movups (INP), STATE # input - call _aesni_dec1 + pax_direct_call _aesni_dec1 movups STATE, (OUTP) #output #ifndef __x86_64__ popl KLEN popl KEYP #endif FRAME_END - ret + pax_ret aesni_dec ENDPROC(aesni_dec) /* @@ -2182,7 +2183,7 @@ _aesni_dec1: AESDEC KEY STATE movaps 0x70(TKEYP), KEY AESDECLAST KEY STATE - ret + pax_ret _aesni_dec1 ENDPROC(_aesni_dec1) /* @@ -2291,7 +2292,7 @@ _aesni_dec4: AESDECLAST KEY STATE2 AESDECLAST KEY STATE3 AESDECLAST KEY STATE4 - ret + pax_ret _aesni_dec4 ENDPROC(_aesni_dec4) /* @@ -2322,7 +2323,7 @@ ENTRY(aesni_ecb_enc) movups 0x10(INP), STATE2 movups 0x20(INP), STATE3 movups 0x30(INP), STATE4 - call _aesni_enc4 + pax_direct_call _aesni_enc4 movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) @@ -2337,7 +2338,7 @@ ENTRY(aesni_ecb_enc) .align 4 .Lecb_enc_loop1: movups (INP), STATE1 - call _aesni_enc1 + pax_direct_call _aesni_enc1 movups STATE1, (OUTP) sub $16, LEN add $16, INP @@ -2351,7 +2352,7 @@ ENTRY(aesni_ecb_enc) popl LEN #endif FRAME_END - ret + pax_ret aesni_ecb_enc ENDPROC(aesni_ecb_enc) /* @@ -2383,7 +2384,7 @@ ENTRY(aesni_ecb_dec) movups 0x10(INP), STATE2 movups 0x20(INP), STATE3 movups 0x30(INP), STATE4 - call _aesni_dec4 + pax_direct_call _aesni_dec4 movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) @@ -2398,7 +2399,7 @@ ENTRY(aesni_ecb_dec) .align 4 .Lecb_dec_loop1: movups (INP), STATE1 - call _aesni_dec1 + pax_direct_call _aesni_dec1 movups STATE1, (OUTP) sub $16, LEN add $16, INP @@ -2412,7 +2413,7 @@ ENTRY(aesni_ecb_dec) popl LEN #endif FRAME_END - ret + pax_ret aesni_ecb_dec ENDPROC(aesni_ecb_dec) /* @@ -2440,7 +2441,7 @@ ENTRY(aesni_cbc_enc) .Lcbc_enc_loop: movups (INP), IN # load input pxor IN, STATE - call _aesni_enc1 + pax_direct_call _aesni_enc1 movups STATE, (OUTP) # store output sub $16, LEN add $16, INP @@ -2456,7 +2457,7 @@ ENTRY(aesni_cbc_enc) popl IVP #endif FRAME_END - ret + pax_ret aesni_cbc_enc ENDPROC(aesni_cbc_enc) /* @@ -2500,7 +2501,7 @@ ENTRY(aesni_cbc_dec) movups 0x30(INP), IN2 movaps IN2, STATE4 #endif - call _aesni_dec4 + pax_direct_call _aesni_dec4 pxor IV, STATE1 #ifdef __x86_64__ pxor IN1, STATE2 @@ -2530,7 +2531,7 @@ ENTRY(aesni_cbc_dec) .Lcbc_dec_loop1: movups (INP), IN movaps IN, STATE - call _aesni_dec1 + pax_direct_call _aesni_dec1 pxor IV, STATE movups STATE, (OUTP) movaps IN, IV @@ -2549,7 +2550,7 @@ ENTRY(aesni_cbc_dec) popl IVP #endif FRAME_END - ret + pax_ret aesni_cbc_dec ENDPROC(aesni_cbc_dec) #ifdef __x86_64__ @@ -2578,7 +2579,7 @@ _aesni_inc_init: mov $1, TCTR_LOW MOVQ_R64_XMM TCTR_LOW INC MOVQ_R64_XMM CTR TCTR_LOW - ret + pax_ret _aesni_inc_init ENDPROC(_aesni_inc_init) /* @@ -2607,37 +2608,37 @@ _aesni_inc: .Linc_low: movaps CTR, IV PSHUFB_XMM BSWAP_MASK IV - ret + pax_ret _aesni_inc ENDPROC(_aesni_inc) /* * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ -ENTRY(aesni_ctr_enc) +RAP_ENTRY(aesni_ctr_enc) FRAME_BEGIN cmp $16, LEN jb .Lctr_enc_just_ret mov 480(KEYP), KLEN movups (IVP), IV - call _aesni_inc_init + pax_direct_call _aesni_inc_init cmp $64, 
LEN jb .Lctr_enc_loop1 .align 4 .Lctr_enc_loop4: movaps IV, STATE1 - call _aesni_inc + pax_direct_call _aesni_inc movups (INP), IN1 movaps IV, STATE2 - call _aesni_inc + pax_direct_call _aesni_inc movups 0x10(INP), IN2 movaps IV, STATE3 - call _aesni_inc + pax_direct_call _aesni_inc movups 0x20(INP), IN3 movaps IV, STATE4 - call _aesni_inc + pax_direct_call _aesni_inc movups 0x30(INP), IN4 - call _aesni_enc4 + pax_direct_call _aesni_enc4 pxor IN1, STATE1 movups STATE1, (OUTP) pxor IN2, STATE2 @@ -2656,9 +2657,9 @@ ENTRY(aesni_ctr_enc) .align 4 .Lctr_enc_loop1: movaps IV, STATE - call _aesni_inc + pax_direct_call _aesni_inc movups (INP), IN - call _aesni_enc1 + pax_direct_call _aesni_enc1 pxor IN, STATE movups STATE, (OUTP) sub $16, LEN @@ -2670,7 +2671,7 @@ ENTRY(aesni_ctr_enc) movups IV, (IVP) .Lctr_enc_just_ret: FRAME_END - ret + pax_ret aesni_ctr_enc ENDPROC(aesni_ctr_enc) /* @@ -2734,7 +2735,7 @@ ENTRY(aesni_xts_crypt8) pxor INC, STATE4 movdqu IV, 0x30(OUTP) - call *%r11 + pax_indirect_call "%r11", _aesni_enc4 movdqu 0x00(OUTP), INC pxor INC, STATE1 @@ -2779,7 +2780,7 @@ ENTRY(aesni_xts_crypt8) _aesni_gf128mul_x_ble() movups IV, (IVP) - call *%r11 + pax_indirect_call "%r11", _aesni_enc4 movdqu 0x40(OUTP), INC pxor INC, STATE1 @@ -2798,7 +2799,7 @@ ENTRY(aesni_xts_crypt8) movdqu STATE4, 0x70(OUTP) FRAME_END - ret + pax_ret aesni_xts_crypt8 ENDPROC(aesni_xts_crypt8) #endif diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 522ab68d1..782ae42a2 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -121,6 +121,7 @@ #include #include +#include .data .align 16 @@ -1486,7 +1487,7 @@ ENTRY(aesni_gcm_precomp_avx_gen2) pop %r14 pop %r13 pop %r12 - ret + pax_ret aesni_gcm_precomp_avx_gen2 ENDPROC(aesni_gcm_precomp_avx_gen2) ############################################################################### @@ -1507,7 +1508,7 @@ ENDPROC(aesni_gcm_precomp_avx_gen2) ############################################################################### ENTRY(aesni_gcm_enc_avx_gen2) GCM_ENC_DEC_AVX ENC - ret + pax_ret aesni_gcm_enc_avx_gen2 ENDPROC(aesni_gcm_enc_avx_gen2) ############################################################################### @@ -1528,7 +1529,7 @@ ENDPROC(aesni_gcm_enc_avx_gen2) ############################################################################### ENTRY(aesni_gcm_dec_avx_gen2) GCM_ENC_DEC_AVX DEC - ret + pax_ret aesni_gcm_dec_avx_gen2 ENDPROC(aesni_gcm_dec_avx_gen2) #endif /* CONFIG_AS_AVX */ @@ -2762,7 +2763,7 @@ ENTRY(aesni_gcm_precomp_avx_gen4) pop %r14 pop %r13 pop %r12 - ret + pax_ret aesni_gcm_precomp_avx_gen4 ENDPROC(aesni_gcm_precomp_avx_gen4) @@ -2784,7 +2785,7 @@ ENDPROC(aesni_gcm_precomp_avx_gen4) ############################################################################### ENTRY(aesni_gcm_enc_avx_gen4) GCM_ENC_DEC_AVX2 ENC - ret + pax_ret aesni_gcm_enc_avx_gen4 ENDPROC(aesni_gcm_enc_avx_gen4) ############################################################################### @@ -2805,7 +2806,7 @@ ENDPROC(aesni_gcm_enc_avx_gen4) ############################################################################### ENTRY(aesni_gcm_dec_avx_gen4) GCM_ENC_DEC_AVX2 DEC - ret + pax_ret aesni_gcm_dec_avx_gen4 ENDPROC(aesni_gcm_dec_avx_gen4) #endif /* CONFIG_AS_AVX2 */ diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index aa8b0672f..f9da224d0 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -71,9 +71,9 @@ 
struct aesni_xts_ctx { asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len); -asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, +asmlinkage void aesni_enc(void *ctx, u8 *out, const u8 *in); -asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out, +asmlinkage void aesni_dec(void *ctx, u8 *out, const u8 *in); asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len); @@ -83,6 +83,15 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); +int _key_expansion_128(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash; +int _key_expansion_192a(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash; +int _key_expansion_192b(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash; +int _key_expansion_256a(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash; +int _key_expansion_256b(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash; +void _aesni_enc1(void *ctx, u8 *out, const u8 *in) __rap_hash; +void _aesni_enc4(void *ctx, u8 *out, const u8 *in) __rap_hash; +void _aesni_dec1(void *ctx, u8 *out, const u8 *in) __rap_hash; +void _aesni_dec4(void *ctx, u8 *out, const u8 *in) __rap_hash; int crypto_fpu_init(void); void crypto_fpu_exit(void); @@ -96,6 +105,8 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); +void _aesni_inc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) __rap_hash; +void _aesni_inc_init(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) __rap_hash; asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, bool enc, u8 *iv); diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S index 246c67006..d4e1aa5df 100644 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S @@ -21,6 +21,7 @@ */ #include +#include .file "blowfish-x86_64-asm.S" .text @@ -149,13 +150,13 @@ ENTRY(__blowfish_enc_blk) jnz .L__enc_xor; write_block(); - ret; + pax_ret __blowfish_enc_blk; .L__enc_xor: xor_block(); - ret; + pax_ret __blowfish_enc_blk; ENDPROC(__blowfish_enc_blk) -ENTRY(blowfish_dec_blk) +RAP_ENTRY(blowfish_dec_blk) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -183,7 +184,7 @@ ENTRY(blowfish_dec_blk) movq %r11, %rbp; - ret; + pax_ret blowfish_dec_blk; ENDPROC(blowfish_dec_blk) /********************************************************************** @@ -334,17 +335,17 @@ ENTRY(__blowfish_enc_blk_4way) popq %rbx; popq %rbp; - ret; + pax_ret __blowfish_enc_blk_4way; .L__enc_xor4: xor_block4(); popq %rbx; popq %rbp; - ret; + pax_ret __blowfish_enc_blk_4way; ENDPROC(__blowfish_enc_blk_4way) -ENTRY(blowfish_dec_blk_4way) +RAP_ENTRY(blowfish_dec_blk_4way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -375,5 +376,5 @@ ENTRY(blowfish_dec_blk_4way) popq %rbx; popq %rbp; - ret; + pax_ret blowfish_dec_blk_4way; ENDPROC(blowfish_dec_blk_4way) diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index aa9e8bd16..7e68f75ff 100644 --- 
a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -17,6 +17,7 @@ #include #include +#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -192,7 +193,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rcx, (%r9)); - ret; + pax_ret roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 @@ -200,7 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, %rax, (%r9)); - ret; + pax_ret roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* @@ -212,7 +213,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ leaq (key_table + (i) * 8)(CTX), %r9; \ - call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \ + pax_direct_call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \ \ vmovdqu x4, 0 * 16(mem_cd); \ vmovdqu x5, 1 * 16(mem_cd); \ @@ -224,7 +225,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) vmovdqu x3, 7 * 16(mem_cd); \ \ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \ - call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \ + pax_direct_call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); @@ -783,7 +784,7 @@ __camellia_enc_blk16: %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); FRAME_END - ret; + pax_ret camellia_xts_enc_16way; .align 8 .Lenc_max32: @@ -870,7 +871,7 @@ __camellia_dec_blk16: %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); FRAME_END - ret; + pax_ret camellia_xts_dec_16way; .align 8 .Ldec_max32: @@ -889,7 +890,7 @@ __camellia_dec_blk16: jmp .Ldec_max24; ENDPROC(__camellia_dec_blk16) -ENTRY(camellia_ecb_enc_16way) +RAP_ENTRY(camellia_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -904,17 +905,17 @@ ENTRY(camellia_ecb_enc_16way) /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; - call __camellia_enc_blk16; + pax_direct_call __camellia_enc_blk16; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END - ret; + pax_ret camellia_ecb_enc_16way; ENDPROC(camellia_ecb_enc_16way) -ENTRY(camellia_ecb_dec_16way) +RAP_ENTRY(camellia_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -934,17 +935,17 @@ ENTRY(camellia_ecb_dec_16way) /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; - call __camellia_dec_blk16; + pax_direct_call __camellia_dec_blk16; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END - ret; + pax_ret camellia_ecb_dec_16way; ENDPROC(camellia_ecb_dec_16way) -ENTRY(camellia_cbc_dec_16way) +RAP_ENTRY(camellia_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -968,7 +969,7 @@ ENTRY(camellia_cbc_dec_16way) subq $(16 * 16), %rsp; movq %rsp, %rax; - call __camellia_dec_blk16; + 
pax_direct_call __camellia_dec_blk16; addq $(16 * 16), %rsp; @@ -992,7 +993,7 @@ ENTRY(camellia_cbc_dec_16way) %xmm8, %rsi); FRAME_END - ret; + pax_ret camellia_cbc_dec_16way; ENDPROC(camellia_cbc_dec_16way) #define inc_le128(x, minus_one, tmp) \ @@ -1001,7 +1002,7 @@ ENDPROC(camellia_cbc_dec_16way) vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; -ENTRY(camellia_ctr_16way) +RAP_ENTRY(camellia_ctr_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1080,7 +1081,7 @@ ENTRY(camellia_ctr_16way) vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; - call __camellia_enc_blk16; + pax_direct_call __camellia_enc_blk16; addq $(16 * 16), %rsp; @@ -1105,7 +1106,7 @@ ENTRY(camellia_ctr_16way) %xmm8, %rsi); FRAME_END - ret; + pax_ret camellia_ctr_16way; ENDPROC(camellia_ctr_16way) #define gf128mul_x_ble(iv, mask, tmp) \ @@ -1224,7 +1225,7 @@ camellia_xts_crypt_16way: vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; - call *%r9; + pax_indirect_call "%r9", camellia_xts_enc_16way; addq $(16 * 16), %rsp; @@ -1249,10 +1250,10 @@ camellia_xts_crypt_16way: %xmm8, %rsi); FRAME_END - ret; + pax_ret camellia_xts_crypt_16way; ENDPROC(camellia_xts_crypt_16way) -ENTRY(camellia_xts_enc_16way) +RAP_ENTRY(camellia_xts_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1266,7 +1267,7 @@ ENTRY(camellia_xts_enc_16way) jmp camellia_xts_crypt_16way; ENDPROC(camellia_xts_enc_16way) -ENTRY(camellia_xts_dec_16way) +RAP_ENTRY(camellia_xts_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 16186c186..a75145225 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -12,6 +12,7 @@ #include #include +#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -231,7 +232,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rcx, (%r9)); - ret; + pax_ret roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 @@ -239,7 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, %rax, (%r9)); - ret; + pax_ret roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* @@ -251,7 +252,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ leaq (key_table + (i) * 8)(CTX), %r9; \ - call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \ + pax_direct_call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \ \ vmovdqu x0, 4 * 32(mem_cd); \ vmovdqu x1, 5 * 32(mem_cd); \ @@ -263,7 +264,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) vmovdqu x7, 3 * 32(mem_cd); \ \ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \ - call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \ + pax_direct_call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); @@ -823,7 +824,7 @@ __camellia_enc_blk32: %ymm15, (key_table)(CTX, %r8, 8), 
(%rax), 1 * 32(%rax)); FRAME_END - ret; + pax_ret __camellia_enc_blk32; .align 8 .Lenc_max32: @@ -910,7 +911,7 @@ __camellia_dec_blk32: %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); FRAME_END - ret; + pax_ret __camellia_dec_blk32; .align 8 .Ldec_max32: @@ -929,7 +930,7 @@ __camellia_dec_blk32: jmp .Ldec_max24; ENDPROC(__camellia_dec_blk32) -ENTRY(camellia_ecb_enc_32way) +RAP_ENTRY(camellia_ecb_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -946,7 +947,7 @@ ENTRY(camellia_ecb_enc_32way) /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; - call __camellia_enc_blk32; + pax_direct_call __camellia_enc_blk32; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, @@ -955,10 +956,10 @@ ENTRY(camellia_ecb_enc_32way) vzeroupper; FRAME_END - ret; + pax_ret camellia_ecb_enc_32way; ENDPROC(camellia_ecb_enc_32way) -ENTRY(camellia_ecb_dec_32way) +RAP_ENTRY(camellia_ecb_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -980,7 +981,7 @@ ENTRY(camellia_ecb_dec_32way) /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; - call __camellia_dec_blk32; + pax_direct_call __camellia_dec_blk32; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, @@ -989,10 +990,10 @@ ENTRY(camellia_ecb_dec_32way) vzeroupper; FRAME_END - ret; + pax_ret camellia_ecb_dec_32way; ENDPROC(camellia_ecb_dec_32way) -ENTRY(camellia_cbc_dec_32way) +RAP_ENTRY(camellia_cbc_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1028,7 +1029,7 @@ ENTRY(camellia_cbc_dec_32way) movq %rsp, %rax; .Lcbc_dec_continue: - call __camellia_dec_blk32; + pax_direct_call __camellia_dec_blk32; vmovdqu %ymm7, (%rax); vpxor %ymm7, %ymm7, %ymm7; @@ -1057,7 +1058,7 @@ ENTRY(camellia_cbc_dec_32way) vzeroupper; FRAME_END - ret; + pax_ret camellia_cbc_dec_32way; ENDPROC(camellia_cbc_dec_32way) #define inc_le128(x, minus_one, tmp) \ @@ -1074,7 +1075,7 @@ ENDPROC(camellia_cbc_dec_32way) vpslldq $8, tmp1, tmp1; \ vpsubq tmp1, x, x; -ENTRY(camellia_ctr_32way) +RAP_ENTRY(camellia_ctr_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1170,7 +1171,7 @@ ENTRY(camellia_ctr_32way) vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; - call __camellia_enc_blk32; + pax_direct_call __camellia_enc_blk32; movq %r10, %rsp; @@ -1197,7 +1198,7 @@ ENTRY(camellia_ctr_32way) vzeroupper; FRAME_END - ret; + pax_ret camellia_ctr_32way; ENDPROC(camellia_ctr_32way) #define gf128mul_x_ble(iv, mask, tmp) \ @@ -1337,7 +1338,7 @@ camellia_xts_crypt_32way: vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; - call *%r9; + pax_indirect_call "%r9", __camellia_enc_blk32; addq $(16 * 32), %rsp; @@ -1364,10 +1365,10 @@ camellia_xts_crypt_32way: vzeroupper; FRAME_END - ret; + pax_ret camellia_xts_crypt_32way; ENDPROC(camellia_xts_crypt_32way) -ENTRY(camellia_xts_enc_32way) +RAP_ENTRY(camellia_xts_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1382,7 +1383,7 @@ ENTRY(camellia_xts_enc_32way) jmp camellia_xts_crypt_32way; ENDPROC(camellia_xts_enc_32way) -ENTRY(camellia_xts_dec_32way) +RAP_ENTRY(camellia_xts_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S index 310319c60..4fa639ab5 100644 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S +++ 
b/arch/x86/crypto/camellia-x86_64-asm_64.S @@ -21,6 +21,7 @@ */ #include +#include .file "camellia-x86_64-asm_64.S" .text @@ -228,16 +229,16 @@ ENTRY(__camellia_enc_blk) enc_outunpack(mov, RT1); movq RRBP, %rbp; - ret; + pax_ret __camellia_enc_blk; .L__enc_xor: enc_outunpack(xor, RT1); movq RRBP, %rbp; - ret; + pax_ret __camellia_enc_blk; ENDPROC(__camellia_enc_blk) -ENTRY(camellia_dec_blk) +RAP_ENTRY(camellia_dec_blk) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -272,7 +273,7 @@ ENTRY(camellia_dec_blk) dec_outunpack(); movq RRBP, %rbp; - ret; + pax_ret camellia_dec_blk; ENDPROC(camellia_dec_blk) /********************************************************************** @@ -463,17 +464,17 @@ ENTRY(__camellia_enc_blk_2way) movq RRBP, %rbp; popq %rbx; - ret; + pax_ret __camellia_enc_blk_2way; .L__enc2_xor: enc_outunpack2(xor, RT2); movq RRBP, %rbp; popq %rbx; - ret; + pax_ret __camellia_enc_blk_2way; ENDPROC(__camellia_enc_blk_2way) -ENTRY(camellia_dec_blk_2way) +RAP_ENTRY(camellia_dec_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -510,5 +511,5 @@ ENTRY(camellia_dec_blk_2way) movq RRBP, %rbp; movq RXOR, %rbx; - ret; + pax_ret camellia_dec_blk_2way; ENDPROC(camellia_dec_blk_2way) diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index 60907c139..3fc99c431 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -27,20 +27,22 @@ #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32 /* 32-way AVX2/AES-NI parallel cipher functions */ -asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_ecb_enc_32way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_ecb_dec_32way(void *ctx, u8 *dst, const u8 *src); +void __camellia_enc_blk32(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void __camellia_dec_blk32(void *ctx, u8 *dst, const u8 *src) __rap_hash; -asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_cbc_dec_32way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_ctr_32way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); -asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_xts_enc_32way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); +asmlinkage void camellia_xts_dec_32way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); static const struct common_glue_ctx camellia_enc = { .num_funcs = 4, diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index d96429da8..18ab2e6cd 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -26,28 +26,28 @@ #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 /* 16-way parallel cipher functions (avx/aes-ni) */ -asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_ecb_enc_16way(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way); -asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_ecb_dec_16way(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way); -asmlinkage void 
camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_cbc_dec_16way(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way); -asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_ctr_16way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); EXPORT_SYMBOL_GPL(camellia_ctr_16way); -asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_xts_enc_16way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); EXPORT_SYMBOL_GPL(camellia_xts_enc_16way); -asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_xts_dec_16way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); EXPORT_SYMBOL_GPL(camellia_xts_dec_16way); void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index aa76cad9d..ffd88081b 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -39,7 +39,7 @@ asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, const u8 *src, bool xor); EXPORT_SYMBOL_GPL(__camellia_enc_blk); -asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_dec_blk(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_dec_blk); @@ -47,7 +47,7 @@ EXPORT_SYMBOL_GPL(camellia_dec_blk); asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, const u8 *src, bool xor); EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way); -asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_dec_blk_2way(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_dec_blk_2way); @@ -1279,8 +1279,10 @@ static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key, &tfm->crt_flags); } -void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) +void camellia_decrypt_cbc_2way(void *ctx, u8 *_dst, const u8 *_src) { + u128 *dst = (u128 *)_dst; + u128 *src = (u128 *)_src; u128 iv = *src; camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S index 14fa1966b..80d99b655 100644 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S @@ -25,6 +25,7 @@ #include #include +#include .file "cast5-avx-x86_64-asm_64.S" @@ -282,7 +283,7 @@ __cast5_enc_blk16: outunpack_blocks(RR3, RL3, RTMP, RX, RKM); outunpack_blocks(RR4, RL4, RTMP, RX, RKM); - ret; + pax_ret __cast5_enc_blk16; ENDPROC(__cast5_enc_blk16) .align 16 @@ -353,14 +354,14 @@ __cast5_dec_blk16: outunpack_blocks(RR3, RL3, RTMP, RX, RKM); outunpack_blocks(RR4, RL4, RTMP, RX, RKM); - ret; + pax_ret __cast5_dec_blk16; .L__skip_dec: vpsrldq $4, RKR, RKR; jmp .L__dec_tail; ENDPROC(__cast5_dec_blk16) -ENTRY(cast5_ecb_enc_16way) +RAP_ENTRY(cast5_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -379,7 +380,7 @@ ENTRY(cast5_ecb_enc_16way) vmovdqu (6*4*4)(%rdx), RL4; vmovdqu (7*4*4)(%rdx), RR4; - call __cast5_enc_blk16; + pax_direct_call __cast5_enc_blk16; vmovdqu RR1, (0*4*4)(%r11); vmovdqu RL1, (1*4*4)(%r11); @@ -391,10 +392,10 @@ ENTRY(cast5_ecb_enc_16way) vmovdqu RL4, (7*4*4)(%r11); FRAME_END - ret; + pax_ret cast5_ecb_enc_16way; ENDPROC(cast5_ecb_enc_16way) -ENTRY(cast5_ecb_dec_16way) +RAP_ENTRY(cast5_ecb_dec_16way) /* input: * %rdi: ctx, CTX 
* %rsi: dst @@ -413,7 +414,7 @@ ENTRY(cast5_ecb_dec_16way) vmovdqu (6*4*4)(%rdx), RL4; vmovdqu (7*4*4)(%rdx), RR4; - call __cast5_dec_blk16; + pax_direct_call __cast5_dec_blk16; vmovdqu RR1, (0*4*4)(%r11); vmovdqu RL1, (1*4*4)(%r11); @@ -425,7 +426,7 @@ ENTRY(cast5_ecb_dec_16way) vmovdqu RL4, (7*4*4)(%r11); FRAME_END - ret; + pax_ret cast5_ecb_dec_16way; ENDPROC(cast5_ecb_dec_16way) ENTRY(cast5_cbc_dec_16way) @@ -436,10 +437,10 @@ ENTRY(cast5_cbc_dec_16way) */ FRAME_BEGIN - pushq %r12; + pushq %r14; movq %rsi, %r11; - movq %rdx, %r12; + movq %rdx, %r14; vmovdqu (0*16)(%rdx), RL1; vmovdqu (1*16)(%rdx), RR1; @@ -450,19 +451,19 @@ ENTRY(cast5_cbc_dec_16way) vmovdqu (6*16)(%rdx), RL4; vmovdqu (7*16)(%rdx), RR4; - call __cast5_dec_blk16; + pax_direct_call __cast5_dec_blk16; /* xor with src */ - vmovq (%r12), RX; + vmovq (%r14), RX; vpshufd $0x4f, RX, RX; vpxor RX, RR1, RR1; - vpxor 0*16+8(%r12), RL1, RL1; - vpxor 1*16+8(%r12), RR2, RR2; - vpxor 2*16+8(%r12), RL2, RL2; - vpxor 3*16+8(%r12), RR3, RR3; - vpxor 4*16+8(%r12), RL3, RL3; - vpxor 5*16+8(%r12), RR4, RR4; - vpxor 6*16+8(%r12), RL4, RL4; + vpxor 0*16+8(%r14), RL1, RL1; + vpxor 1*16+8(%r14), RR2, RR2; + vpxor 2*16+8(%r14), RL2, RL2; + vpxor 3*16+8(%r14), RR3, RR3; + vpxor 4*16+8(%r14), RL3, RL3; + vpxor 5*16+8(%r14), RR4, RR4; + vpxor 6*16+8(%r14), RL4, RL4; vmovdqu RR1, (0*16)(%r11); vmovdqu RL1, (1*16)(%r11); @@ -473,10 +474,10 @@ ENTRY(cast5_cbc_dec_16way) vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); - popq %r12; + popq %r14; FRAME_END - ret; + pax_ret cast5_cbc_dec_16way; ENDPROC(cast5_cbc_dec_16way) ENTRY(cast5_ctr_16way) @@ -488,10 +489,10 @@ ENTRY(cast5_ctr_16way) */ FRAME_BEGIN - pushq %r12; + pushq %r14; movq %rsi, %r11; - movq %rdx, %r12; + movq %rdx, %r14; vpcmpeqd RTMP, RTMP, RTMP; vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */ @@ -528,17 +529,17 @@ ENTRY(cast5_ctr_16way) vpshufb R1ST, RX, RX; /* be: IV16, IV16 */ vmovq RX, (%rcx); - call __cast5_enc_blk16; + pax_direct_call __cast5_enc_blk16; /* dst = src ^ iv */ - vpxor (0*16)(%r12), RR1, RR1; - vpxor (1*16)(%r12), RL1, RL1; - vpxor (2*16)(%r12), RR2, RR2; - vpxor (3*16)(%r12), RL2, RL2; - vpxor (4*16)(%r12), RR3, RR3; - vpxor (5*16)(%r12), RL3, RL3; - vpxor (6*16)(%r12), RR4, RR4; - vpxor (7*16)(%r12), RL4, RL4; + vpxor (0*16)(%r14), RR1, RR1; + vpxor (1*16)(%r14), RL1, RL1; + vpxor (2*16)(%r14), RR2, RR2; + vpxor (3*16)(%r14), RL2, RL2; + vpxor (4*16)(%r14), RR3, RR3; + vpxor (5*16)(%r14), RL3, RL3; + vpxor (6*16)(%r14), RR4, RR4; + vpxor (7*16)(%r14), RL4, RL4; vmovdqu RR1, (0*16)(%r11); vmovdqu RL1, (1*16)(%r11); vmovdqu RR2, (2*16)(%r11); @@ -548,8 +549,8 @@ ENTRY(cast5_ctr_16way) vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); - popq %r12; + popq %r14; FRAME_END - ret; + pax_ret cast5_ctr_16way; ENDPROC(cast5_ctr_16way) diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 8648158f3..b56922a5e 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -44,6 +44,8 @@ asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src); asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src, __be64 *iv); +void __cast5_enc_blk16(struct cast5_ctx *ctx, u8 *dst, const u8 *src) __rap_hash; +void __cast5_dec_blk16(struct cast5_ctx *ctx, u8 *dst, const u8 *src) __rap_hash; static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes) { diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 
c41938988..7e2ed7ca0 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -25,6 +25,7 @@ #include #include +#include #include "glue_helper-asm-avx.S" .file "cast6-avx-x86_64-asm_64.S" @@ -296,7 +297,7 @@ __cast6_enc_blk8: outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); - ret; + pax_ret __cast6_enc_blk8; ENDPROC(__cast6_enc_blk8) .align 8 @@ -341,10 +342,10 @@ __cast6_dec_blk8: outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); - ret; + pax_ret __cast6_dec_blk8; ENDPROC(__cast6_dec_blk8) -ENTRY(cast6_ecb_enc_8way) +RAP_ENTRY(cast6_ecb_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -356,15 +357,15 @@ ENTRY(cast6_ecb_enc_8way) load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __cast6_enc_blk8; + pax_direct_call __cast6_enc_blk8; store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret cast6_ecb_enc_8way; ENDPROC(cast6_ecb_enc_8way) -ENTRY(cast6_ecb_dec_8way) +RAP_ENTRY(cast6_ecb_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -376,15 +377,15 @@ ENTRY(cast6_ecb_dec_8way) load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __cast6_dec_blk8; + pax_direct_call __cast6_dec_blk8; store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret cast6_ecb_dec_8way; ENDPROC(cast6_ecb_dec_8way) -ENTRY(cast6_cbc_dec_8way) +RAP_ENTRY(cast6_cbc_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -392,24 +393,24 @@ ENTRY(cast6_cbc_dec_8way) */ FRAME_BEGIN - pushq %r12; + pushq %r14; movq %rsi, %r11; - movq %rdx, %r12; + movq %rdx, %r14; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __cast6_dec_blk8; + pax_direct_call __cast6_dec_blk8; - store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - popq %r12; + popq %r14; FRAME_END - ret; + pax_ret cast6_cbc_dec_8way; ENDPROC(cast6_cbc_dec_8way) -ENTRY(cast6_ctr_8way) +RAP_ENTRY(cast6_ctr_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -418,25 +419,25 @@ ENTRY(cast6_ctr_8way) */ FRAME_BEGIN - pushq %r12; + pushq %r14; movq %rsi, %r11; - movq %rdx, %r12; + movq %rdx, %r14; load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX, RKR, RKM); - call __cast6_enc_blk8; + pax_direct_call __cast6_enc_blk8; - store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - popq %r12; + popq %r14; FRAME_END - ret; + pax_ret cast6_ctr_8way; ENDPROC(cast6_ctr_8way) -ENTRY(cast6_xts_enc_8way) +RAP_ENTRY(cast6_xts_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -451,16 +452,16 @@ ENTRY(cast6_xts_enc_8way) load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask); - call __cast6_enc_blk8; + pax_direct_call __cast6_enc_blk8; /* dst <= regs xor IVs(in dst) */ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret cast6_xts_enc_8way; ENDPROC(cast6_xts_enc_8way) -ENTRY(cast6_xts_dec_8way) +RAP_ENTRY(cast6_xts_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -475,11 +476,11 @@ ENTRY(cast6_xts_dec_8way) load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask); - call __cast6_dec_blk8; + pax_direct_call __cast6_dec_blk8; /* dst <= regs xor IVs(in dst) */ 
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret cast6_xts_dec_8way; ENDPROC(cast6_xts_dec_8way) diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 50e684768..593d6323e 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -41,20 +41,23 @@ #define CAST6_PARALLEL_BLOCKS 8 -asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst, +asmlinkage void cast6_ecb_enc_8way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst, +asmlinkage void cast6_ecb_dec_8way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst, +asmlinkage void cast6_cbc_dec_8way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src, +asmlinkage void cast6_ctr_8way(void *ctx, u128 *dst, const u128 *src, le128 *iv); -asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void cast6_xts_enc_8way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); +asmlinkage void cast6_xts_dec_8way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); + +void __cast6_enc_blk8(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void __cast6_dec_blk8(void *ctx, u8 *dst, const u8 *src) __rap_hash; static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) { diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S index 16694e625..4675b5ec3 100644 --- a/arch/x86/crypto/chacha20-avx2-x86_64.S +++ b/arch/x86/crypto/chacha20-avx2-x86_64.S @@ -10,6 +10,7 @@ */ #include +#include .data .align 32 @@ -439,5 +440,5 @@ ENTRY(chacha20_8block_xor_avx2) vzeroupper mov %r8,%rsp - ret + pax_ret chacha20_8block_xor_avx2 ENDPROC(chacha20_8block_xor_avx2) diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S index 3a33124e9..ba21c6f35 100644 --- a/arch/x86/crypto/chacha20-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S @@ -10,6 +10,7 @@ */ #include +#include .data .align 16 @@ -139,7 +140,7 @@ ENTRY(chacha20_block_xor_ssse3) pxor %xmm7,%xmm3 movdqu %xmm3,0x30(%rsi) - ret + pax_ret chacha20_block_xor_ssse3 ENDPROC(chacha20_block_xor_ssse3) ENTRY(chacha20_4block_xor_ssse3) @@ -623,5 +624,5 @@ ENTRY(chacha20_4block_xor_ssse3) movdqu %xmm15,0xf0(%rsi) mov %r11,%rsp - ret + pax_ret chacha20_4block_xor_ssse3 ENDPROC(chacha20_4block_xor_ssse3) diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S index f24730429..d253bd10f 100644 --- a/arch/x86/crypto/crc32-pclmul_asm.S +++ b/arch/x86/crypto/crc32-pclmul_asm.S @@ -39,6 +39,7 @@ #include #include +#include .align 16 @@ -102,6 +103,12 @@ * size_t len, uint crc32) */ +#ifndef __x86_64__ +__i686_get_pc_thunk_cx: + mov (%esp),%ecx + ret +#endif + ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */ movdqa (BUF), %xmm1 movdqa 0x10(BUF), %xmm2 @@ -113,9 +120,8 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */ add $0x40, BUF #ifndef __x86_64__ /* This is for position independent code(-fPIC) support for 32bit */ - call delta + call __i686_get_pc_thunk_cx delta: - pop %ecx #endif cmp $0x40, LEN jb less_64 @@ -123,7 +129,7 @@ delta: #ifdef __x86_64__ movdqa .Lconstant_R2R1(%rip), CONSTANT #else - movdqa .Lconstant_R2R1 - 
delta(%ecx), CONSTANT + movdqa %cs:.Lconstant_R2R1 - delta (%ecx), CONSTANT #endif loop_64:/* 64 bytes Full cache line folding */ @@ -172,7 +178,7 @@ less_64:/* Folding cache line into 128bit */ #ifdef __x86_64__ movdqa .Lconstant_R4R3(%rip), CONSTANT #else - movdqa .Lconstant_R4R3 - delta(%ecx), CONSTANT + movdqa %cs:.Lconstant_R4R3 - delta(%ecx), CONSTANT #endif prefetchnta (BUF) @@ -220,8 +226,8 @@ fold_64: movdqa .Lconstant_R5(%rip), CONSTANT movdqa .Lconstant_mask32(%rip), %xmm3 #else - movdqa .Lconstant_R5 - delta(%ecx), CONSTANT - movdqa .Lconstant_mask32 - delta(%ecx), %xmm3 + movdqa %cs:.Lconstant_R5 - delta(%ecx), CONSTANT + movdqa %cs:.Lconstant_mask32 - delta(%ecx), %xmm3 #endif psrldq $0x04, %xmm2 pand %xmm3, %xmm1 @@ -232,7 +238,7 @@ fold_64: #ifdef __x86_64__ movdqa .Lconstant_RUpoly(%rip), CONSTANT #else - movdqa .Lconstant_RUpoly - delta(%ecx), CONSTANT + movdqa %cs:.Lconstant_RUpoly - delta(%ecx), CONSTANT #endif movdqa %xmm1, %xmm2 pand %xmm3, %xmm1 @@ -242,5 +248,5 @@ fold_64: pxor %xmm2, %xmm1 PEXTRD 0x01, %xmm1, %eax - ret + pax_ret crc32_pclmul_le_16 ENDPROC(crc32_pclmul_le_16) diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index dc05f010c..83302a8ac 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -45,6 +45,7 @@ #include #include +#include ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction @@ -309,7 +310,7 @@ do_return: popq %rsi popq %rdi popq %rbx - ret + pax_ret crc_pcl ENDPROC(crc_pcl) .section .rodata, "a", %progbits diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S index 35e97569d..504835312 100644 --- a/arch/x86/crypto/crct10dif-pcl-asm_64.S +++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -59,6 +59,7 @@ # #include +#include .text @@ -367,7 +368,7 @@ _cleanup: # scale the result back to 16 bits shr $16, %eax mov %rcx, %rsp - ret + pax_ret crc_t10dif_pcl ######################################################################## diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S index 038f6ae87..ec7142bfc 100644 --- a/arch/x86/crypto/des3_ede-asm_64.S +++ b/arch/x86/crypto/des3_ede-asm_64.S @@ -15,6 +15,7 @@ */ #include +#include .file "des3_ede-asm_64.S" .text @@ -250,7 +251,7 @@ ENTRY(des3_ede_x86_64_crypt_blk) popq %rbx; popq %rbp; - ret; + pax_ret des3_ede_x86_64_crypt_blk; ENDPROC(des3_ede_x86_64_crypt_blk) /*********************************************************************** @@ -534,7 +535,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) popq %rbx; popq %rbp; - ret; + pax_ret des3_ede_x86_64_crypt_blk_3way; ENDPROC(des3_ede_x86_64_crypt_blk_3way) .data diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S index eed55c8cc..18f64dcdf 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -19,6 +19,7 @@ #include #include #include +#include .data @@ -90,7 +91,7 @@ __clmul_gf128mul_ble: psrlq $1, T2 pxor T2, T1 pxor T1, DATA - ret + pax_ret __clmul_gf128mul_ble ENDPROC(__clmul_gf128mul_ble) /* void clmul_ghash_mul(char *dst, const u128 *shash) */ @@ -100,11 +101,11 @@ ENTRY(clmul_ghash_mul) movups (%rsi), SHASH movaps .Lbswap_mask, BSWAP PSHUFB_XMM BSWAP DATA - call __clmul_gf128mul_ble + pax_direct_call __clmul_gf128mul_ble PSHUFB_XMM BSWAP DATA movups DATA, (%rdi) FRAME_END - ret + pax_ret clmul_ghash_mul ENDPROC(clmul_ghash_mul) /* @@ -124,7 +125,7 @@ 
ENTRY(clmul_ghash_update) movups (%rsi), IN1 PSHUFB_XMM BSWAP IN1 pxor IN1, DATA - call __clmul_gf128mul_ble + pax_direct_call __clmul_gf128mul_ble sub $16, %rdx add $16, %rsi cmp $16, %rdx @@ -133,5 +134,5 @@ ENTRY(clmul_ghash_update) movups DATA, (%rdi) .Lupdate_just_ret: FRAME_END - ret + pax_ret clmul_ghash_update ENDPROC(clmul_ghash_update) diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 0420bab19..590ca78ff 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -26,6 +26,7 @@ #define GHASH_DIGEST_SIZE 16 void clmul_ghash_mul(char *dst, const u128 *shash); +void __clmul_gf128mul_ble(char *dst, const u128 *shash) __rap_hash; void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, const u128 *shash); diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c index 6a8559893..fed2ada5f 100644 --- a/arch/x86/crypto/glue_helper.c +++ b/arch/x86/crypto/glue_helper.c @@ -165,7 +165,7 @@ __glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, src -= num_blocks - 1; dst -= num_blocks - 1; - gctx->funcs[i].fn_u.cbc(ctx, dst, src); + gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst, (u8 *)src); nbytes -= bsize; if (nbytes < bsize) diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S index eff2f414e..932718e36 100644 --- a/arch/x86/crypto/poly1305-avx2-x86_64.S +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S @@ -10,6 +10,7 @@ */ #include +#include .data .align 32 @@ -382,5 +383,5 @@ ENTRY(poly1305_4block_avx2) pop %r13 pop %r12 pop %rbx - ret + pax_ret poly1305_4block_avx2 ENDPROC(poly1305_4block_avx2) diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S index 338c74805..497359c8d 100644 --- a/arch/x86/crypto/poly1305-sse2-x86_64.S +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -10,6 +10,7 @@ */ #include +#include .data .align 16 @@ -273,7 +274,7 @@ ENTRY(poly1305_block_sse2) add $0x10,%rsp pop %r12 pop %rbx - ret + pax_ret poly1305_block_sse2 ENDPROC(poly1305_block_sse2) @@ -578,5 +579,5 @@ ENTRY(poly1305_2block_sse2) pop %r13 pop %r12 pop %rbx - ret + pax_ret poly1305_2block_sse2 ENDPROC(poly1305_2block_sse2) diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S index 329452b8f..f1365001b 100644 --- a/arch/x86/crypto/salsa20-i586-asm_32.S +++ b/arch/x86/crypto/salsa20-i586-asm_32.S @@ -3,6 +3,7 @@ # Public domain. 
#include +#include .text @@ -924,7 +925,7 @@ ENTRY(salsa20_encrypt_bytes) movl 96(%esp),%ebp # leave add %eax,%esp - ret + pax_ret salsa20_encrypt_bytes ._bytesatleast65: # bytes -= 64 sub $64,%ebx @@ -1059,7 +1060,7 @@ ENTRY(salsa20_keysetup) movl 80(%esp),%ebp # leave add %eax,%esp - ret + pax_ret salsa20_keysetup ENDPROC(salsa20_keysetup) # enter salsa20_ivsetup @@ -1110,5 +1111,5 @@ ENTRY(salsa20_ivsetup) movl 80(%esp),%ebp # leave add %eax,%esp - ret + pax_ret salsa20_ivsetup ENDPROC(salsa20_ivsetup) diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S index 9279e0b2d..6745d48dd 100644 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S @@ -1,4 +1,5 @@ #include +#include # enter salsa20_encrypt_bytes ENTRY(salsa20_encrypt_bytes) @@ -789,7 +790,7 @@ ENTRY(salsa20_encrypt_bytes) add %r11,%rsp mov %rdi,%rax mov %rsi,%rdx - ret + pax_ret salsa20_encrypt_bytes # bytesatleast65: ._bytesatleast65: # bytes -= 64 @@ -889,7 +890,7 @@ ENTRY(salsa20_keysetup) add %r11,%rsp mov %rdi,%rax mov %rsi,%rdx - ret + pax_ret salsa20_keysetup ENDPROC(salsa20_keysetup) # enter salsa20_ivsetup @@ -914,5 +915,5 @@ ENTRY(salsa20_ivsetup) add %r11,%rsp mov %rdi,%rax mov %rsi,%rdx - ret + pax_ret salsa20_ivsetup ENDPROC(salsa20_ivsetup) diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index 8be571808..c5a99560c 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -25,6 +25,7 @@ #include #include +#include #include "glue_helper-asm-avx.S" .file "serpent-avx-x86_64-asm_64.S" @@ -619,7 +620,7 @@ __serpent_enc_blk8_avx: write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); - ret; + pax_ret __serpent_enc_blk8_avx; ENDPROC(__serpent_enc_blk8_avx) .align 8 @@ -673,10 +674,10 @@ __serpent_dec_blk8_avx: write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); - ret; + pax_ret __serpent_dec_blk8_avx; ENDPROC(__serpent_dec_blk8_avx) -ENTRY(serpent_ecb_enc_8way_avx) +RAP_ENTRY(serpent_ecb_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -686,15 +687,15 @@ ENTRY(serpent_ecb_enc_8way_avx) load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __serpent_enc_blk8_avx; + pax_direct_call __serpent_enc_blk8_avx; store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret serpent_ecb_enc_8way_avx; ENDPROC(serpent_ecb_enc_8way_avx) -ENTRY(serpent_ecb_dec_8way_avx) +RAP_ENTRY(serpent_ecb_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -704,15 +705,15 @@ ENTRY(serpent_ecb_dec_8way_avx) load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __serpent_dec_blk8_avx; + pax_direct_call __serpent_dec_blk8_avx; store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END - ret; + pax_ret serpent_ecb_dec_8way_avx; ENDPROC(serpent_ecb_dec_8way_avx) -ENTRY(serpent_cbc_dec_8way_avx) +RAP_ENTRY(serpent_cbc_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -722,15 +723,15 @@ ENTRY(serpent_cbc_dec_8way_avx) load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __serpent_dec_blk8_avx; + pax_direct_call __serpent_dec_blk8_avx; store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END - ret; + pax_ret serpent_cbc_dec_8way_avx; ENDPROC(serpent_cbc_dec_8way_avx) -ENTRY(serpent_ctr_8way_avx) +RAP_ENTRY(serpent_ctr_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -742,15 +743,15 
@@ ENTRY(serpent_ctr_8way_avx) load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - call __serpent_enc_blk8_avx; + pax_direct_call __serpent_enc_blk8_avx; store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret serpent_ctr_8way_avx; ENDPROC(serpent_ctr_8way_avx) -ENTRY(serpent_xts_enc_8way_avx) +RAP_ENTRY(serpent_xts_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -763,16 +764,16 @@ ENTRY(serpent_xts_enc_8way_avx) load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask); - call __serpent_enc_blk8_avx; + pax_direct_call __serpent_enc_blk8_avx; /* dst <= regs xor IVs(in dst) */ store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret serpent_xts_enc_8way_avx; ENDPROC(serpent_xts_enc_8way_avx) -ENTRY(serpent_xts_dec_8way_avx) +RAP_ENTRY(serpent_xts_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -785,11 +786,11 @@ ENTRY(serpent_xts_dec_8way_avx) load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask); - call __serpent_dec_blk8_avx; + pax_direct_call __serpent_dec_blk8_avx; /* dst <= regs xor IVs(in dst) */ store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END - ret; + pax_ret serpent_xts_dec_8way_avx; ENDPROC(serpent_xts_dec_8way_avx) diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S index 97c48add3..541b03c2e 100644 --- a/arch/x86/crypto/serpent-avx2-asm_64.S +++ b/arch/x86/crypto/serpent-avx2-asm_64.S @@ -16,6 +16,7 @@ #include #include +#include #include "glue_helper-asm-avx2.S" .file "serpent-avx2-asm_64.S" @@ -611,7 +612,7 @@ __serpent_enc_blk16: write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); - ret; + pax_ret __serpent_enc_blk16; ENDPROC(__serpent_enc_blk16) .align 8 @@ -665,10 +666,10 @@ __serpent_dec_blk16: write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); - ret; + pax_ret __serpent_dec_blk16; ENDPROC(__serpent_dec_blk16) -ENTRY(serpent_ecb_enc_16way) +RAP_ENTRY(serpent_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -680,17 +681,17 @@ ENTRY(serpent_ecb_enc_16way) load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __serpent_enc_blk16; + pax_direct_call __serpent_enc_blk16; store_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); vzeroupper; FRAME_END - ret; + pax_ret serpent_ecb_enc_16way; ENDPROC(serpent_ecb_enc_16way) -ENTRY(serpent_ecb_dec_16way) +RAP_ENTRY(serpent_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -702,17 +703,17 @@ ENTRY(serpent_ecb_dec_16way) load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __serpent_dec_blk16; + pax_direct_call __serpent_dec_blk16; store_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); vzeroupper; FRAME_END - ret; + pax_ret serpent_ecb_dec_16way; ENDPROC(serpent_ecb_dec_16way) -ENTRY(serpent_cbc_dec_16way) +RAP_ENTRY(serpent_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -724,7 +725,7 @@ ENTRY(serpent_cbc_dec_16way) load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __serpent_dec_blk16; + pax_direct_call __serpent_dec_blk16; store_cbc_16way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2, RK0); @@ -732,10 +733,10 @@ ENTRY(serpent_cbc_dec_16way) vzeroupper; FRAME_END - ret; + pax_ret serpent_cbc_dec_16way; ENDPROC(serpent_cbc_dec_16way) -ENTRY(serpent_ctr_16way) 
+RAP_ENTRY(serpent_ctr_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -750,17 +751,17 @@ ENTRY(serpent_ctr_16way) RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, tp); - call __serpent_enc_blk16; + pax_direct_call __serpent_enc_blk16; store_ctr_16way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); vzeroupper; FRAME_END - ret; + pax_ret serpent_ctr_16way; ENDPROC(serpent_ctr_16way) -ENTRY(serpent_xts_enc_16way) +RAP_ENTRY(serpent_xts_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -776,17 +777,17 @@ ENTRY(serpent_xts_enc_16way) .Lxts_gf128mul_and_shl1_mask_0, .Lxts_gf128mul_and_shl1_mask_1); - call __serpent_enc_blk16; + pax_direct_call __serpent_enc_blk16; store_xts_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); vzeroupper; FRAME_END - ret; + pax_ret serpent_xts_enc_16way; ENDPROC(serpent_xts_enc_16way) -ENTRY(serpent_xts_dec_16way) +RAP_ENTRY(serpent_xts_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -802,12 +803,12 @@ ENTRY(serpent_xts_dec_16way) .Lxts_gf128mul_and_shl1_mask_0, .Lxts_gf128mul_and_shl1_mask_1); - call __serpent_dec_blk16; + pax_direct_call __serpent_dec_blk16; store_xts_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); vzeroupper; FRAME_END - ret; + pax_ret serpent_xts_dec_16way; ENDPROC(serpent_xts_dec_16way) diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S index d348f1553..48aa0c349 100644 --- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S +++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S @@ -25,6 +25,7 @@ */ #include +#include .file "serpent-sse2-i586-asm_32.S" .text @@ -568,12 +569,12 @@ ENTRY(__serpent_enc_blk_4way) write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); - ret; + pax_ret __serpent_enc_blk_4way; .L__enc_xor4: xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); - ret; + pax_ret __serpent_enc_blk_4way; ENDPROC(__serpent_enc_blk_4way) ENTRY(serpent_dec_blk_4way) @@ -627,5 +628,5 @@ ENTRY(serpent_dec_blk_4way) movl arg_dst(%esp), %eax; write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); - ret; + pax_ret serpent_dec_blk_4way; ENDPROC(serpent_dec_blk_4way) diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S index acc066c7c..d96c7c29a 100644 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S @@ -25,6 +25,7 @@ */ #include +#include .file "serpent-sse2-x86_64-asm_64.S" .text @@ -690,13 +691,13 @@ ENTRY(__serpent_enc_blk_8way) write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - ret; + pax_ret __serpent_enc_blk_8way; .L__enc_xor8: xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - ret; + pax_ret __serpent_enc_blk_8way; ENDPROC(__serpent_enc_blk_8way) ENTRY(serpent_dec_blk_8way) @@ -750,5 +751,5 @@ ENTRY(serpent_dec_blk_8way) write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); - ret; + pax_ret serpent_dec_blk_8way; ENDPROC(serpent_dec_blk_8way) diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 870f6d812..aaf38ded4 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -27,18 +27,20 @@ #define SERPENT_AVX2_PARALLEL_BLOCKS 16 /* 16-way AVX2 parallel cipher functions */ -asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_enc_16way(void *ctx, u8 *dst, const u8 
*src); -asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_dec_16way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src); +asmlinkage void serpent_cbc_dec_16way(void *ctx, u8 *dst, const u8 *src); +void __serpent_enc_blk16(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void __serpent_dec_blk16(void *ctx, u8 *dst, const u8 *src) __rap_hash; asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src, le128 *iv); -asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_xts_enc_16way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); +asmlinkage void serpent_xts_dec_16way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); static const struct common_glue_ctx serpent_enc = { .num_funcs = 3, diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 6f778d3da..3cf277e02 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -41,28 +41,28 @@ #include /* 8-way parallel cipher functions */ -asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_enc_8way_avx(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx); -asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_dec_8way_avx(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx); -asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_cbc_dec_8way_avx(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx); -asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_ctr_8way_avx(void *ctx, u128 *dst, + const u128 *src, le128 *iv); EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx); -asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_xts_enc_8way_avx(void *ctx, u128 *dst, + const u128 *src, le128 *iv); EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx); -asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_xts_dec_8way_avx(void *ctx, u128 *dst, + const u128 *src, le128 *iv); EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx); void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 644f97ab8..4d069a1c5 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -45,8 +45,10 @@ #include #include -static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) +static void serpent_decrypt_cbc_xway(void *ctx, u8 *_dst, const u8 *_src) { + u128 *dst = (u128 *)_dst; + const u128 *src = (const u128 *)_src; u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; unsigned int j; diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h index 08ad1a9ac..293bc9e85 100644 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h +++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h @@ -106,5 +106,6 @@ struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state, struct job_sha1 *job); struct job_sha1 *sha1_mb_mgr_flush_avx2(struct 
sha1_mb_mgr *state); struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state); +struct job_sha1 *sha1_x8_avx2(struct sha1_mb_mgr *state) __rap_hash; #endif diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S index 96df6a39d..f5f561fc3 100644 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S +++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S @@ -53,6 +53,7 @@ */ #include #include +#include #include "sha1_mb_mgr_datastruct.S" @@ -103,7 +104,7 @@ offset = \_offset # JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state) # arg 1 : rcx : state -ENTRY(sha1_mb_mgr_flush_avx2) +RAP_ENTRY(sha1_mb_mgr_flush_avx2) FRAME_BEGIN push %rbx @@ -183,7 +184,7 @@ LABEL skip_ %I # "state" and "args" are the same address, arg1 # len is arg2 - call sha1_x8_avx2 + pax_direct_call sha1_x8_avx2 # state and idx are intact @@ -215,7 +216,7 @@ len_is_0: return: pop %rbx FRAME_END - ret + pax_ret sha1_mb_mgr_flush_avx2 return_null: xor job_rax, job_rax @@ -226,7 +227,7 @@ ENDPROC(sha1_mb_mgr_flush_avx2) ################################################################# .align 16 -ENTRY(sha1_mb_mgr_get_comp_job_avx2) +RAP_ENTRY(sha1_mb_mgr_get_comp_job_avx2) push %rbx ## if bit 32+3 is set, then all lanes are empty @@ -273,12 +274,12 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2) pop %rbx - ret + pax_ret sha1_mb_mgr_get_comp_job_avx2 .return_null: xor job_rax, job_rax pop %rbx - ret + pax_ret sha1_mb_mgr_get_comp_job_avx2 ENDPROC(sha1_mb_mgr_get_comp_job_avx2) .data diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S index 63a0d9c8e..53b60ac98 100644 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S +++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S @@ -54,6 +54,7 @@ #include #include +#include #include "sha1_mb_mgr_datastruct.S" @@ -98,7 +99,7 @@ lane_data = %r10 # JOB* submit_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job) # arg 1 : rcx : state # arg 2 : rdx : job -ENTRY(sha1_mb_mgr_submit_avx2) +RAP_ENTRY(sha1_mb_mgr_submit_avx2) FRAME_BEGIN push %rbx push %r12 @@ -163,7 +164,7 @@ start_loop: # "state" and "args" are the same address, arg1 # len is arg2 - call sha1_x8_avx2 + pax_direct_call sha1_x8_avx2 # state and idx are intact @@ -195,7 +196,7 @@ return: pop %r12 pop %rbx FRAME_END - ret + pax_ret sha1_mb_mgr_submit_avx2 return_null: xor job_rax, job_rax diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S index c9dae1cd2..605514153 100644 --- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S +++ b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S @@ -53,6 +53,7 @@ */ #include +#include #include "sha1_mb_mgr_datastruct.S" ## code to compute oct SHA1 using SSE-256 @@ -457,7 +458,7 @@ lloop: pop %r13 pop %r12 - ret + pax_ret sha1_x8_avx2 ENDPROC(sha1_x8_avx2) diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1cd792db1..2236003f0 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -70,6 +70,7 @@ */ #include +#include #define CTX %rdi /* arg1 */ #define BUF %rsi /* arg2 */ @@ -671,7 +672,7 @@ _loop3: pop %rbp pop %rbx - ret + pax_ret \name ENDPROC(\name) .endm diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S index 874a651b9..aa3d201fe 100644 --- a/arch/x86/crypto/sha1_ni_asm.S +++ b/arch/x86/crypto/sha1_ni_asm.S @@ -54,6 +54,7 @@ */ #include +#include #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ @@ -290,7 
+291,7 @@ ENTRY(sha1_ni_transform) .Ldone_hash: mov RSPSAVE, %rsp - ret + pax_ret sha1_ni_transform ENDPROC(sha1_ni_transform) .data diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index a4109506a..f0fefc3d2 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -29,6 +29,7 @@ */ #include +#include #define CTX %rdi // arg1 #define BUF %rsi // arg2 @@ -71,13 +72,14 @@ * param: function's name */ .macro SHA1_VECTOR_ASM name - ENTRY(\name) +ALIGN + RAP_ENTRY(\name) push %rbx push %rbp - push %r12 + push %r14 - mov %rsp, %r12 + mov %rsp, %r14 sub $64, %rsp # allocate workspace and $~15, %rsp # align stack @@ -99,12 +101,12 @@ xor %rax, %rax rep stosq - mov %r12, %rsp # deallocate workspace + mov %r14, %rsp # deallocate workspace - pop %r12 + pop %r14 pop %rbp pop %rbx - ret + pax_ret \name ENDPROC(\name) .endm diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index fc6173915..03f7efe90 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -31,8 +31,8 @@ #include #include -typedef void (sha1_transform_fn)(u32 *digest, const char *data, - unsigned int rounds); +typedef void (sha1_transform_fn)(struct sha1_state *digest, const u8 *data, + int rounds); static int sha1_update(struct shash_desc *desc, const u8 *data, unsigned int len, sha1_transform_fn *sha1_xform) @@ -47,8 +47,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0); kernel_fpu_begin(); - sha1_base_do_update(desc, data, len, - (sha1_block_fn *)sha1_xform); + sha1_base_do_update(desc, data, len, sha1_xform); kernel_fpu_end(); return 0; @@ -62,29 +61,26 @@ static int sha1_finup(struct shash_desc *desc, const u8 *data, kernel_fpu_begin(); if (len) - sha1_base_do_update(desc, data, len, - (sha1_block_fn *)sha1_xform); - sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_xform); + sha1_base_do_update(desc, data, len, sha1_xform); + sha1_base_do_finalize(desc, sha1_xform); kernel_fpu_end(); return sha1_base_finish(desc, out); } -asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, - unsigned int rounds); +asmlinkage void sha1_transform_ssse3(struct sha1_state *digest, const u8 *data, + int rounds); static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha1_update(desc, data, len, - (sha1_transform_fn *) sha1_transform_ssse3); + return sha1_update(desc, data, len, sha1_transform_ssse3); } static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - return sha1_finup(desc, data, len, out, - (sha1_transform_fn *) sha1_transform_ssse3); + return sha1_finup(desc, data, len, out, sha1_transform_ssse3); } /* Add padding and return the message digest. 
*/ @@ -124,21 +120,19 @@ static void unregister_sha1_ssse3(void) } #ifdef CONFIG_AS_AVX -asmlinkage void sha1_transform_avx(u32 *digest, const char *data, - unsigned int rounds); +asmlinkage void sha1_transform_avx(struct sha1_state *digest, const u8 *data, + int rounds); static int sha1_avx_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha1_update(desc, data, len, - (sha1_transform_fn *) sha1_transform_avx); + return sha1_update(desc, data, len, sha1_transform_avx); } static int sha1_avx_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - return sha1_finup(desc, data, len, out, - (sha1_transform_fn *) sha1_transform_avx); + return sha1_finup(desc, data, len, out, sha1_transform_avx); } static int sha1_avx_final(struct shash_desc *desc, u8 *out) @@ -196,8 +190,8 @@ static inline void unregister_sha1_avx(void) { } #if defined(CONFIG_AS_AVX2) && (CONFIG_AS_AVX) #define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */ -asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, - unsigned int rounds); +asmlinkage void sha1_transform_avx2(struct sha1_state *digest, const u8 *data, + int rounds); static bool avx2_usable(void) { @@ -209,8 +203,8 @@ static bool avx2_usable(void) return false; } -static void sha1_apply_transform_avx2(u32 *digest, const char *data, - unsigned int rounds) +static void sha1_apply_transform_avx2(struct sha1_state *digest, const u8 *data, + int rounds) { /* Select the optimal transform based on data block size */ if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE) @@ -222,15 +216,13 @@ static void sha1_apply_transform_avx2(u32 *digest, const char *data, static int sha1_avx2_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha1_update(desc, data, len, - (sha1_transform_fn *) sha1_apply_transform_avx2); + return sha1_update(desc, data, len, sha1_apply_transform_avx2); } static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - return sha1_finup(desc, data, len, out, - (sha1_transform_fn *) sha1_apply_transform_avx2); + return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2); } static int sha1_avx2_final(struct shash_desc *desc, u8 *out) @@ -274,21 +266,19 @@ static inline void unregister_sha1_avx2(void) { } #endif #ifdef CONFIG_AS_SHA1_NI -asmlinkage void sha1_ni_transform(u32 *digest, const char *data, - unsigned int rounds); +asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data, + int rounds); static int sha1_ni_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha1_update(desc, data, len, - (sha1_transform_fn *) sha1_ni_transform); + return sha1_update(desc, data, len, sha1_ni_transform); } static int sha1_ni_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - return sha1_finup(desc, data, len, out, - (sha1_transform_fn *) sha1_ni_transform); + return sha1_finup(desc, data, len, out, sha1_ni_transform); } static int sha1_ni_final(struct shash_desc *desc, u8 *out) diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index 92b3b5d75..47aadd72c 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -49,6 +49,7 @@ #ifdef CONFIG_AS_AVX #include +#include ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -347,8 +348,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text -ENTRY(sha256_transform_avx) -.align 32 
+RAP_ENTRY(sha256_transform_avx) pushq %rbx pushq %rbp pushq %r13 @@ -460,7 +460,7 @@ done_hash: popq %r13 popq %rbp popq %rbx - ret + pax_ret sha256_transform_avx ENDPROC(sha256_transform_avx) .data diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 570ec5ec6..6c7f33c3b 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -50,6 +50,7 @@ #ifdef CONFIG_AS_AVX2 #include +#include ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -528,8 +529,7 @@ STACK_SIZE = _RSP + _RSP_SIZE ## arg 3 : Num blocks ######################################################################## .text -ENTRY(sha256_transform_rorx) -.align 32 +RAP_ENTRY(sha256_transform_rorx) pushq %rbx pushq %rbp pushq %r12 @@ -720,7 +720,7 @@ done_hash: popq %r12 popq %rbp popq %rbx - ret + pax_ret sha256_transform_rorx ENDPROC(sha256_transform_rorx) .data diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h index b01ae408c..880e1d4d3 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h @@ -104,5 +104,6 @@ struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state, struct job_sha256 *job); struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state); struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state); +struct job_sha256 *sha256_x8_avx2(struct sha256_mb_mgr *state) __rap_hash; #endif diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S index a78a0694d..39196418e 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S @@ -52,6 +52,7 @@ */ #include #include +#include #include "sha256_mb_mgr_datastruct.S" .extern sha256_x8_avx2 @@ -101,7 +102,7 @@ offset = \_offset # JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state) # arg 1 : rcx : state -ENTRY(sha256_mb_mgr_flush_avx2) +RAP_ENTRY(sha256_mb_mgr_flush_avx2) FRAME_BEGIN push %rbx @@ -181,7 +182,7 @@ LABEL skip_ %I # "state" and "args" are the same address, arg1 # len is arg2 - call sha256_x8_avx2 + pax_direct_call sha256_x8_avx2 # state and idx are intact len_is_0: @@ -215,7 +216,7 @@ len_is_0: return: pop %rbx FRAME_END - ret + pax_ret sha256_mb_mgr_flush_avx2 return_null: xor job_rax, job_rax @@ -225,7 +226,7 @@ ENDPROC(sha256_mb_mgr_flush_avx2) ############################################################################## .align 16 -ENTRY(sha256_mb_mgr_get_comp_job_avx2) +RAP_ENTRY(sha256_mb_mgr_get_comp_job_avx2) push %rbx ## if bit 32+3 is set, then all lanes are empty @@ -276,12 +277,12 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2) pop %rbx - ret + pax_ret sha256_mb_mgr_get_comp_job_avx2 .return_null: xor job_rax, job_rax pop %rbx - ret + pax_ret sha256_mb_mgr_get_comp_job_avx2 ENDPROC(sha256_mb_mgr_get_comp_job_avx2) .data diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S index 7ea670e25..835723c1c 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S @@ -53,6 +53,7 @@ #include #include +#include #include "sha256_mb_mgr_datastruct.S" .extern sha256_x8_avx2 @@ -96,7 +97,7 @@ lane_data = %r10 # JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job) # arg 1 : rcx : state # arg 2 : rdx : job -ENTRY(sha256_mb_mgr_submit_avx2) +RAP_ENTRY(sha256_mb_mgr_submit_avx2) FRAME_BEGIN 
push %rbx push %r12 @@ -164,7 +165,7 @@ start_loop: # "state" and "args" are the same address, arg1 # len is arg2 - call sha256_x8_avx2 + pax_direct_call sha256_x8_avx2 # state and idx are intact @@ -200,7 +201,7 @@ return: pop %r12 pop %rbx FRAME_END - ret + pax_ret sha256_mb_mgr_submit_avx2 return_null: xor job_rax, job_rax diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S index aa21aea4c..cb35a6e5d 100644 --- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S +++ b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S @@ -52,6 +52,7 @@ */ #include +#include #include "sha256_mb_mgr_datastruct.S" ## code to compute oct SHA256 using SSE-256 @@ -435,7 +436,7 @@ Lrounds_16_xx: pop %r13 pop %r12 - ret + pax_ret sha256_x8_avx2 ENDPROC(sha256_x8_avx2) .data .align 64 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index 2cedc44e8..35ed999f3 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S @@ -47,6 +47,7 @@ ######################################################################## #include +#include ## assume buffers not aligned #define MOVDQ movdqu @@ -352,9 +353,7 @@ a = TMP_ ## arg 2 : pointer to input data ## arg 3 : Num blocks ######################################################################## -.text -ENTRY(sha256_transform_ssse3) -.align 32 +RAP_ENTRY(sha256_transform_ssse3) pushq %rbx pushq %rbp pushq %r13 @@ -471,7 +470,7 @@ done_hash: popq %rbp popq %rbx - ret + pax_ret sha256_transform_ssse3 ENDPROC(sha256_transform_ssse3) .data diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S index 748cdf21a..cd2180d18 100644 --- a/arch/x86/crypto/sha256_ni_asm.S +++ b/arch/x86/crypto/sha256_ni_asm.S @@ -54,6 +54,7 @@ */ #include +#include #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ @@ -97,7 +98,7 @@ .text .align 32 -ENTRY(sha256_ni_transform) +RAP_ENTRY(sha256_ni_transform) shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash @@ -326,7 +327,7 @@ ENTRY(sha256_ni_transform) .Ldone_hash: - ret + pax_ret sha256_ni_transform ENDPROC(sha256_ni_transform) .data diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 9e79baf03..c5186c743 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -40,9 +40,9 @@ #include #include -asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data, - u64 rounds); -typedef void (sha256_transform_fn)(u32 *digest, const char *data, u64 rounds); +asmlinkage void sha256_transform_ssse3(struct sha256_state *digest, const u8 *data, + int rounds); +typedef void (sha256_transform_fn)(struct sha256_state *digest, const u8 *data, int rounds); static int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len, sha256_transform_fn *sha256_xform) @@ -57,8 +57,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data, BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); kernel_fpu_begin(); - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_xform); + sha256_base_do_update(desc, data, len, sha256_xform); kernel_fpu_end(); return 0; @@ -72,9 +71,8 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data, kernel_fpu_begin(); if (len) - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_xform); - sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_xform); + sha256_base_do_update(desc, data, len, sha256_xform); + sha256_base_do_finalize(desc, sha256_xform); 
kernel_fpu_end(); return sha256_base_finish(desc, out); @@ -146,8 +144,8 @@ static void unregister_sha256_ssse3(void) } #ifdef CONFIG_AS_AVX -asmlinkage void sha256_transform_avx(u32 *digest, const char *data, - u64 rounds); +asmlinkage void sha256_transform_avx(struct sha256_state *digest, const u8 *data, + int rounds); static int sha256_avx_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -230,8 +228,8 @@ static inline void unregister_sha256_avx(void) { } #endif #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) -asmlinkage void sha256_transform_rorx(u32 *digest, const char *data, - u64 rounds); +asmlinkage void sha256_transform_rorx(struct sha256_state *digest, const u8 *data, + int rounds); static int sha256_avx2_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -312,8 +310,8 @@ static inline void unregister_sha256_avx2(void) { } #endif #ifdef CONFIG_AS_SHA256_NI -asmlinkage void sha256_ni_transform(u32 *digest, const char *data, - u64 rounds); /*unsigned int rounds);*/ +asmlinkage void sha256_ni_transform(struct sha256_state *digest, const u8 *data, + int rounds); /*unsigned int rounds);*/ static int sha256_ni_update(struct shash_desc *desc, const u8 *data, unsigned int len) diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S index 565274d6a..106c3dcf7 100644 --- a/arch/x86/crypto/sha512-avx-asm.S +++ b/arch/x86/crypto/sha512-avx-asm.S @@ -49,6 +49,7 @@ #ifdef CONFIG_AS_AVX #include +#include .text @@ -277,7 +278,8 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. # L is the message length in SHA512 blocks ######################################################################## -ENTRY(sha512_transform_avx) +ALIGN +RAP_ENTRY(sha512_transform_avx) cmp $0, msglen je nowork @@ -364,7 +366,7 @@ updateblock: mov frame_RSPSAVE(%rsp), %rsp nowork: - ret + pax_ret sha512_transform_avx ENDPROC(sha512_transform_avx) ######################################################################## diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 1f20b35d8..f12df89c0 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -51,6 +51,7 @@ #ifdef CONFIG_AS_AVX2 #include +#include .text @@ -568,7 +569,8 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. 
# L is the message length in SHA512 blocks ######################################################################## -ENTRY(sha512_transform_rorx) +ALIGN +RAP_ENTRY(sha512_transform_rorx) # Allocate Stack Space mov %rsp, %rax sub $frame_size, %rsp @@ -678,7 +680,7 @@ done_hash: # Restore Stack Pointer mov frame_RSPSAVE(%rsp), %rsp - ret + pax_ret sha512_transform_rorx ENDPROC(sha512_transform_rorx) ######################################################################## diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h index 178f17eef..88a59c642 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h +++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h @@ -100,5 +100,6 @@ struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state, struct job_sha512 *job); struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state); struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state); +struct job_sha512 *sha512_x4_avx2(struct sha512_mb_mgr *state) __rap_hash; #endif diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S index 3ddba19a0..392d6a11b 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S +++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S @@ -53,6 +53,7 @@ #include #include +#include #include "sha512_mb_mgr_datastruct.S" .extern sha512_x4_avx2 @@ -107,7 +108,7 @@ offset = \_offset # JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state) # arg 1 : rcx : state -ENTRY(sha512_mb_mgr_flush_avx2) +RAP_ENTRY(sha512_mb_mgr_flush_avx2) FRAME_BEGIN push %rbx @@ -177,7 +178,7 @@ LABEL skip_ %I # "state" and "args" are the same address, arg1 # len is arg2 - call sha512_x4_avx2 + pax_direct_call sha512_x4_avx2 # state and idx are intact len_is_0: @@ -212,7 +213,7 @@ len_is_0: return: pop %rbx FRAME_END - ret + pax_ret sha512_mb_mgr_flush_avx2 return_null: xor job_rax, job_rax @@ -220,7 +221,7 @@ return_null: ENDPROC(sha512_mb_mgr_flush_avx2) .align 16 -ENTRY(sha512_mb_mgr_get_comp_job_avx2) +RAP_ENTRY(sha512_mb_mgr_get_comp_job_avx2) push %rbx mov _unused_lanes(state), unused_lanes @@ -273,12 +274,12 @@ ENTRY(sha512_mb_mgr_get_comp_job_avx2) pop %rbx - ret + pax_ret sha512_mb_mgr_get_comp_job_avx2 .return_null: xor job_rax, job_rax pop %rbx - ret + pax_ret sha512_mb_mgr_get_comp_job_avx2 ENDPROC(sha512_mb_mgr_get_comp_job_avx2) .data diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S index 815f07bdd..a1f961ae4 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S +++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S @@ -53,6 +53,7 @@ #include #include +#include #include "sha512_mb_mgr_datastruct.S" .extern sha512_x4_avx2 @@ -98,7 +99,7 @@ # JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job) # arg 1 : rcx : state # arg 2 : rdx : job -ENTRY(sha512_mb_mgr_submit_avx2) +RAP_ENTRY(sha512_mb_mgr_submit_avx2) FRAME_BEGIN push %rbx push %r12 @@ -167,7 +168,7 @@ start_loop: # "state" and "args" are the same address, arg1 # len is arg2 - call sha512_x4_avx2 + pax_direct_call sha512_x4_avx2 # state and idx are intact len_is_0: @@ -203,7 +204,7 @@ return: pop %r12 pop %rbx FRAME_END - ret + pax_ret sha512_mb_mgr_submit_avx2 return_null: xor job_rax, job_rax diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S index 31ab1eff6..da5a002a6 100644 --- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S +++ 
b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S @@ -63,6 +63,7 @@ # clobbers ymm0-15 #include +#include #include "sha512_mb_mgr_datastruct.S" arg1 = %rdi @@ -358,7 +359,7 @@ Lrounds_16_xx: pop %r12 # outer calling routine restores XMM and other GP registers - ret + pax_ret sha512_x4_avx2 ENDPROC(sha512_x4_avx2) .data diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S index e610e29cb..6b3848e2c 100644 --- a/arch/x86/crypto/sha512-ssse3-asm.S +++ b/arch/x86/crypto/sha512-ssse3-asm.S @@ -48,6 +48,7 @@ ######################################################################## #include +#include .text @@ -275,7 +276,8 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. # L is the message length in SHA512 blocks. ######################################################################## -ENTRY(sha512_transform_ssse3) +ALIGN +RAP_ENTRY(sha512_transform_ssse3) cmp $0, msglen je nowork @@ -363,7 +365,7 @@ updateblock: mov frame_RSPSAVE(%rsp), %rsp nowork: - ret + pax_ret sha512_transform_ssse3 ENDPROC(sha512_transform_ssse3) ######################################################################## diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index 2b0e2a682..59a1f9446 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -39,10 +39,10 @@ #include -asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data, - u64 rounds); +asmlinkage void sha512_transform_ssse3(struct sha512_state *digest, const u8 *data, + int rounds); -typedef void (sha512_transform_fn)(u64 *digest, const char *data, u64 rounds); +typedef void (sha512_transform_fn)(struct sha512_state *digest, const u8 *data, int rounds); static int sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len, sha512_transform_fn *sha512_xform) @@ -57,8 +57,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data, BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0); kernel_fpu_begin(); - sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_xform); + sha512_base_do_update(desc, data, len, sha512_xform); kernel_fpu_end(); return 0; @@ -72,9 +71,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data, kernel_fpu_begin(); if (len) - sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_xform); - sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_xform); + sha512_base_do_update(desc, data, len, sha512_xform); + sha512_base_do_finalize(desc, sha512_xform); kernel_fpu_end(); return sha512_base_finish(desc, out); @@ -146,8 +144,8 @@ static void unregister_sha512_ssse3(void) } #ifdef CONFIG_AS_AVX -asmlinkage void sha512_transform_avx(u64 *digest, const char *data, - u64 rounds); +asmlinkage void sha512_transform_avx(struct sha512_state *digest, const u8 *data, + int rounds); static bool avx_usable(void) { if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { @@ -229,8 +227,8 @@ static inline void unregister_sha512_avx(void) { } #endif #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) -asmlinkage void sha512_transform_rorx(u64 *digest, const char *data, - u64 rounds); +asmlinkage void sha512_transform_rorx(struct sha512_state *digest, const u8 *data, + int rounds); static int sha512_avx2_update(struct shash_desc *desc, const u8 *data, unsigned int len) diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index dc66273e6..91dc734b4 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S 
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -25,6 +25,7 @@ #include #include +#include #include "glue_helper-asm-avx.S" .file "twofish-avx-x86_64-asm_64.S" @@ -285,7 +286,7 @@ __twofish_enc_blk8: outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); - ret; + pax_ret __twofish_enc_blk8; ENDPROC(__twofish_enc_blk8) .align 8 @@ -325,10 +326,10 @@ __twofish_dec_blk8: outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); - ret; + pax_ret __twofish_dec_blk8; ENDPROC(__twofish_dec_blk8) -ENTRY(twofish_ecb_enc_8way) +RAP_ENTRY(twofish_ecb_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -340,15 +341,15 @@ ENTRY(twofish_ecb_enc_8way) load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - call __twofish_enc_blk8; + pax_direct_call __twofish_enc_blk8; store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); FRAME_END - ret; + pax_ret twofish_ecb_enc_8way; ENDPROC(twofish_ecb_enc_8way) -ENTRY(twofish_ecb_dec_8way) +RAP_ENTRY(twofish_ecb_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -360,15 +361,15 @@ ENTRY(twofish_ecb_dec_8way) load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - call __twofish_dec_blk8; + pax_direct_call __twofish_dec_blk8; store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret twofish_ecb_dec_8way; ENDPROC(twofish_ecb_dec_8way) -ENTRY(twofish_cbc_dec_8way) +RAP_ENTRY(twofish_cbc_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -376,24 +377,24 @@ ENTRY(twofish_cbc_dec_8way) */ FRAME_BEGIN - pushq %r12; + pushq %r14; movq %rsi, %r11; - movq %rdx, %r12; + movq %rdx, %r14; load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - call __twofish_dec_blk8; + pax_direct_call __twofish_dec_blk8; - store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - popq %r12; + popq %r14; FRAME_END - ret; + pax_ret twofish_cbc_dec_8way; ENDPROC(twofish_cbc_dec_8way) -ENTRY(twofish_ctr_8way) +RAP_ENTRY(twofish_ctr_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -402,25 +403,25 @@ ENTRY(twofish_ctr_8way) */ FRAME_BEGIN - pushq %r12; + pushq %r14; movq %rsi, %r11; - movq %rdx, %r12; + movq %rdx, %r14; load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX0, RX1, RY0); - call __twofish_enc_blk8; + pax_direct_call __twofish_enc_blk8; - store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); + store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - popq %r12; + popq %r14; FRAME_END - ret; + pax_ret twofish_ctr_8way; ENDPROC(twofish_ctr_8way) -ENTRY(twofish_xts_enc_8way) +RAP_ENTRY(twofish_xts_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -435,16 +436,16 @@ ENTRY(twofish_xts_enc_8way) load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask); - call __twofish_enc_blk8; + pax_direct_call __twofish_enc_blk8; /* dst <= regs xor IVs(in dst) */ store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); FRAME_END - ret; + pax_ret twofish_xts_enc_8way; ENDPROC(twofish_xts_enc_8way) -ENTRY(twofish_xts_dec_8way) +RAP_ENTRY(twofish_xts_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -459,11 +460,11 @@ ENTRY(twofish_xts_dec_8way) load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2, RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask); - call __twofish_dec_blk8; + pax_direct_call __twofish_dec_blk8; /* dst <= regs xor IVs(in 
dst) */ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + pax_ret twofish_xts_dec_8way; ENDPROC(twofish_xts_dec_8way) diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S index 694ea4587..91b9a8d9f 100644 --- a/arch/x86/crypto/twofish-i586-asm_32.S +++ b/arch/x86/crypto/twofish-i586-asm_32.S @@ -22,6 +22,7 @@ #include #include +#include /* return address at 0 */ @@ -220,7 +221,7 @@ xor %esi, d ## D;\ ror $1, d ## D; -ENTRY(twofish_enc_blk) +RAP_ENTRY(twofish_enc_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi @@ -273,10 +274,10 @@ ENTRY(twofish_enc_blk) pop %ebx pop %ebp mov $1, %eax - ret + pax_ret twofish_enc_blk ENDPROC(twofish_enc_blk) -ENTRY(twofish_dec_blk) +RAP_ENTRY(twofish_dec_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi @@ -330,5 +331,5 @@ ENTRY(twofish_dec_blk) pop %ebx pop %ebp mov $1, %eax - ret + pax_ret twofish_dec_blk ENDPROC(twofish_dec_blk) diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index 1c3b7ceb3..9a65a0b72 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S @@ -21,6 +21,7 @@ */ #include +#include .file "twofish-x86_64-asm-3way.S" .text @@ -258,7 +259,7 @@ ENTRY(__twofish_enc_blk_3way) popq %r13; popq %r14; popq %r15; - ret; + pax_ret __twofish_enc_blk_3way; .L__enc_xor3: outunpack_enc3(xor); @@ -269,10 +270,10 @@ ENTRY(__twofish_enc_blk_3way) popq %r13; popq %r14; popq %r15; - ret; + pax_ret __twofish_enc_blk_3way; ENDPROC(__twofish_enc_blk_3way) -ENTRY(twofish_dec_blk_3way) +RAP_ENTRY(twofish_dec_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -308,5 +309,5 @@ ENTRY(twofish_dec_blk_3way) popq %r13; popq %r14; popq %r15; - ret; + pax_ret twofish_dec_blk_3way; ENDPROC(twofish_dec_blk_3way) diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S index a350c990d..b59af9f23 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S @@ -22,6 +22,7 @@ #include #include +#include #define a_offset 0 #define b_offset 4 @@ -215,7 +216,7 @@ xor %r8d, d ## D;\ ror $1, d ## D; -ENTRY(twofish_enc_blk) +RAP_ENTRY(twofish_enc_blk) pushq R1 /* %rdi contains the ctx address */ @@ -265,10 +266,10 @@ ENTRY(twofish_enc_blk) popq R1 movl $1,%eax - ret + pax_ret twofish_enc_blk ENDPROC(twofish_enc_blk) -ENTRY(twofish_dec_blk) +RAP_ENTRY(twofish_dec_blk) pushq R1 /* %rdi contains the ctx address */ @@ -317,5 +318,5 @@ ENTRY(twofish_dec_blk) popq R1 movl $1,%eax - ret + pax_ret twofish_dec_blk ENDPROC(twofish_dec_blk) diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index b7a3904b9..0d8bc6053 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -46,24 +46,27 @@ #define TWOFISH_PARALLEL_BLOCKS 8 /* 8-way parallel cipher functions */ -asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void twofish_ecb_enc_8way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void twofish_ecb_dec_8way(void *ctx, u8 *dst, const u8 *src); +void __twofish_enc_blk8(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void __twofish_dec_blk8(void *ctx, u8 *dst, const u8 *src) __rap_hash; -asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void twofish_cbc_dec_8way(void 
*ctx, u8 *dst, const u8 *src); -asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void twofish_ctr_8way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); -asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void twofish_xts_enc_8way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); +asmlinkage void twofish_xts_dec_8way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, +static inline void twofish_enc_blk_3way(void *_ctx, u8 *dst, const u8 *src) { + struct twofish_ctx *ctx = _ctx; __twofish_enc_blk_3way(ctx, dst, src, false); } diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c index 77e06c2da..a45c27b26 100644 --- a/arch/x86/crypto/twofish_glue.c +++ b/arch/x86/crypto/twofish_glue.c @@ -44,10 +44,10 @@ #include #include -asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void twofish_enc_blk(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(twofish_enc_blk); -asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void twofish_dec_blk(void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(twofish_dec_blk); diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 2ebb5e978..a0b0aa93c 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -36,21 +36,21 @@ EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way); EXPORT_SYMBOL_GPL(twofish_dec_blk_3way); -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, +static inline void twofish_enc_blk_3way(void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, false); } -static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst, +static inline void twofish_enc_blk_xor_3way(void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, true); } -void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) +void twofish_dec_blk_cbc_3way(void *ctx, u8 *_dst, const u8 *_src) { - u128 ivs[2]; + u128 ivs[2], *dst = (u128 *)_dst, *src = (u128 *)_src; ivs[0] = src[0]; ivs[1] = src[1]; @@ -118,10 +118,10 @@ static const struct common_glue_ctx twofish_ctr = { .funcs = { { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) } + .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) } + .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) } } } }; diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile index 9976fcecd..4c336fd0b 100644 --- a/arch/x86/entry/Makefile +++ b/arch/x86/entry/Makefile @@ -14,4 +14,3 @@ obj-y += vdso/ obj-y += vsyscall/ obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o - diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 9a9e58840..4f8115ab2 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -95,23 +95,26 @@ For 32-bit we have the following conventions - kernel is built with .endm .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + movq %r12, R12+\offset(%rsp) +#endif .if \r11 - movq %r11, 6*8+\offset(%rsp) + movq %r11, R11+\offset(%rsp) .endif .if \r8910 - movq %r10, 7*8+\offset(%rsp) - movq %r9, 
8*8+\offset(%rsp) - movq %r8, 9*8+\offset(%rsp) + movq %r10, R10+\offset(%rsp) + movq %r9, R9+\offset(%rsp) + movq %r8, R8+\offset(%rsp) .endif .if \rax - movq %rax, 10*8+\offset(%rsp) + movq %rax, RAX+\offset(%rsp) .endif .if \rcx - movq %rcx, 11*8+\offset(%rsp) + movq %rcx, RCX+\offset(%rsp) .endif - movq %rdx, 12*8+\offset(%rsp) - movq %rsi, 13*8+\offset(%rsp) - movq %rdi, 14*8+\offset(%rsp) + movq %rdx, RDX+\offset(%rsp) + movq %rsi, RSI+\offset(%rsp) + movq %rdi, RDI+\offset(%rsp) .endm .macro SAVE_C_REGS offset=0 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 @@ -130,67 +133,78 @@ For 32-bit we have the following conventions - kernel is built with .endm .macro SAVE_EXTRA_REGS offset=0 - movq %r15, 0*8+\offset(%rsp) - movq %r14, 1*8+\offset(%rsp) - movq %r13, 2*8+\offset(%rsp) - movq %r12, 3*8+\offset(%rsp) - movq %rbp, 4*8+\offset(%rsp) - movq %rbx, 5*8+\offset(%rsp) + movq %r15, R15+\offset(%rsp) + movq %r14, R14+\offset(%rsp) + movq %r13, R13+\offset(%rsp) +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + movq %r12, R12+\offset(%rsp) +#endif + movq %rbp, RBP+\offset(%rsp) + movq %rbx, RBX+\offset(%rsp) .endm .macro RESTORE_EXTRA_REGS offset=0 - movq 0*8+\offset(%rsp), %r15 - movq 1*8+\offset(%rsp), %r14 - movq 2*8+\offset(%rsp), %r13 - movq 3*8+\offset(%rsp), %r12 - movq 4*8+\offset(%rsp), %rbp - movq 5*8+\offset(%rsp), %rbx + movq R15+\offset(%rsp), %r15 + movq R14+\offset(%rsp), %r14 + movq R13+\offset(%rsp), %r13 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + movq R12+\offset(%rsp), %r12 +#endif + movq RBP+\offset(%rsp), %rbp + movq RBX+\offset(%rsp), %rbx .endm .macro ZERO_EXTRA_REGS xorl %r15d, %r15d xorl %r14d, %r14d xorl %r13d, %r13d +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR xorl %r12d, %r12d +#endif xorl %ebp, %ebp xorl %ebx, %ebx .endm - .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 + .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1, rstor_r12=1 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + .if \rstor_r12 + movq R12(%rsp), %r12 + .endif +#endif .if \rstor_r11 - movq 6*8(%rsp), %r11 + movq R11(%rsp), %r11 .endif .if \rstor_r8910 - movq 7*8(%rsp), %r10 - movq 8*8(%rsp), %r9 - movq 9*8(%rsp), %r8 + movq R10(%rsp), %r10 + movq R9(%rsp), %r9 + movq R8(%rsp), %r8 .endif .if \rstor_rax - movq 10*8(%rsp), %rax + movq RAX(%rsp), %rax .endif .if \rstor_rcx - movq 11*8(%rsp), %rcx + movq RCX(%rsp), %rcx .endif .if \rstor_rdx - movq 12*8(%rsp), %rdx + movq RDX(%rsp), %rdx .endif - movq 13*8(%rsp), %rsi - movq 14*8(%rsp), %rdi + movq RSI(%rsp), %rsi + movq RDI(%rsp), %rdi .endm .macro RESTORE_C_REGS - RESTORE_C_REGS_HELPER 1,1,1,1,1 + RESTORE_C_REGS_HELPER 1,1,1,1,1,1 .endm .macro RESTORE_C_REGS_EXCEPT_RAX - RESTORE_C_REGS_HELPER 0,1,1,1,1 + RESTORE_C_REGS_HELPER 0,1,1,1,1,0 .endm .macro RESTORE_C_REGS_EXCEPT_RCX - RESTORE_C_REGS_HELPER 1,0,1,1,1 + RESTORE_C_REGS_HELPER 1,0,1,1,1,0 .endm .macro RESTORE_C_REGS_EXCEPT_R11 - RESTORE_C_REGS_HELPER 1,1,0,1,1 + RESTORE_C_REGS_HELPER 1,1,0,1,1,1 .endm .macro RESTORE_C_REGS_EXCEPT_RCX_R11 - RESTORE_C_REGS_HELPER 1,0,0,1,1 + RESTORE_C_REGS_HELPER 1,0,0,1,1,1 .endm .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 @@ -212,7 +226,7 @@ For 32-bit we have the following conventions - kernel is built with #ifdef HAVE_JUMP_LABEL STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0 #endif - call enter_from_user_mode + pax_direct_call enter_from_user_mode .Lafter_call_\@: #endif .endm diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 
bdd9cc59d..486d4bfcb 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -42,6 +42,21 @@ __visible inline void enter_from_user_mode(void) static inline void enter_from_user_mode(void) {} #endif +void pax_enter_kernel(void) __rap_hash; +void pax_enter_kernel_user(void) __rap_hash; +void pax_exit_kernel(void) __rap_hash; +void pax_exit_kernel_user(void) __rap_hash; + +void paranoid_entry(void) __rap_hash; +void paranoid_entry_nmi(void) __rap_hash; +void error_entry(void) __rap_hash; + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +asmlinkage void pax_erase_kstack(void) __rap_hash; +#else +static void pax_erase_kstack(void) {} +#endif + static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) { #ifdef CONFIG_X86_64 @@ -56,6 +71,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) } } +#ifdef CONFIG_GRKERNSEC_SETXID +extern void gr_delayed_cred_worker(void); +#endif + /* * Returns the syscall nr to run (which should match regs->orig_ax) or -1 * to skip the syscall. @@ -74,12 +93,19 @@ static long syscall_trace_enter(struct pt_regs *regs) work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY; +#ifdef CONFIG_GRKERNSEC_SETXID + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) + gr_delayed_cred_worker(); +#endif + if (unlikely(work & _TIF_SYSCALL_EMU)) emulated = true; if ((emulated || (work & _TIF_SYSCALL_TRACE)) && - tracehook_report_syscall_entry(regs)) + tracehook_report_syscall_entry(regs)) { + pax_erase_kstack(); return -1L; + } if (emulated) return -1L; @@ -113,9 +139,11 @@ static long syscall_trace_enter(struct pt_regs *regs) sd.args[5] = regs->bp; } - ret = __secure_computing(&sd); - if (ret == -1) + ret = secure_computing(&sd); + if (ret == -1) { + pax_erase_kstack(); return ret; + } } #endif @@ -124,6 +152,7 @@ static long syscall_trace_enter(struct pt_regs *regs) do_audit_syscall_entry(regs, arch); + pax_erase_kstack(); return ret ?: regs->orig_ax; } @@ -229,7 +258,7 @@ static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags) step = unlikely( (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)) == _TIF_SINGLESTEP); - if (step || cached_flags & _TIF_SYSCALL_TRACE) + if (step || (cached_flags & _TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); } @@ -248,6 +277,11 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs) WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax)) local_irq_enable(); +#ifdef CONFIG_GRKERNSEC_SETXID + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) + gr_delayed_cred_worker(); +#endif + /* * First do one-time work. If these work items are enabled, we * want to run them exactly once per syscall exit with IRQs on. @@ -346,6 +380,7 @@ __visible long do_fast_syscall_32(struct pt_regs *regs) unsigned long landing_pad = (unsigned long)current->mm->context.vdso + vdso_image_32.sym_int80_landing_pad; + u32 __user *saved_bp = (u32 __force_user *)(unsigned long)(u32)regs->sp; /* * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward @@ -365,11 +400,9 @@ __visible long do_fast_syscall_32(struct pt_regs *regs) * Micro-optimization: the pointer we're following is explicitly * 32 bits, so it can't be out of range. 
*/ - __get_user(*(u32 *)&regs->bp, - (u32 __user __force *)(unsigned long)(u32)regs->sp) + __get_user_nocheck(*(u32 *)&regs->bp, saved_bp, sizeof(u32)) #else - get_user(*(u32 *)&regs->bp, - (u32 __user __force *)(unsigned long)(u32)regs->sp) + get_user(regs->bp, saved_bp) #endif ) { diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index edba8606b..d684e0fdb 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -45,6 +45,7 @@ #include #include #include +#include .section .entry.text, "ax" @@ -148,13 +149,157 @@ movl \reg, PT_GS(%esp) .endm .macro SET_KERNEL_GS reg + +#ifdef CONFIG_CC_STACKPROTECTOR movl $(__KERNEL_STACK_CANARY), \reg +#elif defined(CONFIG_PAX_MEMORY_UDEREF) + movl $(__USER_DS), \reg +#else + xorl \reg, \reg +#endif + movl \reg, %gs .endm #endif /* CONFIG_X86_32_LAZY_GS */ -.macro SAVE_ALL pt_regs_ax=%eax +.macro pax_enter_kernel +#ifdef CONFIG_PAX_KERNEXEC + pax_direct_call pax_enter_kernel +#endif +.endm + +.macro pax_exit_kernel +#ifdef CONFIG_PAX_KERNEXEC + pax_direct_call pax_exit_kernel +#endif +.endm + +#ifdef CONFIG_PAX_KERNEXEC +ENTRY(pax_enter_kernel) +#ifdef CONFIG_PARAVIRT + pushl %eax + pushl %ecx + pax_indirect_call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0), read_cr0 + mov %eax, %esi +#else + mov %cr0, %esi +#endif + bts $X86_CR0_WP_BIT, %esi + jnc 1f + mov %cs, %esi + cmp $__KERNEL_CS, %esi + jz 3f + ljmp $__KERNEL_CS, $3f +1: ljmp $__KERNEXEC_KERNEL_CS, $2f +2: +#ifdef CONFIG_PARAVIRT + mov %esi, %eax + pax_indirect_call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0), write_cr0 +#else + mov %esi, %cr0 +#endif +3: +#ifdef CONFIG_PARAVIRT + popl %ecx + popl %eax +#endif + pax_ret pax_enter_kernel +ENDPROC(pax_enter_kernel) + +ENTRY(pax_exit_kernel) +#ifdef CONFIG_PARAVIRT + pushl %eax + pushl %ecx +#endif + mov %cs, %esi + cmp $__KERNEXEC_KERNEL_CS, %esi + jnz 2f +#ifdef CONFIG_PARAVIRT + pax_indirect_call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0), read_cr0 + mov %eax, %esi +#else + mov %cr0, %esi +#endif + btr $X86_CR0_WP_BIT, %esi + ljmp $__KERNEL_CS, $1f +1: +#ifdef CONFIG_PARAVIRT + mov %esi, %eax + pax_indirect_call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0), write_cr0 +#else + mov %esi, %cr0 +#endif +2: +#ifdef CONFIG_PARAVIRT + popl %ecx + popl %eax +#endif + pax_ret pax_exit_kernel +ENDPROC(pax_exit_kernel) +#endif + + .macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + pax_direct_call pax_erase_kstack +#endif + .endm + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +/* + * ebp: thread_info + */ +ENTRY(pax_erase_kstack) + pushl %edi + pushl %ecx + pushl %eax + pushl %ebp + + GET_CURRENT(%ebp) + mov TASK_lowest_stack(%ebp), %edi + mov $-0xBEEF, %eax + std + +1: mov %edi, %ecx + and $THREAD_SIZE_asm - 1, %ecx + shr $2, %ecx + repne scasl + jecxz 2f + + cmp $2*16, %ecx + jc 2f + + mov $2*16, %ecx + repe scasl + jecxz 2f + jne 1b + +2: cld + or $2*4, %edi + mov %esp, %ecx + sub %edi, %ecx + + cmp $THREAD_SIZE_asm, %ecx + jb 3f + ud2 +3: + + shr $2, %ecx + rep stosl + + mov TASK_thread_sp0(%ebp), %edi + sub $128, %edi + mov %edi, TASK_lowest_stack(%ebp) + + popl %ebp + popl %eax + popl %ecx + popl %edi + pax_ret pax_erase_kstack +ENDPROC(pax_erase_kstack) +#endif + +.macro __SAVE_ALL pt_regs_ax, _DS cld PUSH_GS pushl %fs @@ -167,7 +312,7 @@ pushl %edx pushl %ecx pushl %ebx - movl $(__USER_DS), %edx + movl $\_DS, %edx movl %edx, %ds movl %edx, %es movl $(__KERNEL_PERCPU), %edx @@ -175,6 +320,15 @@ SET_KERNEL_GS %edx .endm +.macro SAVE_ALL pt_regs_ax=%eax +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) ||
defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + __SAVE_ALL \pt_regs_ax, __KERNEL_DS + pax_enter_kernel +#else + __SAVE_ALL \pt_regs_ax, __USER_DS +#endif +.endm + .macro RESTORE_INT_REGS popl %ebx popl %ecx @@ -235,7 +389,7 @@ ENTRY(__switch_to_asm) popl %ebp jmp __switch_to -END(__switch_to_asm) +ENDPROC(__switch_to_asm) /* * A newly forked process directly context switches into this address. @@ -246,7 +400,7 @@ END(__switch_to_asm) */ ENTRY(ret_from_fork) pushl %eax - call schedule_tail + pax_direct_call schedule_tail popl %eax testl %ebx, %ebx @@ -255,12 +409,12 @@ ENTRY(ret_from_fork) 2: /* When we fork, we trace the syscall return in the child, too. */ movl %esp, %eax - call syscall_return_slowpath + pax_direct_call syscall_return_slowpath jmp restore_all /* kernel thread */ 1: movl %edi, %eax - call *%ebx + pax_indirect_call "%ebx", kthreadd /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() @@ -268,7 +422,7 @@ ENTRY(ret_from_fork) */ movl $0, PT_EAX(%esp) jmp 2b -END(ret_from_fork) +ENDPROC(ret_from_fork) /* * Return to user mode is not as complex as all this looks, @@ -294,15 +448,23 @@ ret_from_intr: andl $SEGMENT_RPL_MASK, %eax #endif cmpl $USER_RPL, %eax + +#ifdef CONFIG_PAX_KERNEXEC + jae resume_userspace + + pax_exit_kernel + jmp resume_kernel +#else jb resume_kernel # not returning to v8086 or userspace +#endif ENTRY(resume_userspace) DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF movl %esp, %eax - call prepare_exit_to_usermode - jmp restore_all -END(ret_from_exception) + pax_direct_call prepare_exit_to_usermode + jmp .Lsyscall_32_done +ENDPROC(ret_from_exception) #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) @@ -312,9 +474,9 @@ need_resched: jnz restore_all testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all - call preempt_schedule_irq + pax_direct_call preempt_schedule_irq jmp need_resched -END(resume_kernel) +ENDPROC(resume_kernel) #endif GLOBAL(__begin_SYSENTER_singlestep_region) @@ -381,6 +543,10 @@ sysenter_past_esp: pushl %eax /* pt_regs->orig_ax */ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */ +#ifdef CONFIG_PAX_RANDKSTACK + pax_erase_kstack +#endif + /* * SYSENTER doesn't filter flags, so we need to clear NT, AC * and TF ourselves. To save a few cycles, we can check whether @@ -411,16 +577,25 @@ sysenter_past_esp: TRACE_IRQS_OFF movl %esp, %eax - call do_fast_syscall_32 + pax_direct_call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV +#ifdef CONFIG_PAX_RANDKSTACK + movl %esp, %eax + pax_direct_call pax_randomize_kstack +#endif + + pax_erase_kstack + /* Opportunistic SYSEXIT */ TRACE_IRQS_ON /* User mode traces as IRQs on. 
*/ movl PT_EIP(%esp), %edx /* pt_regs->ip */ movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */ 1: mov PT_FS(%esp), %fs +2: mov PT_DS(%esp), %ds +3: mov PT_ES(%esp), %es PTGS_TO_GS popl %ebx /* pt_regs->bx */ addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */ @@ -446,10 +621,16 @@ sysenter_past_esp: sysexit .pushsection .fixup, "ax" -2: movl $0, PT_FS(%esp) +4: movl $0, PT_FS(%esp) + jmp 1b +5: movl $0, PT_DS(%esp) + jmp 1b +6: movl $0, PT_ES(%esp) jmp 1b .popsection - _ASM_EXTABLE(1b, 2b) + _ASM_EXTABLE(1b, 4b) + _ASM_EXTABLE(2b, 5b) + _ASM_EXTABLE(3b, 6b) PTGS_TO_GS_EX .Lsysenter_fix_flags: @@ -492,6 +673,10 @@ ENTRY(entry_INT80_32) pushl %eax /* pt_regs->orig_ax */ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */ +#ifdef CONFIG_PAX_RANDKSTACK + pax_erase_kstack +#endif + /* * User mode is traced as though IRQs are on, and the interrupt gate * turned them off. @@ -499,9 +684,16 @@ ENTRY(entry_INT80_32) TRACE_IRQS_OFF movl %esp, %eax - call do_int80_syscall_32 + pax_direct_call do_int80_syscall_32 .Lsyscall_32_done: +#ifdef CONFIG_PAX_RANDKSTACK + movl %esp, %eax + pax_direct_call pax_randomize_kstack +#endif + + pax_erase_kstack + restore_all: TRACE_IRQS_IRET restore_all_notrace: @@ -545,14 +737,34 @@ ldt_ss: * compensating for the offset by changing to the ESPFIX segment with * a base address that matches for the difference. */ -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx) mov %esp, %edx /* load kernel esp */ mov PT_OLDESP(%esp), %eax /* load userspace esp */ mov %dx, %ax /* eax: new kernel esp */ sub %eax, %edx /* offset (low word is 0) */ +#ifdef CONFIG_SMP + movl PER_CPU_VAR(cpu_number), %ebx + shll $PAGE_SHIFT_asm, %ebx + addl $cpu_gdt_table, %ebx +#else + movl $cpu_gdt_table, %ebx +#endif shr $16, %edx - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ + +#ifdef CONFIG_PAX_KERNEXEC + mov %cr0, %esi + btr $X86_CR0_WP_BIT, %esi + mov %esi, %cr0 +#endif + + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */ + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */ + +#ifdef CONFIG_PAX_KERNEXEC + bts $X86_CR0_WP_BIT, %esi + mov %esi, %cr0 +#endif + pushl $__ESPFIX_SS pushl %eax /* new kernel esp */ /* @@ -576,8 +788,15 @@ ENDPROC(entry_INT80_32) */ #ifdef CONFIG_X86_ESPFIX32 /* fixup the stack */ - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ +#ifdef CONFIG_SMP + movl PER_CPU_VAR(cpu_number), %ebx + shll $PAGE_SHIFT_asm, %ebx + addl $cpu_gdt_table, %ebx +#else + movl $cpu_gdt_table, %ebx +#endif + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */ + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */ shl $16, %eax addl %esp, %eax /* the adjusted stack pointer */ pushl $__KERNEL_DS @@ -613,7 +832,7 @@ ENTRY(irq_entries_start) jmp common_interrupt .align 8 .endr -END(irq_entries_start) +ENDPROC(irq_entries_start) /* * the CPU automatically disables interrupts when executing an IRQ vector, @@ -626,7 +845,7 @@ common_interrupt: SAVE_ALL TRACE_IRQS_OFF movl %esp, %eax - call do_IRQ + pax_direct_call do_IRQ jmp ret_from_intr ENDPROC(common_interrupt) @@ -637,7 +856,7 @@ ENTRY(name) \ SAVE_ALL; \ TRACE_IRQS_OFF \ movl %esp, %eax; \ - call fn; \ + pax_direct_call fn; \ jmp ret_from_intr; \ ENDPROC(name) @@ -660,7 +879,7 @@ ENTRY(coprocessor_error) pushl $0 pushl $do_coprocessor_error jmp error_code -END(coprocessor_error) +ENDPROC(coprocessor_error) ENTRY(simd_coprocessor_error) ASM_CLAC @@ -674,20 +893,20 @@ ENTRY(simd_coprocessor_error) pushl 
$do_simd_coprocessor_error #endif jmp error_code -END(simd_coprocessor_error) +ENDPROC(simd_coprocessor_error) ENTRY(device_not_available) ASM_CLAC pushl $-1 # mark this as an int pushl $do_device_not_available jmp error_code -END(device_not_available) +ENDPROC(device_not_available) #ifdef CONFIG_PARAVIRT ENTRY(native_iret) iret _ASM_EXTABLE(native_iret, iret_exc) -END(native_iret) +ENDPROC(native_iret) #endif ENTRY(overflow) @@ -695,59 +914,59 @@ ENTRY(overflow) pushl $0 pushl $do_overflow jmp error_code -END(overflow) +ENDPROC(overflow) ENTRY(bounds) ASM_CLAC pushl $0 pushl $do_bounds jmp error_code -END(bounds) +ENDPROC(bounds) ENTRY(invalid_op) ASM_CLAC pushl $0 pushl $do_invalid_op jmp error_code -END(invalid_op) +ENDPROC(invalid_op) ENTRY(coprocessor_segment_overrun) ASM_CLAC pushl $0 pushl $do_coprocessor_segment_overrun jmp error_code -END(coprocessor_segment_overrun) +ENDPROC(coprocessor_segment_overrun) ENTRY(invalid_TSS) ASM_CLAC pushl $do_invalid_TSS jmp error_code -END(invalid_TSS) +ENDPROC(invalid_TSS) ENTRY(segment_not_present) ASM_CLAC pushl $do_segment_not_present jmp error_code -END(segment_not_present) +ENDPROC(segment_not_present) ENTRY(stack_segment) ASM_CLAC pushl $do_stack_segment jmp error_code -END(stack_segment) +ENDPROC(stack_segment) ENTRY(alignment_check) ASM_CLAC pushl $do_alignment_check jmp error_code -END(alignment_check) +ENDPROC(alignment_check) ENTRY(divide_error) ASM_CLAC pushl $0 # no error code pushl $do_divide_error jmp error_code -END(divide_error) +ENDPROC(divide_error) #ifdef CONFIG_X86_MCE ENTRY(machine_check) @@ -755,7 +974,7 @@ ENTRY(machine_check) pushl $0 pushl machine_check_vector jmp error_code -END(machine_check) +ENDPROC(machine_check) #endif ENTRY(spurious_interrupt_bug) @@ -763,7 +982,32 @@ ENTRY(spurious_interrupt_bug) pushl $0 pushl $do_spurious_interrupt_bug jmp error_code -END(spurious_interrupt_bug) +ENDPROC(spurious_interrupt_bug) + +#ifdef CONFIG_PAX_REFCOUNT +ENTRY(refcount_error) + ASM_CLAC + pushl $0 + pushl $do_refcount_error + jmp error_code +ENDPROC(refcount_error) +#endif + +#ifdef CONFIG_PAX_RAP +ENTRY(rap_call_error) + ASM_CLAC + pushl $0 + pushl $do_rap_call_error + jmp error_code +ENDPROC(rap_call_error) + +ENTRY(rap_ret_error) + ASM_CLAC + pushl $0 + pushl $do_rap_ret_error + jmp error_code +ENDPROC(rap_ret_error) +#endif #ifdef CONFIG_XEN ENTRY(xen_hypervisor_callback) @@ -788,9 +1032,9 @@ ENTRY(xen_hypervisor_callback) ENTRY(xen_do_upcall) 1: mov %esp, %eax - call xen_evtchn_do_upcall + pax_direct_call xen_evtchn_do_upcall #ifndef CONFIG_PREEMPT - call xen_maybe_preempt_hcall + pax_direct_call xen_maybe_preempt_hcall #endif jmp ret_from_intr ENDPROC(xen_hypervisor_callback) @@ -861,8 +1105,8 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) - ret -END(mcount) + pax_ret mcount +ENDPROC(mcount) ENTRY(ftrace_caller) pushl %eax @@ -876,7 +1120,7 @@ ENTRY(ftrace_caller) .globl ftrace_call ftrace_call: - call ftrace_stub + pax_direct_call ftrace_stub addl $4, %esp /* skip NULL pointer */ popl %edx @@ -891,8 +1135,8 @@ ftrace_graph_call: /* This is weak to keep gas from relaxing the jumps */ WEAK(ftrace_stub) - ret -END(ftrace_caller) + pax_ret ftrace_caller +ENDPROC(ftrace_caller) ENTRY(ftrace_regs_caller) pushf /* push flags before compare (in cs location) */ @@ -931,7 +1175,7 @@ ENTRY(ftrace_regs_caller) pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) - call ftrace_stub + pax_direct_call ftrace_stub addl $4, %esp /* Skip pt_regs */ 
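
The pax_erase_kstack routine added earlier in this entry_32.S hunk (its 64-bit twin appears further down in entry_64.S) is the CONFIG_PAX_MEMORY_STACKLEAK erase pass: starting from the task's recorded lowest_stack it scans towards the stack bottom for a long run of the -0xBEEF poison value, then repaints everything between that boundary and the current stack pointer with the poison before the thread returns to user mode. A rough userspace model of that scan-then-fill walk; the sizes and names below are illustrative only, not kernel API:

#include <stdint.h>
#include <stdio.h>

#define POISON      0xffff4111u        /* (u32)-0xBEEF, the fill pattern        */
#define STACK_WORDS 1024               /* stand-in for THREAD_SIZE / 4          */
#define RUN_WORDS   32                 /* the "2*16 poison words in a row" test */

/*
 * stack[0] is the lowest address; "lowest" and "sp" are word indices.
 * Mirrors the structure of the assembly: find a long run of untouched
 * poison at or below the recorded low-water mark, then refill everything
 * from there up to the live frame.
 */
static void erase_kstack(uint32_t *stack, size_t lowest, size_t sp)
{
	size_t i = lowest;

	while (i > 0) {
		size_t run = 0;

		while (i > 0 && run < RUN_WORDS && stack[i - 1] == POISON) {
			i--;
			run++;
		}
		if (run >= RUN_WORDS || i == 0)
			break;
		i--;		/* hit a non-poison word before a full run: keep scanning down */
	}

	while (i < sp)
		stack[i++] = POISON;	/* the "rep stosl" refill */
}

int main(void)
{
	uint32_t stack[STACK_WORDS];
	size_t sp = STACK_WORDS - 16;	/* pretend only this much is still live */

	for (size_t i = 0; i < STACK_WORDS; i++)
		stack[i] = POISON;
	for (size_t i = 100; i < sp; i++)
		stack[i] = (uint32_t)i;	/* leftovers from a deep call chain */

	erase_kstack(stack, 100, sp);
	printf("%#x\n", (unsigned)stack[200]);	/* poison again */
	return 0;
}

The run-length requirement (cmp $2*16, %ecx / repe scasl in the assembly, RUN_WORDS above) is what keeps a single stray word that happens to equal the poison from stopping the scan early.
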
movl 14*4(%esp), %eax /* Move flags back into cs */ @@ -973,7 +1217,7 @@ ENTRY(mcount) #endif .globl ftrace_stub ftrace_stub: - ret + pax_ret ftrace_stub /* taken from glibc */ trace: @@ -984,13 +1228,13 @@ trace: movl 0x4(%ebp), %edx subl $MCOUNT_INSN_SIZE, %eax - call *ftrace_trace_function + pax_indirect_call "ftrace_trace_function", ftrace_stub popl %edx popl %ecx popl %eax jmp ftrace_stub -END(mcount) +ENDPROC(mcount) #endif /* CONFIG_DYNAMIC_FTRACE */ EXPORT_SYMBOL(mcount) #endif /* CONFIG_FUNCTION_TRACER */ @@ -1004,19 +1248,19 @@ ENTRY(ftrace_graph_caller) lea 0x4(%ebp), %edx movl (%ebp), %ecx subl $MCOUNT_INSN_SIZE, %eax - call prepare_ftrace_return + pax_direct_call prepare_ftrace_return popl %edx popl %ecx popl %eax - ret -END(ftrace_graph_caller) + pax_ret ftrace_graph_caller +ENDPROC(ftrace_graph_caller) .globl return_to_handler return_to_handler: pushl %eax pushl %edx movl %ebp, %eax - call ftrace_return_to_handler + pax_direct_call ftrace_return_to_handler movl %eax, %ecx popl %edx popl %eax @@ -1028,7 +1272,7 @@ ENTRY(trace_page_fault) ASM_CLAC pushl $trace_do_page_fault jmp error_code -END(trace_page_fault) +ENDPROC(trace_page_fault) #endif ENTRY(page_fault) @@ -1057,16 +1301,19 @@ error_code: movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart REG_TO_PTGS %ecx SET_KERNEL_GS %ecx - movl $(__USER_DS), %ecx + movl $(__KERNEL_DS), %ecx movl %ecx, %ds movl %ecx, %es + + pax_enter_kernel + TRACE_IRQS_OFF movl %esp, %eax # pt_regs pointer - call *%edi + pax_indirect_call "%edi", do_page_fault jmp ret_from_exception -END(page_fault) +ENDPROC(page_fault) -ENTRY(debug) +ENTRY(int1) /* * #DB can happen at the first instruction of * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this @@ -1083,13 +1330,19 @@ ENTRY(debug) movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) +#ifdef CONFIG_SMP + imul $TSS_size, PER_CPU_VAR(cpu_number), %ecx + lea cpu_tss(%ecx), %ecx +#else + movl $cpu_tss, %ecx +#endif + movl CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack(%ecx), %ecx subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ cmpl $SIZEOF_SYSENTER_stack, %ecx jb .Ldebug_from_sysenter_stack TRACE_IRQS_OFF - call do_debug + pax_direct_call do_debug jmp ret_from_exception .Ldebug_from_sysenter_stack: @@ -1097,10 +1350,10 @@ ENTRY(debug) movl %esp, %ebp movl PER_CPU_VAR(cpu_current_top_of_stack), %esp TRACE_IRQS_OFF - call do_debug + pax_direct_call do_debug movl %ebp, %esp jmp ret_from_exception -END(debug) +ENDPROC(int1) /* * NMI is doubly nasty. It can happen on the first instruction of @@ -1125,13 +1378,22 @@ ENTRY(nmi) movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) +#ifdef CONFIG_SMP + imul $TSS_size, PER_CPU_VAR(cpu_number), %ecx + lea cpu_tss(%ecx), %ecx +#else + movl $cpu_tss, %ecx +#endif + movl CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack(%ecx), %ecx subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ cmpl $SIZEOF_SYSENTER_stack, %ecx jb .Lnmi_from_sysenter_stack /* Not on SYSENTER stack. 
*/ - call do_nmi + pax_direct_call do_nmi + + pax_exit_kernel + jmp restore_all_notrace .Lnmi_from_sysenter_stack: @@ -1141,8 +1403,11 @@ ENTRY(nmi) */ movl %esp, %ebp movl PER_CPU_VAR(cpu_current_top_of_stack), %esp - call do_nmi + pax_direct_call do_nmi movl %ebp, %esp + + pax_exit_kernel + jmp restore_all_notrace #ifdef CONFIG_X86_ESPFIX32 @@ -1161,12 +1426,15 @@ nmi_espfix_stack: SAVE_ALL FIXUP_ESPFIX_STACK # %eax == %esp xorl %edx, %edx # zero error code - call do_nmi + pax_direct_call do_nmi + + pax_exit_kernel + RESTORE_REGS lss 12+4(%esp), %esp # back to espfix stack jmp irq_return #endif -END(nmi) +ENDPROC(nmi) ENTRY(int3) ASM_CLAC @@ -1175,21 +1443,21 @@ ENTRY(int3) TRACE_IRQS_OFF xorl %edx, %edx # zero error code movl %esp, %eax # pt_regs pointer - call do_int3 + pax_direct_call do_int3 jmp ret_from_exception -END(int3) +ENDPROC(int3) ENTRY(general_protection) pushl $do_general_protection jmp error_code -END(general_protection) +ENDPROC(general_protection) #ifdef CONFIG_KVM_GUEST ENTRY(async_page_fault) ASM_CLAC pushl $do_async_page_fault jmp error_code -END(async_page_fault) +ENDPROC(async_page_fault) #endif ENTRY(rewind_stack_do_exit) @@ -1199,6 +1467,6 @@ ENTRY(rewind_stack_do_exit) movl PER_CPU_VAR(cpu_current_top_of_stack), %esi leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp - call do_exit + pax_direct_call do_group_exit 1: jmp 1b -END(rewind_stack_do_exit) +ENDPROC(rewind_stack_do_exit) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index ef766a358..d3f0e593f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -37,6 +37,9 @@ #include #include #include +#include +#include +#include /* Avoid __ASSEMBLER__'ifying just for this. */ #include @@ -54,6 +57,392 @@ ENTRY(native_usergs_sysret64) ENDPROC(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ + .macro ljmpq sel, off +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM) + .byte 0x48; ljmp *1234f(%rip) + .pushsection .rodata + .align 16 + 1234: .quad \off; .word \sel + .popsection +#else + pushq $\sel + pushq $\off + lretq +#endif + .endm + + .macro pax_enter_kernel + pax_set_fptr_mask +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + pax_direct_call pax_enter_kernel +#endif + .endm + + .macro pax_exit_kernel +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + pax_direct_call pax_exit_kernel +#endif + .endm + +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) +ENTRY(pax_enter_kernel) + pushq %rdi + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_RDI + bts $X86_CR0_WP_BIT,%rdi + jnc 3f + mov %cs,%edi + cmp $__KERNEL_CS,%edi + jnz 2f +1: +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID + GET_CR3_INTO_RDI + cmp $0,%dil + jnz 112f + mov $__KERNEL_DS,%edi + mov %edi,%ss + jmp 111f +112: cmp $1,%dil + jz 113f + ud2 +113: sub $4097,%rdi + bts $63,%rdi + SET_RDI_INTO_CR3 + mov $__UDEREF_KERNEL_DS,%edi + mov %edi,%ss +111: +#endif + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI) +#endif + + popq %rdi + pax_ret pax_enter_kernel + +#ifdef CONFIG_PAX_KERNEXEC +2: ljmpq __KERNEL_CS,1b +3: ljmpq __KERNEXEC_KERNEL_CS,4f +4: SET_RDI_INTO_CR0 + jmp 1b +#endif +ENDPROC(pax_enter_kernel) + +ENTRY(pax_exit_kernel) + pushq %rdi + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + +#ifdef CONFIG_PAX_KERNEXEC + mov %cs,%rdi + cmp $__KERNEXEC_KERNEL_CS,%edi + jz 2f + GET_CR0_INTO_RDI + bts 
$X86_CR0_WP_BIT,%rdi + jnc 4f +1: +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID + mov %ss,%edi + cmp $__UDEREF_KERNEL_DS,%edi + jnz 111f + GET_CR3_INTO_RDI + cmp $0,%dil + jz 112f + ud2 +112: add $4097,%rdi + bts $63,%rdi + SET_RDI_INTO_CR3 + mov $__KERNEL_DS,%edi + mov %edi,%ss +111: +#endif + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI); +#endif + + popq %rdi + pax_ret pax_exit_kernel + +#ifdef CONFIG_PAX_KERNEXEC +2: GET_CR0_INTO_RDI + btr $X86_CR0_WP_BIT,%rdi + jnc 4f + ljmpq __KERNEL_CS,3f +3: SET_RDI_INTO_CR0 + jmp 1b +4: ud2 + jmp 4b +#endif +ENDPROC(pax_exit_kernel) +#endif + + .macro pax_enter_kernel_user + pax_set_fptr_mask +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_direct_call pax_enter_kernel_user +#endif + .endm + + .macro pax_exit_kernel_user +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_direct_call pax_exit_kernel_user +#endif +#ifdef CONFIG_PAX_RANDKSTACK + pushq %rax + pushq %r11 + pax_direct_call pax_randomize_kstack + popq %r11 + popq %rax +#endif + .endm + +#ifdef CONFIG_PAX_MEMORY_UDEREF +ENTRY(pax_enter_kernel_user) +GLOBAL(patch_pax_enter_kernel_user) + pushq %rdi + pushq %rbx + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID + GET_CR3_INTO_RDI + cmp $1,%dil + jnz 4f + sub $4097,%rdi + bts $63,%rdi + SET_RDI_INTO_CR3 + jmp 3f +111: + + GET_CR3_INTO_RDI + mov %rdi,%rbx + add $__START_KERNEL_map,%rbx + sub phys_base(%rip),%rbx + +#ifdef CONFIG_PARAVIRT + pushq %rdi + i = 0 + .rept USER_PGD_PTRS + mov i*8(%rbx),%rsi + mov $0,%sil + lea i*8(%rbx),%rdi + pax_indirect_call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched), pv_mmu_ops.set_pgd_batched + i = i + 1 + .endr + popq %rdi +#else + i = 0 + .rept USER_PGD_PTRS + movb $0,i*8(%rbx) + i = i + 1 + .endr +#endif + + SET_RDI_INTO_CR3 + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_RDI + bts $X86_CR0_WP_BIT,%rdi + SET_RDI_INTO_CR0 +#endif + +3: + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI) +#endif + + popq %rbx + popq %rdi + pax_ret pax_enter_kernel_user +4: ud2 +ENDPROC(pax_enter_kernel_user) + +ENTRY(pax_exit_kernel_user) +GLOBAL(patch_pax_exit_kernel_user) + pushq %rdi + pushq %rbx + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + + GET_CR3_INTO_RDI + ALTERNATIVE "jmp 1f", "", X86_FEATURE_PCID + cmp $0,%dil + jnz 3f + add $4097,%rdi + bts $63,%rdi + SET_RDI_INTO_CR3 + jmp 2f +1: + + mov %rdi,%rbx + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_RDI + btr $X86_CR0_WP_BIT,%rdi + jnc 3f + SET_RDI_INTO_CR0 +#endif + + add $__START_KERNEL_map,%rbx + sub phys_base(%rip),%rbx + +#ifdef CONFIG_PARAVIRT + i = 0 + .rept USER_PGD_PTRS + mov i*8(%rbx),%rsi + mov $0x67,%sil + lea i*8(%rbx),%rdi + pax_indirect_call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched), pv_mmu_ops.set_pgd_batched + i = i + 1 + .endr +#else + i = 0 + .rept USER_PGD_PTRS + movb $0x67,i*8(%rbx) + i = i + 1 + .endr +#endif + +2: + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI) +#endif + + popq %rbx + popq %rdi + pax_ret pax_exit_kernel_user +3: ud2 +ENDPROC(pax_exit_kernel_user) +#endif + + .macro pax_enter_kernel_nmi + pax_set_fptr_mask + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_RDI + bts $X86_CR0_WP_BIT,%rdi + jc 110f + SET_RDI_INTO_CR0 + or $2,%ebx +110: +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID + GET_CR3_INTO_RDI + cmp $0,%dil + jz 111f + sub $4097,%rdi + or $4,%ebx + bts $63,%rdi + SET_RDI_INTO_CR3 + mov $__UDEREF_KERNEL_DS,%edi + mov %edi,%ss +111: +#endif + .endm + + .macro 
pax_exit_kernel_nmi +#ifdef CONFIG_PAX_KERNEXEC + btr $1,%ebx + jnc 110f + GET_CR0_INTO_RDI + btr $X86_CR0_WP_BIT,%rdi + SET_RDI_INTO_CR0 +110: +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID + btr $2,%ebx + jnc 111f + GET_CR3_INTO_RDI + add $4097,%rdi + bts $63,%rdi + SET_RDI_INTO_CR3 + mov $__KERNEL_DS,%edi + mov %edi,%ss +111: +#endif + .endm + + .macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + pax_direct_call pax_erase_kstack +#endif + .endm + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +ENTRY(pax_erase_kstack) + pushq %rdi + pushq %rcx + pushq %rax + pushq %r11 + + GET_CURRENT(%r11) + mov TASK_lowest_stack(%r11), %rdi + mov $-0xBEEF, %rax + std + +1: mov %edi, %ecx + and $THREAD_SIZE_asm - 1, %ecx + shr $3, %ecx + repne scasq + jecxz 2f + + cmp $2*8, %ecx + jc 2f + + mov $2*8, %ecx + repe scasq + jecxz 2f + jne 1b + +2: cld + or $2*8, %rdi + mov %esp, %ecx + sub %edi, %ecx + + cmp $THREAD_SIZE_asm, %rcx + jb 3f + ud2 +3: + + shr $3, %ecx + rep stosq + + mov TASK_thread_sp0(%r11), %rdi + sub $256, %rdi + mov %rdi, TASK_lowest_stack(%r11) + + popq %r11 + popq %rax + popq %rcx + popq %rdi + pax_ret pax_erase_kstack +ENDPROC(pax_erase_kstack) +#endif + .macro TRACE_IRQS_IRETQ #ifdef CONFIG_TRACE_IRQFLAGS bt $9, EFLAGS(%rsp) /* interrupts off? */ @@ -77,19 +466,19 @@ ENDPROC(native_usergs_sysret64) #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS) .macro TRACE_IRQS_OFF_DEBUG - call debug_stack_set_zero + pax_direct_call debug_stack_set_zero TRACE_IRQS_OFF - call debug_stack_reset + pax_direct_call debug_stack_reset .endm .macro TRACE_IRQS_ON_DEBUG - call debug_stack_set_zero + pax_direct_call debug_stack_set_zero TRACE_IRQS_ON - call debug_stack_reset + pax_direct_call debug_stack_reset .endm .macro TRACE_IRQS_IRETQ_DEBUG - bt $9, EFLAGS(%rsp) /* interrupts off? */ + bt $X86_EFLAGS_IF_BIT, EFLAGS(%rsp) /* interrupts off? */ jnc 1f TRACE_IRQS_ON_DEBUG 1: @@ -176,6 +565,16 @@ GLOBAL(entry_SYSCALL_64_after_swapgs) pushq %r11 /* pt_regs->r11 */ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */ +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + movq %r12, R12(%rsp) +#endif + + pax_enter_kernel_user + +#ifdef CONFIG_PAX_RANDKSTACK + pax_erase_kstack +#endif + /* * If we need to do entry work or if we guess we'll need to do * exit work, go straight to the slow path. @@ -206,7 +605,7 @@ entry_SYSCALL_64_fastpath: * It might end up jumping to the slow path. If it jumps, RAX * and all argument registers are clobbered. */ - call *sys_call_table(, %rax, 8) + pax_indirect_call "sys_call_table(, %rax, 8)", sys_ni_syscall .Lentry_SYSCALL_64_after_fastpath_call: movq %rax, RAX(%rsp) @@ -223,6 +622,9 @@ entry_SYSCALL_64_fastpath: testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) jnz 1f + pax_exit_kernel_user + pax_erase_kstack + LOCKDEP_SYS_EXIT TRACE_IRQS_ON /* user mode is traced as IRQs on */ movq RIP(%rsp), %rcx @@ -241,16 +643,19 @@ entry_SYSCALL_64_fastpath: ENABLE_INTERRUPTS(CLBR_NONE) SAVE_EXTRA_REGS movq %rsp, %rdi - call syscall_return_slowpath /* returns with IRQs disabled */ + pax_direct_call syscall_return_slowpath /* returns with IRQs disabled */ jmp return_from_SYSCALL_64 entry_SYSCALL64_slow_path: /* IRQs are off. 
*/ SAVE_EXTRA_REGS movq %rsp, %rdi - call do_syscall_64 /* returns with IRQs disabled */ + pax_direct_call do_syscall_64 /* returns with IRQs disabled */ return_from_SYSCALL_64: + pax_exit_kernel_user + pax_erase_kstack + RESTORE_EXTRA_REGS TRACE_IRQS_IRETQ /* we're about to change IF */ @@ -275,13 +680,12 @@ return_from_SYSCALL_64: .error "virtual address width changed -- SYSRET checks need update" .endif - /* Change top 16 bits to be the sign-extension of 47th bit */ - shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx - sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx - - /* If this changed %rcx, it was not canonical */ - cmpq %rcx, %r11 - jne opportunistic_sysret_failed + /* + * If the top 17 bits are not 0 then RIP isn't a userland address, + * it may not even be canonical, fall back to iret + */ + shr $(__VIRTUAL_MASK_SHIFT), %r11 + jnz opportunistic_sysret_failed cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ jne opportunistic_sysret_failed @@ -329,7 +733,7 @@ syscall_return_via_sysret: opportunistic_sysret_failed: SWAPGS jmp restore_c_regs_and_iret -END(entry_SYSCALL_64) +ENDPROC(entry_SYSCALL_64) ENTRY(stub_ptregs_64) /* @@ -355,13 +759,17 @@ ENTRY(stub_ptregs_64) 1: jmp *%rax /* Called from C */ -END(stub_ptregs_64) +ENDPROC(stub_ptregs_64) .macro ptregs_stub func -ENTRY(ptregs_\func) +RAP_ENTRY(ptregs_\func) +#ifdef CONFIG_PAX_RAP + leaq rap_\func(%rip), %rax +#else leaq \func(%rip), %rax +#endif jmp stub_ptregs_64 -END(ptregs_\func) +ENDPROC(ptregs_\func) .endm /* Instantiate ptregs_stub for each ptregs-using syscall */ @@ -381,7 +789,9 @@ ENTRY(__switch_to_asm) */ pushq %rbp pushq %rbx +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR pushq %r12 +#endif pushq %r13 pushq %r14 pushq %r15 @@ -399,38 +809,49 @@ ENTRY(__switch_to_asm) popq %r15 popq %r14 popq %r13 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR popq %r12 +#endif popq %rbx popq %rbp jmp __switch_to -END(__switch_to_asm) +ENDPROC(__switch_to_asm) /* * A newly forked process directly context switches into this address. * * rax: prev task we switched from * rbx: kernel thread func (NULL for user thread) - * r12: kernel thread arg + * r13: kernel thread arg */ +#ifdef CONFIG_PAX_RAP + __ALIGN + pax_retloc __switch_to + .globl ret_from_fork +ret_from_fork: +#else ENTRY(ret_from_fork) +#endif movq %rax, %rdi - call schedule_tail /* rdi: 'prev' task parameter */ + pax_direct_call schedule_tail /* rdi: 'prev' task parameter */ testq %rbx, %rbx /* from kernel_thread? */ jnz 1f /* kernel threads are uncommon */ 2: movq %rsp, %rdi - call syscall_return_slowpath /* returns with IRQs disabled */ + pax_direct_call syscall_return_slowpath /* returns with IRQs disabled */ + pax_exit_kernel_user + pax_erase_kstack TRACE_IRQS_ON /* user mode is traced as IRQS on */ SWAPGS jmp restore_regs_and_iret 1: /* kernel thread */ - movq %r12, %rdi - call *%rbx + movq %r13, %rdi + pax_indirect_call %rbx, kthreadd /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() @@ -438,7 +859,7 @@ ENTRY(ret_from_fork) */ movq $0, RAX(%rsp) jmp 2b -END(ret_from_fork) +ENDPROC(ret_from_fork) /* * Build the entry stubs with some assembler magic. @@ -453,7 +874,7 @@ ENTRY(irq_entries_start) jmp common_interrupt .align 8 .endr -END(irq_entries_start) +ENDPROC(irq_entries_start) /* * Interrupt entry/exit. @@ -479,6 +900,12 @@ END(irq_entries_start) */ SWAPGS +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_enter_kernel_user +#else + pax_enter_kernel +#endif + /* * We need to tell lockdep that IRQs are off. 
We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode @@ -491,7 +918,9 @@ END(irq_entries_start) CALL_enter_from_user_mode -1: + jmp 2f +1: pax_enter_kernel +2: /* * Save previous stack pointer, optionally switch to interrupt stack. * irq_count is used to check if a CPU is already on an interrupt stack @@ -503,10 +932,11 @@ END(irq_entries_start) incl PER_CPU_VAR(irq_count) cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp pushq %rdi + /* We entered an interrupt context - irqs are off: */ TRACE_IRQS_OFF - call \func /* rdi points to pt_regs */ + pax_direct_call \func /* rdi points to pt_regs */ .endm /* @@ -533,7 +963,9 @@ ret_from_intr: /* Interrupt came from user space */ GLOBAL(retint_user) mov %rsp,%rdi - call prepare_exit_to_usermode + pax_direct_call prepare_exit_to_usermode + pax_exit_kernel_user +# pax_erase_kstack TRACE_IRQS_IRETQ SWAPGS jmp restore_regs_and_iret @@ -547,10 +979,25 @@ retint_kernel: jnc 1f 0: cmpl $0, PER_CPU_VAR(__preempt_count) jnz 1f - call preempt_schedule_irq + pax_direct_call preempt_schedule_irq jmp 0b 1: #endif + + pax_exit_kernel + +#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC_PLUGIN) + /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup, + * namely calling EFI runtime services with a phys mapping. We're + * starting off with NOPs and patch in the real instrumentation + * (BTS/OR) before starting any userland process; even before starting + * up the APs. + */ + ALTERNATIVE "", "pax_force_retaddr 16*8", X86_FEATURE_ALWAYS +#else + pax_force_retaddr RIP +#endif + /* * The iretq could re-enable interrupts: */ @@ -614,15 +1061,15 @@ native_irq_return_ldt: SWAPGS movq PER_CPU_VAR(espfix_waddr), %rdi movq %rax, (0*8)(%rdi) /* user RAX */ - movq (1*8)(%rsp), %rax /* user RIP */ + movq (8 + RIP-RIP)(%rsp), %rax /* user RIP */ movq %rax, (1*8)(%rdi) - movq (2*8)(%rsp), %rax /* user CS */ + movq (8 + CS-RIP)(%rsp), %rax /* user CS */ movq %rax, (2*8)(%rdi) - movq (3*8)(%rsp), %rax /* user RFLAGS */ + movq (8 + EFLAGS-RIP)(%rsp), %rax /* user RFLAGS */ movq %rax, (3*8)(%rdi) - movq (5*8)(%rsp), %rax /* user SS */ + movq (8 + SS-RIP)(%rsp), %rax /* user SS */ movq %rax, (5*8)(%rdi) - movq (4*8)(%rsp), %rax /* user RSP */ + movq (8 + RSP-RIP)(%rsp), %rax /* user RSP */ movq %rax, (4*8)(%rdi) /* Now RAX == RSP. */ @@ -654,7 +1101,7 @@ native_irq_return_ldt: */ jmp native_irq_return_iret #endif -END(common_interrupt) +ENDPROC(common_interrupt) /* * APIC interrupts. @@ -666,7 +1113,7 @@ ENTRY(\sym) .Lcommon_\sym: interrupt \do_sym jmp ret_from_intr -END(\sym) +ENDPROC(\sym) .endm #ifdef CONFIG_TRACING @@ -742,15 +1189,19 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt /* * Exception entry points. 
*/ -#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8) +#define CPU_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13) -.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 +.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 rap_hash=0 ENTRY(\sym) /* Sanity check */ .if \shift_ist != -1 && \paranoid == 0 .error "using shift_ist requires paranoid=1" .endif + .if \paranoid != 0 && \rap_hash==tailcall + .error "tail called idt entry cannot be paranoid" + .endif + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME @@ -765,9 +1216,9 @@ ENTRY(\sym) testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ jnz 1f .endif - call paranoid_entry + pax_direct_call paranoid_entry .else - call error_entry + pax_direct_call error_entry .endif /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ @@ -789,10 +1240,23 @@ ENTRY(\sym) .endif .if \shift_ist != -1 +#ifdef CONFIG_SMP + imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d + leaq cpu_tss(%r13), %r13 +#else + leaq cpu_tss(%rip), %r13 +#endif subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) .endif - call \do_sym + .ifc \rap_hash,tailcall + jmp \do_sym + .exitm + .elseif \rap_hash == 0 + pax_direct_call \do_sym + .else + pax_indirect_call \do_sym, \rap_hash + .endif .if \shift_ist != -1 addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) @@ -812,11 +1276,11 @@ ENTRY(\sym) * run in real process context if user_mode(regs). */ 1: - call error_entry + pax_direct_call error_entry movq %rsp, %rdi /* pt_regs pointer */ - call sync_regs + pax_direct_call sync_regs movq %rax, %rsp /* switch stack */ movq %rsp, %rdi /* pt_regs pointer */ @@ -828,11 +1292,15 @@ ENTRY(\sym) xorl %esi, %esi /* no error code */ .endif - call \do_sym + .if \rap_hash == 0 + pax_direct_call \do_sym + .else + pax_indirect_call \do_sym, \rap_hash + .endif jmp error_exit /* %ebx: no swapgs flag */ .endif -END(\sym) +ENDPROC(\sym) .endm #ifdef CONFIG_TRACING @@ -860,6 +1328,14 @@ idtentry coprocessor_error do_coprocessor_error has_error_code=0 idtentry alignment_check do_alignment_check has_error_code=1 idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 +#ifdef CONFIG_PAX_REFCOUNT +idtentry refcount_error do_refcount_error has_error_code=0 +#endif + +#ifdef CONFIG_PAX_RAP +idtentry rap_call_error do_rap_call_error has_error_code=0 +idtentry rap_ret_error do_rap_ret_error has_error_code=0 +#endif /* * Reload gs selector with exception handling @@ -874,8 +1350,8 @@ ENTRY(native_load_gs_index) 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE SWAPGS popfq - ret -END(native_load_gs_index) + pax_ret native_load_gs_index +ENDPROC(native_load_gs_index) EXPORT_SYMBOL(native_load_gs_index) _ASM_EXTABLE(.Lgs_change, bad_gs) @@ -901,14 +1377,14 @@ ENTRY(do_softirq_own_stack) incl PER_CPU_VAR(irq_count) cmove PER_CPU_VAR(irq_stack_ptr), %rsp push %rbp /* frame pointer backlink */ - call __do_softirq + pax_direct_call __do_softirq leaveq decl PER_CPU_VAR(irq_count) - ret -END(do_softirq_own_stack) + pax_ret do_softirq_own_stack +ENDPROC(do_softirq_own_stack) #ifdef CONFIG_XEN -idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 +idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 rap_hash=tailcall /* * A note on the "critical region" in our callback handler. 
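
A few hunks back, the opportunistic-SYSRET code in entry_64.S trades the shl/sar canonical-address dance for a single shift: if any bit at or above __VIRTUAL_MASK_SHIFT is set in the saved RIP it is not a userland address (and may not even be canonical), so the slow IRET path is taken instead. A small standalone illustration of that test, assuming the 4-level-paging value __VIRTUAL_MASK_SHIFT == 47 and using made-up sample addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VIRTUAL_MASK_SHIFT 47	/* 4-level paging; the top 17 bits must be clear */

static bool sysret_rip_ok(uint64_t rip)
{
	return (rip >> VIRTUAL_MASK_SHIFT) == 0;	/* the shr + jnz in the entry code */
}

int main(void)
{
	printf("%d\n", sysret_rip_ok(0x00007fffffffe000ull));	/* 1: user address    */
	printf("%d\n", sysret_rip_ok(0xffffffff81000000ull));	/* 0: kernel address  */
	printf("%d\n", sysret_rip_ok(0x0000800000000000ull));	/* 0: non-canonical   */
	return 0;
}

This is stricter than the upstream sign-extension check: canonical kernel addresses now fail it too, which is harmless here because SYSRET is only ever used on the return-to-user path.
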
@@ -929,19 +1405,18 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */ * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will * see the correct pointer to the pt_regs */ - movq %rdi, %rsp /* we don't return, adjust the stack frame */ 11: incl PER_CPU_VAR(irq_count) movq %rsp, %rbp cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp pushq %rbp /* frame pointer backlink */ - call xen_evtchn_do_upcall + pax_direct_call xen_evtchn_do_upcall popq %rsp decl PER_CPU_VAR(irq_count) #ifndef CONFIG_PREEMPT - call xen_maybe_preempt_hcall + pax_direct_call xen_maybe_preempt_hcall #endif jmp error_exit -END(xen_do_hypervisor_callback) +ENDPROC(xen_do_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. @@ -986,7 +1461,7 @@ ENTRY(xen_failsafe_callback) SAVE_C_REGS SAVE_EXTRA_REGS jmp error_exit -END(xen_failsafe_callback) +ENDPROC(xen_failsafe_callback) apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ xen_hvm_callback_vector xen_evtchn_do_upcall @@ -998,7 +1473,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ hyperv_callback_vector hyperv_vector_handler #endif /* CONFIG_HYPERV */ -idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK +idtentry int1 do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry stack_segment do_stack_segment has_error_code=1 @@ -1016,7 +1491,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1 #endif #ifdef CONFIG_X86_MCE -idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) +idtentry machine_check has_error_code=0 paranoid=1 do_sym="machine_check_vector(%rip)" rap_hash=do_machine_check #endif /* @@ -1035,8 +1510,32 @@ ENTRY(paranoid_entry) js 1f /* negative -> in kernel */ SWAPGS xorl %ebx, %ebx -1: ret -END(paranoid_entry) +1: +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS+8(%rsp) + jz 1f + pax_enter_kernel_user + jmp 2f +#endif +1: pax_enter_kernel +2: + pax_ret paranoid_entry +ENDPROC(paranoid_entry) + +ENTRY(paranoid_entry_nmi) + cld + SAVE_C_REGS 8 + SAVE_EXTRA_REGS 8 + movl $1, %ebx + movl $MSR_GS_BASE, %ecx + rdmsr + testl %edx, %edx + js 1f /* negative -> in kernel */ + SWAPGS + xorl %ebx, %ebx +1: pax_enter_kernel_nmi + pax_ret paranoid_entry_nmi +ENDPROC(paranoid_entry_nmi) /* * "Paranoid" exit path from exception stack. This is invoked @@ -1053,19 +1552,26 @@ END(paranoid_entry) ENTRY(paranoid_exit) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF_DEBUG - testl %ebx, %ebx /* swapgs needed? */ + testl $1, %ebx /* swapgs needed? */ jnz paranoid_exit_no_swapgs +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_exit_kernel_user +#else + pax_exit_kernel +#endif TRACE_IRQS_IRETQ SWAPGS_UNSAFE_STACK jmp paranoid_exit_restore paranoid_exit_no_swapgs: + pax_exit_kernel TRACE_IRQS_IRETQ_DEBUG paranoid_exit_restore: RESTORE_EXTRA_REGS RESTORE_C_REGS REMOVE_PT_GPREGS_FROM_STACK 8 + pax_force_retaddr_bts INTERRUPT_RETURN -END(paranoid_exit) +ENDPROC(paranoid_exit) /* * Save all registers in pt_regs, and switch gs if needed. @@ -1085,6 +1591,12 @@ ENTRY(error_entry) */ SWAPGS +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_enter_kernel_user +#else + pax_enter_kernel +#endif + .Lerror_entry_from_usermode_after_swapgs: /* * We need to tell lockdep that IRQs are off. 
We can't do this until @@ -1093,11 +1605,11 @@ ENTRY(error_entry) */ TRACE_IRQS_OFF CALL_enter_from_user_mode - ret + pax_ret error_entry .Lerror_entry_done: TRACE_IRQS_OFF - ret + pax_ret error_entry /* * There are two places in the kernel that can potentially fault with @@ -1114,7 +1626,7 @@ ENTRY(error_entry) cmpq %rax, RIP+8(%rsp) je .Lbstep_iret cmpq $.Lgs_change, RIP+8(%rsp) - jne .Lerror_entry_done + jne 1f /* * hack: .Lgs_change can fail with user gsbase. If this happens, fix up @@ -1122,7 +1634,8 @@ ENTRY(error_entry) * .Lgs_change's error handler with kernel gsbase. */ SWAPGS - jmp .Lerror_entry_done +1: pax_enter_kernel + jmp .Lerror_entry_done .Lbstep_iret: /* Fix truncated RIP */ @@ -1136,17 +1649,23 @@ ENTRY(error_entry) */ SWAPGS +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_enter_kernel_user +#else + pax_enter_kernel +#endif + /* * Pretend that the exception came from user mode: set up pt_regs * as if we faulted immediately after IRET and clear EBX so that * error_exit knows that we will be returning to user mode. */ mov %rsp, %rdi - call fixup_bad_iret + pax_direct_call fixup_bad_iret mov %rax, %rsp decl %ebx jmp .Lerror_entry_from_usermode_after_swapgs -END(error_entry) +ENDPROC(error_entry) /* @@ -1158,10 +1677,10 @@ ENTRY(error_exit) movl %ebx, %eax DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF - testl %eax, %eax + testl $1, %eax jnz retint_kernel jmp retint_user -END(error_exit) +ENDPROC(error_exit) /* Runs on exception stack */ ENTRY(nmi) @@ -1215,6 +1734,8 @@ ENTRY(nmi) * other IST entries. */ + ASM_CLAC + /* Use %rdx as our temp variable throughout */ pushq %rdx @@ -1258,6 +1779,12 @@ ENTRY(nmi) pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + xorl %ebx, %ebx +#endif + + pax_enter_kernel_nmi + /* * At this point we no longer need to worry about stack damage * due to nesting -- we're on the normal thread stack and we're @@ -1266,7 +1793,9 @@ ENTRY(nmi) movq %rsp, %rdi movq $-1, %rsi - call do_nmi + pax_direct_call do_nmi + + pax_exit_kernel_nmi /* * Return back to user mode. We must *not* do the normal exit @@ -1274,6 +1803,11 @@ ENTRY(nmi) * do_nmi doesn't modify pt_regs. */ SWAPGS + +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + movq RBX(%rsp), %rbx +#endif + jmp restore_c_regs_and_iret .Lnmi_from_kernel: @@ -1395,6 +1929,7 @@ nested_nmi_out: popq %rdx /* We are returning to kernel mode, so this cannot result in a fault. */ +# pax_force_retaddr_bts INTERRUPT_RETURN first_nmi: @@ -1423,7 +1958,7 @@ first_nmi: pushq %rsp /* RSP (minus 8 because of the previous push) */ addq $8, (%rsp) /* Fix up RSP */ pushfq /* RFLAGS */ - pushq $__KERNEL_CS /* CS */ + pushq 4*8(%rsp) /* CS */ pushq $1f /* RIP */ INTERRUPT_RETURN /* continues at repeat_nmi below */ 1: @@ -1468,20 +2003,22 @@ end_repeat_nmi: ALLOC_PT_GPREGS_ON_STACK /* - * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit + * Use paranoid_entry_nmi to handle SWAPGS, but no need to use paranoid_exit * as we should not be calling schedule in NMI context. * Even with normal interrupts enabled. An NMI should not be * setting NEED_RESCHED or anything that normal interrupts and * exceptions might do. */ - call paranoid_entry + pax_direct_call paranoid_entry_nmi /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ movq %rsp, %rdi movq $-1, %rsi - call do_nmi + pax_direct_call do_nmi + + pax_exit_kernel_nmi - testl %ebx, %ebx /* swapgs needed? */ + testl $1, %ebx /* swapgs needed? 
*/ jnz nmi_restore nmi_swapgs: SWAPGS_UNSAFE_STACK @@ -1492,6 +2029,8 @@ nmi_restore: /* Point RSP at the "iret" frame. */ REMOVE_PT_GPREGS_FROM_STACK 6*8 + pax_force_retaddr_bts + /* * Clear "NMI executing". Set DF first so that we can easily * distinguish the remaining code between here and IRET from @@ -1509,12 +2048,12 @@ nmi_restore: * mode, so this cannot result in a fault. */ INTERRUPT_RETURN -END(nmi) +ENDPROC(nmi) ENTRY(ignore_sysret) mov $-ENOSYS, %eax sysret -END(ignore_sysret) +ENDPROC(ignore_sysret) ENTRY(rewind_stack_do_exit) /* Prevent any naive code from trying to unwind to our caller. */ @@ -1523,6 +2062,6 @@ ENTRY(rewind_stack_do_exit) movq PER_CPU_VAR(cpu_current_top_of_stack), %rax leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp - call do_exit + pax_direct_call do_group_exit 1: jmp 1b -END(rewind_stack_do_exit) +ENDPROC(rewind_stack_do_exit) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index e1721dafb..28b685fcb 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -13,11 +13,39 @@ #include #include #include +#include #include #include +#include .section .entry.text, "ax" + .macro pax_enter_kernel_user + pax_set_fptr_mask +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_direct_call pax_enter_kernel_user +#endif + .endm + + .macro pax_exit_kernel_user +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_direct_call pax_exit_kernel_user +#endif +#ifdef CONFIG_PAX_RANDKSTACK + pushq %rax + pushq %r11 + pax_direct_call pax_randomize_kstack + popq %r11 + popq %rax +#endif + .endm + + .macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + pax_direct_call pax_erase_kstack +#endif + .endm + /* * 32-bit SYSENTER entry. * @@ -74,23 +102,34 @@ ENTRY(entry_SYSENTER_compat) pushq $__USER32_CS /* pt_regs->cs */ pushq $0 /* pt_regs->ip = 0 (placeholder) */ pushq %rax /* pt_regs->orig_ax */ + xorl %eax,%eax pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ - pushq $0 /* pt_regs->r8 = 0 */ - pushq $0 /* pt_regs->r9 = 0 */ - pushq $0 /* pt_regs->r10 = 0 */ - pushq $0 /* pt_regs->r11 = 0 */ + pushq %rax /* pt_regs->r8 = 0 */ + pushq %rax /* pt_regs->r9 = 0 */ + pushq %rax /* pt_regs->r10 = 0 */ + pushq %rax /* pt_regs->r11 = 0 */ pushq %rbx /* pt_regs->rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ - pushq $0 /* pt_regs->r12 = 0 */ - pushq $0 /* pt_regs->r13 = 0 */ - pushq $0 /* pt_regs->r14 = 0 */ - pushq $0 /* pt_regs->r15 = 0 */ +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + pushq %r12 /* pt_regs->r12 */ +#else + pushq %rax /* pt_regs->r12 = 0 */ +#endif + pushq %rax /* pt_regs->r13 = 0 */ + pushq %rax /* pt_regs->r14 = 0 */ + pushq %rax /* pt_regs->r15 = 0 */ cld + pax_enter_kernel_user + +#ifdef CONFIG_PAX_RANDKSTACK + pax_erase_kstack +#endif + /* * SYSENTER doesn't filter flags, so we need to clear NT and AC * ourselves. 
To save a few cycles, we can check whether @@ -121,7 +160,7 @@ ENTRY(entry_SYSENTER_compat) TRACE_IRQS_OFF movq %rsp, %rdi - call do_fast_syscall_32 + pax_direct_call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV @@ -204,16 +243,27 @@ ENTRY(entry_SYSCALL_compat) pushq %rdx /* pt_regs->dx */ pushq %rbp /* pt_regs->cx (stashed in bp) */ pushq $-ENOSYS /* pt_regs->ax */ - pushq $0 /* pt_regs->r8 = 0 */ - pushq $0 /* pt_regs->r9 = 0 */ - pushq $0 /* pt_regs->r10 = 0 */ - pushq $0 /* pt_regs->r11 = 0 */ + xorl %eax,%eax + pushq %rax /* pt_regs->r8 = 0 */ + pushq %rax /* pt_regs->r9 = 0 */ + pushq %rax /* pt_regs->r10 = 0 */ + pushq %rax /* pt_regs->r11 = 0 */ pushq %rbx /* pt_regs->rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ - pushq $0 /* pt_regs->r12 = 0 */ - pushq $0 /* pt_regs->r13 = 0 */ - pushq $0 /* pt_regs->r14 = 0 */ - pushq $0 /* pt_regs->r15 = 0 */ +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + pushq %r12 /* pt_regs->r12 */ +#else + pushq %rax /* pt_regs->r12 = 0 */ +#endif + pushq %rax /* pt_regs->r13 = 0 */ + pushq %rax /* pt_regs->r14 = 0 */ + pushq %rax /* pt_regs->r15 = 0 */ + + pax_enter_kernel_user + +#ifdef CONFIG_PAX_RANDKSTACK + pax_erase_kstack +#endif /* * User mode is traced as though IRQs are on, and SYSENTER @@ -222,18 +272,25 @@ ENTRY(entry_SYSCALL_compat) TRACE_IRQS_OFF movq %rsp, %rdi - call do_fast_syscall_32 + pax_direct_call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV /* Opportunistic SYSRET */ sysret32_from_system_call: + pax_exit_kernel_user + pax_erase_kstack TRACE_IRQS_ON /* User mode traces as IRQs on. */ movq RBX(%rsp), %rbx /* pt_regs->rbx */ movq RBP(%rsp), %rbp /* pt_regs->rbp */ movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ movq RIP(%rsp), %rcx /* pt_regs->ip (in rcx) */ + +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + movq R12(%rsp), %r12 +#endif + addq $RAX, %rsp /* Skip r8-r15 */ popq %rax /* pt_regs->rax */ popq %rdx /* Skip pt_regs->cx */ @@ -262,7 +319,7 @@ sysret32_from_system_call: movq RSP-ORIG_RAX(%rsp), %rsp swapgs sysretl -END(entry_SYSCALL_compat) +ENDPROC(entry_SYSCALL_compat) /* * 32-bit legacy system call entry. @@ -314,10 +371,11 @@ ENTRY(entry_INT80_compat) pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ - pushq $0 /* pt_regs->r8 = 0 */ - pushq $0 /* pt_regs->r9 = 0 */ - pushq $0 /* pt_regs->r10 = 0 */ - pushq $0 /* pt_regs->r11 = 0 */ + xorl %eax,%eax + pushq %rax /* pt_regs->r8 = 0 */ + pushq %rax /* pt_regs->r9 = 0 */ + pushq %rax /* pt_regs->r10 = 0 */ + pushq %rax /* pt_regs->r11 = 0 */ pushq %rbx /* pt_regs->rbx */ pushq %rbp /* pt_regs->rbp */ pushq %r12 /* pt_regs->r12 */ @@ -326,6 +384,12 @@ ENTRY(entry_INT80_compat) pushq %r15 /* pt_regs->r15 */ cld + pax_enter_kernel_user + +#ifdef CONFIG_PAX_RANDKSTACK + pax_erase_kstack +#endif + /* * User mode is traced as though IRQs are on, and the interrupt * gate turned them off. @@ -333,17 +397,23 @@ ENTRY(entry_INT80_compat) TRACE_IRQS_OFF movq %rsp, %rdi - call do_int80_syscall_32 + pax_direct_call do_int80_syscall_32 .Lsyscall_32_done: /* Go back to user mode. 
*/ + pax_exit_kernel_user + pax_erase_kstack TRACE_IRQS_ON SWAPGS jmp restore_regs_and_iret -END(entry_INT80_compat) +ENDPROC(entry_INT80_compat) ALIGN +#ifdef CONFIG_PAX_RAP +RAP_ENTRY(rap_stub32_clone) +#else GLOBAL(stub32_clone) +#endif /* * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr). * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val). @@ -352,4 +422,8 @@ GLOBAL(stub32_clone) * so we need to swap arguments here before calling it: */ xchg %r8, %rcx +#ifdef CONFIG_PAX_RAP + jmp rap_sys_clone +#else jmp sys_clone +#endif diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c index 8f895ee13..5cc22ed08 100644 --- a/arch/x86/entry/syscall_32.c +++ b/arch/x86/entry/syscall_32.c @@ -6,11 +6,19 @@ #include #include +#ifdef CONFIG_PAX_RAP +#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long rap_##sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ; +#else #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ; +#endif #include #undef __SYSCALL_I386 +#ifdef CONFIG_PAX_RAP +#define __SYSCALL_I386(nr, sym, qual) [nr] = rap_##sym, +#else #define __SYSCALL_I386(nr, sym, qual) [nr] = sym, +#endif extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index 9dbc5abb6..b2d64fb79 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -6,7 +6,11 @@ #include #include +#ifdef CONFIG_PAX_RAP +#define __SYSCALL_64_QUAL_(sym) rap_##sym +#else #define __SYSCALL_64_QUAL_(sym) sym +#endif #define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym #define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S index fee6bc79b..02a69fb98 100644 --- a/arch/x86/entry/thunk_32.S +++ b/arch/x86/entry/thunk_32.S @@ -5,7 +5,7 @@ * Subject to the GNU public license, v.2. No warranty of any kind. */ #include - #include + #include #include /* put return address in eax (arg1) */ @@ -21,11 +21,11 @@ movl 3*4(%esp), %eax .endif - call \func + pax_direct_call \func popl %edx popl %ecx popl %eax - ret + pax_ret \name _ASM_NOKPROBE(\name) .endm diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S index be36bf4e0..a22f6731a 100644 --- a/arch/x86/entry/thunk_64.S +++ b/arch/x86/entry/thunk_64.S @@ -9,6 +9,7 @@ #include "calling.h" #include #include +#include /* rdi: arg1 ... normal C conventions. rax is saved/restored. 
*/ .macro THUNK name, func, put_ret_addr_in_rdi=0 @@ -33,8 +34,19 @@ movq 8(%rbp), %rdi .endif - call \func - jmp .L_restore + pax_direct_call \func + + popq %r11 + popq %r10 + popq %r9 + popq %r8 + popq %rax + popq %rcx + popq %rdx + popq %rsi + popq %rdi + popq %rbp + pax_ret \name _ASM_NOKPROBE(\name) .endm @@ -53,21 +65,3 @@ EXPORT_SYMBOL(___preempt_schedule) EXPORT_SYMBOL(___preempt_schedule_notrace) #endif - -#if defined(CONFIG_TRACE_IRQFLAGS) \ - || defined(CONFIG_DEBUG_LOCK_ALLOC) \ - || defined(CONFIG_PREEMPT) -.L_restore: - popq %r11 - popq %r10 - popq %r9 - popq %r8 - popq %rax - popq %rcx - popq %rdx - popq %rsi - popq %rdi - popq %rbp - ret - _ASM_NOKPROBE(.L_restore) -#endif diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index d5409660f..3ea2a6a5f 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -17,6 +17,9 @@ VDSO32-$(CONFIG_IA32_EMULATION) := y # files to link into the vdso vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o +GCC_PLUGINS_vdso-note.o := n +GCC_PLUGINS_vclock_gettime.o := n +GCC_PLUGINS_vgetcpu.o := n # files to link into kernel obj-y += vma.o @@ -75,7 +78,7 @@ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ -fno-omit-frame-pointer -foptimize-sibling-calls \ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO -$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) +$(vobjs): KBUILD_CFLAGS := $(KBUILD_CFLAGS) $(CFL) # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. @@ -145,7 +148,6 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) @@ -170,7 +172,7 @@ quiet_cmd_vdso = VDSO $@ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \ +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=both) \ $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) GCOV_PROFILE := n diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index 02223cb4b..84f10fc75 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c @@ -300,5 +300,5 @@ notrace time_t __vdso_time(time_t *t) *t = result; return result; } -int time(time_t *t) +time_t time(time_t *t) __attribute__((weak, alias("__vdso_time"))); diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h index 3dab75f2a..2c439d084 100644 --- a/arch/x86/entry/vdso/vdso2c.h +++ b/arch/x86/entry/vdso/vdso2c.h @@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len, unsigned long load_size = -1; /* Work around bogus warning */ unsigned long mapping_size; ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr; - int i; + unsigned int i; unsigned long j; ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr, *alt_sec = NULL; @@ -89,7 +89,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len, for (i = 0; i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize); i++) { - int 
k; + unsigned int k; ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) + GET_LE(&symtab_hdr->sh_entsize) * i; const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) + diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 23c881caa..e4808fca0 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -21,10 +21,7 @@ #include #include #include - -#if defined(CONFIG_X86_64) -unsigned int __read_mostly vdso64_enabled = 1; -#endif +#include void __init init_vdso_image(const struct vdso_image *image) { @@ -42,7 +39,7 @@ static int vdso_fault(const struct vm_special_mapping *sm, { const struct vdso_image *image = vma->vm_mm->context.vdso_image; - if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size) + if (!image || vmf->pgoff >= (image->size >> PAGE_SHIFT)) return VM_FAULT_SIGBUS; vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT)); @@ -80,7 +77,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, return -EFAULT; vdso_fix_landing(image, new_vma); - current->mm->context.vdso = (void __user *)new_vma->vm_start; + current->mm->context.vdso = new_vma->vm_start; return 0; } @@ -154,15 +151,15 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr) return -EINTR; addr = get_unmapped_area(NULL, addr, - image->size - image->sym_vvar_start, 0, 0); + image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } text_start = addr - image->sym_vvar_start; - current->mm->context.vdso = (void __user *)text_start; - current->mm->context.vdso_image = image; + mm->context.vdso = text_start; + mm->context.vdso_image = image; /* * MAYWRITE to allow gdb to COW and set breakpoints @@ -193,8 +190,8 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr) up_fail: if (ret) { - current->mm->context.vdso = NULL; - current->mm->context.vdso_image = NULL; + mm->context.vdso = 0; + mm->context.vdso_image = NULL; } up_write(&mm->mmap_sem); @@ -248,7 +245,14 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) static int map_vdso_randomized(const struct vdso_image *image) { - unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start); + unsigned long addr; + +#ifdef CONFIG_PAX_RANDMMAP + if (current->mm->pax_flags & MF_PAX_RANDMMAP) + addr = 0; + else +#endif + addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start); return map_vdso(image, addr); } @@ -292,8 +296,6 @@ static int load_vdso32(void) #ifdef CONFIG_X86_64 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { - if (!vdso64_enabled) - return 0; return map_vdso_randomized(&vdso_image_64); } @@ -303,11 +305,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { #ifdef CONFIG_X86_X32_ABI - if (test_thread_flag(TIF_X32)) { - if (!vdso64_enabled) - return 0; + if (test_thread_flag(TIF_X32)) return map_vdso_randomized(&vdso_image_x32); - } #endif #ifdef CONFIG_IA32_EMULATION return load_vdso32(); @@ -324,15 +323,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) #endif #ifdef CONFIG_X86_64 -static __init int vdso_setup(char *s) -{ - vdso64_enabled = simple_strtoul(s, NULL, 0); - return 0; -} -__setup("vdso=", vdso_setup); -#endif - -#ifdef CONFIG_X86_64 static void vgetcpu_cpu_init(void *arg) { int cpu = smp_processor_id(); diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 636c4b341..666991b77 100644 --- 
a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -38,10 +38,8 @@ #define CREATE_TRACE_POINTS #include "vsyscall_trace.h" -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = -#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE) - NATIVE; -#elif defined(CONFIG_LEGACY_VSYSCALL_NONE) +static enum { EMULATE, NONE } vsyscall_mode = +#if defined(CONFIG_LEGACY_VSYSCALL_NONE) NONE; #else EMULATE; @@ -52,8 +50,6 @@ static int __init vsyscall_setup(char *str) if (str) { if (!strcmp("emulate", str)) vsyscall_mode = EMULATE; - else if (!strcmp("native", str)) - vsyscall_mode = NATIVE; else if (!strcmp("none", str)) vsyscall_mode = NONE; else @@ -271,8 +267,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) return true; sigsegv: - force_sig(SIGSEGV, current); - return true; + do_group_exit(SIGKILL); } /* @@ -290,8 +285,8 @@ static const struct vm_operations_struct gate_vma_ops = { static struct vm_area_struct gate_vma = { .vm_start = VSYSCALL_ADDR, .vm_end = VSYSCALL_ADDR + PAGE_SIZE, - .vm_page_prot = PAGE_READONLY_EXEC, - .vm_flags = VM_READ | VM_EXEC, + .vm_page_prot = PAGE_READONLY, + .vm_flags = VM_READ, .vm_ops = &gate_vma_ops, }; @@ -332,10 +327,7 @@ void __init map_vsyscall(void) unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); if (vsyscall_mode != NONE) - __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, - vsyscall_mode == NATIVE - ? PAGE_KERNEL_VSYSCALL - : PAGE_KERNEL_VVAR); + __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR); BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != (unsigned long)VSYSCALL_ADDR); diff --git a/arch/x86/entry/vsyscall/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S index c9596a9af..e1f6d5dbd 100644 --- a/arch/x86/entry/vsyscall/vsyscall_emu_64.S +++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S @@ -7,12 +7,13 @@ */ #include +#include #include #include #include -__PAGE_ALIGNED_DATA + __READ_ONLY .globl __vsyscall_page .balign PAGE_SIZE, 0xcc .type __vsyscall_page, @object diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index b28200dea..e93e14d68 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -80,12 +80,12 @@ static struct attribute_group amd_iommu_format_group = { * sysfs events attributes *---------------------------------------------*/ struct amd_iommu_event_desc { - struct kobj_attribute attr; + struct device_attribute attr; const char *event; }; -static ssize_t _iommu_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t _iommu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct amd_iommu_event_desc *event = container_of(attr, struct amd_iommu_event_desc, attr); @@ -407,7 +407,7 @@ static void perf_iommu_del(struct perf_event *event, int flags) static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu) { struct attribute **attrs; - struct attribute_group *attr_group; + attribute_group_no_const *attr_group; int i = 0, j; while (amd_iommu_v2_event_descs[i].attr.attr.name) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 7fe88bb57..afd16308c 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1570,7 +1570,7 @@ static void __init pmu_check_apic(void) } -static struct attribute_group x86_pmu_format_group = { +static attribute_group_no_const x86_pmu_format_group = { .name = "format", .attrs = NULL, }; @@ -1701,7 +1701,7 @@ static struct attribute *events_attr[] = { NULL, }; -static struct 
attribute_group x86_pmu_events_group = { +static attribute_group_no_const x86_pmu_events_group = { .name = "events", .attrs = events_attr, }; @@ -2325,7 +2325,7 @@ static unsigned long get_segment_base(unsigned int segment) if (idx > GDT_ENTRIES) return 0; - desc = raw_cpu_ptr(gdt_page.gdt) + idx; + desc = get_cpu_gdt_table(smp_processor_id()) + idx; } return get_desc_base(desc); @@ -2425,7 +2425,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs break; perf_callchain_store(entry, frame.return_address); - fp = (void __user *)frame.next_frame; + fp = (void __force_user *)frame.next_frame; } pagefault_enable(); } diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index cb8522290..bebbc923f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2411,6 +2411,8 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, } static void +intel_start_scheduling(struct cpu_hw_events *cpuc) __acquires(&cpuc->excl_cntrs->lock); +static void intel_start_scheduling(struct cpu_hw_events *cpuc) { struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; @@ -2420,14 +2422,18 @@ intel_start_scheduling(struct cpu_hw_events *cpuc) /* * nothing needed if in group validation mode */ - if (cpuc->is_fake || !is_ht_workaround_enabled()) + if (cpuc->is_fake || !is_ht_workaround_enabled()) { + __acquire(&excl_cntrs->lock); return; + } /* * no exclusion needed */ - if (WARN_ON_ONCE(!excl_cntrs)) + if (WARN_ON_ONCE(!excl_cntrs)) { + __acquire(&excl_cntrs->lock); return; + } xl = &excl_cntrs->states[tid]; @@ -2467,6 +2473,8 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt } static void +intel_stop_scheduling(struct cpu_hw_events *cpuc) __releases(&cpuc->excl_cntrs->lock); +static void intel_stop_scheduling(struct cpu_hw_events *cpuc) { struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; @@ -2476,13 +2484,18 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc) /* * nothing needed if in group validation mode */ - if (cpuc->is_fake || !is_ht_workaround_enabled()) + if (cpuc->is_fake || !is_ht_workaround_enabled()) { + __release(&excl_cntrs->lock); return; + } + /* * no exclusion needed */ - if (WARN_ON_ONCE(!excl_cntrs)) + if (WARN_ON_ONCE(!excl_cntrs)) { + __release(&excl_cntrs->lock); return; + } xl = &excl_cntrs->states[tid]; @@ -2665,19 +2678,22 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, * unused now. */ if (hwc->idx >= 0) { + bool sched_started; + xl = &excl_cntrs->states[tid]; + sched_started = xl->sched_started; /* * put_constraint may be called from x86_schedule_events() * which already has the lock held so here make locking * conditional. 
*/ - if (!xl->sched_started) + if (!sched_started) raw_spin_lock(&excl_cntrs->lock); xl->state[hwc->idx] = INTEL_EXCL_UNUSED; - if (!xl->sched_started) + if (!sched_started) raw_spin_unlock(&excl_cntrs->lock); } } @@ -3617,10 +3633,10 @@ __init int intel_pmu_init(void) } if (boot_cpu_has(X86_FEATURE_PDCM)) { - u64 capabilities; + u64 capabilities = x86_pmu.intel_cap.capabilities; - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); - x86_pmu.intel_cap.capabilities = capabilities; + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities)) + x86_pmu.intel_cap.capabilities = capabilities; } intel_ds_init(); diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c index 8f82b0293..b10c4b0f9 100644 --- a/arch/x86/events/intel/cqm.c +++ b/arch/x86/events/intel/cqm.c @@ -1488,7 +1488,7 @@ static struct attribute *intel_cmt_mbm_events_attr[] = { NULL, }; -static struct attribute_group intel_cqm_events_group = { +static attribute_group_no_const intel_cqm_events_group __read_only = { .name = "events", .attrs = NULL, }; @@ -1732,7 +1732,9 @@ static int __init intel_cqm_init(void) goto out; } - event_attr_intel_cqm_llc_scale.event_str = str; + pax_open_kernel(); + const_cast(event_attr_intel_cqm_llc_scale.event_str) = str; + pax_close_kernel(); ret = intel_cqm_setup_rmid_cache(); if (ret) @@ -1743,12 +1745,14 @@ static int __init intel_cqm_init(void) if (ret && !cqm_enabled) goto out; + pax_open_kernel(); if (cqm_enabled && mbm_enabled) - intel_cqm_events_group.attrs = intel_cmt_mbm_events_attr; + const_cast(intel_cqm_events_group.attrs) = intel_cmt_mbm_events_attr; else if (!cqm_enabled && mbm_enabled) - intel_cqm_events_group.attrs = intel_mbm_events_attr; + const_cast(intel_cqm_events_group.attrs) = intel_mbm_events_attr; else if (cqm_enabled && !mbm_enabled) - intel_cqm_events_group.attrs = intel_cqm_events_attr; + const_cast(intel_cqm_events_group.attrs) = intel_cqm_events_attr; + pax_close_kernel(); ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1); if (ret) { diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index fec8a461b..0cc43ca4c 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -97,14 +97,14 @@ MODULE_LICENSE("GPL"); #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __cstate_##_var##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ +static ssize_t __cstate_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ -static struct kobj_attribute format_attr_##_var = \ +static struct device_attribute format_attr_##_var = \ __ATTR(_name, 0444, __cstate_##_var##_show, NULL) static ssize_t cstate_get_attr_cpumask(struct device *dev, diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index be202390b..99d75dd4b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -601,7 +601,7 @@ int intel_pmu_drain_bts_buffer(void) static inline void intel_pmu_drain_pebs_buffer(void) { - struct pt_regs regs; + struct pt_regs regs = {}; x86_pmu.drain_pebs(®s); } @@ -947,7 +947,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); unsigned long from = cpuc->lbr_entries[0].from; unsigned long old_to, to = cpuc->lbr_entries[0].to; - unsigned long ip = regs->ip; + unsigned long ip = ktva_ktla(regs->ip); int is_64bit = 0; void *kaddr; int size; @@ -999,6 
+999,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) } else { kaddr = (void *)to; } + kaddr = (void *)ktva_ktla((unsigned long)kaddr); do { struct insn insn; @@ -1158,7 +1159,7 @@ static void setup_pebs_sample_data(struct perf_event *event, } if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { - regs->ip = pebs->real_ip; + set_linear_ip(regs, pebs->real_ip); regs->flags |= PERF_EFLAGS_EXACT; } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs)) regs->flags |= PERF_EFLAGS_EXACT; diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 81b321ace..ef545937b 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -805,7 +805,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) * Ensure we don't blindy read any address by validating it is * a known text address. */ - if (kernel_text_address(from)) { + if (kernel_text_address(ktva_ktla(from))) { addr = (void *)from; /* * Assume we can get the maximum possible size @@ -827,7 +827,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) #ifdef CONFIG_X86_64 is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32); #endif - insn_init(&insn, addr, bytes_read, is64); + insn_init(&insn, (void *)ktva_ktla((unsigned long)addr), bytes_read, is64); insn_get_opcode(&insn); if (!insn.opcode.got) return X86_BR_ABORT; diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index c5047b8f7..7297def33 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -174,11 +174,9 @@ static const struct attribute_group *pt_attr_groups[] = { static int __init pt_pmu_hw_init(void) { - struct dev_ext_attribute *de_attrs; - struct attribute **attrs; - size_t size; + static struct dev_ext_attribute de_attrs[ARRAY_SIZE(pt_caps)]; + static struct attribute *attrs[ARRAY_SIZE(pt_caps)]; u64 reg; - int ret; long i; rdmsrl(MSR_PLATFORM_INFO, reg); @@ -209,8 +207,6 @@ static int __init pt_pmu_hw_init(void) pt_pmu.vmx = true; } - attrs = NULL; - for (i = 0; i < PT_CPUID_LEAVES; i++) { cpuid_count(20, i, &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM], @@ -219,39 +215,25 @@ static int __init pt_pmu_hw_init(void) &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]); } - ret = -ENOMEM; - size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1); - attrs = kzalloc(size, GFP_KERNEL); - if (!attrs) - goto fail; - - size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1); - de_attrs = kzalloc(size, GFP_KERNEL); - if (!de_attrs) - goto fail; - + pax_open_kernel(); for (i = 0; i < ARRAY_SIZE(pt_caps); i++) { - struct dev_ext_attribute *de_attr = de_attrs + i; + struct dev_ext_attribute *de_attr = &de_attrs[i]; - de_attr->attr.attr.name = pt_caps[i].name; + const_cast(de_attr->attr.attr.name) = pt_caps[i].name; sysfs_attr_init(&de_attr->attr.attr); - de_attr->attr.attr.mode = S_IRUGO; - de_attr->attr.show = pt_cap_show; - de_attr->var = (void *)i; + const_cast(de_attr->attr.attr.mode) = S_IRUGO; + const_cast(de_attr->attr.show) = pt_cap_show; + const_cast(de_attr->var) = (void *)i; attrs[i] = &de_attr->attr.attr; } - pt_cap_group.attrs = attrs; + const_cast(pt_cap_group.attrs) = attrs; + pax_close_kernel(); return 0; - -fail: - kfree(attrs); - - return ret; } #define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC | \ diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 0a535cea8..b8d9b16c8 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -117,14 +117,14 @@ static const 
char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = { #define RAPL_EVENT_MASK 0xFFULL #define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __rapl_##_var##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ +static ssize_t __rapl_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ -static struct kobj_attribute format_attr_##_var = \ +static struct device_attribute format_attr_##_var = \ __ATTR(_name, 0444, __rapl_##_var##_show, NULL) #define RAPL_CNTR_WIDTH 32 @@ -535,7 +535,7 @@ static struct attribute *rapl_events_knl_attr[] = { NULL, }; -static struct attribute_group rapl_pmu_events_group = { +static attribute_group_no_const rapl_pmu_events_group __read_only = { .name = "events", .attrs = NULL, /* patched at runtime */ }; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 19d646a78..e20a9b216 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -90,8 +90,8 @@ struct pci2phy_map *__find_pci2phy_map(int segment) return map; } -ssize_t uncore_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +ssize_t uncore_event_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct uncore_event_desc *event = container_of(attr, struct uncore_event_desc, attr); @@ -798,7 +798,7 @@ static void uncore_types_exit(struct intel_uncore_type **types) static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) { struct intel_uncore_pmu *pmus; - struct attribute_group *attr_group; + attribute_group_no_const *attr_group; struct attribute **attrs; size_t size; int i, j; diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index ad986c1e2..9bb701622 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -124,9 +124,9 @@ struct intel_uncore_box { #define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */ struct uncore_event_desc { - struct kobj_attribute attr; + struct device_attribute attr; const char *config; -}; +} __do_const; struct pci2phy_map { struct list_head list; @@ -136,8 +136,8 @@ struct pci2phy_map { struct pci2phy_map *__find_pci2phy_map(int segment); -ssize_t uncore_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf); +ssize_t uncore_event_show(struct device *dev, + struct device_attribute *attr, char *buf); #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ { \ @@ -146,14 +146,14 @@ ssize_t uncore_event_show(struct kobject *kobj, } #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ -static struct kobj_attribute format_attr_##_var = \ +static struct device_attribute format_attr_##_var = \ __ATTR(_name, 0444, __uncore_##_var##_show, NULL) static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index bcbb1d2ae..d2511bf2f 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -804,7 +804,7 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) regs->cs = kernel_ip(ip) ? 
__KERNEL_CS : __USER_CS; if (regs->flags & X86_VM_MASK) regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK); - regs->ip = ip; + regs->ip = kernel_ip(ip) ? ktva_ktla(ip) : ip; } ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event); diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index cb26f18d4..4f43f2399 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm) unsigned long dump_start, dump_size; struct user32 dump; + memset(&dump, 0, sizeof(dump)); + fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index cb13c0564..d63fa1e60 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -112,7 +112,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, return err; } -asmlinkage long sys32_sigreturn(void) +SYS32_SYSCALL_DEFINE0(sigreturn) { struct pt_regs *regs = current_pt_regs(); struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); @@ -123,7 +123,7 @@ asmlinkage long sys32_sigreturn(void) if (__get_user(set.sig[0], &frame->sc.oldmask) || (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((((char *) &set.sig) + 4), - &frame->extramask, + frame->extramask, sizeof(frame->extramask)))) goto badframe; @@ -138,7 +138,7 @@ asmlinkage long sys32_sigreturn(void) return 0; } -asmlinkage long sys32_rt_sigreturn(void) +SYS32_SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe_ia32 __user *frame; @@ -243,7 +243,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, sp -= frame_size; /* Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ - sp = ((sp + 4) & -16ul) - 4; + sp = ((sp - 12) & -16ul) - 4; return (void __user *) sp; } @@ -288,10 +288,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, } else { /* Return stub is in 32bit vsyscall page */ if (current->mm->context.vdso) - restorer = current->mm->context.vdso + - vdso_image_32.sym___kernel_sigreturn; + restorer = (void __force_user *)(current->mm->context.vdso + + vdso_image_32.sym___kernel_sigreturn); else - restorer = &frame->retcode; + restorer = frame->retcode; } put_user_try { @@ -301,7 +301,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, * These are actually not used anymore, but left because some * gdb versions depend on them as a marker. */ - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); } put_user_catch(err); if (err) @@ -343,7 +343,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, 0xb8, __NR_ia32_rt_sigreturn, 0x80cd, - 0, + 0 }; frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); @@ -366,16 +366,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, if (ksig->ka.sa.sa_flags & SA_RESTORER) restorer = ksig->ka.sa.sa_restorer; + else if (current->mm->context.vdso) + /* Return stub is in 32bit vsyscall page */ + restorer = (void __force_user *)(current->mm->context.vdso + + vdso_image_32.sym___kernel_rt_sigreturn); else - restorer = current->mm->context.vdso + - vdso_image_32.sym___kernel_rt_sigreturn; + restorer = frame->retcode; put_user_ex(ptr_to_compat(restorer), &frame->pretcode); /* * Not actually used anymore, but left because some gdb * versions need it. 
*/ - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); } put_user_catch(err); err |= __copy_siginfo_to_user32(&frame->info, &ksig->info, false); diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 719cd702b..113980ad7 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -49,18 +49,27 @@ #define AA(__x) ((unsigned long)(__x)) +static inline loff_t compose_loff(unsigned int high, unsigned int low) +{ + loff_t retval = low; + + BUILD_BUG_ON(sizeof retval != sizeof low + sizeof high); + __builtin_memcpy((unsigned char *)&retval + sizeof low, &high, sizeof high); + return retval; +} + -asmlinkage long sys32_truncate64(const char __user *filename, - unsigned long offset_low, - unsigned long offset_high) +SYS32_SYSCALL_DEFINE3(truncate64, const char __user *, filename, + unsigned int, offset_low, + unsigned int, offset_high) { - return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low); + return sys_truncate(filename, compose_loff(offset_high, offset_low)); } -asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, - unsigned long offset_high) +SYS32_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd, unsigned int, offset_low, + unsigned int, offset_high) { - return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low); + return sys_ftruncate(fd, ((unsigned long) offset_high << 32) | offset_low); } /* @@ -69,8 +78,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, */ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) { - typeof(ubuf->st_uid) uid = 0; - typeof(ubuf->st_gid) gid = 0; + typeof(((struct stat64 *)0)->st_uid) uid = 0; + typeof(((struct stat64 *)0)->st_gid) gid = 0; SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid)); if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || @@ -95,8 +104,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) return 0; } -asmlinkage long sys32_stat64(const char __user *filename, - struct stat64 __user *statbuf) +SYS32_SYSCALL_DEFINE2(stat64, const char __user *, filename, + struct stat64 __user *, statbuf) { struct kstat stat; int ret = vfs_stat(filename, &stat); @@ -106,8 +115,8 @@ asmlinkage long sys32_stat64(const char __user *filename, return ret; } -asmlinkage long sys32_lstat64(const char __user *filename, - struct stat64 __user *statbuf) +SYS32_SYSCALL_DEFINE2(lstat64, const char __user *, filename, + struct stat64 __user *, statbuf) { struct kstat stat; int ret = vfs_lstat(filename, &stat); @@ -116,7 +125,7 @@ asmlinkage long sys32_lstat64(const char __user *filename, return ret; } -asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf) +SYS32_SYSCALL_DEFINE2(fstat64, unsigned int, fd, struct stat64 __user *, statbuf) { struct kstat stat; int ret = vfs_fstat(fd, &stat); @@ -125,8 +134,8 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf) return ret; } -asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename, - struct stat64 __user *statbuf, int flag) +SYS32_SYSCALL_DEFINE4(fstatat, unsigned int, dfd, const char __user *, filename, + struct stat64 __user *, statbuf, int, flag) { struct kstat stat; int error; @@ -152,7 +161,7 @@ struct mmap_arg_struct32 { unsigned int offset; }; -asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg) +SYS32_SYSCALL_DEFINE1(mmap, struct 
mmap_arg_struct32 __user *, arg) { struct mmap_arg_struct32 a; @@ -166,22 +175,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg) a.offset>>PAGE_SHIFT); } -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr, - int options) +SYS32_SYSCALL_DEFINE3(waitpid, compat_pid_t, pid, unsigned int __user *, stat_addr, + int, options) { return compat_sys_wait4(pid, stat_addr, options, NULL); } /* warning: next two assume little endian */ -asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count, - u32 poslo, u32 poshi) +SYS32_SYSCALL_DEFINE5(pread, unsigned int, fd, char __user *, ubuf, u32, count, + u32, poslo, u32, poshi) { return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); } -asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf, - u32 count, u32 poslo, u32 poshi) +SYS32_SYSCALL_DEFINE5(pwrite, unsigned int, fd, const char __user *, ubuf, + u32, count, u32, poslo, u32, poshi) { return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); @@ -192,40 +201,40 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf, * Some system calls that need sign extended arguments. This could be * done by a generic wrapper. */ -long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, - __u32 len_low, __u32 len_high, int advice) +SYS32_SYSCALL_DEFINE6(fadvise64_64, int, fd, __u32, offset_low, __u32, offset_high, + __u32, len_low, __u32, len_high, int, advice) { return sys_fadvise64_64(fd, - (((u64)offset_high)<<32) | offset_low, - (((u64)len_high)<<32) | len_low, + compose_loff(offset_high, offset_low), + compose_loff(len_high, len_low), advice); } -asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, - size_t count) +SYS32_SYSCALL_DEFINE4(readahead, int, fd, unsigned, off_lo, unsigned, off_hi, + size_t, count) { - return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count); + return sys_readahead(fd, compose_loff(off_hi, off_lo), count); } -asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi, - unsigned n_low, unsigned n_hi, int flags) +SYS32_SYSCALL_DEFINE6(sync_file_range, int, fd, unsigned, off_low, unsigned, off_hi, + unsigned, n_low, unsigned, n_hi, int, flags) { return sys_sync_file_range(fd, - ((u64)off_hi << 32) | off_low, - ((u64)n_hi << 32) | n_low, flags); + compose_loff(off_hi, off_low), + compose_loff(n_hi, n_low), flags); } -asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi, - size_t len, int advice) +SYS32_SYSCALL_DEFINE5(fadvise64, int, fd, unsigned, offset_lo, unsigned, offset_hi, + int, len, int, advice) { - return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo, + return sys_fadvise64_64(fd, compose_loff(offset_hi, offset_lo), len, advice); } -asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo, - unsigned offset_hi, unsigned len_lo, - unsigned len_hi) +SYS32_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, unsigned, offset_lo, + unsigned, offset_hi, unsigned, len_lo, + unsigned, len_hi) { - return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo, - ((u64)len_hi << 32) | len_lo); + return sys_fallocate(fd, mode, compose_loff(offset_hi, offset_lo), + compose_loff(len_hi, len_lo)); } diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index e7636bac7..f5c86c4f6 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -3,7 +3,9 @@ #ifdef __ASSEMBLY__ 
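The compose_loff() helper introduced in sys_ia32.c above builds a 64-bit loff_t out of the two 32-bit halves that the ia32 syscall ABI hands over separately; copying the high word into the upper four bytes of the result is only correct for a little-endian layout, which always holds on x86. An equivalent, endianness-independent way to write it is sketched below (the helper name is made up for the illustration; loff_t is the kernel's 64-bit file-offset type).

        /* Same result as compose_loff() above, using a shift instead of a byte copy. */
        static inline loff_t compose_loff_shifted(unsigned int high, unsigned int low)
        {
                return ((loff_t)high << 32) | low;
        }

Used this way, the converted truncate64 wrapper reduces to sys_truncate(filename, compose_loff(offset_high, offset_low)), and the readahead, sync_file_range, fadvise64/fadvise64_64 and fallocate wrappers are rewritten along the same lines.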
+#include #include +#include #ifdef CONFIG_SMP .macro LOCK_PREFIX @@ -18,6 +20,114 @@ .endm #endif +.macro pax_force_retaddr_bts rip=0 +#ifdef KERNEXEC_PLUGIN + btsq $63,\rip(%rsp) +#endif +.endm + +#if defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS) && defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR) +#error PAX: the KERNEXEC BTS and OR methods must not be enabled at once +#endif + +.macro pax_force_retaddr rip=0 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS + btsq $63,\rip(%rsp) +#endif +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + orq %r12,\rip(%rsp) +#endif +.endm + +.macro pax_force_fptr ptr +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS + btsq $63,\ptr +#endif +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + orq %r12,\ptr +#endif +.endm + +.macro pax_set_fptr_mask +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR + movabs $0x8000000000000000,%r12 +#endif +.endm + +#ifdef CONFIG_PAX_RAP +.macro rap_call target hash="" sym="" + + jmp 2001f + .ifb \hash + __ASM_RAP_RET_HASH(__rap_hash_ret_\target) + .else + __ASM_RAP_RET_HASH(__rap_hash_ret_\hash) + .endif + .skip 8-(2002f-2001f),0xcc + + .ifnb \sym + .globl \sym +\sym : + .endif + +2001: call \target +2002: +.endm + +.macro rap_retloc caller + __ASM_RAP_RET_HASH(__rap_hash_ret_\caller) + .skip 8-(2002f-2001f),0xcc +2001: call \caller +2002: +.endm + +.macro rap_ret func + ret +.endm +#endif + +.macro pax_direct_call_global target sym +#ifdef CONFIG_PAX_RAP + rap_call \target, , \sym +#else + .globl \sym +\sym : + call \target +#endif +.endm + +.macro pax_indirect_call target extra +#ifdef CONFIG_PAX_RAP + rap_call "*\target" hash=\extra +#else + call *\target +#endif +.endm + +.macro pax_direct_call target +#ifdef CONFIG_PAX_RAP + rap_call \target +#else + call \target +#endif +.endm + +.macro pax_retloc caller +#ifdef CONFIG_PAX_RAP + rap_retloc \caller +#else +#endif +.endm + +.macro pax_ret func + pax_force_retaddr +#ifdef CONFIG_PAX_RAP + rap_ret \func +#else + ret +#endif +.endm + /* * Issue one struct alt_instr descriptor entry (need to put it into * the section .altinstructions, see below). This entry contains @@ -50,7 +160,7 @@ altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b .popsection - .pushsection .altinstr_replacement,"ax" + .pushsection .altinstr_replacement,"a" 143: \newinstr 144: @@ -86,7 +196,7 @@ altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b .popsection - .pushsection .altinstr_replacement,"ax" + .pushsection .altinstr_replacement,"a" 143: \newinstr1 144: @@ -95,6 +205,26 @@ .popsection .endm +.macro __PAX_REFCOUNT section, counter +#ifdef CONFIG_PAX_REFCOUNT + jo 111f + .pushsection .text.\section +111: lea \counter,%_ASM_CX + int $X86_REFCOUNT_VECTOR +222: + .popsection +333: + _ASM_EXTABLE(222b, 333b) +#endif +.endm + +.macro PAX_REFCOUNT64_OVERFLOW counter + __PAX_REFCOUNT refcount64_overflow, \counter +.endm + +.macro PAX_REFCOUNT64_UNDERFLOW counter + __PAX_REFCOUNT refcount64_underflow, \counter +.endm #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_ALTERNATIVE_ASM_H */ diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 1b020381a..d42eaf6a5 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -7,6 +7,7 @@ #include #include #include +#include /* * Alternative inline assembly for SMP. 
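The rap_call macro defined above plants a 64-bit hash constant immediately below each call site: the leading jmp 2001f hops over the constant at run time, and the .skip pad keeps the constant at a fixed offset before the return address regardless of the call instruction's length. RAP pairs such embedded hashes with checks on the other side of a control-flow edge; this hunk only shows the emission side, while the checking side for C code is generated by the RAP gcc plugin, with violations ending up in the do_rap_call_error/do_rap_ret_error handlers wired up earlier in this patch. The sketch below is a loose illustration of the forward-edge (indirect call) check only; the assumption that the callee's type hash sits in the eight bytes just before its entry point, and all helper names, are inferred for the illustration and not stated by this hunk.

        /* Loose sketch only: what an instrumented indirect call conceptually does. */
        typedef long (*rap_fptr_t)(long);

        extern void rap_call_violation(void);   /* stand-in for the RAP trap */

        static long rap_checked_indirect_call(rap_fptr_t target,
                                              unsigned long long expected_hash, long arg)
        {
                unsigned long long callee_hash = *((const unsigned long long *)target - 1);

                if (callee_hash != expected_hash)
                        rap_call_violation();
                return target(arg);
        }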
@@ -84,6 +85,18 @@ static inline int alternatives_text_reserved(void *start, void *end) } #endif /* CONFIG_SMP */ +#ifdef CONFIG_PAX_RAP +#define PAX_DIRECT_CALL(target) "rap_direct_call " target +#define PAX_DIRECT_CALL_HASH(target, hash) "rap_direct_call " target " " hash +#define PAX_INDIRECT_CALL(target, extra) "rap_indirect_call " target " " extra +#define PAX_RET(extra) "rap_ret " extra +#else +#define PAX_DIRECT_CALL(target) "call " target +#define PAX_DIRECT_CALL_HASH(target, hash) "call " target +#define PAX_INDIRECT_CALL(target, extra) "call " target +#define PAX_RET(extra) "ret" +#endif + #define b_replacement(num) "664"#num #define e_replacement(num) "665"#num @@ -137,7 +150,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(feature, 1) \ ".popsection\n" \ - ".pushsection .altinstr_replacement, \"ax\"\n" \ + ".pushsection .altinstr_replacement, \"a\"\n" \ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ ".popsection" @@ -147,7 +160,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ALTINSTR_ENTRY(feature1, 1) \ ALTINSTR_ENTRY(feature2, 2) \ ".popsection\n" \ - ".pushsection .altinstr_replacement, \"ax\"\n" \ + ".pushsection .altinstr_replacement, \"a\"\n" \ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ ".popsection" @@ -206,7 +219,7 @@ static inline int alternatives_text_reserved(void *start, void *end) /* Like alternative_io, but for replacing a direct call with another one. */ #define alternative_call(oldfunc, newfunc, feature, output, input...) \ - asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \ + asm volatile (ALTERNATIVE(PAX_DIRECT_CALL("%P[old]"), PAX_DIRECT_CALL("%P[new]"), feature) \ : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input) /* @@ -219,8 +232,8 @@ static inline int alternatives_text_reserved(void *start, void *end) output, input...) \ { \ register void *__sp asm(_ASM_SP); \ - asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ - "call %P[new2]", feature2) \ + asm volatile (ALTERNATIVE_2(PAX_DIRECT_CALL("%P[old]"), PAX_DIRECT_CALL("%P[new1]"), feature1,\ + PAX_DIRECT_CALL("%P[new2]"), feature2) \ : output, "+r" (__sp) \ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ [new2] "i" (newfunc2), ## input); \ @@ -238,6 +251,35 @@ static inline int alternatives_text_reserved(void *start, void *end) */ #define ASM_NO_INPUT_CLOBBER(clbr...) 
"i" (0) : clbr +#ifdef CONFIG_PAX_REFCOUNT +#define __PAX_REFCOUNT(size) \ + "jo 111f\n" \ + ".if "__stringify(size)" == 4\n\t" \ + ".pushsection .text.refcount_overflow\n" \ + ".elseif "__stringify(size)" == -4\n\t" \ + ".pushsection .text.refcount_underflow\n" \ + ".elseif "__stringify(size)" == 8\n\t" \ + ".pushsection .text.refcount64_overflow\n" \ + ".elseif "__stringify(size)" == -8\n\t" \ + ".pushsection .text.refcount64_underflow\n" \ + ".else\n" \ + ".error \"invalid size\"\n" \ + ".endif\n" \ + "111:\tlea %[counter],%%"_ASM_CX"\n\t" \ + "int $"__stringify(X86_REFCOUNT_VECTOR)"\n" \ + "222:\n\t" \ + ".popsection\n" \ + "333:\n" \ + _ASM_EXTABLE(222b, 333b) + +#define PAX_REFCOUNT_OVERFLOW(size) __PAX_REFCOUNT(size) +#define PAX_REFCOUNT_UNDERFLOW(size) __PAX_REFCOUNT(-(size)) +#else +#define __PAX_REFCOUNT(size) +#define PAX_REFCOUNT_OVERFLOW(size) +#define PAX_REFCOUNT_UNDERFLOW(size) +#endif + #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index f5aaf6c83..6f2398207 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -49,7 +49,7 @@ static inline void generic_apic_probe(void) #ifdef CONFIG_X86_LOCAL_APIC -extern unsigned int apic_verbosity; +extern int apic_verbosity; extern int local_apic_timer_c2_ok; extern int disable_apic; diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index 93eebc636..6a643955f 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" + "lcall *%%ss:apm_bios_entry\n\t" "setc %%al\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" @@ -58,7 +58,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" + "lcall *%%ss:apm_bios_entry\n\t" "setc %%bl\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 44b8762fa..59e9d90a5 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -11,6 +11,8 @@ #include #include +#include + #ifndef CONFIG_X86_CMPXCHG64 extern void cmpxchg8b_emu(void); #endif diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 7acb51c49..46ba0b30f 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -79,30 +79,6 @@ _ASM_PTR (entry); \ .popsection -.macro ALIGN_DESTINATION - /* check for bad alignment of destination */ - movl %edi,%ecx - andl $7,%ecx - jz 102f /* already aligned */ - subl $8,%ecx - negl %ecx - subl %ecx,%edx -100: movb (%rsi),%al -101: movb %al,(%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz 100b -102: - .section .fixup,"ax" -103: addl %ecx,%edx /* ecx is zerorest also */ - jmp copy_user_handle_tail - .previous - - _ASM_EXTABLE(100b,103b) - _ASM_EXTABLE(101b,103b) - .endm - #else # define _EXPAND_EXTABLE_HANDLE(x) #x # define _ASM_EXTABLE_HANDLE(from, to, handler) \ diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 14635c5ea..199ea31d4 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -27,6 +27,17 @@ static __always_inline int atomic_read(const atomic_t *v) } /** + * atomic_read_unchecked - read atomic variable + * @v: pointer of type atomic_unchecked_t + 
* + * Atomically reads the value of @v. + */ +static __always_inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return ACCESS_ONCE((v)->counter); +} + +/** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value @@ -39,6 +50,18 @@ static __always_inline void atomic_set(atomic_t *v, int i) } /** + * atomic_set_unchecked - set atomic variable + * @v: pointer of type atomic_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +static __always_inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + v->counter = i; +} + +/** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t @@ -47,8 +70,24 @@ static __always_inline void atomic_set(atomic_t *v, int i) */ static __always_inline void atomic_add(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "addl %1,%0" - : "+m" (v->counter) + asm volatile(LOCK_PREFIX "addl %1,%0\n\t" + PAX_REFCOUNT_OVERFLOW(4) + : [counter] "+m" (v->counter) + : "ir" (i) + : "cc", "cx"); +} + +/** + * atomic_add_unchecked - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_unchecked_t + * + * Atomically adds @i to @v. + */ +static __always_inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "addl %1,%0\n" + : [counter] "+m" (v->counter) : "ir" (i)); } @@ -61,7 +100,23 @@ static __always_inline void atomic_add(int i, atomic_t *v) */ static __always_inline void atomic_sub(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "subl %1,%0" + asm volatile(LOCK_PREFIX "subl %1,%0\n\t" + PAX_REFCOUNT_UNDERFLOW(4) + : [counter] "+m" (v->counter) + : "ir" (i) + : "cc", "cx"); +} + +/** + * atomic_sub_unchecked - subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_unchecked_t + * + * Atomically subtracts @i from @v. + */ +static __always_inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "subl %1,%0\n" : "+m" (v->counter) : "ir" (i)); } @@ -77,7 +132,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v) */ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); + GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, -4, "er", i, "%0", e); } /** @@ -88,7 +143,21 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) */ static __always_inline void atomic_inc(atomic_t *v) { - asm volatile(LOCK_PREFIX "incl %0" + asm volatile(LOCK_PREFIX "incl %0\n\t" + PAX_REFCOUNT_OVERFLOW(4) + : [counter] "+m" (v->counter) + : : "cc", "cx"); +} + +/** + * atomic_inc_unchecked - increment atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1. + */ +static __always_inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "incl %0\n" : "+m" (v->counter)); } @@ -100,7 +169,21 @@ static __always_inline void atomic_inc(atomic_t *v) */ static __always_inline void atomic_dec(atomic_t *v) { - asm volatile(LOCK_PREFIX "decl %0" + asm volatile(LOCK_PREFIX "decl %0\n\t" + PAX_REFCOUNT_UNDERFLOW(4) + : [counter] "+m" (v->counter) + : : "cc", "cx"); +} + +/** + * atomic_dec_unchecked - decrement atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically decrements @v by 1. 
+ */ +static __always_inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decl %0\n" : "+m" (v->counter)); } @@ -114,7 +197,7 @@ static __always_inline void atomic_dec(atomic_t *v) */ static __always_inline bool atomic_dec_and_test(atomic_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); + GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, -4, "%0", e); } /** @@ -127,7 +210,20 @@ static __always_inline bool atomic_dec_and_test(atomic_t *v) */ static __always_inline bool atomic_inc_and_test(atomic_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); + GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, 4, "%0", e); +} + +/** + * atomic_inc_and_test_unchecked - increment and test + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) +{ + GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", e); } /** @@ -141,7 +237,7 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v) */ static __always_inline bool atomic_add_negative(int i, atomic_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); + GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, 4, "er", i, "%0", s); } /** @@ -151,7 +247,19 @@ static __always_inline bool atomic_add_negative(int i, atomic_t *v) * * Atomically adds @i to @v and returns @i + @v */ -static __always_inline int atomic_add_return(int i, atomic_t *v) +static __always_inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v) +{ + return i + xadd_check_overflow(&v->counter, i); +} + +/** + * atomic_add_return_unchecked - add integer and return + * @i: integer value to add + * @v: pointer of type atomic_unchecked_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static __always_inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) { return i + xadd(&v->counter, i); } @@ -163,25 +271,34 @@ static __always_inline int atomic_add_return(int i, atomic_t *v) * * Atomically subtracts @i from @v and returns @v - @i */ -static __always_inline int atomic_sub_return(int i, atomic_t *v) +static __always_inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v) { return atomic_add_return(-i, v); } #define atomic_inc_return(v) (atomic_add_return(1, v)) +static __always_inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_return_unchecked(1, v); +} #define atomic_dec_return(v) (atomic_sub_return(1, v)) static __always_inline int atomic_fetch_add(int i, atomic_t *v) { - return xadd(&v->counter, i); + return xadd_check_overflow(&v->counter, i); } static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { - return xadd(&v->counter, -i); + return xadd_check_overflow(&v->counter, -i); } -static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) +static __always_inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new) +{ + return cmpxchg(&v->counter, old, new); +} + +static __always_inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) { return cmpxchg(&v->counter, old, new); } @@ -191,6 +308,11 @@ static inline int atomic_xchg(atomic_t *v, int new) return xchg(&v->counter, new); } +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) +{ + return xchg(&v->counter, new); +} + #define ATOMIC_OP(op) \ static inline void
atomic_##op(int i, atomic_t *v) \ { \ @@ -236,12 +358,20 @@ ATOMIC_OPS(xor, ^) */ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addl %2,%0\n\t" + PAX_REFCOUNT_OVERFLOW(4) + : "=r" (new) + : "0" (c), "ir" (a), + [counter] "m" (v->counter) + : "cc", "cx"); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; @@ -250,6 +380,114 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) } /** + * atomic_inc_not_zero_hint - increment if not null + * @v: pointer of type atomic_t + * @hint: probable value of the atomic before the increment + * + * This version of atomic_inc_not_zero() gives a hint of probable + * value of the atomic. This helps processor to not read the memory + * before doing the atomic read/modify/write cycle, lowering + * number of bus transactions on some arches. + * + * Returns: 0 if increment was not done, 1 otherwise. + */ +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) +{ + int val, c = hint, new; + + /* sanity test, should be removed by compiler if hint is a constant */ + if (!hint) + return __atomic_add_unless(v, 1, 0); + + do { + asm volatile("incl %0\n\t" + PAX_REFCOUNT_OVERFLOW(4) + : "=r" (new) + : "0" (c), + [counter] "m" (v->counter) + : "cc", "cx"); + + val = atomic_cmpxchg(v, c, new); + if (val == c) + return 1; + c = val; + } while (c); + + return 0; +} + +#define atomic_inc_unless_negative atomic_inc_unless_negative +static inline int atomic_inc_unless_negative(atomic_t *p) +{ + int v, v1, new; + + for (v = 0; v >= 0; v = v1) { + asm volatile("incl %0\n\t" + PAX_REFCOUNT_OVERFLOW(4) + : "=r" (new) + : "0" (v), + [counter] "m" (p->counter) + : "cc", "cx"); + + v1 = atomic_cmpxchg(p, v, new); + if (likely(v1 == v)) + return 1; + } + return 0; +} + +#define atomic_dec_unless_positive atomic_dec_unless_positive +static inline int atomic_dec_unless_positive(atomic_t *p) +{ + int v, v1, new; + + for (v = 0; v <= 0; v = v1) { + asm volatile("decl %0\n\t" + PAX_REFCOUNT_UNDERFLOW(4) + : "=r" (new) + : "0" (v), + [counter] "m" (p->counter) + : "cc", "cx"); + + v1 = atomic_cmpxchg(p, v, new); + if (likely(v1 == v)) + return 1; + } + return 0; +} + +/* + * atomic_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. 
+ */ +#define atomic_dec_if_positive atomic_dec_if_positive +static inline int atomic_dec_if_positive(atomic_t *v) +{ + int c, old, dec; + c = atomic_read(v); + for (;;) { + asm volatile("decl %0\n\t" + PAX_REFCOUNT_UNDERFLOW(4) + : "=r" (dec) + : "0" (c), + [counter] "m" (v->counter) + : "cc", "cx"); + + if (unlikely(dec < 0)) + break; + old = atomic_cmpxchg(v, c, dec); + if (likely(old == c)) + break; + c = old; + } + return dec; +} + +/** * atomic_inc_short - increment of a short integer * @v: pointer to type int * diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index 71d7705fb..02bb244f3 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -8,9 +8,17 @@ /* An 64bit atomic type */ typedef struct { - u64 __aligned(8) counter; + s64 __aligned(8) counter; } atomic64_t; +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + s64 __aligned(8) counter; +} atomic64_unchecked_t; +#else +typedef atomic64_t atomic64_unchecked_t; +#endif + #define ATOMIC64_INIT(val) { (val) } #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...) @@ -23,7 +31,7 @@ typedef struct { #ifdef CONFIG_X86_CMPXCHG64 #define __alternative_atomic64(f, g, out, in...) \ - asm volatile("call %P[func]" \ + asm volatile(PAX_DIRECT_CALL("%P[func]") \ : out : [func] "i" (atomic64_##g##_cx8), ## in) #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8) @@ -36,21 +44,31 @@ typedef struct { ATOMIC64_DECL_ONE(sym##_386) ATOMIC64_DECL_ONE(add_386); +ATOMIC64_DECL_ONE(add_unchecked_386); ATOMIC64_DECL_ONE(sub_386); +ATOMIC64_DECL_ONE(sub_unchecked_386); ATOMIC64_DECL_ONE(inc_386); +ATOMIC64_DECL_ONE(inc_unchecked_386); ATOMIC64_DECL_ONE(dec_386); +ATOMIC64_DECL_ONE(dec_unchecked_386); #endif #define alternative_atomic64(f, out, in...) \ __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in) ATOMIC64_DECL(read); +ATOMIC64_DECL(read_unchecked); ATOMIC64_DECL(set); +ATOMIC64_DECL(set_unchecked); ATOMIC64_DECL(xchg); ATOMIC64_DECL(add_return); +ATOMIC64_DECL(add_return_unchecked); ATOMIC64_DECL(sub_return); +ATOMIC64_DECL(sub_return_unchecked); ATOMIC64_DECL(inc_return); +ATOMIC64_DECL(inc_return_unchecked); ATOMIC64_DECL(dec_return); +ATOMIC64_DECL(dec_return_unchecked); ATOMIC64_DECL(dec_if_positive); ATOMIC64_DECL(inc_not_zero); ATOMIC64_DECL(add_unless); @@ -76,6 +94,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n } /** + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable + * @p: pointer to type atomic64_unchecked_t + * @o: expected value + * @n: new value + * + * Atomically sets @v to @n if it was equal to @o and returns + * the old value. + */ + +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n) +{ + return cmpxchg64(&v->counter, o, n); +} + +/** * atomic64_xchg - xchg atomic64 variable * @v: pointer to type atomic64_t * @n: value to assign @@ -95,6 +128,25 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n) } /** + * atomic64_xchg_unchecked - xchg atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * @n: value to assign + * + * Atomically xchgs the value of @v to @n and returns + * the old value. 
+ */ +static inline long long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long long n) +{ + long long o; + unsigned high = (unsigned)(n >> 32); + unsigned low = (unsigned)n; + alternative_atomic64(xchg, "=&A" (o), + "S" (v), "b" (low), "c" (high) + : "memory"); + return o; +} + +/** * atomic64_set - set atomic64 variable * @v: pointer to type atomic64_t * @i: value to assign @@ -111,6 +163,22 @@ static inline void atomic64_set(atomic64_t *v, long long i) } /** + * atomic64_set_unchecked - set atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * @n: value to assign + * + * Atomically sets the value of @v to @n. + */ +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) +{ + unsigned high = (unsigned)(i >> 32); + unsigned low = (unsigned)i; + alternative_atomic64(set, /* no output */, + "S" (v), "b" (low), "c" (high) + : "eax", "edx", "memory"); +} + +/** * atomic64_read - read atomic64 variable * @v: pointer to type atomic64_t * @@ -124,6 +192,19 @@ static inline long long atomic64_read(const atomic64_t *v) } /** + * atomic64_read_unchecked - read atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * + * Atomically reads the value of @v and returns it. + */ +static inline long long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + long long r; + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory"); + return r; + } + +/** * atomic64_add_return - add and return * @i: integer value to add * @v: pointer to type atomic64_t @@ -138,6 +219,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) return i; } +/** + * atomic64_add_return_unchecked - add and return + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v and returns @i + *@v + */ +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v) +{ + alternative_atomic64(add_return_unchecked, + ASM_OUTPUT2("+A" (i), "+c" (v)), + ASM_NO_INPUT_CLOBBER("memory")); + return i; +} + /* * Other variants with different arithmetic operators: */ @@ -157,6 +253,14 @@ static inline long long atomic64_inc_return(atomic64_t *v) return a; } +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + long long a; + alternative_atomic64(inc_return_unchecked, "=&A" (a), + "S" (v) : "memory", "ecx"); + return a; +} + static inline long long atomic64_dec_return(atomic64_t *v) { long long a; @@ -181,6 +285,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v) } /** + * atomic64_add_unchecked - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v. + */ +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v) +{ + __alternative_atomic64(add_unchecked, add_return_unchecked, + ASM_OUTPUT2("+A" (i), "+c" (v)), + ASM_NO_INPUT_CLOBBER("memory")); + return i; +} + +/** * atomic64_sub - subtract the atomic64 variable * @i: integer value to subtract * @v: pointer to type atomic64_t @@ -222,6 +341,18 @@ static inline void atomic64_inc(atomic64_t *v) } /** + * atomic64_inc_unchecked - increment atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * + * Atomically increments @v by 1. 
+ */ +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ + __alternative_atomic64(inc_unchecked, inc_return_unchecked, /* no output */, + "S" (v) : "memory", "eax", "ecx", "edx"); +} + +/** * atomic64_dec - decrement atomic64 variable * @v: pointer to type atomic64_t * diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 89ed2f6ae..25490ad26 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v) } /** + * atomic64_read_unchecked - read atomic64 variable + * @v: pointer of type atomic64_unchecked_t + * + * Atomically reads the value of @v. + * Doesn't imply a read memory barrier. + */ +static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return ACCESS_ONCE((v)->counter); +} + +/** * atomic64_set - set atomic64 variable * @v: pointer to type atomic64_t * @i: required value @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i) } /** + * atomic64_set_unchecked - set atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + v->counter = i; +} + +/** * atomic64_add - add integer to atomic64 variable * @i: integer value to add * @v: pointer to type atomic64_t @@ -42,6 +66,22 @@ static inline void atomic64_set(atomic64_t *v, long i) */ static __always_inline void atomic64_add(long i, atomic64_t *v) { + asm volatile(LOCK_PREFIX "addq %1,%0\n\t" + PAX_REFCOUNT_OVERFLOW(8) + : [counter] "=m" (v->counter) + : "er" (i), "m" (v->counter) + : "cc", "cx"); +} + +/** + * atomic64_add_unchecked - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v. + */ +static __always_inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) +{ asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) : "er" (i), "m" (v->counter)); @@ -56,7 +96,23 @@ static __always_inline void atomic64_add(long i, atomic64_t *v) */ static inline void atomic64_sub(long i, atomic64_t *v) { - asm volatile(LOCK_PREFIX "subq %1,%0" + asm volatile(LOCK_PREFIX "subq %1,%0\n\t" + PAX_REFCOUNT_UNDERFLOW(8) + : [counter] "=m" (v->counter) + : "er" (i), "m" (v->counter) + : "cc", "cx"); +} + +/** + * atomic64_sub_unchecked - subtract the atomic64 variable + * @i: integer value to subtract + * @v: pointer to type atomic64_unchecked_t + * + * Atomically subtracts @i from @v. + */ +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "subq %1,%0\n" : "=m" (v->counter) : "er" (i), "m" (v->counter)); } @@ -72,7 +128,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) */ static inline bool atomic64_sub_and_test(long i, atomic64_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); + GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, -8, "er", i, "%0", e); } /** @@ -83,6 +139,21 @@ static inline bool atomic64_sub_and_test(long i, atomic64_t *v) */ static __always_inline void atomic64_inc(atomic64_t *v) { + asm volatile(LOCK_PREFIX "incq %0\n\t" + PAX_REFCOUNT_OVERFLOW(8) + : [counter] "=m" (v->counter) + : "m" (v->counter) + : "cc", "cx"); +} + +/** + * atomic64_inc_unchecked - increment atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * + * Atomically increments @v by 1. 
+ */ +static __always_inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ asm volatile(LOCK_PREFIX "incq %0" : "=m" (v->counter) : "m" (v->counter)); @@ -96,7 +167,22 @@ static __always_inline void atomic64_inc(atomic64_t *v) */ static __always_inline void atomic64_dec(atomic64_t *v) { - asm volatile(LOCK_PREFIX "decq %0" + asm volatile(LOCK_PREFIX "decq %0\n\t" + PAX_REFCOUNT_UNDERFLOW(8) + : [counter] "=m" (v->counter) + : "m" (v->counter) + : "cc", "cx"); +} + +/** + * atomic64_dec_unchecked - decrement atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically decrements @v by 1. + */ +static __always_inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decq %0\n" : "=m" (v->counter) : "m" (v->counter)); } @@ -111,7 +197,7 @@ static __always_inline void atomic64_dec(atomic64_t *v) */ static inline bool atomic64_dec_and_test(atomic64_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); + GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, -8, "%0", e); } /** @@ -124,7 +210,7 @@ static inline bool atomic64_dec_and_test(atomic64_t *v) */ static inline bool atomic64_inc_and_test(atomic64_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); + GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, 8, "%0", e); } /** @@ -138,7 +224,7 @@ static inline bool atomic64_inc_and_test(atomic64_t *v) */ static inline bool atomic64_add_negative(long i, atomic64_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); + GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, 8, "er", i, "%0", s); } /** @@ -150,6 +236,18 @@ static inline bool atomic64_add_negative(long i, atomic64_t *v) */ static __always_inline long atomic64_add_return(long i, atomic64_t *v) { + return i + xadd_check_overflow(&v->counter, i); +} + +/** + * atomic64_add_return_unchecked - add and return + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static __always_inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) +{ return i + xadd(&v->counter, i); } @@ -160,15 +258,19 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) static inline long atomic64_fetch_add(long i, atomic64_t *v) { - return xadd(&v->counter, i); + return xadd_check_overflow(&v->counter, i); } static inline long atomic64_fetch_sub(long i, atomic64_t *v) { - return xadd(&v->counter, -i); + return xadd_check_overflow(&v->counter, -i); } #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + return atomic64_add_return_unchecked(1, v); +} #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) @@ -176,11 +278,21 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) return cmpxchg(&v->counter, old, new); } +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) +{ + return cmpxchg(&v->counter, old, new); +} + static inline long atomic64_xchg(atomic64_t *v, long new) { return xchg(&v->counter, new); } +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) +{ + return xchg(&v->counter, new); +} + /** * atomic64_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t @@ -192,17 +304,25 @@ static inline long atomic64_xchg(atomic64_t *v, long new) */ static inline bool atomic64_add_unless(atomic64_t 
*v, long a, long u) { - long c, old; + long c, old, new; c = atomic64_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic64_cmpxchg((v), c, c + (a)); + + asm volatile("addq %2,%0\n\t" + PAX_REFCOUNT_OVERFLOW(8) + : "=r" (new) + : "0" (c), "ir" (a), + [counter] "m" (v->counter) + : "cc", "cx"); + + old = atomic64_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 68557f52b..d9828eca8 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -50,7 +50,7 @@ * a mask operation on a byte. */ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3)) #define CONST_MASK(nr) (1 << ((nr) & 7)) /** @@ -203,7 +203,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) */ static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); } /** @@ -249,7 +249,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * */ static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); } /** @@ -302,7 +302,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon */ static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); } static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) @@ -343,7 +343,7 @@ static bool test_bit(int nr, const volatile unsigned long *addr); * * Undefined if no bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __ffs(unsigned long word) +static __always_inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word) { asm("rep; bsf %1,%0" : "=r" (word) @@ -357,7 +357,7 @@ static __always_inline unsigned long __ffs(unsigned long word) * * Undefined if no zero exists, so code should check against ~0UL first. */ -static __always_inline unsigned long ffz(unsigned long word) +static __always_inline unsigned long __intentional_overflow(-1) ffz(unsigned long word) { asm("rep; bsf %1,%0" : "=r" (word) @@ -371,7 +371,7 @@ static __always_inline unsigned long ffz(unsigned long word) * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __fls(unsigned long word) +static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word) { asm("bsr %1,%0" : "=r" (word) @@ -434,7 +434,7 @@ static __always_inline int ffs(int x) * set bit if value is nonzero. The last (most significant) bit is * at position 32. */ -static __always_inline int fls(int x) +static __always_inline int __intentional_overflow(-1) fls(int x) { int r; @@ -476,7 +476,7 @@ static __always_inline int fls(int x) * at position 64. 
*/ #ifdef CONFIG_X86_64 -static __always_inline int fls64(__u64 x) +static __always_inline __intentional_overflow(-1) int fls64(__u64 x) { int bitpos = -1; /* diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index abd06b19d..17fc65f3a 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -6,7 +6,7 @@ #include /* Physical address where kernel should be loaded. */ -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + (CONFIG_PHYSICAL_ALIGN - 1)) \ & ~(CONFIG_PHYSICAL_ALIGN - 1)) diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h index 48f99f154..26ab08aea 100644 --- a/arch/x86/include/asm/cache.h +++ b/arch/x86/include/asm/cache.h @@ -5,12 +5,12 @@ /* L1 cache line size */ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) #define __read_mostly __attribute__((__section__(".data..read_mostly"))) #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT) #ifdef CONFIG_X86_VSMP #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h index 7b53743ed..5745aa177 100644 --- a/arch/x86/include/asm/checksum_32.h +++ b/arch/x86/include/asm/checksum_32.h @@ -16,7 +16,7 @@ * * it's best to have buff aligned on a 32-bit boundary */ -asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); +asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum) __rap_hash; /* * the same as csum_partial, but copies from src while it @@ -30,6 +30,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr) __rap_hash; + +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + /* * Note: when you get a NULL pointer exception here this means someone * passed in an incorrect kernel address to one of these functions. 
@@ -52,7 +60,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, might_sleep(); stac(); - ret = csum_partial_copy_generic((__force void *)src, dst, + ret = csum_partial_copy_generic_from_user((__force void *)src, dst, len, sum, err_ptr, NULL); clac(); @@ -183,7 +191,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, might_sleep(); if (access_ok(VERIFY_WRITE, dst, len)) { stac(); - ret = csum_partial_copy_generic(src, (__force void *)dst, + ret = csum_partial_copy_generic_to_user(src, (__force void *)dst, len, sum, NULL, err_ptr); clac(); return ret; diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index 97848cdfc..9ccfae996 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -15,8 +15,12 @@ extern void __cmpxchg_wrong_size(void) __compiletime_error("Bad argument size for cmpxchg"); extern void __xadd_wrong_size(void) __compiletime_error("Bad argument size for xadd"); +extern void __xadd_check_overflow_wrong_size(void) + __compiletime_error("Bad argument size for xadd_check_overflow"); extern void __add_wrong_size(void) __compiletime_error("Bad argument size for add"); +extern void __add_check_overflow_wrong_size(void) + __compiletime_error("Bad argument size for add_check_overflow"); /* * Constants for operation sizes. On 32-bit, the 64-bit size it set to @@ -68,6 +72,32 @@ extern void __add_wrong_size(void) __ret; \ }) +#ifdef CONFIG_PAX_REFCOUNT +#define __xchg_op_check_overflow(ptr, arg, op, lock) \ + ({ \ + __typeof__ (*(ptr)) __ret = (arg); \ + switch (sizeof(*(ptr))) { \ + case __X86_CASE_L: \ + asm volatile (lock #op "l %0, %1\n" \ + PAX_REFCOUNT_OVERFLOW(4) \ + : "+r" (__ret), [counter] "+m" (*(ptr))\ + : : "memory", "cc", "cx"); \ + break; \ + case __X86_CASE_Q: \ + asm volatile (lock #op "q %q0, %1\n" \ + PAX_REFCOUNT_OVERFLOW(8) \ + : "+r" (__ret), [counter] "+m" (*(ptr))\ + : : "memory", "cc", "cx"); \ + break; \ + default: \ + __ ## op ## _check_overflow_wrong_size(); \ + } \ + __ret; \ + }) +#else +#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock) +#endif + /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway. 
* Since this is generally used to protect other memory information, we @@ -162,6 +192,9 @@ extern void __add_wrong_size(void) #define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock) #define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX) +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock) +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX) + #define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \ ({ \ bool __ret; \ diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index e4959d023..c62dbc28a 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h @@ -81,7 +81,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) __typeof__(*(ptr)) __old = (o); \ __typeof__(*(ptr)) __new = (n); \ alternative_io(LOCK_PREFIX_HERE \ - "call cmpxchg8b_emu", \ + PAX_DIRECT_CALL("cmpxchg8b_emu"), \ "lock; cmpxchg8b (%%esi)" , \ X86_FEATURE_CX8, \ "=A" (__ret), \ @@ -97,7 +97,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) __typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __old = (o); \ __typeof__(*(ptr)) __new = (n); \ - alternative_io("call cmpxchg8b_emu", \ + alternative_io(PAX_DIRECT_CALL("cmpxchg8b_emu"), \ "cmpxchg8b (%%esi)" , \ X86_FEATURE_CX8, \ "=A" (__ret), \ diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 24118c0b4..55d73de1a 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -42,7 +42,11 @@ typedef u32 compat_uint_t; typedef u32 compat_ulong_t; typedef u32 compat_u32; typedef u64 __attribute__((aligned(4))) compat_u64; +#ifdef CHECKER_PLUGIN_USER typedef u32 compat_uptr_t; +#else +typedef u32 __user compat_uptr_t; +#endif struct compat_timespec { compat_time_t tv_sec; diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 1d2b69fc0..8ca35d6bb 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -156,7 +156,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit) " .byte 5f - 4f\n" /* repl len */ " .byte 3b - 2b\n" /* pad len */ ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" + ".section .altinstr_replacement,\"a\"\n" "4: jmp %l[t_no]\n" "5:\n" ".previous\n" diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index ed10b5bf9..95be66193 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -206,7 +206,8 @@ #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ #define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ - +#define X86_FEATURE_PCIDUDEREF ( 8*32+30) /* PaX PCID based UDEREF */ +#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ @@ -214,7 +215,7 @@ #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ -#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ +#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */ #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #define 
X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h index bb93333d9..ee113c017 100644 --- a/arch/x86/include/asm/crypto/camellia.h +++ b/arch/x86/include/asm/crypto/camellia.h @@ -39,34 +39,41 @@ extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key, /* regular block cipher functions */ asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, const u8 *src, bool xor); -asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_dec_blk(void *ctx, u8 *dst, const u8 *src); /* 2-way parallel cipher functions */ asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, const u8 *src, bool xor); -asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_dec_blk_2way(void *ctx, u8 *dst, const u8 *src); /* 16-way parallel cipher functions (avx/aes-ni) */ -asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_ecb_enc_16way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, +asmlinkage void camellia_ecb_dec_16way(void *ctx, u8 *dst, const u8 *src); - -asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, +void roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void __camellia_enc_blk16(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void __camellia_dec_blk16(void *ctx, u8 *dst, const u8 *src) __rap_hash; + +asmlinkage void camellia_cbc_dec_16way(void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_ctr_16way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); -asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_xts_enc_16way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); +asmlinkage void camellia_xts_dec_16way(void *ctx, u128 *dst, + const u128 *src, le128 *iv); -static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, +static inline void camellia_enc_blk(void *_ctx, u8 *dst, const u8 *src) { + struct camellia_ctx *ctx = _ctx; __camellia_enc_blk(ctx, dst, src, false); } @@ -76,9 +83,10 @@ static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst, __camellia_enc_blk(ctx, dst, src, true); } -static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, +static inline void camellia_enc_blk_2way(void *_ctx, u8 *dst, const u8 *src) { + struct camellia_ctx *ctx = _ctx; __camellia_enc_blk_2way(ctx, dst, src, false); } @@ -89,7 +97,7 @@ static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst, } /* glue helpers */ -extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src); +extern void camellia_decrypt_cbc_2way(void *ctx, u8 *dst, const u8 *src); extern void camellia_crypt_ctr(void *ctx, 
u128 *dst, const u128 *src, le128 *iv); extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h index 03bb1065c..9e7a45cc8 100644 --- a/arch/x86/include/asm/crypto/glue_helper.h +++ b/arch/x86/include/asm/crypto/glue_helper.h @@ -11,16 +11,16 @@ #include typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src); -typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src); +typedef void (*common_glue_cbc_func_t)(void *ctx, u8 *dst, const u8 *src); typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src, le128 *iv); typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src, le128 *iv); -#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn)) -#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn)) -#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn)) -#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn)) +#define GLUE_FUNC_CAST(fn) (fn) +#define GLUE_CBC_FUNC_CAST(fn) (fn) +#define GLUE_CTR_FUNC_CAST(fn) (fn) +#define GLUE_XTS_FUNC_CAST(fn) (fn) struct common_glue_func_entry { unsigned int num_blocks; /* number of blocks that @fn will process */ diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h index 33c2b8a43..21976b7b3 100644 --- a/arch/x86/include/asm/crypto/serpent-avx.h +++ b/arch/x86/include/asm/crypto/serpent-avx.h @@ -16,20 +16,22 @@ struct serpent_xts_ctx { struct serpent_ctx crypt_ctx; }; -asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_enc_8way_avx(void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_dec_8way_avx(void *ctx, u8 *dst, const u8 *src); +void __serpent_enc_blk8_avx(void *ctx, u8 *dst, const u8 *src) __rap_hash; +void __serpent_dec_blk8_avx(void *ctx, u8 *dst, const u8 *src) __rap_hash; -asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_cbc_dec_8way_avx(void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_ctr_8way_avx(void *ctx, u128 *dst, + const u128 *src, le128 *iv); -asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_xts_enc_8way_avx(void *ctx, u128 *dst, + const u128 *src, le128 *iv); +asmlinkage void serpent_xts_dec_8way_avx(void *ctx, u128 *dst, + const u128 *src, le128 *iv); extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv); diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/include/asm/crypto/serpent-sse2.h index e6e77dffb..fe4208105 100644 --- a/arch/x86/include/asm/crypto/serpent-sse2.h +++ b/arch/x86/include/asm/crypto/serpent-sse2.h @@ -13,7 +13,7 @@ asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst, asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst, const u8 *src); -static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, +static inline void serpent_enc_blk_xway(void *ctx, u8 *dst, const u8 *src) { __serpent_enc_blk_4way(ctx, dst, src, false); @@ -25,7 +25,7 @@ static inline void serpent_enc_blk_xway_xor(struct 
serpent_ctx *ctx, u8 *dst, __serpent_enc_blk_4way(ctx, dst, src, true); } -static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, +static inline void serpent_dec_blk_xway(void *ctx, u8 *dst, const u8 *src) { serpent_dec_blk_4way(ctx, dst, src); @@ -40,7 +40,7 @@ asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst, asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst, const u8 *src); -static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, +static inline void serpent_enc_blk_xway(void *ctx, u8 *dst, const u8 *src) { __serpent_enc_blk_8way(ctx, dst, src, false); @@ -52,7 +52,7 @@ static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, __serpent_enc_blk_8way(ctx, dst, src, true); } -static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, +static inline void serpent_dec_blk_xway(void *ctx, u8 *dst, const u8 *src) { serpent_dec_blk_8way(ctx, dst, src); diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h index 878c51cee..86fc65f8e 100644 --- a/arch/x86/include/asm/crypto/twofish.h +++ b/arch/x86/include/asm/crypto/twofish.h @@ -17,19 +17,19 @@ struct twofish_xts_ctx { }; /* regular block cipher functions from twofish_x86_64 module */ -asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void twofish_enc_blk(void *ctx, u8 *dst, const u8 *src); -asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void twofish_dec_blk(void *ctx, u8 *dst, const u8 *src); /* 3-way parallel cipher functions */ -asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void __twofish_enc_blk_3way(void *ctx, u8 *dst, const u8 *src, bool xor); -asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, +asmlinkage void twofish_dec_blk_3way(void *ctx, u8 *dst, const u8 *src); /* helpers from twofish_x86_64-3way module */ -extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src); +extern void twofish_dec_blk_cbc_3way(void *ctx, u8 *dst, const u8 *src); extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv); extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h index 9476c04ee..8d1cda487 100644 --- a/arch/x86/include/asm/current.h +++ b/arch/x86/include/asm/current.h @@ -16,6 +16,11 @@ static __always_inline struct task_struct *get_current(void) #define current get_current() +#else + +#define GET_CURRENT(reg) \ + _ASM_MOV PER_CPU_VAR(current_task),reg ; + #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_CURRENT_H */ diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 12080d87d..7319a478a 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; + desc->type |= info->seg_not_present ^ 1; desc->s = 1; desc->dpl = 0x3; @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in } extern struct desc_ptr idt_descr; -extern gate_desc idt_table[]; +extern gate_desc idt_table[IDT_ENTRIES]; extern const struct desc_ptr debug_idt_descr; -extern gate_desc debug_idt_table[]; - -struct gdt_page { - struct desc_struct 
gdt[GDT_ENTRIES]; -} __attribute__((aligned(PAGE_SIZE))); - -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); +extern gate_desc debug_idt_table[IDT_ENTRIES]; +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { - return per_cpu(gdt_page, cpu).gdt; + return cpu_gdt_table[cpu]; } #ifdef CONFIG_X86_64 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type, unsigned long base, unsigned dpl, unsigned flags, unsigned short seg) { - gate->a = (seg << 16) | (base & 0xffff); - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8); + gate->gate.offset_low = base; + gate->gate.seg = seg; + gate->gate.reserved = 0; + gate->gate.type = type; + gate->gate.s = 0; + gate->gate.dpl = dpl; + gate->gate.p = 1; + gate->gate.offset_high = base >> 16; } #endif @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) { + pax_open_kernel(); memcpy(&idt[entry], gate, sizeof(*gate)); + pax_close_kernel(); } static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) { + pax_open_kernel(); memcpy(&ldt[entry], desc, 8); + pax_close_kernel(); } static inline void @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int default: size = sizeof(*gdt); break; } + pax_open_kernel(); memcpy(&gdt[entry], desc, size); + pax_close_kernel(); } static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) static inline void native_load_tr_desc(void) { + pax_open_kernel(); asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); + pax_close_kernel(); } static inline void native_load_gdt(const struct desc_ptr *dtr) @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) struct desc_struct *gdt = get_cpu_gdt_table(cpu); unsigned int i; + pax_open_kernel(); for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; + pax_close_kernel(); } /* This intentionally ignores lm, since 32-bit apps don't have that field. 
*/ @@ -280,7 +293,7 @@ static inline void clear_LDT(void) set_ldt(NULL, 0); } -static inline unsigned long get_desc_base(const struct desc_struct *desc) +static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); } @@ -304,7 +317,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) } #ifdef CONFIG_X86_64 -static inline void set_nmi_gate(int gate, void *addr) +static inline void set_nmi_gate(int gate, const void *addr) { gate_desc s; @@ -314,14 +327,14 @@ static inline void set_nmi_gate(int gate, void *addr) #endif #ifdef CONFIG_TRACING -extern struct desc_ptr trace_idt_descr; -extern gate_desc trace_idt_table[]; +extern const struct desc_ptr trace_idt_descr; +extern gate_desc trace_idt_table[IDT_ENTRIES]; static inline void write_trace_idt_entry(int entry, const gate_desc *gate) { write_idt_entry(trace_idt_table, entry, gate); } -static inline void _trace_set_gate(int gate, unsigned type, void *addr, +static inline void _trace_set_gate(int gate, unsigned type, const void *addr, unsigned dpl, unsigned ist, unsigned seg) { gate_desc s; @@ -341,7 +354,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate) #define _trace_set_gate(gate, type, addr, dpl, ist, seg) #endif -static inline void _set_gate(int gate, unsigned type, void *addr, +static inline void _set_gate(int gate, unsigned type, const void *addr, unsigned dpl, unsigned ist, unsigned seg) { gate_desc s; @@ -364,14 +377,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr, #define set_intr_gate_notrace(n, addr) \ do { \ BUG_ON((unsigned)n > 0xFF); \ - _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \ + _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \ __KERNEL_CS); \ } while (0) #define set_intr_gate(n, addr) \ do { \ set_intr_gate_notrace(n, addr); \ - _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\ + _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\ 0, 0, __KERNEL_CS); \ } while (0) @@ -399,19 +412,19 @@ static inline void alloc_system_vector(int vector) /* * This routine sets up an interrupt gate at directory privilege level 3. 
*/ -static inline void set_system_intr_gate(unsigned int n, void *addr) +static inline void set_system_intr_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); } -static inline void set_system_trap_gate(unsigned int n, void *addr) +static inline void set_system_trap_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); } -static inline void set_trap_gate(unsigned int n, void *addr) +static inline void set_trap_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); @@ -420,16 +433,16 @@ static inline void set_trap_gate(unsigned int n, void *addr) static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) { BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3)); } -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); } -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); @@ -501,4 +514,17 @@ static inline void load_current_idt(void) else load_idt((const struct desc_ptr *)&idt_descr); } + +#ifdef CONFIG_X86_32 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) +{ + struct desc_struct d; + + if (likely(limit)) + limit = (limit - 1UL) >> PAGE_SHIFT; + pack_descriptor(&d, base, limit, 0xFB, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S); +} +#endif + #endif /* _ASM_X86_DESC_H */ diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h index eb5deb424..ec19436dd 100644 --- a/arch/x86/include/asm/desc_defs.h +++ b/arch/x86/include/asm/desc_defs.h @@ -31,6 +31,12 @@ struct desc_struct { unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; }; + struct { + u16 offset_low; + u16 seg; + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned offset_high: 16; + } gate; }; } __attribute__((packed)); diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h index ced283ac7..ffe04ccf6 100644 --- a/arch/x86/include/asm/div64.h +++ b/arch/x86/include/asm/div64.h @@ -39,7 +39,7 @@ __mod; \ }) -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) { union { u64 v64; diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h index fe884e18f..46149aefc 100644 --- a/arch/x86/include/asm/dma.h +++ b/arch/x86/include/asm/dma.h @@ -149,6 +149,7 @@ #ifdef CONFIG_ISA_DMA_API extern spinlock_t dma_spin_lock; +static inline unsigned long claim_dma_lock(void) __acquires(&dma_spin_lock); static inline unsigned long claim_dma_lock(void) { unsigned long flags; @@ -156,6 +157,7 @@ static inline unsigned long claim_dma_lock(void) return flags; } +static inline void release_dma_lock(unsigned long flags) __releases(&dma_spin_lock); static inline void release_dma_lock(unsigned long flags) { spin_unlock_irqrestore(&dma_spin_lock, flags); diff --git 
a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 389d700b9..fa5126610 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -150,6 +150,11 @@ static inline bool efi_is_native(void) static inline bool efi_runtime_supported(void) { + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + return false; +#endif + if (efi_is_native()) return true; diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index e7f155c30..8611814b3 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t; #include -#ifdef CONFIG_X86_64 -extern unsigned int vdso64_enabled; -#endif #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) extern unsigned int vdso32_enabled; #endif @@ -250,7 +247,25 @@ extern int force_personality32; the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ +#ifdef CONFIG_PAX_SEGMEXEC +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) +#else #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#endif + +#ifdef CONFIG_PAX_ASLR +#ifdef CONFIG_X86_32 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL + +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) +#else +#define PAX_ELF_ET_DYN_BASE 0x400000UL + +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) +#endif +#endif /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, @@ -299,17 +314,13 @@ do { \ #define ARCH_DLINFO \ do { \ - if (vdso64_enabled) \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, \ - (unsigned long __force)current->mm->context.vdso); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \ } while (0) /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. 
*/ #define ARCH_DLINFO_X32 \ do { \ - if (vdso64_enabled) \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, \ - (unsigned long __force)current->mm->context.vdso); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \ } while (0) #define AT_SYSINFO 32 @@ -324,10 +335,10 @@ else \ #endif /* !CONFIG_X86_32 */ -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) +#define VDSO_CURRENT_BASE (current->mm->context.vdso) #define VDSO_ENTRY \ - ((unsigned long)current->mm->context.vdso + \ + (current->mm->context.vdso + \ vdso_image_32.sym___kernel_vsyscall) struct linux_binprm; diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h index 77a99ac06..39ff7f5fa 100644 --- a/arch/x86/include/asm/emergency-restart.h +++ b/arch/x86/include/asm/emergency-restart.h @@ -1,6 +1,6 @@ #ifndef _ASM_X86_EMERGENCY_RESTART_H #define _ASM_X86_EMERGENCY_RESTART_H -extern void machine_emergency_restart(void); +extern void machine_emergency_restart(void) __noreturn; #endif /* _ASM_X86_EMERGENCY_RESTART_H */ diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 8554f960e..6c58add91 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -142,7 +142,7 @@ extern pte_t *kmap_pte; extern pte_t *pkmap_page_table; void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); -void native_set_fixmap(enum fixed_addresses idx, +void native_set_fixmap(unsigned int idx, phys_addr_t phys, pgprot_t flags); #ifndef CONFIG_PARAVIRT diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h index 1c7eefe32..d0e470288 100644 --- a/arch/x86/include/asm/floppy.h +++ b/arch/x86/include/asm/floppy.h @@ -229,18 +229,18 @@ static struct fd_routine_l { int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); } fd_routine[] = { { - request_dma, - free_dma, - get_dma_residue, - dma_mem_alloc, - hard_dma_setup + ._request_dma = request_dma, + ._free_dma = free_dma, + ._get_dma_residue = get_dma_residue, + ._dma_mem_alloc = dma_mem_alloc, + ._dma_setup = hard_dma_setup }, { - vdma_request_dma, - vdma_nop, - vdma_get_dma_residue, - vdma_mem_alloc, - vdma_dma_setup + ._request_dma = vdma_request_dma, + ._free_dma = vdma_nop, + ._get_dma_residue = vdma_get_dma_residue, + ._dma_mem_alloc = vdma_mem_alloc, + ._dma_setup = vdma_dma_setup } }; diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 2737366ea..e152d4b98 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -102,9 +102,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu); #define user_insn(insn, output, input...) 
\ ({ \ int err; \ - asm volatile(ASM_STAC "\n" \ - "1:" #insn "\n\t" \ - "2: " ASM_CLAC "\n" \ + user_access_begin(); \ + asm volatile("1:" \ + __copyuser_seg \ + #insn "\n\t" \ + "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: movl $-1,%[err]\n" \ " jmp 2b\n" \ @@ -112,6 +114,7 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu); _ASM_EXTABLE(1b, 3b) \ : [err] "=r" (err), output \ : "0"(0), input); \ + user_access_end(); \ err; \ }) @@ -191,9 +194,9 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx) static inline void copy_fxregs_to_kernel(struct fpu *fpu) { if (IS_ENABLED(CONFIG_X86_32)) - asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave)); + asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave)); else if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) - asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave)); + asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave)); else { /* Using "rex64; fxsave %0" is broken because, if the memory * operand uses any extended registers for addressing, a second @@ -210,15 +213,15 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) * an extended register is needed for addressing (fix submitted * to mainline 2005-11-21). * - * asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave)); + * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave)); * * This, however, we can work around by forcing the compiler to * select an addressing mode that doesn't require extended * registers. */ asm volatile( "rex64/fxsave (%[fx])" - : "=m" (fpu->state.fxsave) - : [fx] "R" (&fpu->state.fxsave)); + : "=m" (fpu->state->fxsave) + : [fx] "R" (&fpu->state->fxsave)); } } @@ -390,9 +393,9 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf) if (unlikely(err)) return -EFAULT; - stac(); - XSTATE_OP(XSAVE, buf, -1, -1, err); - clac(); + user_access_begin(); + XSTATE_OP(__copyuser_seg XSAVE, buf, -1, -1, err); + user_access_end(); return err; } @@ -402,14 +405,14 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf) */ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask) { - struct xregs_state *xstate = ((__force struct xregs_state *)buf); + struct xregs_state *xstate = ((__force_kernel struct xregs_state *)buf); u32 lmask = mask; u32 hmask = mask >> 32; int err; - stac(); - XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); - clac(); + user_access_begin(); + XSTATE_OP(__copyuser_seg XRSTOR, xstate, lmask, hmask, err); + user_access_end(); return err; } @@ -427,7 +430,7 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask) static inline int copy_fpregs_to_fpstate(struct fpu *fpu) { if (likely(use_xsave())) { - copy_xregs_to_kernel(&fpu->state.xsave); + copy_xregs_to_kernel(&fpu->state->xsave); return 1; } @@ -440,7 +443,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu) * Legacy FPU register saving, FNSAVE always clears FPU registers, * so we have to mark them inactive: */ - asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave)); + asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state->fsave)); return 0; } @@ -469,7 +472,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate) "fnclex\n\t" "emms\n\t" "fildl %P[addr]" /* set F?P to defined value */ - : : [addr] "m" (fpstate)); + : : [addr] "m" (cpu_tss[raw_smp_processor_id()].x86_tss.sp0)); } __copy_kernel_to_fpregs(fpstate); @@ -614,7 +617,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu) new_fpu->counter++; 
__fpregs_activate(new_fpu); trace_x86_fpu_regs_activated(new_fpu); - prefetch(&new_fpu->state); + prefetch(new_fpu->state); } else { __fpregs_deactivate_hw(); } @@ -626,7 +629,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu) if (fpu_want_lazy_restore(new_fpu, cpu)) fpu.preload = 0; else - prefetch(&new_fpu->state); + prefetch(new_fpu->state); fpregs_activate(new_fpu); } } @@ -646,7 +649,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu) static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch) { if (fpu_switch.preload) - copy_kernel_to_fpregs(&new_fpu->state); + copy_kernel_to_fpregs(new_fpu->state); } /* diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 48df486b0..e32babddd 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -276,6 +276,39 @@ union fpregs_state { */ struct fpu { /* + * @state: + * + * In-memory copy of all FPU registers that we save/restore + * over context switches. If the task is using the FPU then + * the registers in the FPU are more recent than this state + * copy. If the task context-switches away then they get + * saved here and represent the FPU state. + * + * After context switches there may be a (short) time period + * during which the in-FPU hardware registers are unchanged + * and still perfectly match this state, if the tasks + * scheduled afterwards are not using the FPU. + * + * This is the 'lazy restore' window of optimization, which + * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'. + * + * We detect whether a subsequent task uses the FPU via setting + * CR0::TS to 1, which causes any FPU use to raise a #NM fault. + * + * During this window, if the task gets scheduled again, we + * might be able to skip having to do a restore from this + * memory buffer to the hardware registers - at the cost of + * incurring the overhead of #NM fault traps. + * + * Note that on modern CPUs that support the XSAVEOPT (or other + * optimized XSAVE instructions), we don't use #NM traps anymore, + * as the hardware can track whether FPU registers need saving + * or not. On such CPUs we activate the non-lazy ('eagerfpu') + * logic, which unconditionally saves/restores all FPU state + * across context switches. (if FPU state exists.) + */ + union fpregs_state *state; + /* * @last_cpu: * * Records the last CPU on which this context was loaded into @@ -332,43 +365,6 @@ struct fpu { * deal with bursty apps that only use the FPU for a short time: */ unsigned char counter; - /* - * @state: - * - * In-memory copy of all FPU registers that we save/restore - * over context switches. If the task is using the FPU then - * the registers in the FPU are more recent than this state - * copy. If the task context-switches away then they get - * saved here and represent the FPU state. - * - * After context switches there may be a (short) time period - * during which the in-FPU hardware registers are unchanged - * and still perfectly match this state, if the tasks - * scheduled afterwards are not using the FPU. - * - * This is the 'lazy restore' window of optimization, which - * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'. - * - * We detect whether a subsequent task uses the FPU via setting - * CR0::TS to 1, which causes any FPU use to raise a #NM fault. 
- * - * During this window, if the task gets scheduled again, we - * might be able to skip having to do a restore from this - * memory buffer to the hardware registers - at the cost of - * incurring the overhead of #NM fault traps. - * - * Note that on modern CPUs that support the XSAVEOPT (or other - * optimized XSAVE instructions), we don't use #NM traps anymore, - * as the hardware can track whether FPU registers need saving - * or not. On such CPUs we activate the non-lazy ('eagerfpu') - * logic, which unconditionally saves/restores all FPU state - * across context switches. (if FPU state exists.) - */ - union fpregs_state state; - /* - * WARNING: 'state' is dynamically-sized. Do not put - * anything after it here. - */ }; #endif /* _ASM_X86_FPU_H */ diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h index 430bacf73..d0fbcf0b0 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -43,6 +43,7 @@ #define REX_PREFIX #endif +extern unsigned int xstate_size; extern u64 xfeatures_mask; extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index b4c1f5453..726053de7 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -12,25 +12,25 @@ #include #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ - asm volatile("\t" ASM_STAC "\n" \ - "1:\t" insn "\n" \ - "2:\t" ASM_CLAC "\n" \ + typecheck(u32 __user *, uaddr); \ + asm volatile("1:\t" insn "\n" \ + "2:\t\n" \ "\t.section .fixup,\"ax\"\n" \ "3:\tmov\t%3, %1\n" \ "\tjmp\t2b\n" \ "\t.previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \ : "i" (-EFAULT), "0" (oparg), "1" (0)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ - asm volatile("\t" ASM_STAC "\n" \ - "1:\tmovl %2, %0\n" \ + typecheck(u32 __user *, uaddr); \ + asm volatile("1:\tmovl %2, %0\n" \ "\tmovl\t%0, %3\n" \ "\t" insn "\n" \ - "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \ + "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \ "\tjnz\t1b\n" \ - "3:\t" ASM_CLAC "\n" \ + "3:\t\n" \ "\t.section .fixup,\"ax\"\n" \ "4:\tmov\t%5, %1\n" \ "\tjmp\t3b\n" \ @@ -38,7 +38,7 @@ _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=&a" (oldval), "=&r" (ret), \ - "+m" (*uaddr), "=&r" (tem) \ + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) @@ -57,12 +57,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) pagefault_disable(); + user_access_begin(); switch (op) { case FUTEX_OP_SET: - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ADD: - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: @@ -77,6 +78,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) default: ret = -ENOSYS; } + user_access_end(); pagefault_enable(); diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index b90e10530..30a595000 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -164,8 +164,8 @@ static inline void unlock_vector_lock(void) {} #endif /* CONFIG_X86_LOCAL_APIC */ /* Statistics */ 
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
+extern atomic_unchecked_t irq_err_count;
+extern atomic_unchecked_t irq_mis_count;
 extern void elcr_set_level_irq(unsigned int irq);
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 67942b6ad..176a8b4b9 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -46,7 +46,7 @@ struct hypervisor_x86 {
 /* pin current vcpu to specified physical cpu (run rarely) */
 void (*pin_vcpu)(int);
-};
+} __do_const;
 extern const struct hypervisor_x86 *x86_hyper;
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 39bcefc20..272d904f1 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -63,7 +63,7 @@ struct legacy_pic {
 int (*probe)(void);
 int (*irq_pending)(unsigned int irq);
 void (*make_irq)(unsigned int irq);
-};
+} __do_const;
 extern struct legacy_pic *legacy_pic;
 extern struct legacy_pic null_legacy_pic;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d34bd3700..08f323117 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
@@ -54,12 +55,12 @@ static inline void name(type val, volatile void __iomem *addr) \
 "m" (*(volatile type __force *)addr) barrier); }
 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
 build_mmio_read(__readb, "b", unsigned char, "=q", )
-build_mmio_read(__readw, "w", unsigned short, "=r", )
-build_mmio_read(__readl, "l", unsigned int, "=r", )
+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
@@ -115,7 +116,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
 * this function
 */
-static inline phys_addr_t virt_to_phys(volatile void *address)
+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
 {
 return __pa(address);
 }
@@ -194,7 +195,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 return ioremap_nocache(offset, size);
 }
-extern void iounmap(volatile void __iomem *addr);
+extern void iounmap(const volatile void __iomem *addr);
 extern void set_iounmap_nonlazy(void);
@@ -202,6 +203,17 @@ extern void set_iounmap_nonlazy(void);
 #include
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
+{
+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
+}
+
+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ?
1 : 0; +} + /* * Convert a virtual cached pointer to an uncached pointer */ diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 6ca9fd623..4dbd5e2f3 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -48,6 +48,10 @@ #define IA32_SYSCALL_VECTOR 0x80 +#define X86_REFCOUNT_VECTOR 0x81 /* Refcount Overflow or Underflow Exception */ +//#define X86_RAP_CALL_VECTOR 0x82 /* RAP Indirect Call Violation Exception */ +//#define X86_RAP_RET_VECTOR 0x83 /* RAP Function Return Violation Exception */ + /* * Vectors 0x30-0x3f are used for ISA interrupts. * round up to the next 16-vector boundary diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index ac7692dcf..90e119ce4 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -12,7 +12,7 @@ * Interrupt control: */ -static inline unsigned long native_save_fl(void) +static inline asmlinkage unsigned long native_save_fl(void) { unsigned long flags; @@ -27,23 +27,29 @@ static inline unsigned long native_save_fl(void) : /* no input */ : "memory"); +#if !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_HOST) || !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_VIRTUALBOX) + BUG_ON(flags & X86_EFLAGS_AC); +#endif return flags; } static inline void native_restore_fl(unsigned long flags) { +#if !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_HOST) || !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_VIRTUALBOX) + BUG_ON(flags & X86_EFLAGS_AC); +#endif asm volatile("push %0 ; popf" : /* no output */ :"g" (flags) :"memory", "cc"); } -static inline void native_irq_disable(void) +static inline asmlinkage void native_irq_disable(void) { asm volatile("cli": : :"memory"); } -static inline void native_irq_enable(void) +static inline asmlinkage void native_irq_enable(void) { asm volatile("sti": : :"memory"); } @@ -141,6 +147,11 @@ static inline notrace unsigned long arch_local_irq_save(void) swapgs; \ sysretl +#define GET_CR0_INTO_RDI mov %cr0, %rdi +#define SET_RDI_INTO_CR0 mov %rdi, %cr0 +#define GET_CR3_INTO_RDI mov %cr3, %rdi +#define SET_RDI_INTO_CR3 mov %rdi, %cr3 + #else #define INTERRUPT_RETURN iret #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit @@ -197,6 +208,14 @@ static inline int arch_irqs_disabled(void) # define LOCKDEP_SYS_EXIT # define LOCKDEP_SYS_EXIT_IRQ #endif +#else +#ifdef CONFIG_TRACE_IRQFLAGS +void trace_hardirqs_on_thunk(void); +void trace_hardirqs_off_thunk(void); +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void lockdep_sys_exit_thunk(void); +#endif #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index d1d1e5094..5bacb6d57 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -37,7 +37,7 @@ typedef u8 kprobe_opcode_t; #define RELATIVEJUMP_SIZE 5 #define RELATIVECALL_OPCODE 0xe8 #define RELATIVE_ADDR_SIZE 4 -#define MAX_STACK_SIZE 64 +#define MAX_STACK_SIZE 64UL #define CUR_STACK_SIZE(ADDR) \ (current_top_of_stack() - (unsigned long)(ADDR)) #define MIN_STACK_SIZE(ADDR) \ diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index e9cd7befc..0f3574f73 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -279,6 +279,8 @@ enum x86emul_mode { #define X86EMUL_SMM_MASK (1 << 6) #define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7) +struct fastop; + struct x86_emulate_ctxt { const struct x86_emulate_ops *ops; @@ -311,7 +313,10 @@ struct x86_emulate_ctxt { struct operand src; struct operand src2; struct operand dst; - 
int (*execute)(struct x86_emulate_ctxt *ctxt); + union { + int (*execute)(struct x86_emulate_ctxt *ctxt); + void (*fastop)(struct fastop *fake); + } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); /* * The following six fields are cleared together, diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 751197809..cf525730a 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h @@ -10,33 +10,73 @@ typedef struct { atomic_long_t a; } local_t; +typedef struct { + atomic_long_unchecked_t a; +} local_unchecked_t; + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } #define local_read(l) atomic_long_read(&(l)->a) +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) #define local_set(l, i) atomic_long_set(&(l)->a, (i)) +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i)) static inline void local_inc(local_t *l) { - asm volatile(_ASM_INC "%0" + asm volatile(_ASM_INC "%0\n\t" + PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8) + : [counter] "+m" (l->a.counter) + : : "cc", "cx"); +} + +static inline void local_inc_unchecked(local_unchecked_t *l) +{ + asm volatile(_ASM_INC "%0\n" : "+m" (l->a.counter)); } static inline void local_dec(local_t *l) { - asm volatile(_ASM_DEC "%0" + asm volatile(_ASM_DEC "%0\n\t" + PAX_REFCOUNT_UNDERFLOW(BITS_PER_LONG/8) + : [counter] "+m" (l->a.counter) + : : "cc", "cx"); +} + +static inline void local_dec_unchecked(local_unchecked_t *l) +{ + asm volatile(_ASM_DEC "%0\n" : "+m" (l->a.counter)); } static inline void local_add(long i, local_t *l) { - asm volatile(_ASM_ADD "%1,%0" + asm volatile(_ASM_ADD "%1,%0\n\t" + PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8) + : [counter] "+m" (l->a.counter) + : "ir" (i) + : "cc", "cx"); +} + +static inline void local_add_unchecked(long i, local_unchecked_t *l) +{ + asm volatile(_ASM_ADD "%1,%0\n" : "+m" (l->a.counter) : "ir" (i)); } static inline void local_sub(long i, local_t *l) { - asm volatile(_ASM_SUB "%1,%0" + asm volatile(_ASM_SUB "%1,%0\n\t" + PAX_REFCOUNT_UNDERFLOW(BITS_PER_LONG/8) + : [counter] "+m" (l->a.counter) + : "ir" (i) + : "cc", "cx"); +} + +static inline void local_sub_unchecked(long i, local_unchecked_t *l) +{ + asm volatile(_ASM_SUB "%1,%0\n" : "+m" (l->a.counter) : "ir" (i)); } @@ -52,7 +92,7 @@ static inline void local_sub(long i, local_t *l) */ static inline bool local_sub_and_test(long i, local_t *l) { - GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e); + GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, -BITS_PER_LONG/8, "er", i, "%0", e); } /** @@ -65,7 +105,7 @@ static inline bool local_sub_and_test(long i, local_t *l) */ static inline bool local_dec_and_test(local_t *l) { - GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e); + GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, -BITS_PER_LONG/8, "%0", e); } /** @@ -78,7 +118,7 @@ static inline bool local_dec_and_test(local_t *l) */ static inline bool local_inc_and_test(local_t *l) { - GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e); + GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, BITS_PER_LONG/8, "%0", e); } /** @@ -92,7 +132,7 @@ static inline bool local_inc_and_test(local_t *l) */ static inline bool local_add_negative(long i, local_t *l) { - GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s); + GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, BITS_PER_LONG/8, "er", i, "%0", s); } /** @@ -105,6 +145,23 @@ static inline bool local_add_negative(long i, local_t *l) static inline long local_add_return(long i, local_t *l) { long __i = i; + asm volatile(_ASM_XADD "%0, %1\n\t" + 
PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8) + : "+r" (i), [counter] "+m" (l->a.counter) + : : "memory", "cc", "cx"); + return i + __i; +} + +/** + * local_add_return_unchecked - add and return + * @i: integer value to add + * @l: pointer to type local_unchecked_t + * + * Atomically adds @i to @l and returns @i + @l + */ +static inline long local_add_return_unchecked(long i, local_unchecked_t *l) +{ + long __i = i; asm volatile(_ASM_XADD "%0, %1;" : "+r" (i), "+m" (l->a.counter) : : "memory"); @@ -121,6 +178,8 @@ static inline long local_sub_return(long i, local_t *l) #define local_cmpxchg(l, o, n) \ (cmpxchg_local(&((l)->a.counter), (o), (n))) +#define local_cmpxchg_unchecked(l, o, n) \ + (cmpxchg_local(&((l)->a.counter), (o), (n))) /* Always has a lock prefix */ #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 9bd7ff5ff..d9c8715a4 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -187,7 +187,7 @@ struct mca_msr_regs { u32 (*status) (int bank); u32 (*addr) (int bank); u32 (*misc) (int bank); -}; +} __no_const; extern struct mce_vendor_flags mce_flags; diff --git b/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h new file mode 100644 index 000000000..2bfd3ba88 --- /dev/null +++ b/arch/x86/include/asm/mman.h @@ -0,0 +1,15 @@ +#ifndef _X86_MMAN_H +#define _X86_MMAN_H + +#include + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#ifdef CONFIG_X86_32 +#define arch_mmap_check i386_mmap_check +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); +#endif +#endif +#endif + +#endif /* X86_MMAN_H */ diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 72198c64e..2049a3692 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -19,7 +19,19 @@ typedef struct { #endif struct mutex lock; - void __user *vdso; /* vdso base address */ + unsigned long vdso; /* vdso base address */ + +#ifdef CONFIG_X86_32 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + unsigned long user_cs_base; + unsigned long user_cs_limit; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + cpumask_t cpu_user_cs_mask; +#endif + +#endif +#endif const struct vdso_image *vdso_image; /* vdso image in use */ atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 8e0a9fe86..2473467b1 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -47,7 +47,7 @@ struct ldt_struct { * allocations, but it's not worth trying to optimize. 
*/ struct desc_struct *entries; - int size; + unsigned int size; }; /* @@ -59,6 +59,23 @@ void destroy_context_ldt(struct mm_struct *mm); static inline int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) { + if (tsk == current) { + mm->context.vdso = 0; + +#ifdef CONFIG_X86_32 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + mm->context.user_cs_base = 0UL; + mm->context.user_cs_limit = ~0UL; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + cpumask_clear(&mm->context.cpu_user_cs_mask); +#endif + +#endif +#endif + + } + return 0; } static inline void destroy_context_ldt(struct mm_struct *mm) {} @@ -99,6 +116,20 @@ static inline void load_mm_ldt(struct mm_struct *mm) static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (!(static_cpu_has(X86_FEATURE_PCIDUDEREF))) { + unsigned int i; + pgd_t *pgd; + + pax_open_kernel(); + pgd = get_cpu_pgd(smp_processor_id(), kernel); + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i) + set_pgd_batched(pgd+i, native_make_pgd(0)); + pax_close_kernel(); + } +#endif + #ifdef CONFIG_SMP if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index e3b7819ca..ba128ecff 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h @@ -5,6 +5,7 @@ #ifdef CONFIG_X86_64 /* X86_64 does not define MODULE_PROC_FAMILY */ +#define MODULE_PROC_FAMILY "" #elif defined CONFIG_M486 #define MODULE_PROC_FAMILY "486 " #elif defined CONFIG_M586 @@ -57,8 +58,26 @@ #error unknown processor family #endif -#ifdef CONFIG_X86_32 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS " +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR) +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR " +#else +#define MODULE_PAX_KERNEXEC "" +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF +#define MODULE_PAX_UDEREF "UDEREF " +#else +#define MODULE_PAX_UDEREF "" #endif +#ifdef CONFIG_PAX_RAP +#define MODULE_PAX_RAP "RAP " +#else +#define MODULE_PAX_RAP "" +#endif + +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_RAP + #endif /* _ASM_X86_MODULE_H */ diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h index e9355a84f..f0317596b 100644 --- a/arch/x86/include/asm/mutex_32.h +++ b/arch/x86/include/asm/mutex_32.h @@ -30,7 +30,7 @@ do { \ \ asm volatile(LOCK_PREFIX " decl (%%eax)\n" \ " jns 1f \n" \ - " call " #fail_fn "\n" \ + PAX_DIRECT_CALL(#fail_fn)"\n" \ "1:\n" \ : "=a" (dummy) \ : "a" (count) \ @@ -76,7 +76,7 @@ do { \ \ asm volatile(LOCK_PREFIX " incl (%%eax)\n" \ " jg 1f\n" \ - " call " #fail_fn "\n" \ + PAX_DIRECT_CALL(#fail_fn)"\n" \ "1:\n" \ : "=a" (dummy) \ : "a" (count) \ diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h index d98507584..d2fc470f6 100644 --- a/arch/x86/include/asm/mutex_64.h +++ b/arch/x86/include/asm/mutex_64.h @@ -39,7 +39,7 @@ do { \ \ asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \ " jns 1f \n" \ - " call " #fail_fn "\n" \ + PAX_DIRECT_CALL(#fail_fn)"\n" \ "1:" \ : "=D" (dummy) \ : "D" (v) \ @@ -94,7 +94,7 @@ do { \ \ asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \ " jg 1f\n" \ - " call " #fail_fn "\n" \ + PAX_DIRECT_CALL(#fail_fn)"\n" \ "1:" \ : "=D" (dummy) \ : "D" (v) \ diff --git a/arch/x86/include/asm/nmi.h 
b/arch/x86/include/asm/nmi.h
index 5f2fc4441..106caa61e 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -36,26 +36,35 @@ enum {
 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
+struct nmiaction;
+
+struct nmiwork {
+ const struct nmiaction *action;
+ u64 max_duration;
+ struct irq_work irq_work;
+};
+
 struct nmiaction {
 struct list_head list;
 nmi_handler_t handler;
- u64 max_duration;
- struct irq_work irq_work;
 unsigned long flags;
 const char *name;
-};
+ struct nmiwork *work;
+} __do_const;
 #define register_nmi_handler(t, fn, fg, n, init...) \
 ({ \
- static struct nmiaction init fn##_na = { \
+ static struct nmiwork fn##_nw; \
+ static const struct nmiaction init fn##_na = { \
 .handler = (fn), \
 .name = (n), \
 .flags = (fg), \
+ .work = &fn##_nw, \
 }; \
 __register_nmi_handler((t), &fn##_na); \
 })
-int __register_nmi_handler(unsigned int, struct nmiaction *);
+int __register_nmi_handler(unsigned int, const struct nmiaction *);
 void unregister_nmi_handler(unsigned int, const char *);
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index cf8f619b3..bbcf5e67e 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -58,6 +58,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
 #endif
+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
+
 #define __boot_va(x) __va(x)
 #define __boot_pa(x) __pa(x)
@@ -65,11 +67,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+#define virt_to_page(kaddr) \
+ ({ \
+ const void *__kaddr = (const void *)(kaddr); \
+ BUG_ON(!virt_addr_valid(__kaddr)); \
+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
+ })
+#else
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#endif
+
 #endif /* __ASSEMBLY__ */
 #include
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index 904f528cc..b4d0d2472 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -7,11 +7,17 @@
 #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
 #ifdef CONFIG_DEBUG_VIRTUAL
-extern unsigned long __phys_addr(unsigned long);
+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
 #else
-#define __phys_addr(x) __phys_addr_nodebug(x)
+static inline unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
+{
+ return __phys_addr_nodebug(x);
+}
 #endif
-#define __phys_addr_symbol(x) __phys_addr(x)
+static inline unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long x)
+{
+ return __phys_addr(x);
+}
 #define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
 #ifdef CONFIG_FLATMEM
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index b3bebf9e5..2c3570f2f 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -7,9 +7,9 @@
 /* duplicated to the one in bootmem.h */
 extern unsigned long max_pfn;
-extern unsigned long phys_base;
+extern const unsigned long phys_base;
-static inline unsigned long __phys_addr_nodebug(unsigned long x)
+static
__always_inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x) { unsigned long y = x - __START_KERNEL_map; @@ -20,12 +20,14 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x) } #ifdef CONFIG_DEBUG_VIRTUAL -extern unsigned long __phys_addr(unsigned long); -extern unsigned long __phys_addr_symbol(unsigned long); +extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long); +extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long); #else #define __phys_addr(x) __phys_addr_nodebug(x) -#define __phys_addr_symbol(x) \ - ((unsigned long)(x) - __START_KERNEL_map + phys_base) +static inline unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long x) +{ + return x - __START_KERNEL_map + phys_base; +} #endif #define __phys_reloc_hide(x) (x) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index ce932812f..17b0fda23 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -374,11 +374,11 @@ static inline pte_t __pte(pteval_t val) if (sizeof(pteval_t) > sizeof(long)) ret = PVOP_CALLEE2(pteval_t, - pv_mmu_ops.make_pte, + pv_mmu_ops, make_pte, val, (u64)val >> 32); else ret = PVOP_CALLEE1(pteval_t, - pv_mmu_ops.make_pte, + pv_mmu_ops, make_pte, val); return (pte_t) { .pte = ret }; @@ -389,10 +389,10 @@ static inline pteval_t pte_val(pte_t pte) pteval_t ret; if (sizeof(pteval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val, + ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops, pte_val, pte.pte, (u64)pte.pte >> 32); else - ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val, + ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops, pte_val, pte.pte); return ret; @@ -403,10 +403,10 @@ static inline pgd_t __pgd(pgdval_t val) pgdval_t ret; if (sizeof(pgdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd, + ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops, make_pgd, val, (u64)val >> 32); else - ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd, + ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops, make_pgd, val); return (pgd_t) { ret }; @@ -417,10 +417,10 @@ static inline pgdval_t pgd_val(pgd_t pgd) pgdval_t ret; if (sizeof(pgdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val, + ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops, pgd_val, pgd.pgd, (u64)pgd.pgd >> 32); else - ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val, + ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops, pgd_val, pgd.pgd); return ret; @@ -496,24 +496,24 @@ static inline pmd_t __pmd(pmdval_t val) pmdval_t ret; if (sizeof(pmdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd, + ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops, make_pmd, val, (u64)val >> 32); else - ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd, + ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops, make_pmd, val); return (pmd_t) { ret }; } -static inline pmdval_t pmd_val(pmd_t pmd) +static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd) { pmdval_t ret; if (sizeof(pmdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val, + ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops, pmd_val, pmd.pmd, (u64)pmd.pmd >> 32); else - ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val, + ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops, pmd_val, pmd.pmd); return ret; @@ -536,10 +536,10 @@ static inline pud_t __pud(pudval_t val) pudval_t ret; if (sizeof(pudval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud, + ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops, make_pud, val, (u64)val >> 32); else - ret = 
PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud, + ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops, make_pud, val); return (pud_t) { ret }; @@ -550,10 +550,10 @@ static inline pudval_t pud_val(pud_t pud) pudval_t ret; if (sizeof(pudval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val, + ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops, pud_val, pud.pud, (u64)pud.pud >> 32); else - ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val, + ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops, pud_val, pud.pud); return ret; @@ -571,6 +571,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) val); } +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd) +{ + pgdval_t val = native_pgd_val(pgd); + + if (sizeof(pgdval_t) > sizeof(long)) + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp, + val, (u64)val >> 32); + else + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp, + val); +} + static inline void pgd_clear(pgd_t *pgdp) { set_pgd(pgdp, __pgd(0)); @@ -655,6 +667,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, pv_mmu_ops.set_fixmap(idx, phys, flags); } +#ifdef CONFIG_PAX_KERNEXEC +static inline unsigned long pax_open_kernel(void) +{ + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel); +} + +static inline unsigned long pax_close_kernel(void) +{ + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel); +} +#else +static inline unsigned long pax_open_kernel(void) { return 0; } +static inline unsigned long pax_close_kernel(void) { return 0; } +#endif + #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, @@ -665,7 +692,7 @@ static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock) { - PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock); + PVOP_VCALLEE1(pv_lock_ops, queued_spin_unlock, lock); } static __always_inline void pv_wait(u8 *ptr, u8 val) @@ -735,7 +762,7 @@ static __always_inline void pv_kick(int cpu) */ #define PV_THUNK_NAME(func) "__raw_callee_save_" #func #define PV_CALLEE_SAVE_REGS_THUNK(func) \ - extern typeof(func) __raw_callee_save_##func; \ + extern typeof(func) __raw_callee_save_##func __rap_hash; \ \ asm(".pushsection .text;" \ ".globl " PV_THUNK_NAME(func) ";" \ @@ -743,38 +770,42 @@ static __always_inline void pv_kick(int cpu) PV_THUNK_NAME(func) ":" \ FRAME_BEGIN \ PV_SAVE_ALL_CALLER_REGS \ - "call " #func ";" \ + PAX_DIRECT_CALL_HASH(#func, PV_THUNK_NAME(func)) ";" \ PV_RESTORE_ALL_CALLER_REGS \ FRAME_END \ - "ret;" \ + PAX_RET(PV_THUNK_NAME(func))";" \ ".popsection") /* Get a reference to a callee-save function */ -#define PV_CALLEE_SAVE(func) \ - ((struct paravirt_callee_save) { __raw_callee_save_##func }) +#define PV_CALLEE_SAVE(field, func) \ + ((union paravirt_callee_save) { .field = __raw_callee_save_##func }) +#ifdef CONFIG_PAX_RAP +#define __PV_IS_CALLEE_SAVE(field, func) PV_CALLEE_SAVE(field, func) +#else /* Promise that "func" already uses the right calling convention */ -#define __PV_IS_CALLEE_SAVE(func) \ - ((struct paravirt_callee_save) { func }) +#define __PV_IS_CALLEE_SAVE(field, func) \ + ((union paravirt_callee_save) { .field = func }) +#endif static inline notrace unsigned long arch_local_save_flags(void) { - return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); + return PVOP_CALLEE0(unsigned long, pv_irq_ops, save_fl); } static inline notrace void arch_local_irq_restore(unsigned long f) { - PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); + 
PVOP_VCALLEE1(pv_irq_ops, restore_fl, f); } static inline notrace void arch_local_irq_disable(void) { - PVOP_VCALLEE0(pv_irq_ops.irq_disable); + PVOP_VCALLEE0(pv_irq_ops, irq_disable); } static inline notrace void arch_local_irq_enable(void) { - PVOP_VCALLEE0(pv_irq_ops.irq_enable); + PVOP_VCALLEE0(pv_irq_ops, irq_enable); } static inline notrace unsigned long arch_local_irq_save(void) @@ -806,9 +837,9 @@ extern void default_banner(void); #else /* __ASSEMBLY__ */ -#define _PVSITE(ptype, clobbers, ops, word, algn) \ +#define _PVSITE(ptype, clobbers, word, algn, ...)\ 771:; \ - ops; \ + __VA_ARGS__; \ 772:; \ .pushsection .parainstructions,"a"; \ .align algn; \ @@ -848,8 +879,10 @@ extern void default_banner(void); COND_POP(set, CLBR_RAX, rax) #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) -#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) -#define PARA_INDIRECT(addr) *addr(%rip) +#define PARA_SITE(ptype, clobbers, ...) _PVSITE(ptype, clobbers, .quad, 8, __VA_ARGS__) +#define PARA_INDIRECT(addr) addr(%rip) +#define PV_INDIRECT_CALL(ops, OPS, addr) pax_indirect_call PARA_INDIRECT(pv_##ops##_ops+PV_##OPS##_##addr), pv_##ops##_ops.##addr +#define PV_INDIRECT_CALL_CALLEE_SAVE(ops, OPS, addr) pax_indirect_call PARA_INDIRECT(pv_##ops##_ops+PV_##OPS##_##addr), pv_##ops##_ops.##addr##.##addr #else #define PV_SAVE_REGS(set) \ COND_PUSH(set, CLBR_EAX, eax); \ @@ -863,30 +896,32 @@ extern void default_banner(void); COND_POP(set, CLBR_EAX, eax) #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) -#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) -#define PARA_INDIRECT(addr) *%cs:addr +#define PARA_SITE(ptype, clobbers, ...) _PVSITE(ptype, clobbers, .long, 4, __VA_ARGS__) +#define PARA_INDIRECT(addr) %ss:addr +#define PV_INDIRECT_CALL(ops, OPS, addr) pax_indirect_call PARA_INDIRECT(pv_##ops##_ops+PV_##OPS##_##addr), pv_##ops##_ops.##addr +#define PV_INDIRECT_CALL_CALLEE_SAVE(ops, OPS, addr) pax_indirect_call PARA_INDIRECT(pv_##ops##_ops+PV_##OPS##_##addr), pv_##ops##_ops.##addr##.##addr #endif #define INTERRUPT_RETURN \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) + jmp *PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) #define DISABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ - call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ + PV_INDIRECT_CALL_CALLEE_SAVE(irq,IRQ,irq_disable); \ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) #define ENABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ - call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ + PV_INDIRECT_CALL_CALLEE_SAVE(irq,IRQ,irq_enable); \ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) #ifdef CONFIG_X86_32 #define GET_CR0_INTO_EAX \ push %ecx; push %edx; \ - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ + PV_INDIRECT_CALL(cpu,CPU,read_cr0); \ pop %edx; pop %ecx #else /* !CONFIG_X86_32 */ @@ -907,21 +942,36 @@ extern void default_banner(void); */ #define SWAPGS \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ + PV_INDIRECT_CALL(cpu,CPU,swapgs) \ ) #define GET_CR2_INTO_RAX \ - call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2) + PV_INDIRECT_CALL(mmu,MMU,read_cr2) #define PARAVIRT_ADJUST_EXCEPTION_FRAME \ PARA_SITE(PARA_PATCH(pv_irq_ops, 
PV_IRQ_adjust_exception_frame), \ CLBR_NONE, \ - call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame)) + PV_INDIRECT_CALL(irq,IRQ,adjust_exception_frame)) #define USERGS_SYSRET64 \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) + jmp *PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) + +#define GET_CR0_INTO_RDI \ + PV_INDIRECT_CALL(cpu,CPU,read_cr0); \ + mov %rax,%rdi + +#define SET_RDI_INTO_CR0 \ + PV_INDIRECT_CALL(cpu,CPU,write_cr0) + +#define GET_CR3_INTO_RDI \ + PV_INDIRECT_CALL(mmu,MMU,read_cr3); \ + mov %rax,%rdi + +#define SET_RDI_INTO_CR3 \ + PV_INDIRECT_CALL(mmu,MMU,write_cr3) + #endif /* CONFIG_X86_32 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 0f400c0e4..3e5329ec3 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -39,6 +39,7 @@ #ifndef __ASSEMBLY__ +#include #include #include #include @@ -51,14 +52,29 @@ struct mm_struct; struct desc_struct; struct task_struct; struct cpumask; +struct qspinlock; /* * Wrapper type for pointers to code which uses the non-standard - * calling convention. See PV_CALL_SAVE_REGS_THUNK below. + * calling convention. See PV_CALLEE_SAVE_REGS_THUNK below. */ -struct paravirt_callee_save { - void *func; -}; +union paravirt_callee_save { + void (*queued_spin_unlock)(struct qspinlock *); + + asmlinkage unsigned long (*save_fl)(void); + void (*restore_fl)(unsigned long); + asmlinkage void (*irq_disable)(void); + asmlinkage void (*irq_enable)(void); + + pteval_t (*pte_val)(pte_t); + pte_t (*make_pte)(pteval_t); + pmdval_t (*pmd_val)(pmd_t); + pmd_t (*make_pmd)(pmdval_t); + pudval_t (*pud_val)(pud_t); + pud_t (*make_pud)(pmdval_t); + pgdval_t (*pgd_val)(pgd_t); + pgd_t (*make_pgd)(pgdval_t); +} __no_const; /* general info */ struct pv_info { @@ -83,7 +99,7 @@ struct pv_init_ops { */ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, unsigned long addr, unsigned len); -}; +} __no_const __no_randomize_layout; struct pv_lazy_ops { @@ -91,12 +107,12 @@ struct pv_lazy_ops { void (*enter)(void); void (*leave)(void); void (*flush)(void); -}; +} __no_const __no_randomize_layout; struct pv_time_ops { unsigned long long (*sched_clock)(void); unsigned long long (*steal_clock)(int cpu); -}; +} __rap_hash __no_const __no_randomize_layout; struct pv_cpu_ops { /* hooks for various privileged instructions */ @@ -177,7 +193,7 @@ struct pv_cpu_ops { void (*start_context_switch)(struct task_struct *prev); void (*end_context_switch)(struct task_struct *next); -}; +} __rap_hash __no_const __no_randomize_layout; struct pv_irq_ops { /* @@ -189,10 +205,10 @@ struct pv_irq_ops { * NOTE: These functions callers expect the callee to preserve * more registers than the standard C calling convention. 
*/ - struct paravirt_callee_save save_fl; - struct paravirt_callee_save restore_fl; - struct paravirt_callee_save irq_disable; - struct paravirt_callee_save irq_enable; + union paravirt_callee_save save_fl; + union paravirt_callee_save restore_fl; + union paravirt_callee_save irq_disable; + union paravirt_callee_save irq_enable; void (*safe_halt)(void); void (*halt)(void); @@ -200,7 +216,7 @@ struct pv_irq_ops { #ifdef CONFIG_X86_64 void (*adjust_exception_frame)(void); #endif -}; +} __rap_hash __no_randomize_layout; struct pv_mmu_ops { unsigned long (*read_cr2)(void); @@ -259,11 +275,11 @@ struct pv_mmu_ops { void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); - struct paravirt_callee_save pte_val; - struct paravirt_callee_save make_pte; + union paravirt_callee_save pte_val; + union paravirt_callee_save make_pte; - struct paravirt_callee_save pgd_val; - struct paravirt_callee_save make_pgd; + union paravirt_callee_save pgd_val; + union paravirt_callee_save make_pgd; #if CONFIG_PGTABLE_LEVELS >= 3 #ifdef CONFIG_X86_PAE @@ -276,14 +292,15 @@ struct pv_mmu_ops { void (*set_pud)(pud_t *pudp, pud_t pudval); - struct paravirt_callee_save pmd_val; - struct paravirt_callee_save make_pmd; + union paravirt_callee_save pmd_val; + union paravirt_callee_save make_pmd; #if CONFIG_PGTABLE_LEVELS == 4 - struct paravirt_callee_save pud_val; - struct paravirt_callee_save make_pud; + union paravirt_callee_save pud_val; + union paravirt_callee_save make_pud; void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval); #endif /* CONFIG_PGTABLE_LEVELS == 4 */ #endif /* CONFIG_PGTABLE_LEVELS >= 3 */ @@ -295,7 +312,13 @@ struct pv_mmu_ops { an mfn. We can tell which is which from the index. */ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags); -}; + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long (*pax_open_kernel)(void); + unsigned long (*pax_close_kernel)(void); +#endif + +} __rap_hash __no_randomize_layout; struct arch_spinlock; #ifdef CONFIG_SMP @@ -306,15 +329,18 @@ struct qspinlock; struct pv_lock_ops { void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val); - struct paravirt_callee_save queued_spin_unlock; + union paravirt_callee_save queued_spin_unlock; void (*wait)(u8 *ptr, u8 val); void (*kick)(int cpu); -}; +} __rap_hash __no_randomize_layout; /* This contains all the paravirt structures: we get a convenient * number for each function using the offset which we use to indicate - * what to patch. */ + * what to patch. + * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c + */ + struct paravirt_patch_template { struct pv_init_ops pv_init_ops; struct pv_time_ops pv_time_ops; @@ -322,7 +348,7 @@ struct paravirt_patch_template { struct pv_irq_ops pv_irq_ops; struct pv_mmu_ops pv_mmu_ops; struct pv_lock_ops pv_lock_ops; -}; +} __no_randomize_layout; extern struct pv_info pv_info; extern struct pv_init_ops pv_init_ops; @@ -391,7 +417,7 @@ int paravirt_disable_iospace(void); * offset into the paravirt_patch_template structure, and can therefore be * freely converted back into a structure offset. 
*/ -#define PARAVIRT_CALL "call *%c[paravirt_opptr];" +#define PARAVIRT_CALL(op) PAX_INDIRECT_CALL("*%c[paravirt_opptr]", #op) ";" /* * These macros are intended to wrap calls through one of the paravirt @@ -518,7 +544,7 @@ int paravirt_disable_iospace(void); /* since this condition will never hold */ \ if (sizeof(rettype) > sizeof(unsigned long)) { \ asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ + paravirt_alt(PARAVIRT_CALL(op)) \ post \ : call_clbr, "+r" (__sp) \ : paravirt_type(op), \ @@ -528,7 +554,7 @@ int paravirt_disable_iospace(void); __ret = (rettype)((((u64)__edx) << 32) | __eax); \ } else { \ asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ + paravirt_alt(PARAVIRT_CALL(op)) \ post \ : call_clbr, "+r" (__sp) \ : paravirt_type(op), \ @@ -544,8 +570,8 @@ int paravirt_disable_iospace(void); ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \ EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__) -#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \ - ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ +#define __PVOP_CALLEESAVE(rettype, op, func, pre, post, ...) \ + ____PVOP_CALL(rettype, op.func.func, CLBR_RET_REG, \ PVOP_CALLEE_CLOBBERS, , \ pre, post, ##__VA_ARGS__) @@ -555,7 +581,7 @@ int paravirt_disable_iospace(void); PVOP_VCALL_ARGS; \ PVOP_TEST_NULL(op); \ asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ + paravirt_alt(PARAVIRT_CALL(op)) \ post \ : call_clbr, "+r" (__sp) \ : paravirt_type(op), \ @@ -569,8 +595,8 @@ int paravirt_disable_iospace(void); VEXTRA_CLOBBERS, \ pre, post, ##__VA_ARGS__) -#define __PVOP_VCALLEESAVE(op, pre, post, ...) \ - ____PVOP_VCALL(op.func, CLBR_RET_REG, \ +#define __PVOP_VCALLEESAVE(op, func, pre, post, ...) \ + ____PVOP_VCALL(op.func.func, CLBR_RET_REG, \ PVOP_VCALLEE_CLOBBERS, , \ pre, post, ##__VA_ARGS__) @@ -581,10 +607,10 @@ int paravirt_disable_iospace(void); #define PVOP_VCALL0(op) \ __PVOP_VCALL(op, "", "") -#define PVOP_CALLEE0(rettype, op) \ - __PVOP_CALLEESAVE(rettype, op, "", "") -#define PVOP_VCALLEE0(op) \ - __PVOP_VCALLEESAVE(op, "", "") +#define PVOP_CALLEE0(rettype, op, func) \ + __PVOP_CALLEESAVE(rettype, op, func, "", "") +#define PVOP_VCALLEE0(op, func) \ + __PVOP_VCALLEESAVE(op, func, "", "") #define PVOP_CALL1(rettype, op, arg1) \ @@ -592,10 +618,10 @@ int paravirt_disable_iospace(void); #define PVOP_VCALL1(op, arg1) \ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1)) -#define PVOP_CALLEE1(rettype, op, arg1) \ - __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) -#define PVOP_VCALLEE1(op, arg1) \ - __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1)) +#define PVOP_CALLEE1(rettype, op, func, arg1) \ + __PVOP_CALLEESAVE(rettype, op, func, "", "", PVOP_CALL_ARG1(arg1)) +#define PVOP_VCALLEE1(op, func, arg1) \ + __PVOP_VCALLEESAVE(op, func, "", "", PVOP_CALL_ARG1(arg1)) #define PVOP_CALL2(rettype, op, arg1, arg2) \ @@ -605,11 +631,11 @@ int paravirt_disable_iospace(void); __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ PVOP_CALL_ARG2(arg2)) -#define PVOP_CALLEE2(rettype, op, arg1, arg2) \ - __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ +#define PVOP_CALLEE2(rettype, op, func, arg1, arg2) \ + __PVOP_CALLEESAVE(rettype, op, func, "", "", PVOP_CALL_ARG1(arg1),\ PVOP_CALL_ARG2(arg2)) -#define PVOP_VCALLEE2(op, arg1, arg2) \ - __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \ +#define PVOP_VCALLEE2(op, func, arg1, arg2) \ + __PVOP_VCALLEESAVE(op, func, "", "", PVOP_CALL_ARG1(arg1), \ PVOP_CALL_ARG2(arg2)) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 
84f58de08..610576ff7 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -493,7 +493,7 @@ do { \ bool __ret; \ typeof(pcp1) __o1 = (o1), __n1 = (n1); \ typeof(pcp2) __o2 = (o2), __n2 = (n2); \ - alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \ + alternative_io("leaq %P1,%%rsi\n\t" PAX_DIRECT_CALL("this_cpu_cmpxchg16b_emu")"\n\t", \ "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \ X86_FEATURE_CX16, \ ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \ @@ -501,6 +501,7 @@ do { \ "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \ __ret; \ }) +bool this_cpu_cmpxchg16b_emu(void *, void*, long, long, long, long) __rap_hash; #define raw_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double #define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index b6d425999..da6324e9f 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); +} + +static inline void pmd_populate_user(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); } @@ -112,12 +119,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, #ifdef CONFIG_X86_PAE extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) +{ + pud_populate(mm, pudp, pmd); +} #else /* !CONFIG_X86_PAE */ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); } + +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd))); +} #endif /* CONFIG_X86_PAE */ #if CONFIG_PGTABLE_LEVELS > 3 @@ -127,6 +144,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud))); } +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud))); +} + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { gfp_t gfp = GFP_KERNEL_ACCOUNT; diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index fd74a1195..35fd5afe9 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); *pmdp = pmd; + pax_close_kernel(); } static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index cdaa58c9b..ae30f0d67 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); + pax_close_kernel(); } static inline void 
native_set_pud(pud_t *pudp, pud_t pud) { + pax_open_kernel(); set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); + pax_close_kernel(); } /* @@ -116,9 +120,12 @@ static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, static inline void native_pmd_clear(pmd_t *pmd) { u32 *tmp = (u32 *)pmd; + + pax_open_kernel(); *tmp = 0; smp_wmb(); *(tmp + 1) = 0; + pax_close_kernel(); } static inline void pud_clear(pud_t *pudp) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 437feb436..a4b25705d 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -54,6 +54,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); #ifndef __PAGETABLE_PUD_FOLDED #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd) #define pgd_clear(pgd) native_pgd_clear(pgd) #endif @@ -88,12 +89,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); #define arch_end_context_switch(prev) do {} while(0) +#define pax_open_kernel() native_pax_open_kernel() +#define pax_close_kernel() native_pax_close_kernel() #endif /* CONFIG_PARAVIRT */ +#define __HAVE_ARCH_PAX_OPEN_KERNEL +#define __HAVE_ARCH_PAX_CLOSE_KERNEL + +#ifdef CONFIG_PAX_KERNEXEC +static inline unsigned long native_pax_open_kernel(void) +{ + unsigned long cr0; + + preempt_disable(); + barrier(); + cr0 = read_cr0() ^ X86_CR0_WP; + BUG_ON(cr0 & X86_CR0_WP); + write_cr0(cr0); + barrier(); + return cr0 ^ X86_CR0_WP; +} + +static inline unsigned long native_pax_close_kernel(void) +{ + unsigned long cr0; + + barrier(); + cr0 = read_cr0() ^ X86_CR0_WP; + BUG_ON(!(cr0 & X86_CR0_WP)); + write_cr0(cr0); + barrier(); + preempt_enable_no_resched(); + return cr0 ^ X86_CR0_WP; +} +#else +static inline unsigned long native_pax_open_kernel(void) { return 0; } +static inline unsigned long native_pax_close_kernel(void) { return 0; } +#endif + /* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
*/ +static inline int pte_user(pte_t pte) +{ + return pte_val(pte) & _PAGE_USER; +} + static inline int pte_dirty(pte_t pte) { return pte_flags(pte) & _PAGE_DIRTY; @@ -168,6 +210,11 @@ static inline unsigned long pud_pfn(pud_t pud) return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; } +static inline unsigned long pgd_pfn(pgd_t pgd) +{ + return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT; +} + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) static inline int pmd_large(pmd_t pte) @@ -224,9 +271,29 @@ static inline pte_t pte_wrprotect(pte_t pte) return pte_clear_flags(pte, _PAGE_RW); } +static inline pte_t pte_mkread(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_USER); +} + static inline pte_t pte_mkexec(pte_t pte) { - return pte_clear_flags(pte, _PAGE_NX); +#ifdef CONFIG_X86_PAE + if (__supported_pte_mask & _PAGE_NX) + return pte_clear_flags(pte, _PAGE_NX); + else +#endif + return pte_set_flags(pte, _PAGE_USER); +} + +static inline pte_t pte_exprotect(pte_t pte) +{ +#ifdef CONFIG_X86_PAE + if (__supported_pte_mask & _PAGE_NX) + return pte_set_flags(pte, _PAGE_NX); + else +#endif + return pte_clear_flags(pte, _PAGE_USER); } static inline pte_t pte_mkdirty(pte_t pte) @@ -431,7 +498,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) #define canon_pgprot(p) __pgprot(massage_pgprot(p)) -static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, +static inline int is_new_memtype_allowed(u64 paddr, u64 size, enum page_cache_mode pcm, enum page_cache_mode new_pcm) { @@ -474,6 +541,16 @@ pte_t *populate_extra_pte(unsigned long vaddr); #endif #ifndef __ASSEMBLY__ + +#ifdef CONFIG_PAX_PER_CPU_PGD +extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD]; +enum cpu_pgd_type {kernel = 0, user = 1}; +static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type) +{ + return cpu_pgd[cpu][type]; +} +#endif + #include #include #include @@ -675,7 +752,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) * Currently stuck as a macro due to indirect forward reference to * linux/mmzone.h's __section_mem_map_addr() definition: */ -#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) +#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT) /* to find an entry in a page-table-directory. 
*/ static inline unsigned long pud_index(unsigned long address) @@ -690,7 +767,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) static inline int pgd_bad(pgd_t pgd) { - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE; } static inline int pgd_none(pgd_t pgd) @@ -719,7 +796,12 @@ static inline int pgd_none(pgd_t pgd) * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +#ifdef CONFIG_PAX_PER_CPU_PGD +#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address)) +#endif + /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's @@ -730,6 +812,25 @@ static inline int pgd_none(pgd_t pgd) #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) +#ifdef CONFIG_X86_32 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY +#else +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT)) + +#ifdef CONFIG_PAX_MEMORY_UDEREF +#ifdef __ASSEMBLY__ +#define pax_user_shadow_base pax_user_shadow_base(%rip) +#else +extern unsigned long pax_user_shadow_base; +extern pgdval_t clone_pgd_mask; +#endif +#else +#define pax_user_shadow_base (0UL) +#endif + +#endif + #ifndef __ASSEMBLY__ extern int direct_gbpages; @@ -901,11 +1002,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, * dst and src can be on the same page, but the range must not overlap, * and must not cross a page boundary. */ -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count) { - memcpy(dst, src, count * sizeof(pgd_t)); + pax_open_kernel(); + while (count--) + *dst++ = *src++; + pax_close_kernel(); } +#ifdef CONFIG_PAX_PER_CPU_PGD +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src); +#endif + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src); +#else +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {} +#endif + #define PTE_SHIFT ilog2(PTRS_PER_PTE) static inline int page_level_shift(enum pg_level level) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index b6c0b4048..3535d4751 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -25,9 +25,6 @@ struct mm_struct; struct vm_area_struct; -extern pgd_t swapper_pg_dir[1024]; -extern pgd_t initial_page_table[1024]; - static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } void paging_init(void); @@ -45,6 +42,12 @@ void paging_init(void); # include #endif +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern pgd_t initial_page_table[PTRS_PER_PGD]; +#ifdef CONFIG_X86_PAE +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; +#endif + #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ @@ -59,12 +62,17 @@ void paging_init(void); /* Clear a kernel PTE and flush it from the TLB */ #define kpte_clear_flush(ptep, vaddr) \ do { \ + pax_open_kernel(); \ pte_clear(&init_mm, (vaddr), (ptep)); \ + pax_close_kernel(); \ __flush_tlb_one((vaddr)); \ } while (0) #endif /* !__ASSEMBLY__ 
*/ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + /* * kern_addr_valid() is (1) for FLATMEM and (0) for * SPARSEMEM and DISCONTIGMEM diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index 9fb2f2bc8..8e18c70e2 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -8,7 +8,7 @@ */ #ifdef CONFIG_X86_PAE # include -# define PMD_SIZE (1UL << PMD_SHIFT) +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) # define PMD_MASK (~(PMD_SIZE - 1)) #else # include @@ -46,6 +46,28 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) #endif +#ifdef CONFIG_PAX_KERNEXEC +#ifndef __ASSEMBLY__ +extern unsigned char MODULES_EXEC_VADDR[]; +extern unsigned char MODULES_EXEC_END[]; + +extern unsigned char __LOAD_PHYSICAL_ADDR[]; +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) +static inline unsigned long __intentional_overflow(-1) ktla_ktva(unsigned long addr) +{ + return addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET; + +} +static inline unsigned long __intentional_overflow(-1) ktva_ktla(unsigned long addr) +{ + return addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET; +} +#endif +#else +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) +#endif + #define MODULES_VADDR VMALLOC_START #define MODULES_END VMALLOC_END #define MODULES_LEN (MODULES_VADDR - MODULES_END) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 1cc82ece9..ba29fd88b 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -16,11 +16,17 @@ extern pud_t level3_kernel_pgt[512]; extern pud_t level3_ident_pgt[512]; +extern pud_t level3_vmalloc_start_pgt[4][512]; +extern pud_t level3_vmalloc_end_pgt[512]; +extern pud_t level3_vmemmap_pgt[512]; +extern pud_t level2_vmemmap_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pmd_t level2_fixmap_pgt[512]; -extern pmd_t level2_ident_pgt[512]; -extern pte_t level1_fixmap_pgt[512]; -extern pgd_t init_level4_pgt[]; +extern pmd_t level2_ident_pgt[2][512]; +extern pte_t level1_modules_pgt[4][512]; +extern pte_t level1_fixmap_pgt[3][512]; +extern pte_t level1_vsyscall_pgt[512]; +extern pgd_t init_level4_pgt[512]; #define swapper_pg_dir init_level4_pgt @@ -62,7 +68,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); *pmdp = pmd; + pax_close_kernel(); } static inline void native_pmd_clear(pmd_t *pmd) @@ -98,7 +106,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) static inline void native_set_pud(pud_t *pudp, pud_t pud) { + pax_open_kernel(); *pudp = pud; + pax_close_kernel(); } static inline void native_pud_clear(pud_t *pud) @@ -108,6 +118,13 @@ static inline void native_pud_clear(pud_t *pud) static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { + pax_open_kernel(); + *pgdp = pgd; + pax_close_kernel(); +} + +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd) +{ *pgdp = pgd; } diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 3a264200c..ad31bde0a 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -69,11 +69,16 @@ typedef struct { pteval_t pte; } pte_t; #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) #define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) 
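The i386 KERNEXEC helpers introduced above (ktla_ktva()/ktva_ktla()) are plain offset arithmetic: one adds LOAD_PHYSICAL_ADDR + PAGE_OFFSET, the other subtracts it again, so the two are exact inverses. A minimal userspace sketch of that arithmetic follows; the constant values are assumed stand-ins, not what any real configuration would use.

/* Illustrative model only - mirrors the arithmetic of the 32-bit KERNEXEC
 * helpers above; the constants are assumed example values. */
#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET		0xc0000000UL	/* assumed i386 default */
#define LOAD_PHYSICAL_ADDR	0x1000000UL	/* assumed 16 MiB load address */

static unsigned long ktla_ktva(unsigned long addr)
{
	return addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET;
}

static unsigned long ktva_ktla(unsigned long addr)
{
	return addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET;
}

int main(void)
{
	unsigned long ktla = 0x00100000UL;	/* hypothetical kernel text address */

	/* the two helpers undo each other exactly */
	assert(ktva_ktla(ktla_ktva(ktla)) == ktla);
	printf("ktla %#lx -> ktva %#lx\n", ktla, ktla_ktva(ktla));
	return 0;
}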
+#define MODULES_EXEC_VADDR MODULES_VADDR +#define MODULES_EXEC_END MODULES_END #define ESPFIX_PGD_ENTRY _AC(-2, UL) #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT) #define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) #define EFI_VA_END (-68 * (_AC(1, UL) << 30)) +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #define EARLY_DYNAMIC_PAGE_TABLES 64 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 8b4de22d6..eaf50b890 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -112,10 +112,14 @@ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) +#ifdef CONFIG_PAX_SEGMEXEC +#define _PAGE_DEVMAP (_AT(pteval_t, 0)) +#else #define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP) #define __HAVE_ARCH_PTE_DEVMAP +#endif #else -#define _PAGE_NX (_AT(pteval_t, 0)) +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2) #define _PAGE_DEVMAP (_AT(pteval_t, 0)) #endif @@ -176,6 +180,9 @@ enum page_cache_mode { #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ _PAGE_ACCESSED) +#define PAGE_READONLY_NOEXEC PAGE_READONLY +#define PAGE_SHARED_NOEXEC PAGE_SHARED + #define __PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) @@ -183,7 +190,7 @@ enum page_cache_mode { #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE) -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER) #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) @@ -229,7 +236,7 @@ enum page_cache_mode { #ifdef CONFIG_X86_64 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC #else -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ #endif @@ -271,7 +278,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd) { return native_pgd_val(pgd) & PTE_FLAGS_MASK; } +#endif + +#if CONFIG_PGTABLE_LEVELS == 3 +#include +#endif +#if CONFIG_PGTABLE_LEVELS == 2 +#include +#endif + +#ifndef __ASSEMBLY__ #if CONFIG_PGTABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; @@ -285,8 +302,6 @@ static inline pudval_t native_pud_val(pud_t pud) return pud.pud; } #else -#include - static inline pudval_t native_pud_val(pud_t pud) { return native_pgd_val(pud.pgd); @@ -306,8 +321,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) return pmd.pmd; } #else -#include - static inline pmdval_t native_pmd_val(pmd_t pmd) { return native_pgd_val(pmd.pud.pgd); @@ -424,7 +437,6 @@ typedef struct page *pgtable_t; extern pteval_t __supported_pte_mask; extern void set_nx(void); -extern int nx_enabled; #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h index 2c1ebeb4d..69c4605ff 100644 --- a/arch/x86/include/asm/pmem.h +++ b/arch/x86/include/asm/pmem.h @@ -38,7 +38,7 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n) * fault) we 
would have already reported a general protection fault * before the WARN+BUG. */ - rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n); + rem = __copy_from_user_inatomic_nocache(dst, (void __force_user *) src, n); if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n", __func__, dst, src, rem)) BUG(); diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 17f218645..f39430785 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -81,7 +81,7 @@ static __always_inline void __preempt_count_sub(int val) */ static __always_inline bool __preempt_count_dec_and_test(void) { - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e); + GEN_UNARY_RMWcc("decl", __preempt_count, -4, __percpu_arg(0), e); } /* diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 83db0eae9..137bc2cda 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -136,7 +136,7 @@ struct cpuinfo_x86 { /* Index into per_cpu list: */ u16 cpu_index; u32 microcode; -}; +} __randomize_layout; #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 @@ -160,7 +160,7 @@ extern __u32 cpu_caps_cleared[NCAPINTS]; extern __u32 cpu_caps_set[NCAPINTS]; #ifdef CONFIG_SMP -DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); +DECLARE_PER_CPU_READ_ONLY(struct cpuinfo_x86, cpu_info); #define cpu_data(cpu) per_cpu(cpu_info, cpu) #else #define cpu_info boot_cpu_data @@ -206,9 +206,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, : "memory"); } +/* invpcid (%rdx),%rax */ +#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02" + +#define INVPCID_SINGLE_ADDRESS 0UL +#define INVPCID_SINGLE_CONTEXT 1UL +#define INVPCID_ALL_GLOBAL 2UL +#define INVPCID_ALL_NONGLOBAL 3UL + +#define PCID_KERNEL 0UL +#define PCID_USER 1UL +#define PCID_NOFLUSH (1UL << 63) + static inline void load_cr3(pgd_t *pgdir) { - write_cr3(__pa(pgdir)); + write_cr3(__pa(pgdir) | PCID_KERNEL); } #ifdef CONFIG_X86_32 @@ -308,11 +320,9 @@ struct tss_struct { } ____cacheline_aligned; -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); +extern struct tss_struct cpu_tss[NR_CPUS]; -#ifdef CONFIG_X86_32 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); -#endif /* * Save the original ist values for checking stack pointers during debugging @@ -341,6 +351,7 @@ DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible; DECLARE_INIT_PER_CPU(irq_stack_union); DECLARE_PER_CPU(char *, irq_stack_ptr); +DECLARE_PER_CPU(char *, irq_stack_ptr_lowmem); DECLARE_PER_CPU(unsigned int, irq_count); extern asmlinkage void ignore_sysret(void); #else /* X86_64 */ @@ -389,6 +400,7 @@ struct thread_struct { unsigned short ds; unsigned short fsindex; unsigned short gsindex; + unsigned short ss; #endif u32 status; /* thread synchronous flags */ @@ -405,6 +417,9 @@ struct thread_struct { unsigned long gs; #endif + /* Floating point and extended processor state */ + struct fpu fpu; + /* Save middle states of ptrace breakpoints */ struct perf_event *ptrace_bps[HBP_NUM]; /* Debug status used for traps, single steps, etc... */ @@ -426,17 +441,11 @@ struct thread_struct { unsigned io_bitmap_max; mm_segment_t addr_limit; + unsigned long lowest_stack; unsigned int sig_on_uaccess_err:1; unsigned int uaccess_err:1; /* uaccess failed */ - - /* Floating point and extended processor state */ - struct fpu fpu; - /* - * WARNING: 'fpu' is dynamically-sized. It *MUST* be at - * the end. 
- */ -}; +} __randomize_layout; /* * Thread-synchronous status. @@ -488,12 +497,8 @@ static inline void native_swapgs(void) static inline unsigned long current_top_of_stack(void) { -#ifdef CONFIG_X86_64 - return this_cpu_read_stable(cpu_tss.x86_tss.sp0); -#else /* sp0 on x86_32 is special in and around vm86 mode. */ return this_cpu_read_stable(cpu_current_top_of_stack); -#endif } #ifdef CONFIG_PARAVIRT @@ -718,20 +723,30 @@ static inline void spin_lock_prefetch(const void *x) #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \ TOP_OF_KERNEL_STACK_PADDING) +extern union fpregs_state init_fpregs_state; + #ifdef CONFIG_X86_32 /* * User space process size: 3GB (default). */ #define TASK_SIZE PAGE_OFFSET #define TASK_SIZE_MAX TASK_SIZE + +#ifdef CONFIG_PAX_SEGMEXEC +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE) +#else #define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP +#endif + +#define STACK_TOP_MAX TASK_SIZE #define INIT_THREAD { \ .sp0 = TOP_OF_INIT_STACK, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ .addr_limit = KERNEL_DS, \ + .fpu.state = &init_fpregs_state, \ } /* @@ -744,12 +759,7 @@ static inline void spin_lock_prefetch(const void *x) * "struct pt_regs" is possible, but they may contain the * completely wrong values. */ -#define task_pt_regs(task) \ -({ \ - unsigned long __ptr = (unsigned long)task_stack_page(task); \ - __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \ - ((struct pt_regs *)__ptr) - 1; \ -}) +#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) #define KSTK_ESP(task) (task_pt_regs(task)->sp) @@ -763,13 +773,13 @@ static inline void spin_lock_prefetch(const void *x) * particular problem by preventing anything from being mapped * at the maximum canonical address. */ -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ - 0xc0000000 : 0xFFFFe000) + 0xc0000000 : 0xFFFFf000) #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? 
\ IA32_PAGE_OFFSET : TASK_SIZE_MAX) @@ -782,6 +792,7 @@ static inline void spin_lock_prefetch(const void *x) #define INIT_THREAD { \ .sp0 = TOP_OF_INIT_STACK, \ .addr_limit = KERNEL_DS, \ + .fpu.state = &init_fpregs_state, \ } #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) @@ -800,6 +811,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) +#ifdef CONFIG_PAX_SEGMEXEC +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) +#endif + #define KSTK_EIP(task) (task_pt_regs(task)->ip) /* Get/set a process' ability to use the timestamp counter instruction */ @@ -845,7 +860,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) return 0; } -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) extern void free_init_pages(char *what, unsigned long begin, unsigned long end); void default_idle(void); @@ -855,6 +870,6 @@ bool xen_set_default_idle(void); #define xen_set_default_idle 0 #endif -void stop_this_cpu(void *dummy); +void stop_this_cpu(void *dummy) __noreturn; void df_debug(struct pt_regs *regs, long error_code); #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 2b5d686ea..8693ed0dd 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -118,15 +118,16 @@ static inline int v8086_mode(struct pt_regs *regs) #ifdef CONFIG_X86_64 static inline bool user_64bit_mode(struct pt_regs *regs) { + unsigned long cs = regs->cs & 0xffff; #ifndef CONFIG_PARAVIRT /* * On non-paravirt systems, this is the only long mode CPL 3 * selector. We do not allow long mode selectors in the LDT. */ - return regs->cs == __USER_CS; + return cs == __USER_CS; #else /* Headers are too twisted for this to go in paravirt.h. */ - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs; #endif } @@ -173,9 +174,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs, * Traps from the kernel do not save sp and ss. * Use the helper function to retrieve sp. 
*/ - if (offset == offsetof(struct pt_regs, sp) && - regs->cs == __KERNEL_CS) - return kernel_stack_pointer(regs); + if (offset == offsetof(struct pt_regs, sp)) { + unsigned long cs = regs->cs & 0xffff; + if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) + return kernel_stack_pointer(regs); + } #endif return *(unsigned long *)((unsigned long)regs + offset); } diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h index 9d55f9b6e..84e9a5828 100644 --- a/arch/x86/include/asm/qspinlock_paravirt.h +++ b/arch/x86/include/asm/qspinlock_paravirt.h @@ -48,15 +48,15 @@ asm (".pushsection .text;" "jne .slowpath;" "pop %rdx;" FRAME_END - "ret;" + PAX_RET(PV_UNLOCK)";" ".slowpath: " "push %rsi;" "movzbl %al,%esi;" - "call " PV_UNLOCK_SLOWPATH ";" + PAX_DIRECT_CALL(PV_UNLOCK_SLOWPATH)";" "pop %rsi;" "pop %rdx;" FRAME_END - "ret;" + PAX_RET(PV_UNLOCK)";" ".size " PV_UNLOCK ", .-" PV_UNLOCK ";" ".popsection"); diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index 230e1903a..400b14c54 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -22,16 +22,14 @@ struct real_mode_header { #endif /* APM/BIOS reboot */ u32 machine_real_restart_asm; -#ifdef CONFIG_X86_64 u32 machine_real_restart_seg; -#endif }; /* This must match data at trampoline_32/64.S */ struct trampoline_header { #ifdef CONFIG_X86_32 u32 start; - u16 gdt_pad; + u16 boot_cs; u16 gdt_limit; u32 gdt_base; #else diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 2cb1cc253..787d52465 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -6,13 +6,13 @@ struct pt_regs; struct machine_ops { - void (*restart)(char *cmd); - void (*halt)(void); - void (*power_off)(void); + void (* __noreturn restart)(char *cmd); + void (* __noreturn halt)(void); + void (* __noreturn power_off)(void); void (*shutdown)(void); void (*crash_shutdown)(struct pt_regs *); - void (*emergency_restart)(void); -}; + void (* __noreturn emergency_restart)(void); +} __no_const; extern struct machine_ops machine_ops; diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index 661dd3056..e804f848d 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -5,7 +5,19 @@ /* Use asm goto */ -#define __GEN_RMWcc(fullop, var, cc, ...) \ +#define __GEN_RMWcc(fullop, var, size, cc, ...) \ +do { \ + asm_volatile_goto (fullop \ + "\n\t"__PAX_REFCOUNT(size) \ + ";j" #cc " %l[cc_label]" \ + : : [counter] "m" (var), ## __VA_ARGS__ \ + : "memory", "cc", "cx" : cc_label); \ + return 0; \ +cc_label: \ + return 1; \ +} while (0) + +#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) 
\ do { \ asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \ : : "m" (var), ## __VA_ARGS__ \ @@ -15,17 +27,34 @@ cc_label: \ return 1; \ } while (0) -#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ - __GEN_RMWcc(op " " arg0, var, cc) +#define GEN_UNARY_RMWcc(op, var, size, arg0, cc) \ + __GEN_RMWcc(op " " arg0, var, size, cc) + +#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \ + __GEN_RMWcc_unchecked(op " " arg0, var, cc) -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ - __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val)) +#define GEN_BINARY_RMWcc(op, var, size, vcon, val, arg0, cc) \ + __GEN_RMWcc(op " %1, " arg0, var, size, cc, vcon (val)) + +#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \ + __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val)) #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ /* Use flags output or a set instruction */ -#define __GEN_RMWcc(fullop, var, cc, ...) \ +#define __GEN_RMWcc(fullop, var, size, cc, ...) \ +do { \ + bool c; \ + asm volatile (fullop \ + "\n\t"__PAX_REFCOUNT(size) \ + ";" CC_SET(cc) \ + : [counter] "+m" (var), CC_OUT(cc) (c) \ + : __VA_ARGS__ : "memory", "cc", "cx"); \ + return c != 0; \ +} while (0) + +#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \ do { \ bool c; \ asm volatile (fullop ";" CC_SET(cc) \ @@ -34,11 +63,17 @@ do { \ return c; \ } while (0) -#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ - __GEN_RMWcc(op " " arg0, var, cc) +#define GEN_UNARY_RMWcc(op, var, size, arg0, cc) \ + __GEN_RMWcc(op " " arg0, var, size, cc) + +#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \ + __GEN_RMWcc_unchecked(op " " arg0, var, cc) + +#define GEN_BINARY_RMWcc(op, var, size, vcon, val, arg0, cc) \ + __GEN_RMWcc(op " %2, " arg0, var, size, cc, vcon (val)) -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ - __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val)) +#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \ + __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val)) #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index a34e0d4b9..5708ff739 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h @@ -64,14 +64,15 @@ static inline void __down_read(struct rw_semaphore *sem) { asm volatile("# beginning down_read\n\t" LOCK_PREFIX _ASM_INC "(%1)\n\t" + PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8) /* adds 0x00000001 */ " jns 1f\n" " call call_rwsem_down_read_failed\n" "1:\n\t" "# ending down_read\n\t" - : "+m" (sem->count) + : [counter] "+m" (sem->count) : "a" (sem) - : "memory", "cc"); + : "memory", "cc", "cx"); } /* @@ -85,14 +86,15 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) "1:\n\t" " mov %1,%2\n\t" " add %3,%2\n\t" + PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8) " jle 2f\n\t" LOCK_PREFIX " cmpxchg %2,%0\n\t" " jnz 1b\n\t" "2:\n\t" "# ending __down_read_trylock\n\t" - : "+m" (sem->count), "=&a" (result), "=&r" (tmp) + : [counter] "+m" (sem->count), "=&a" (result), "=&r" (tmp) : "i" (RWSEM_ACTIVE_READ_BIAS) - : "memory", "cc"); + : "memory", "cc", "cx"); return result >= 0; } @@ -107,16 +109,17 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) \ asm volatile("# beginning down_write\n\t" \ LOCK_PREFIX " xadd %1,(%4)\n\t" \ + PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)\ /* adds 0xffff0001, returns the old value */ \ " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \ /* was the active mask 0 before? 
*/\ " jz 1f\n" \ - " call " slow_path "\n" \ + PAX_DIRECT_CALL(slow_path)"\n" \ "1:\n" \ "# ending down_write" \ - : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \ + : [counter] "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \ : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \ - : "memory", "cc"); \ + : "memory", "cc", "cx"); \ ret; \ }) @@ -148,15 +151,16 @@ static inline bool __down_write_trylock(struct rw_semaphore *sem) " jnz 2f\n\t" " mov %1,%2\n\t" " add %4,%2\n\t" + PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8) LOCK_PREFIX " cmpxchg %2,%0\n\t" " jnz 1b\n\t" "2:\n\t" CC_SET(e) "# ending __down_write_trylock\n\t" - : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1), + : [counter] "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1), CC_OUT(e) (result) : "er" (RWSEM_ACTIVE_WRITE_BIAS) - : "memory"); + : "memory", "cx"); return result; } @@ -168,14 +172,15 @@ static inline void __up_read(struct rw_semaphore *sem) long tmp; asm volatile("# beginning __up_read\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + PAX_REFCOUNT_UNDERFLOW(BITS_PER_LONG/8) /* subtracts 1, returns the old value */ " jns 1f\n\t" " call call_rwsem_wake\n" /* expects old value in %edx */ "1:\n" "# ending __up_read\n" - : "+m" (sem->count), "=d" (tmp) + : [counter] "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS) - : "memory", "cc"); + : "memory", "cc", "cx"); } /* @@ -186,14 +191,15 @@ static inline void __up_write(struct rw_semaphore *sem) long tmp; asm volatile("# beginning __up_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + PAX_REFCOUNT_UNDERFLOW(BITS_PER_LONG/8) /* subtracts 0xffff0001, returns the old value */ " jns 1f\n\t" " call call_rwsem_wake\n" /* expects old value in %edx */ "1:\n\t" "# ending __up_write\n" - : "+m" (sem->count), "=d" (tmp) + : [counter] "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) - : "memory", "cc"); + : "memory", "cc", "cx"); } /* @@ -203,6 +209,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) { asm volatile("# beginning __downgrade_write\n\t" LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" + PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8) /* * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) @@ -211,9 +218,9 @@ static inline void __downgrade_write(struct rw_semaphore *sem) " call call_rwsem_downgrade_wake\n" "1:\n\t" "# ending __downgrade_write\n" - : "+m" (sem->count) + : [counter] "+m" (sem->count) : "a" (sem), "er" (-RWSEM_WAITING_BIAS) - : "memory", "cc"); + : "memory", "cc", "cx"); } #endif /* __KERNEL__ */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 1549caa09..aa9ebe12f 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -83,14 +83,20 @@ * 26 - ESPFIX small SS * 27 - per-cpu [ offset to per-cpu data area ] * 28 - stack_canary-20 [ for stack protector ] <=== cacheline #8 - * 29 - unused - * 30 - unused + * 29 - PCI BIOS CS + * 30 - PCI BIOS DS * 31 - TSS for double fault handler */ +#define GDT_ENTRY_KERNEXEC_EFI_CS (1) +#define GDT_ENTRY_KERNEXEC_EFI_DS (2) +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8) +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8) + #define GDT_ENTRY_TLS_MIN 6 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) #define GDT_ENTRY_KERNEL_CS 12 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 4 #define GDT_ENTRY_KERNEL_DS 13 #define GDT_ENTRY_DEFAULT_USER_CS 14 #define GDT_ENTRY_DEFAULT_USER_DS 15 @@ -107,6 +113,12 @@ #define GDT_ENTRY_PERCPU 27 #define GDT_ENTRY_STACK_CANARY 28 
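All of the __FOO selector macros in this header follow the standard x86 encoding: a selector is the GDT index shifted left by three, with the low bits holding the table indicator and the requested privilege level, hence the GDT_ENTRY_FOO*8 (plus 3 for user-mode segments) definitions. A short sketch of that arithmetic, using index values from the 32-bit layout above:

/* Illustrative only: selector values derived from GDT indices. */
#include <stdio.h>

#define SEL(index, rpl)	(((index) << 3) | (rpl))

int main(void)
{
	printf("__KERNEL_CS          = %#x\n", SEL(12, 0));	/* GDT_ENTRY_KERNEL_CS = 12 -> 0x60 */
	printf("__KERNEXEC_KERNEL_CS = %#x\n", SEL(4, 0));	/* GDT_ENTRY_KERNEXEC_KERNEL_CS = 4 -> 0x20 */
	printf("__USER_DS            = %#x\n", SEL(15, 3));	/* GDT_ENTRY_DEFAULT_USER_DS = 15, RPL 3 -> 0x7b */
	return 0;
}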
+#define GDT_ENTRY_PCIBIOS_CS 29 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) + +#define GDT_ENTRY_PCIBIOS_DS 30 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* @@ -119,6 +131,7 @@ */ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8) #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3) #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3) @@ -130,7 +143,7 @@ #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16*8) /* "Is this PNP code selector (PNP_CS32 or PNP_CS16)?" */ -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == PNP_CS32) +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) /* data segment for BIOS: */ #define PNP_DS (GDT_ENTRY_PNPBIOS_DS*8) @@ -177,6 +190,8 @@ #define GDT_ENTRY_DEFAULT_USER_DS 5 #define GDT_ENTRY_DEFAULT_USER_CS 6 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7 + /* Needs two entries */ #define GDT_ENTRY_TSS 8 /* Needs two entries */ @@ -188,10 +203,12 @@ /* Abused to load per CPU data from limit */ #define GDT_ENTRY_PER_CPU 15 +#define GDT_ENTRY_UDEREF_KERNEL_DS 16 + /* * Number of entries in the GDT table: */ -#define GDT_ENTRIES 16 +#define GDT_ENTRIES 17 /* * Segment selector values corresponding to the above entries: @@ -201,7 +218,9 @@ */ #define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS*8) #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8) #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) +#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8) #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8 + 3) #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3) #define __USER32_DS __USER_DS diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ac1d5da14..6c4be50ca 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -61,6 +61,7 @@ static inline void x86_ce4100_early_setup(void) { } #ifndef _SETUP #include +#include #include /* @@ -76,7 +77,7 @@ static inline bool kaslr_enabled(void) static inline unsigned long kaslr_offset(void) { - return (unsigned long)&_text - __START_KERNEL; + return ktla_ktva((unsigned long)&_text) - __START_KERNEL; } /* diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h index db333300b..9356a6317 100644 --- a/arch/x86/include/asm/smap.h +++ b/arch/x86/include/asm/smap.h @@ -25,6 +25,18 @@ #include +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define ASM_PAX_OPEN_USERLAND \ + ALTERNATIVE "", "pax_direct_call __pax_open_userland", X86_FEATURE_STRONGUDEREF + +#define ASM_PAX_CLOSE_USERLAND \ + ALTERNATIVE "", "pax_direct_call __pax_close_userland", X86_FEATURE_STRONGUDEREF + +#else +#define ASM_PAX_OPEN_USERLAND +#define ASM_PAX_CLOSE_USERLAND +#endif + #ifdef CONFIG_X86_SMAP #define ASM_CLAC \ @@ -40,10 +52,44 @@ #endif /* CONFIG_X86_SMAP */ +#define ASM_USER_ACCESS_BEGIN ASM_PAX_OPEN_USERLAND; ASM_STAC +#define ASM_USER_ACCESS_END ASM_CLAC; ASM_PAX_CLOSE_USERLAND + #else /* __ASSEMBLY__ */ #include +#define __HAVE_ARCH_PAX_OPEN_USERLAND +#define __HAVE_ARCH_PAX_CLOSE_USERLAND + +extern void __pax_open_userland(void); +static __always_inline unsigned long pax_open_userland(void) +{ + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + asm volatile(ALTERNATIVE("", PAX_DIRECT_CALL("%P[open]"), X86_FEATURE_STRONGUDEREF) + : + : [open] "i" (__pax_open_userland) + : "memory", "rax"); +#endif + + return 0; +} + +extern void 
__pax_close_userland(void); +static __always_inline unsigned long pax_close_userland(void) +{ + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + asm volatile(ALTERNATIVE("", PAX_DIRECT_CALL("%P[close]"), X86_FEATURE_STRONGUDEREF) + : + : [close] "i" (__pax_close_userland) + : "memory", "rax"); +#endif + + return 0; +} + #ifdef CONFIG_X86_SMAP static __always_inline void clac(void) diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 026ea82ec..31a5b193f 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -25,7 +25,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); /* cpus sharing the last level cache: */ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); -DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); +DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number); static inline struct cpumask *cpu_llc_shared_mask(int cpu) { @@ -57,7 +57,7 @@ struct smp_ops { void (*send_call_func_ipi)(const struct cpumask *mask); void (*send_call_func_single_ipi)(int cpu); -}; +} __no_const; /* Globals due to paravirt */ extern void set_cpu_sibling_map(int cpu); diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 58505f019..bff3b5b64 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -49,7 +49,7 @@ * head_32 for boot CPU and setup_per_cpu_areas() for others. */ #define GDT_STACK_CANARY_INIT \ - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18), + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17), /* * Initialize the stackprotector canary value. @@ -114,7 +114,7 @@ static inline void setup_stack_canary_segment(int cpu) static inline void load_stack_canary_segment(void) { -#ifdef CONFIG_X86_32 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF) asm volatile ("mov %0, %%gs" : : "r" (0)); #endif } diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 3d3e8353e..50b64b198 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -6,28 +6,28 @@ /* Let gcc decide whether to inline or use the out of line functions */ #define __HAVE_ARCH_STRCPY -extern char *strcpy(char *dest, const char *src); +extern char *strcpy(char *dest, const char *src) __nocapture(2); #define __HAVE_ARCH_STRNCPY -extern char *strncpy(char *dest, const char *src, size_t count); +extern char *strncpy(char *dest, const char *src, size_t count) __nocapture(2); #define __HAVE_ARCH_STRCAT -extern char *strcat(char *dest, const char *src); +extern char *strcat(char *dest, const char *src) __nocapture(2); #define __HAVE_ARCH_STRNCAT -extern char *strncat(char *dest, const char *src, size_t count); +extern char *strncat(char *dest, const char *src, size_t count) __nocapture(2); #define __HAVE_ARCH_STRCMP -extern int strcmp(const char *cs, const char *ct); +extern int strcmp(const char *cs, const char *ct) __nocapture(); #define __HAVE_ARCH_STRNCMP -extern int strncmp(const char *cs, const char *ct, size_t count); +extern int strncmp(const char *cs, const char *ct, size_t count) __nocapture(1, 2); #define __HAVE_ARCH_STRCHR -extern char *strchr(const char *s, int c); +extern char *strchr(const char *s, int c) __nocapture(-1); #define __HAVE_ARCH_STRLEN -extern size_t strlen(const char *s); +extern size_t strlen(const char *s) __nocapture(1); static __always_inline void *__memcpy(void *to, const void *from, size_t n) { @@ -197,12 +197,12 @@ 
static inline void *__memcpy3d(void *to, const void *from, size_t len) #endif #define __HAVE_ARCH_MEMMOVE -void *memmove(void *dest, const void *src, size_t n); +void *memmove(void *dest, const void *src, size_t n) __nocapture(2); #define memcmp __builtin_memcmp #define __HAVE_ARCH_MEMCHR -extern void *memchr(const void *cs, int c, size_t count); +extern void *memchr(const void *cs, int c, size_t count) __nocapture(-1); static inline void *__memset_generic(void *s, char c, size_t count) { @@ -243,11 +243,11 @@ void *__constant_c_memset(void *s, unsigned long c, size_t count) /* Added by Gertjan van Wingerde to make minix and sysv module work */ #define __HAVE_ARCH_STRNLEN -extern size_t strnlen(const char *s, size_t count); +extern size_t strnlen(const char *s, size_t count) __nocapture(1); /* end of additional stuff */ #define __HAVE_ARCH_STRSTR -extern char *strstr(const char *cs, const char *ct); +extern char *strstr(const char *cs, const char *ct) __nocapture(-1, 2); /* * This looks horribly ugly, but the compiler can optimize it totally, diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index a164862d7..a90256820 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -28,8 +28,8 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t function. */ #define __HAVE_ARCH_MEMCPY 1 -extern void *memcpy(void *to, const void *from, size_t len); -extern void *__memcpy(void *to, const void *from, size_t len); +extern void *memcpy(void *to, const void *from, size_t len) __nocapture(2); +extern void *__memcpy(void *to, const void *from, size_t len) __nocapture(2); #ifndef CONFIG_KMEMCHECK #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 @@ -57,14 +57,14 @@ void *memset(void *s, int c, size_t n); void *__memset(void *s, int c, size_t n); #define __HAVE_ARCH_MEMMOVE -void *memmove(void *dest, const void *src, size_t count); -void *__memmove(void *dest, const void *src, size_t count); +void *memmove(void *dest, const void *src, size_t count) __nocapture(2); +void *__memmove(void *dest, const void *src, size_t count) __nocapture(2); -int memcmp(const void *cs, const void *ct, size_t count); -size_t strlen(const char *s); -char *strcpy(char *dest, const char *src); -char *strcat(char *dest, const char *src); -int strcmp(const char *cs, const char *ct); +int memcmp(const void *cs, const void *ct, size_t count) __nocapture(1, 2); +size_t strlen(const char *s) __nocapture(1); +char *strcpy(char *dest, const char *src) __nocapture(2); +char *strcat(char *dest, const char *src) __nocapture(2); +int strcmp(const char *cs, const char *ct) __nocapture(1, 2); #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) @@ -96,7 +96,7 @@ DECLARE_STATIC_KEY_FALSE(mcsafe_key); * * Return 0 for success, -EFAULT for fail */ -static __always_inline __must_check int +static __always_inline __must_check __nocapture(2) int memcpy_mcsafe(void *dst, const void *src, size_t cnt) { #ifdef CONFIG_X86_MCE diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h index 6136a1815..585dcbd5f 100644 --- a/arch/x86/include/asm/suspend_64.h +++ b/arch/x86/include/asm/suspend_64.h @@ -44,6 +44,6 @@ struct saved_context { /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); extern char core_restore_code; -extern char restore_registers; +extern int restore_registers(void); #endif /* _ASM_X86_SUSPEND_64_H */ diff --git a/arch/x86/include/asm/switch_to.h 
b/arch/x86/include/asm/switch_to.h index 5cb436acd..aba17e163 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -42,7 +42,9 @@ struct inactive_task_frame { unsigned long r15; unsigned long r14; unsigned long r13; +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR unsigned long r12; +#endif #else unsigned long si; unsigned long di; diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index 82c34ee25..940fa40cf 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h @@ -20,8 +20,8 @@ #include /* ia32/sys_ia32.c */ -asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long); -asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); +asmlinkage long sys32_truncate64(const char __user *, unsigned int, unsigned int); +asmlinkage long sys32_ftruncate64(unsigned int, unsigned int, unsigned int); asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *); asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *); @@ -42,7 +42,7 @@ long sys32_vm86_warning(void); asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, unsigned, unsigned, int); -asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); +asmlinkage long sys32_fadvise64(int, unsigned, unsigned, int, int); asmlinkage long sys32_fallocate(int, int, unsigned, unsigned, unsigned, unsigned); diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 91dfcafe2..8fda9d0b4 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); asmlinkage long sys_iopl(unsigned int); /* kernel/ldt.c */ -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); +asmlinkage long sys_modify_ldt(int, void __user *, unsigned long); /* kernel/signal.c */ asmlinkage long sys_rt_sigreturn(void); @@ -34,7 +34,7 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *); #ifdef CONFIG_X86_32 /* kernel/signal.c */ -asmlinkage unsigned long sys_sigreturn(void); +asmlinkage long sys_sigreturn(void); /* kernel/vm86_32.c */ struct vm86_struct; diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index ad6f5eb07..1b4909d8e 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -39,7 +39,7 @@ # define TOP_OF_KERNEL_STACK_PADDING 8 # endif #else -# define TOP_OF_KERNEL_STACK_PADDING 0 +# define TOP_OF_KERNEL_STACK_PADDING 16 #endif /* @@ -99,6 +99,7 @@ struct thread_info { #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ #define TIF_X32 30 /* 32-bit native x86-64 binary */ +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -121,6 +122,7 @@ struct thread_info { #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_ADDR32 (1 << TIF_ADDR32) #define _TIF_X32 (1 << TIF_X32) +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) /* * work to do in syscall_trace_enter(). 
Also includes TIF_NOHZ for @@ -129,12 +131,12 @@ struct thread_info { #define _TIF_WORK_SYSCALL_ENTRY \ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ - _TIF_NOHZ) + _TIF_NOHZ | _TIF_GRSEC_SETXID) /* work to do on any return to user space */ #define _TIF_ALLWORK_MASK \ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ - _TIF_NOHZ) + _TIF_NOHZ | _TIF_GRSEC_SETXID) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ @@ -168,21 +170,21 @@ static inline unsigned long current_stack_pointer(void) * entirely contained by a single stack frame. * * Returns: - * 1 if within a frame - * -1 if placed across a frame boundary (or outside stack) - * 0 unable to determine (no frame pointers, etc) + * GOOD_FRAME if within a frame + * BAD_STACK if placed across a frame boundary (or outside stack) + * GOOD_STACK unable to determine (no frame pointers, etc) */ -static inline int arch_within_stack_frames(const void * const stack, - const void * const stackend, - const void *obj, unsigned long len) +static __always_inline int arch_within_stack_frames(unsigned long stack, + unsigned long stackend, + unsigned long obj, unsigned long len) { #if defined(CONFIG_FRAME_POINTER) - const void *frame = NULL; - const void *oldframe; + unsigned long frame = 0; + unsigned long oldframe; - oldframe = __builtin_frame_address(1); + oldframe = (unsigned long)__builtin_frame_address(1); if (oldframe) - frame = __builtin_frame_address(2); + frame = (unsigned long)__builtin_frame_address(2); /* * low ----------------------------------------------> high * [saved bp][saved ip][args][local vars][saved bp][saved ip] @@ -197,22 +199,16 @@ static inline int arch_within_stack_frames(const void * const stack, * the copy as invalid. */ if (obj + len <= frame) - return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1; + return obj >= oldframe + 2 * sizeof(unsigned long) ? GOOD_FRAME : BAD_STACK; oldframe = frame; - frame = *(const void * const *)frame; + frame = *(unsigned long *)frame; } - return -1; + return BAD_STACK; #else - return 0; + return GOOD_STACK; #endif } -#else /* !__ASSEMBLY__ */ - -#ifdef CONFIG_X86_64 -# define cpu_current_top_of_stack (cpu_tss + TSS_sp0) -#endif - #endif #ifdef CONFIG_COMPAT diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 6fa85944a..30950f37a 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -89,7 +89,9 @@ static inline void cr4_set_bits(unsigned long mask) { unsigned long cr4; +// BUG_ON(!arch_irqs_disabled()); cr4 = this_cpu_read(cpu_tlbstate.cr4); + BUG_ON(cr4 != __read_cr4()); if ((cr4 | mask) != cr4) { cr4 |= mask; this_cpu_write(cpu_tlbstate.cr4, cr4); @@ -102,7 +104,9 @@ static inline void cr4_clear_bits(unsigned long mask) { unsigned long cr4; +// BUG_ON(!arch_irqs_disabled()); cr4 = this_cpu_read(cpu_tlbstate.cr4); + BUG_ON(cr4 != __read_cr4()); if ((cr4 & ~mask) != cr4) { cr4 &= ~mask; this_cpu_write(cpu_tlbstate.cr4, cr4); @@ -113,6 +117,7 @@ static inline void cr4_clear_bits(unsigned long mask) /* Read the CR4 shadow. 
*/ static inline unsigned long cr4_read_shadow(void) { +// BUG_ON(!arch_irqs_disabled()); return this_cpu_read(cpu_tlbstate.cr4); } @@ -135,6 +140,25 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) static inline void __native_flush_tlb(void) { + if (static_cpu_has(X86_FEATURE_INVPCID)) { + u64 descriptor[2]; + + descriptor[0] = PCID_KERNEL; + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory"); + return; + } + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) { + unsigned int cpu = raw_get_cpu(); + + native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER); + native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL); + raw_put_cpu_no_resched(); + return; + } +#endif + /* * If current->mm == NULL then we borrow a mm which may change during a * task switch and therefore we must not be preempted while we write CR3 @@ -147,13 +171,21 @@ static inline void __native_flush_tlb(void) static inline void __native_flush_tlb_global_irq_disabled(void) { - unsigned long cr4; - - cr4 = this_cpu_read(cpu_tlbstate.cr4); - /* clear PGE */ - native_write_cr4(cr4 & ~X86_CR4_PGE); - /* write old PGE again and flush TLBs */ - native_write_cr4(cr4); + if (static_cpu_has(X86_FEATURE_INVPCID)) { + u64 descriptor[2]; + + descriptor[0] = PCID_KERNEL; + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory"); + } else { + unsigned long cr4; + + cr4 = this_cpu_read(cpu_tlbstate.cr4); + BUG_ON(cr4 != __read_cr4()); + /* clear PGE */ + native_write_cr4(cr4 & ~X86_CR4_PGE); + /* write old PGE again and flush TLBs */ + native_write_cr4(cr4); + } } static inline void __native_flush_tlb_global(void) @@ -183,6 +215,43 @@ static inline void __native_flush_tlb_global(void) static inline void __native_flush_tlb_single(unsigned long addr) { + if (static_cpu_has(X86_FEATURE_INVPCID)) { + u64 descriptor[2]; + + descriptor[0] = PCID_KERNEL; + descriptor[1] = addr; + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) { + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) { + if (addr < TASK_SIZE_MAX) + descriptor[1] += pax_user_shadow_base; + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory"); + } + + descriptor[0] = PCID_USER; + descriptor[1] = addr; + } +#endif + + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory"); + return; + } + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) { + unsigned int cpu = raw_get_cpu(); + + native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH); + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); + native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH); + raw_put_cpu_no_resched(); + + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX) + addr += pax_user_shadow_base; + } +#endif + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); } diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index 9217ab1f5..90c91bfcd 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h @@ -25,8 +25,8 @@ DECLARE_EVENT_CLASS(x86_fpu, __entry->fpstate_active = fpu->fpstate_active; __entry->counter = fpu->counter; if (boot_cpu_has(X86_FEATURE_OSXSAVE)) { - __entry->xfeatures = fpu->state.xsave.header.xfeatures; - __entry->xcomp_bv = 
fpu->state.xsave.header.xcomp_bv; + __entry->xfeatures = fpu->state->xsave.header.xfeatures; + __entry->xcomp_bv = fpu->state->xsave.header.xcomp_bv; } ), TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx", diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 01fd0a7f4..0ad067b45 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -10,7 +10,7 @@ #define dotraplinkage __visible asmlinkage void divide_error(void); -asmlinkage void debug(void); +asmlinkage void int1(void); asmlinkage void nmi(void); asmlinkage void int3(void); asmlinkage void xen_debug(void); @@ -38,6 +38,15 @@ asmlinkage void machine_check(void); #endif /* CONFIG_X86_MCE */ asmlinkage void simd_coprocessor_error(void); +#ifdef CONFIG_PAX_REFCOUNT +asmlinkage void refcount_error(void); +#endif + +#ifdef CONFIG_PAX_RAP +asmlinkage void rap_call_error(void); +asmlinkage void rap_ret_error(void); +#endif + #ifdef CONFIG_TRACING asmlinkage void trace_page_fault(void); #define trace_stack_segment stack_segment @@ -54,6 +63,9 @@ asmlinkage void trace_page_fault(void); #define trace_alignment_check alignment_check #define trace_simd_coprocessor_error simd_coprocessor_error #define trace_async_page_fault async_page_fault +#define trace_refcount_error refcount_error +#define trace_rap_call_error rap_call_error +#define trace_rap_ret_error rap_ret_error #endif dotraplinkage void do_divide_error(struct pt_regs *, long); @@ -107,7 +119,7 @@ extern int panic_on_unrecovered_nmi; void math_emulate(struct math_emu_info *); #ifndef CONFIG_X86_32 -asmlinkage void smp_thermal_interrupt(void); +asmlinkage void smp_thermal_interrupt(struct pt_regs *regs); asmlinkage void smp_threshold_interrupt(void); asmlinkage void smp_deferred_error_interrupt(void); #endif @@ -117,7 +129,7 @@ extern void ist_exit(struct pt_regs *regs); extern void ist_begin_non_atomic(struct pt_regs *regs); extern void ist_end_non_atomic(void); -#ifdef CONFIG_VMAP_STACK +#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) void __noreturn handle_stack_overflow(const char *message, struct pt_regs *regs, unsigned long fault_address); @@ -145,6 +157,9 @@ enum { X86_TRAP_AC, /* 17, Alignment Check */ X86_TRAP_MC, /* 18, Machine Check */ X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */ + X86_TRAP_VE, /* 20, Virtualization Exception */ + X86_TRAP_CP, /* 21, Control Protection Exception */ + X86_TRAP_SX = 30, /* 30, Security Exception */ X86_TRAP_IRET = 32, /* 32, IRET Exception */ }; diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index faf3687f1..7624c7a9f 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include #include @@ -31,7 +33,12 @@ #define get_ds() (KERNEL_DS) #define get_fs() (current->thread.addr_limit) +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) +void __set_fs(mm_segment_t x); +void set_fs(mm_segment_t x); +#else #define set_fs(x) (current->thread.addr_limit = (x)) +#endif #define segment_eq(a, b) ((a).seg == (b).seg) @@ -88,8 +95,10 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. 
*/ +#define access_ok_noprefault(type, addr, size) \ + likely(!__range_not_ok((addr), (size), user_addr_max())) #define access_ok(type, addr, size) \ - likely(!__range_not_ok(addr, size, user_addr_max())) + __access_ok(type, (unsigned long)(addr), (size_t)(size)) /* * These are the main single-value transfer routines. They automatically @@ -112,15 +121,27 @@ extern int __get_user_4(void); extern int __get_user_8(void); extern int __get_user_bad(void); -#define __uaccess_begin() stac() -#define __uaccess_end() clac() +#define __uaccess_begin() pax_open_userland(); stac() +#define __uaccess_end() clac(); pax_close_userland() + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __copyuser_seg "gs;" +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n" +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n" +#else +#define __copyuser_seg +#define __COPYUSER_SET_ES +#define __COPYUSER_RESTORE_ES +#endif /* - * This is a type: either unsigned long, if the argument fits into - * that type, or otherwise unsigned long long. + * This is a type: either (un)signed int, if the argument fits into + * that type, or otherwise (un)signed long long. */ #define __inttype(x) \ -__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) +__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \ + __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\ + __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0))) /** * get_user: - Get a simple variable from user space. @@ -171,14 +192,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") - - #ifdef CONFIG_X86_32 #define __put_user_asm_u64(x, addr, err, errret) \ asm volatile("\n" \ - "1: movl %%eax,0(%2)\n" \ - "2: movl %%edx,4(%2)\n" \ - "3:" \ + "1: "__copyuser_seg"movl %%eax,0(%2)\n" \ + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \ + "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: movl %3,%0\n" \ " jmp 3b\n" \ @@ -190,9 +209,9 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) #define __put_user_asm_ex_u64(x, addr) \ asm volatile("\n" \ - "1: movl %%eax,0(%1)\n" \ - "2: movl %%edx,4(%1)\n" \ - "3:" \ + "1: "__copyuser_seg"movl %%eax,0(%1)\n" \ + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \ + "3:\n" \ _ASM_EXTABLE_EX(1b, 2b) \ _ASM_EXTABLE_EX(2b, 3b) \ : : "A" (x), "r" (addr)) @@ -239,10 +258,10 @@ extern void __put_user_8(void); #define put_user(x, ptr) \ ({ \ int __ret_pu; \ - __typeof__(*(ptr)) __pu_val; \ + __inttype(*(ptr)) __pu_val; \ __chk_user_ptr(ptr); \ might_fault(); \ - __pu_val = x; \ + __pu_val = (__inttype(*(ptr)))(x); \ switch (sizeof(*(ptr))) { \ case 1: \ __put_user_x(1, __pu_val, ptr, __ret_pu); \ @@ -315,10 +334,9 @@ do { \ #define __get_user_asm_u64(x, ptr, retval, errret) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ - asm volatile(ASM_STAC "\n" \ - "1: movl %2,%%eax\n" \ + asm volatile("1: movl %2,%%eax\n" \ "2: movl %3,%%edx\n" \ - "3: " ASM_CLAC "\n" \ + "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: mov %4,%0\n" \ " xorl %%eax,%%eax\n" \ @@ -346,10 +364,10 @@ do { \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: \ - __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \ + __get_user_asm(x, ptr, retval, "zbl", "k", "=r", errret);\ break; \ case 2: \ - __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \ + __get_user_asm(x, ptr, retval, "zwl", "k", "=r", errret);\ break; \ case 4: \ __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \ @@ 
-363,17 +381,19 @@ do { \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ +do { \ asm volatile("\n" \ - "1: mov"itype" %2,%"rtype"1\n" \ + "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: mov %3,%0\n" \ - " xor"itype" %"rtype"1,%"rtype"1\n" \ + " xorl %k1,%k1\n" \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "=r" (err), ltype(x) \ - : "m" (__m(addr)), "i" (errret), "0" (err)) + : "=r" (err), ltype (x) \ + : "m" (__m(addr)), "i" (errret), "0" (err)); \ +} while (0) /* * This doesn't do __uaccess_begin/end - the exception handling @@ -384,10 +404,10 @@ do { \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: \ - __get_user_asm_ex(x, ptr, "b", "b", "=q"); \ + __get_user_asm_ex(x, ptr, "zbl", "k", "=r"); \ break; \ case 2: \ - __get_user_asm_ex(x, ptr, "w", "w", "=r"); \ + __get_user_asm_ex(x, ptr, "zwl", "k", "=r"); \ break; \ case 4: \ __get_user_asm_ex(x, ptr, "l", "k", "=r"); \ @@ -401,10 +421,10 @@ do { \ } while (0) #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ - asm volatile("1: mov"itype" %1,%"rtype"0\n" \ + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\ "2:\n" \ ".section .fixup,\"ax\"\n" \ - "3:xor"itype" %"rtype"0,%"rtype"0\n" \ + "3:xorl %k0,%k0\n" \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE_EX(1b, 3b) \ @@ -426,13 +446,24 @@ do { \ __uaccess_begin(); \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ __uaccess_end(); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + (x) = (__typeof__(*(ptr)))__gu_val; \ __builtin_expect(__gu_err, 0); \ }) /* FIXME: this hack is definitely wrong -AK */ struct __large_struct { unsigned long buf[100]; }; -#define __m(x) (*(struct __large_struct __user *)(x)) +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define ____m(x) \ +({ \ + unsigned long ____x = (unsigned long)(x); \ + if (____x < pax_user_shadow_base) \ + ____x += pax_user_shadow_base; \ + (typeof(x))____x; \ +}) +#else +#define ____m(x) (x) +#endif +#define __m(x) (*(struct __large_struct __user *)____m(x)) /* * Tell gcc we read from memory instead of writing: this is because @@ -440,8 +471,9 @@ struct __large_struct { unsigned long buf[100]; }; * aliasing issues. */ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ +do { \ asm volatile("\n" \ - "1: mov"itype" %"rtype"1,%2\n" \ + "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: mov %3,%0\n" \ @@ -449,10 +481,11 @@ struct __large_struct { unsigned long buf[100]; }; ".previous\n" \ _ASM_EXTABLE(1b, 3b) \ : "=r"(err) \ - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\ +} while (0) #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ - asm volatile("1: mov"itype" %"rtype"0,%1\n" \ + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\ "2:\n" \ _ASM_EXTABLE_EX(1b, 2b) \ : : ltype(x), "m" (__m(addr))) @@ -492,8 +525,12 @@ struct __large_struct { unsigned long buf[100]; }; * On error, the variable @x is set to zero. */ +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __get_user(x, ptr) get_user((x), (ptr)) +#else #define __get_user(x, ptr) \ __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#endif /** * __put_user: - Write a simple value into user space, with less checking. @@ -516,8 +553,12 @@ struct __large_struct { unsigned long buf[100]; }; * Returns zero on success, or -EFAULT on error. 
*/ +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __put_user(x, ptr) put_user((x), (ptr)) +#else #define __put_user(x, ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#endif #define __get_user_unaligned __get_user #define __put_user_unaligned __put_user @@ -535,7 +576,7 @@ struct __large_struct { unsigned long buf[100]; }; #define get_user_ex(x, ptr) do { \ unsigned long __gue_val; \ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ - (x) = (__force __typeof__(*(ptr)))__gue_val; \ + (x) = (__typeof__(*(ptr)))__gue_val; \ } while (0) #define put_user_try uaccess_try @@ -553,7 +594,7 @@ extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *str, long n); unsigned long __must_check clear_user(void __user *mem, unsigned long len); -unsigned long __must_check __clear_user(void __user *mem, unsigned long len); +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2); extern void __cmpxchg_wrong_size(void) __compiletime_error("Bad argument size for cmpxchg"); @@ -561,22 +602,22 @@ extern void __cmpxchg_wrong_size(void) #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \ ({ \ int __ret = 0; \ - __typeof__(ptr) __uval = (uval); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ + __typeof__(uval) __uval = (uval); \ + __typeof__(*(uval)) __old = (old); \ + __typeof__(*(uval)) __new = (new); \ __uaccess_begin(); \ switch (size) { \ case 1: \ { \ asm volatile("\n" \ - "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\ "2:\n" \ "\t.section .fixup, \"ax\"\n" \ "3:\tmov %3, %0\n" \ "\tjmp 2b\n" \ "\t.previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\ : "i" (-EFAULT), "q" (__new), "1" (__old) \ : "memory" \ ); \ @@ -585,14 +626,14 @@ extern void __cmpxchg_wrong_size(void) case 2: \ { \ asm volatile("\n" \ - "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \ + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\ "2:\n" \ "\t.section .fixup, \"ax\"\n" \ "3:\tmov %3, %0\n" \ "\tjmp 2b\n" \ "\t.previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\ : "i" (-EFAULT), "r" (__new), "1" (__old) \ : "memory" \ ); \ @@ -601,14 +642,14 @@ extern void __cmpxchg_wrong_size(void) case 4: \ { \ asm volatile("\n" \ - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\ "2:\n" \ "\t.section .fixup, \"ax\"\n" \ "3:\tmov %3, %0\n" \ "\tjmp 2b\n" \ "\t.previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\ : "i" (-EFAULT), "r" (__new), "1" (__old) \ : "memory" \ ); \ @@ -620,14 +661,14 @@ extern void __cmpxchg_wrong_size(void) __cmpxchg_wrong_size(); \ \ asm volatile("\n" \ - "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\ "2:\n" \ "\t.section .fixup, \"ax\"\n" \ "3:\tmov %3, %0\n" \ "\tjmp 2b\n" \ "\t.previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\ : "i" (-EFAULT), "r" (__new), "1" (__old) \ : "memory" \ ); \ @@ -660,17 +701,6 @@ extern struct movsl_mask { #define ARCH_HAS_NOCACHE_UACCESS 1 -#ifdef CONFIG_X86_32 -# include -#else -# include -#endif - 
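Under CONFIG_PAX_MEMORY_UDEREF on x86_64, the ____m() macro above shifts any address below pax_user_shadow_base up into the kernel's shadow mapping of userland before it is dereferenced, while addresses at or above the base pass through unchanged. A minimal userspace model of that remapping; both the shadow base and the sample pointers are made-up values chosen only to show the arithmetic:

/* Illustrative model only - mirrors the ____m() check above. */
#include <stdio.h>

static unsigned long pax_user_shadow_base = 0xffff800000000000UL;	/* assumed stand-in */

static unsigned long shadow_remap(unsigned long addr)
{
	/* userland pointers sit below the shadow base and get relocated;
	 * kernel pointers are already above it and are left alone */
	if (addr < pax_user_shadow_base)
		addr += pax_user_shadow_base;
	return addr;
}

int main(void)
{
	unsigned long user_ptr   = 0x00007fffdeadb000UL;	/* hypothetical user address */
	unsigned long kernel_ptr = 0xffffffff81000000UL;	/* hypothetical kernel address */

	printf("user   %#lx -> %#lx\n", user_ptr, shadow_remap(user_ptr));
	printf("kernel %#lx -> %#lx\n", kernel_ptr, shadow_remap(kernel_ptr));
	return 0;
}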
-unsigned long __must_check _copy_from_user(void *to, const void __user *from, - unsigned n); -unsigned long __must_check _copy_to_user(void __user *to, const void *from, - unsigned n); - extern void __compiletime_error("usercopy buffer size is too small") __bad_copy_user(void); @@ -679,22 +709,30 @@ static inline void copy_user_overflow(int size, unsigned long count) WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); } +#ifdef CONFIG_X86_32 +# include +#else +# include +#endif + static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - int sz = __compiletime_object_size(to); + size_t sz = __compiletime_object_size(to); might_fault(); kasan_check_write(to, n); - if (likely(sz < 0 || sz >= n)) { - check_object_size(to, n, false); - n = _copy_from_user(to, from, n); - } else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); + if (unlikely(sz != (size_t)-1 && sz < n)) { + if (!__builtin_constant_p(n)) + copy_user_overflow(sz, n); + else + __bad_copy_user(); + } else if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else if ((long)n > 0) + memset(to, 0, n); return n; } @@ -702,19 +740,19 @@ copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { - int sz = __compiletime_object_size(from); + size_t sz = __compiletime_object_size(from); kasan_check_read(from, n); might_fault(); - if (likely(sz < 0 || sz >= n)) { - check_object_size(from, n, true); - n = _copy_to_user(to, from, n); - } else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); + if (unlikely(sz != (size_t)-1 && sz < n)) { + if (!__builtin_constant_p(n)) + copy_user_overflow(sz, n); + else + __bad_copy_user(); + } else if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); return n; } diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 7d3bdd1ed..67d81f675 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -34,9 +34,12 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero * The caller should also make sure he pins the user space address * so that we don't result in page fault and sleep. 
*/ -static __always_inline unsigned long __must_check +static __always_inline __size_overflow(3) unsigned long __must_check __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + check_object_size(from, n, true); return __copy_to_user_ll(to, from, n); } @@ -60,12 +63,17 @@ static __always_inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); + return __copy_to_user_inatomic(to, from, n); } -static __always_inline unsigned long +static __always_inline __size_overflow(3) unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + + check_object_size(to, n, false); return __copy_from_user_ll_nozero(to, from, n); } @@ -96,6 +104,10 @@ static __always_inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); + + if ((long)n < 0) + return n; + check_object_size(to, n, false); if (__builtin_constant_p(n)) { unsigned long ret; @@ -125,6 +137,11 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, const void __user *from, unsigned long n) { might_fault(); + + if ((long)n < 0) + return n; + + check_object_size(to, n, false); if (__builtin_constant_p(n)) { unsigned long ret; @@ -153,7 +170,11 @@ static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) { - return __copy_from_user_ll_nocache_nozero(to, from, n); + if ((long)n < 0) + return n; + + check_object_size(to, n, false); + return __copy_from_user_ll_nocache_nozero(to, from, n); } #endif /* _ASM_X86_UACCESS_32_H */ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 673059a10..0ec831089 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -11,6 +11,7 @@ #include #include #include +#include /* * Copy To/From Userspace @@ -18,14 +19,14 @@ /* Handles exceptions in both to and from, but doesn't do access_ok */ __must_check unsigned long -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); +copy_user_enhanced_fast_string(void *to, const void *from, unsigned long len); __must_check unsigned long -copy_user_generic_string(void *to, const void *from, unsigned len); +copy_user_generic_string(void *to, const void *from, unsigned long len); __must_check unsigned long -copy_user_generic_unrolled(void *to, const void *from, unsigned len); +copy_user_generic_unrolled(void *to, const void *from, unsigned long len); -static __always_inline __must_check unsigned long -copy_user_generic(void *to, const void *from, unsigned len) +static __always_inline __must_check __size_overflow(3) unsigned long +copy_user_generic(void *to, const void *from, unsigned long len) { unsigned ret; @@ -47,68 +48,86 @@ copy_user_generic(void *to, const void *from, unsigned len) } __must_check unsigned long -copy_in_user(void __user *to, const void __user *from, unsigned len); +copy_in_user(void __user *to, const void __user *from, unsigned long len); static __always_inline __must_check -int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) +unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size) { - int ret = 0; + size_t sz = __compiletime_object_size(dst); + unsigned ret = 0; + + if (size > INT_MAX) + return size; check_object_size(dst, size, false); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if 
(!access_ok_noprefault(VERIFY_READ, src, size)) + return size; +#endif + + if (unlikely(sz != (size_t)-1 && sz < size)) { + if(__builtin_constant_p(size)) + __bad_copy_user(); + else + copy_user_overflow(sz, size); + return size; + } + if (!__builtin_constant_p(size)) - return copy_user_generic(dst, (__force void *)src, size); + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size); switch (size) { case 1: __uaccess_begin(); - __get_user_asm(*(u8 *)dst, (u8 __user *)src, + __get_user_asm(*(u8 *)dst, (const u8 __user *)src, ret, "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: __uaccess_begin(); - __get_user_asm(*(u16 *)dst, (u16 __user *)src, + __get_user_asm(*(u16 *)dst, (const u16 __user *)src, ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: __uaccess_begin(); - __get_user_asm(*(u32 *)dst, (u32 __user *)src, + __get_user_asm(*(u32 *)dst, (const u32 __user *)src, ret, "l", "k", "=r", 4); __uaccess_end(); return ret; case 8: __uaccess_begin(); - __get_user_asm(*(u64 *)dst, (u64 __user *)src, + __get_user_asm(*(u64 *)dst, (const u64 __user *)src, ret, "q", "", "=r", 8); __uaccess_end(); return ret; case 10: __uaccess_begin(); - __get_user_asm(*(u64 *)dst, (u64 __user *)src, + __get_user_asm(*(u64 *)dst, (const u64 __user *)src, ret, "q", "", "=r", 10); if (likely(!ret)) __get_user_asm(*(u16 *)(8 + (char *)dst), - (u16 __user *)(8 + (char __user *)src), + (const u16 __user *)(8 + (const char __user *)src), ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 16: __uaccess_begin(); - __get_user_asm(*(u64 *)dst, (u64 __user *)src, + __get_user_asm(*(u64 *)dst, (const u64 __user *)src, ret, "q", "", "=r", 16); if (likely(!ret)) __get_user_asm(*(u64 *)(8 + (char *)dst), - (u64 __user *)(8 + (char __user *)src), + (const u64 __user *)(8 + (const char __user *)src), ret, "q", "", "=r", 8); __uaccess_end(); return ret; default: - return copy_user_generic(dst, (__force void *)src, size); + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size); } } static __always_inline __must_check -int __copy_from_user(void *dst, const void __user *src, unsigned size) +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) { might_fault(); kasan_check_write(dst, size); @@ -116,67 +135,85 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size) } static __always_inline __must_check -int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) +unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size) { - int ret = 0; + size_t sz = __compiletime_object_size(src); + unsigned ret = 0; + + if (size > INT_MAX) + return size; check_object_size(src, size, true); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!access_ok_noprefault(VERIFY_WRITE, dst, size)) + return size; +#endif + + if (unlikely(sz != (size_t)-1 && sz < size)) { + if(__builtin_constant_p(size)) + __bad_copy_user(); + else + copy_user_overflow(sz, size); + return size; + } + if (!__builtin_constant_p(size)) - return copy_user_generic((__force void *)dst, src, size); + return copy_user_generic((__force_kernel void *)____m(dst), src, size); switch (size) { case 1: __uaccess_begin(); - __put_user_asm(*(u8 *)src, (u8 __user *)dst, + __put_user_asm(*(const u8 *)src, (u8 __user *)dst, ret, "b", "b", "iq", 1); __uaccess_end(); return ret; case 2: __uaccess_begin(); - __put_user_asm(*(u16 *)src, (u16 __user *)dst, + __put_user_asm(*(const u16 *)src, (u16 __user *)dst, ret, "w", "w", "ir", 2); 
__uaccess_end(); return ret; case 4: __uaccess_begin(); - __put_user_asm(*(u32 *)src, (u32 __user *)dst, + __put_user_asm(*(const u32 *)src, (u32 __user *)dst, ret, "l", "k", "ir", 4); __uaccess_end(); return ret; case 8: __uaccess_begin(); - __put_user_asm(*(u64 *)src, (u64 __user *)dst, + __put_user_asm(*(const u64 *)src, (u64 __user *)dst, ret, "q", "", "er", 8); __uaccess_end(); return ret; case 10: __uaccess_begin(); - __put_user_asm(*(u64 *)src, (u64 __user *)dst, + __put_user_asm(*(const u64 *)src, (u64 __user *)dst, ret, "q", "", "er", 10); if (likely(!ret)) { asm("":::"memory"); - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst, ret, "w", "w", "ir", 2); } __uaccess_end(); return ret; case 16: __uaccess_begin(); - __put_user_asm(*(u64 *)src, (u64 __user *)dst, + __put_user_asm(*(const u64 *)src, (u64 __user *)dst, ret, "q", "", "er", 16); if (likely(!ret)) { asm("":::"memory"); - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst, ret, "q", "", "er", 8); } __uaccess_end(); return ret; default: - return copy_user_generic((__force void *)dst, src, size); + return copy_user_generic((__force_kernel void *)____m(dst), src, size); } } static __always_inline __must_check -int __copy_to_user(void __user *dst, const void *src, unsigned size) +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) { might_fault(); kasan_check_read(src, size); @@ -184,19 +221,30 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) } static __always_inline __must_check -int __copy_in_user(void __user *dst, const void __user *src, unsigned size) +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size) { - int ret = 0; + unsigned ret = 0; might_fault(); + + if (size > INT_MAX) + return size; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!access_ok_noprefault(VERIFY_READ, src, size)) + return size; + if (!access_ok_noprefault(VERIFY_WRITE, dst, size)) + return size; +#endif + if (!__builtin_constant_p(size)) - return copy_user_generic((__force void *)dst, - (__force void *)src, size); + return copy_user_generic((__force_kernel void *)____m(dst), + (__force_kernel const void *)____m(src), size); switch (size) { case 1: { u8 tmp; __uaccess_begin(); - __get_user_asm(tmp, (u8 __user *)src, + __get_user_asm(tmp, (const u8 __user *)src, ret, "b", "b", "=q", 1); if (likely(!ret)) __put_user_asm(tmp, (u8 __user *)dst, @@ -207,7 +255,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) case 2: { u16 tmp; __uaccess_begin(); - __get_user_asm(tmp, (u16 __user *)src, + __get_user_asm(tmp, (const u16 __user *)src, ret, "w", "w", "=r", 2); if (likely(!ret)) __put_user_asm(tmp, (u16 __user *)dst, @@ -219,7 +267,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) case 4: { u32 tmp; __uaccess_begin(); - __get_user_asm(tmp, (u32 __user *)src, + __get_user_asm(tmp, (const u32 __user *)src, ret, "l", "k", "=r", 4); if (likely(!ret)) __put_user_asm(tmp, (u32 __user *)dst, @@ -230,7 +278,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) case 8: { u64 tmp; __uaccess_begin(); - __get_user_asm(tmp, (u64 __user *)src, + __get_user_asm(tmp, (const u64 __user *)src, ret, "q", "", "=r", 8); if (likely(!ret)) __put_user_asm(tmp, (u64 __user *)dst, @@ -239,45 +287,67 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) return ret; } 
 	default:
-		return copy_user_generic((__force void *)dst,
-					 (__force void *)src, size);
+		return copy_user_generic((__force_kernel void *)____m(dst),
+					 (__force_kernel const void *)____m(src), size);
 	}
 }
 
-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
+static __must_check __always_inline unsigned long
+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
 {
 	kasan_check_write(dst, size);
 	return __copy_from_user_nocheck(dst, src, size);
 }
 
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+static __must_check __always_inline unsigned long
+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
 {
 	kasan_check_read(src, size);
 	return __copy_to_user_nocheck(dst, src, size);
 }
 
-extern long __copy_user_nocache(void *dst, const void __user *src,
-				unsigned size, int zerorest);
+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
+				unsigned long size, int zerorest) __size_overflow(3);
 
-static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+static inline unsigned long
+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
 {
 	might_fault();
 	kasan_check_write(dst, size);
+
+	if (size > INT_MAX)
+		return size;
+
+	check_object_size(dst, size, false);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (!access_ok_noprefault(VERIFY_READ, src, size))
+		return size;
+#endif
+
 	return __copy_user_nocache(dst, src, size, 1);
 }
 
-static inline int
+static inline unsigned long
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
-				  unsigned size)
+				  unsigned long size)
 {
 	kasan_check_write(dst, size);
+
+	if (size > INT_MAX)
+		return size;
+
+	check_object_size(dst, size, false);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (!access_ok_noprefault(VERIFY_READ, src, size))
+		return size;
+#endif
+
 	return __copy_user_nocache(dst, src, size, 0);
 }
 
 unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len);
+copy_user_handle_tail(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
 
 #endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index 5b2389815..77fdd78ba 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -11,7 +11,7 @@
  * and shift, for example.
  */
 struct word_at_a_time {
-	const unsigned long one_bits, high_bits;
+	unsigned long one_bits, high_bits;
 };
 
 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 6ba793178..dc843cd7d 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -126,7 +126,7 @@ struct x86_init_ops {
 	struct x86_init_timers		timers;
 	struct x86_init_iommu		iommu;
 	struct x86_init_pci		pci;
-};
+} __no_const;
 
 /**
  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
@@ -137,7 +137,7 @@ struct x86_cpuinit_ops {
 	void (*setup_percpu_clockev)(void);
 	void (*early_percpu_clock_init)(void);
 	void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
-};
+} __no_const;
 
 struct timespec;
 
@@ -225,12 +225,12 @@ struct x86_msi_ops {
 	void (*teardown_msi_irq)(unsigned int irq);
 	void (*teardown_msi_irqs)(struct pci_dev *dev);
 	void (*restore_msi_irqs)(struct pci_dev *dev);
-};
+} __no_const;
 
 struct x86_io_apic_ops {
 	unsigned int	(*read)   (unsigned int apic, unsigned int reg);
 	void		(*disable)(void);
-};
+} __no_const;
 
 extern struct x86_init_ops x86_init;
 extern struct x86_cpuinit_ops x86_cpuinit;
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index f5fb840b4..e45184eb3 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
  * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
  *   cases needing an extended handling.
  */
-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
 {
 	unsigned long mfn;
 
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
index 9dafe59cf..0293c1d70 100644
--- a/arch/x86/include/uapi/asm/e820.h
+++ b/arch/x86/include/uapi/asm/e820.h
@@ -69,7 +69,7 @@ struct e820map {
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000
 
-#define BIOS_BEGIN		0x000a0000
+#define BIOS_BEGIN		0x000c0000
 #define BIOS_END		0x00100000
 
 #define BIOS_ROM_BASE		0xffe00000
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 79076d75b..76acc7071 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL)	+= ldt.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y			+= probe_roms.o
+obj-$(CONFIG_X86_32)	+= sys_i386_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o mcount_64.o
 obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
 obj-$(CONFIG_SYSFS)	+= ksysfs.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 931ced8ca..6268c6269 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1345,7 +1345,7 @@ static void __init acpi_reduced_hw_init(void)
  * If your system is blacklisted here, but you find that acpi=force
  * works for you, please contact linux-acpi@vger.kernel.org
  */
-static struct dmi_system_id __initdata acpi_dmi_table[] = {
+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
 	/*
 	 * Boxes that need ACPI disabled
 	 */
@@ -1420,7 +1420,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 };
 
 /* second table for DMI checks that should run after early-quirks */
-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
 	/*
 	 * HP laptops which use
a DSDT reporting as HP/SB400/10000, * which includes some code which overrides all temperature diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 48587335e..3353d988d 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -100,8 +100,12 @@ int x86_acpi_suspend_lowlevel(void) #else /* CONFIG_64BIT */ #ifdef CONFIG_SMP initial_stack = (unsigned long)temp_stack + sizeof(temp_stack); + + pax_open_kernel(); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(smp_processor_id()); + pax_close_kernel(); + initial_gs = per_cpu_offset(smp_processor_id()); #endif initial_code = (unsigned long)wakeup_long64; diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index 0c26b1b44..dcc0a4f01 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -2,6 +2,7 @@ #include #include #include +#include # Copyright 2003, 2008 Pavel Machek , distribute under GPLv2 @@ -31,13 +32,11 @@ wakeup_pmode_return: # and restore the stack ... but you need gdt for this to work movl saved_context_esp, %esp - movl %cs:saved_magic, %eax - cmpl $0x12345678, %eax + cmpl $0x12345678, saved_magic jne bogus_magic # jump to place where we left off - movl saved_eip, %eax - jmp *%eax + jmp *(saved_eip) bogus_magic: jmp bogus_magic @@ -59,7 +58,7 @@ save_registers: popl saved_context_eflags movl $ret_point, saved_eip - ret + pax_ret save_registers restore_registers: @@ -69,13 +68,14 @@ restore_registers: movl saved_context_edi, %edi pushl saved_context_eflags popfl - ret + ASM_CLAC + pax_ret restore_registers ENTRY(do_suspend_lowlevel) - call save_processor_state - call save_registers + pax_direct_call save_processor_state + pax_direct_call save_registers pushl $3 - call x86_acpi_enter_sleep_state + pax_direct_call x86_acpi_enter_sleep_state addl $4, %esp # In case of S3 failure, we'll emerge here. 
Jump @@ -83,9 +83,9 @@ ENTRY(do_suspend_lowlevel) jmp ret_point .p2align 4,,7 ret_point: - call restore_registers - call restore_processor_state - ret + pax_direct_call restore_registers + pax_direct_call restore_processor_state + pax_ret do_suspend_lowlevel .data ALIGN diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 169963f47..55a72f5c2 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -6,6 +6,7 @@ #include #include #include +#include # Copyright 2003 Pavel Machek , distribute under GPLv2 @@ -43,7 +44,7 @@ ENTRY(do_suspend_lowlevel) FRAME_BEGIN subq $8, %rsp xorl %eax, %eax - call save_processor_state + pax_direct_call save_processor_state movq $saved_context, %rax movq %rsp, pt_regs_sp(%rax) @@ -75,7 +76,7 @@ ENTRY(do_suspend_lowlevel) addq $8, %rsp movl $3, %edi xorl %eax, %eax - call x86_acpi_enter_sleep_state + pax_direct_call x86_acpi_enter_sleep_state /* in case something went wrong, restore the machine status and go on */ jmp .Lresume_point @@ -93,6 +94,7 @@ ENTRY(do_suspend_lowlevel) movq %rbx, %cr0 pushq pt_regs_flags(%rax) popfq + ASM_CLAC movq pt_regs_sp(%rax), %rsp movq pt_regs_bp(%rax), %rbp movq pt_regs_si(%rax), %rsi diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 5cb272a7a..5b3aa2017 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -21,6 +21,7 @@ #include #include #include +#include int __read_mostly alternatives_patched; @@ -262,7 +263,9 @@ static void __init_or_module add_nops(void *insns, unsigned int len) unsigned int noplen = len; if (noplen > ASM_NOP_MAX) noplen = ASM_NOP_MAX; + pax_open_kernel(); memcpy(insns, ideal_nops[noplen], noplen); + pax_close_kernel(); insns += noplen; len -= noplen; } @@ -290,6 +293,13 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) if (a->replacementlen != 5) return; +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if (orig_insn < (u8 *)_text || (u8 *)_einittext <= orig_insn) + orig_insn = (u8 *)ktva_ktla((unsigned long)orig_insn); + else + orig_insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; +#endif + o_dspl = *(s32 *)(insnbuf + 1); /* next_rip of the replacement JMP */ @@ -365,6 +375,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start, { struct alt_instr *a; u8 *instr, *replacement; + u8 *vinstr, *vreplacement; u8 insnbuf[MAX_PATCH_LEN]; DPRINTK("alt table %p -> %p", start, end); @@ -380,46 +391,81 @@ void __init_or_module apply_alternatives(struct alt_instr *start, for (a = start; a < end; a++) { int insnbuf_sz = 0; - instr = (u8 *)&a->instr_offset + a->instr_offset; - replacement = (u8 *)&a->repl_offset + a->repl_offset; + vinstr = instr = (u8 *)&a->instr_offset + a->instr_offset; + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if ((u8 *)_text - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) <= instr && + instr < (u8 *)_einittext - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR)) { + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; + vinstr = (u8 *)ktla_ktva((unsigned long)instr); + } else if ((u8 *)_text <= instr && instr < (u8 *)_einittext) { + vinstr = (u8 *)ktla_ktva((unsigned long)instr); + } else { + instr = (u8 *)ktva_ktla((unsigned long)instr); + } +#endif + + vreplacement = replacement = (u8 *)&a->repl_offset + a->repl_offset; + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if ((u8 *)_text - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) <= replacement && + replacement < (u8 
*)_einittext - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR)) { + replacement += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; + vreplacement = (u8 *)ktla_ktva((unsigned long)replacement); + } else if ((u8 *)_text <= replacement && replacement < (u8 *)_einittext) { + vreplacement = (u8 *)ktla_ktva((unsigned long)replacement); + } else + replacement = (u8 *)ktva_ktla((unsigned long)replacement); +#endif + BUG_ON(a->instrlen > sizeof(insnbuf)); BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); if (!boot_cpu_has(a->cpuid)) { if (a->padlen > 1) - optimize_nops(a, instr); + optimize_nops(a, vinstr); continue; } - DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d", + DPRINTK("feat: %d*32+%d, old: (%p/%p, len: %d), repl: (%p, len: %d), pad: %d", a->cpuid >> 5, a->cpuid & 0x1f, - instr, a->instrlen, - replacement, a->replacementlen, a->padlen); + instr, vinstr, a->instrlen, + vreplacement, a->replacementlen, a->padlen); - DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr); - DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement); + DUMP_BYTES(vinstr, a->instrlen, "%p: old_insn: ", vinstr); + DUMP_BYTES(vreplacement, a->replacementlen, "%p: rpl_insn: ", vreplacement); - memcpy(insnbuf, replacement, a->replacementlen); + memcpy(insnbuf, vreplacement, a->replacementlen); insnbuf_sz = a->replacementlen; - /* 0xe8 is a relative jump; fix the offset. */ + /* 0xe8 is a call; fix the relative offset. */ if (*insnbuf == 0xe8 && a->replacementlen == 5) { - *(s32 *)(insnbuf + 1) += replacement - instr; + *(s32 *)(insnbuf + 1) += vreplacement - instr; DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx", *(s32 *)(insnbuf + 1), - (unsigned long)instr + *(s32 *)(insnbuf + 1) + 5); + (unsigned long)vinstr + *(s32 *)(insnbuf + 1) + 5); } - if (a->replacementlen && is_jmp(replacement[0])) - recompute_jump(a, instr, replacement, insnbuf); +#ifdef CONFIG_PAX_RAP + /* 0xeb ... 0xe8 is a rap_call; fix the relative offset. 
*/ + if (*insnbuf == 0xeb && a->replacementlen == 5 + 3 + 8 + 2 && insnbuf[a->replacementlen - 5] == 0xe8) { + *(s32 *)(insnbuf + a->replacementlen - 4) += vreplacement - instr; + DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx", + *(s32 *)(insnbuf + a->replacementlen - 4), + (unsigned long)vinstr + *(s32 *)(insnbuf + a->replacementlen - 4) + 5); + } +#endif + + if (a->replacementlen && is_jmp(vreplacement[0])) + recompute_jump(a, instr, vreplacement, insnbuf); if (a->instrlen > a->replacementlen) { add_nops(insnbuf + a->replacementlen, a->instrlen - a->replacementlen); insnbuf_sz += a->instrlen - a->replacementlen; } - DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr); + DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", vinstr); text_poke_early(instr, insnbuf, insnbuf_sz); } @@ -435,10 +481,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end, for (poff = start; poff < end; poff++) { u8 *ptr = (u8 *)poff + *poff; +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; +#endif + if (!*poff || ptr < text || ptr >= text_end) continue; /* turn DS segment override prefix into lock prefix */ - if (*ptr == 0x3e) + if (*(u8 *)ktla_ktva((unsigned long)ptr) == 0x3e) text_poke(ptr, ((unsigned char []){0xf0}), 1); } mutex_unlock(&text_mutex); @@ -453,10 +505,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end, for (poff = start; poff < end; poff++) { u8 *ptr = (u8 *)poff + *poff; +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; +#endif + if (!*poff || ptr < text || ptr >= text_end) continue; /* turn lock prefix into DS segment override prefix */ - if (*ptr == 0xf0) + if (*(u8 *)ktla_ktva((unsigned long)ptr) == 0xf0) text_poke(ptr, ((unsigned char []){0x3E}), 1); } mutex_unlock(&text_mutex); @@ -545,7 +603,9 @@ void alternatives_enable_smp(void) if (uniproc_patched) { pr_info("switching to SMP code\n"); BUG_ON(num_online_cpus() != 1); + pax_open_kernel(); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); + pax_close_kernel(); clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP); list_for_each_entry(mod, &smp_alt_modules, next) alternatives_smp_lock(mod->locks, mod->locks_end, @@ -593,7 +653,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ - memcpy(insnbuf, p->instr, p->len); + memcpy(insnbuf, (const void *)ktla_ktva((unsigned long)p->instr), p->len); used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, (unsigned long)p->instr, p->len); @@ -640,7 +700,7 @@ void __init alternative_instructions(void) if (!uniproc_patched || num_possible_cpus() == 1) free_init_pages("SMP alternatives", (unsigned long)__smp_locks, - (unsigned long)__smp_locks_end); + PAGE_ALIGN((unsigned long)__smp_locks_end)); #endif apply_paravirt(__parainstructions, __parainstructions_end); @@ -661,13 +721,17 @@ void __init alternative_instructions(void) * instructions. And on the local CPU you need to be protected again NMI or MCE * handlers seeing an inconsistent instruction while you patch. 
*/ -void *__init_or_module text_poke_early(void *addr, const void *opcode, +void *__kprobes text_poke_early(void *addr, const void *opcode, size_t len) { unsigned long flags; local_irq_save(flags); - memcpy(addr, opcode, len); + + pax_open_kernel(); + memcpy((void *)ktla_ktva((unsigned long)addr), opcode, len); sync_core(); + pax_close_kernel(); + local_irq_restore(flags); /* Could also do a CLFLUSH here to speed up CPU recovery; but that causes hangs on some VIA CPUs. */ @@ -689,20 +753,29 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, */ void *text_poke(void *addr, const void *opcode, size_t len) { - unsigned long flags; - char *vaddr; + unsigned char *vaddr = (void *)ktla_ktva((unsigned long)addr); struct page *pages[2]; - int i; + size_t i; + +#ifndef CONFIG_PAX_KERNEXEC + unsigned long flags; +#endif if (!core_kernel_text((unsigned long)addr)) { - pages[0] = vmalloc_to_page(addr); - pages[1] = vmalloc_to_page(addr + PAGE_SIZE); + pages[0] = vmalloc_to_page(vaddr); + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE); } else { - pages[0] = virt_to_page(addr); + pages[0] = virt_to_page(vaddr); WARN_ON(!PageReserved(pages[0])); - pages[1] = virt_to_page(addr + PAGE_SIZE); + pages[1] = virt_to_page(vaddr + PAGE_SIZE); } BUG_ON(!pages[0]); + +#ifdef CONFIG_PAX_KERNEXEC + text_poke_early(addr, opcode, len); + for (i = 0; i < len; i++) + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]); +#else local_irq_save(flags); set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); if (pages[1]) @@ -719,6 +792,7 @@ void *text_poke(void *addr, const void *opcode, size_t len) for (i = 0; i < len; i++) BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); local_irq_restore(flags); +#endif return addr; } @@ -772,7 +846,7 @@ int poke_int3_handler(struct pt_regs *regs) */ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) { - unsigned char int3 = 0xcc; + const unsigned char int3 = 0xcc; bp_int3_handler = handler; bp_int3_addr = (u8 *)addr + sizeof(int3); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index f2234918e..90833448b 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -181,7 +181,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR; /* * Debug level, exported for io_apic.c */ -unsigned int apic_verbosity; +int apic_verbosity; int pic_mode; @@ -1904,7 +1904,7 @@ static void __smp_error_interrupt(struct pt_regs *regs) apic_write(APIC_ESR, 0); v = apic_read(APIC_ESR); ack_APIC_irq(); - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x", smp_processor_id(), v); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d1e25564b..2f6454d1b 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1683,7 +1683,7 @@ static unsigned int startup_ioapic_irq(struct irq_data *data) return was_pending; } -atomic_t irq_mis_count; +atomic_unchecked_t irq_mis_count; #ifdef CONFIG_GENERIC_PENDING_IRQ static bool io_apic_level_ack_pending(struct mp_chip_data *data) @@ -1822,7 +1822,7 @@ static void ioapic_ack_level(struct irq_data *irq_data) * at the cpu. 
*/ if (!(v & (1 << (i & 0x1f)))) { - atomic_inc(&irq_mis_count); + atomic_inc_unchecked(&irq_mis_count); eoi_ioapic_pin(cfg->vector, irq_data->chip_data); } @@ -1868,7 +1868,7 @@ static int ioapic_set_affinity(struct irq_data *irq_data, return ret; } -static struct irq_chip ioapic_chip __read_mostly = { +static struct irq_chip ioapic_chip = { .name = "IO-APIC", .irq_startup = startup_ioapic_irq, .irq_mask = mask_ioapic_irq, @@ -1879,7 +1879,7 @@ static struct irq_chip ioapic_chip __read_mostly = { .flags = IRQCHIP_SKIP_SET_WAKE, }; -static struct irq_chip ioapic_ir_chip __read_mostly = { +static struct irq_chip ioapic_ir_chip = { .name = "IR-IO-APIC", .irq_startup = startup_ioapic_irq, .irq_mask = mask_ioapic_irq, @@ -1937,7 +1937,7 @@ static void ack_lapic_irq(struct irq_data *data) ack_APIC_irq(); } -static struct irq_chip lapic_chip __read_mostly = { +static struct irq_chip lapic_chip = { .name = "local-APIC", .irq_mask = mask_lapic_irq, .irq_unmask = unmask_lapic_irq, diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 015bbf30e..77db80464 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -269,7 +269,7 @@ static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg) hpet_msi_write(irq_data_get_irq_handler_data(data), msg); } -static struct irq_chip hpet_msi_controller __ro_after_init = { +static irq_chip_no_const hpet_msi_controller __ro_after_init = { .name = "HPET-MSI", .irq_unmask = hpet_msi_unmask, .irq_mask = hpet_msi_mask, @@ -315,7 +315,7 @@ static struct msi_domain_info hpet_msi_domain_info = { .chip = &hpet_msi_controller, }; -struct irq_domain *hpet_create_irq_domain(int hpet_id) +__init struct irq_domain *hpet_create_irq_domain(int hpet_id) { struct irq_domain *parent; struct irq_alloc_info info; diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 5d30c5e42..3c83cc467 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -37,6 +37,7 @@ static struct irq_chip lapic_controller; static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; #endif +void lock_vector_lock(void) __acquires(&vector_lock); void lock_vector_lock(void) { /* Used to the online set of cpus does not change @@ -45,6 +46,7 @@ void lock_vector_lock(void) raw_spin_lock(&vector_lock); } +void unlock_vector_lock(void) __releases(&vector_lock); void unlock_vector_lock(void) { raw_spin_unlock(&vector_lock); diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 51287cd90..24489b310 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex); * This is for buggy BIOS's that refer to (real mode) segment 0x40 * even though they are called in protected mode. 
*/ -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); static const char driver_version[] = "1.16ac"; /* no spaces */ @@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call) BUG_ON(cpu != 0); gdt = get_cpu_gdt_table(cpu); save_desc_40 = gdt[0x40 / 8]; + + pax_open_kernel(); gdt[0x40 / 8] = bad_bios_desc; + pax_close_kernel(); apm_irq_save(flags); APM_DO_SAVE_SEGS; @@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call) &call->esi); APM_DO_RESTORE_SEGS; apm_irq_restore(flags); + + pax_open_kernel(); gdt[0x40 / 8] = save_desc_40; + pax_close_kernel(); + put_cpu(); return call->eax & 0xff; @@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call) BUG_ON(cpu != 0); gdt = get_cpu_gdt_table(cpu); save_desc_40 = gdt[0x40 / 8]; + + pax_open_kernel(); gdt[0x40 / 8] = bad_bios_desc; + pax_close_kernel(); apm_irq_save(flags); APM_DO_SAVE_SEGS; @@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call) &call->eax); APM_DO_RESTORE_SEGS; apm_irq_restore(flags); + + pax_open_kernel(); gdt[0x40 / 8] = save_desc_40; + pax_close_kernel(); + put_cpu(); return error; } @@ -2042,7 +2056,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata apm_dmi_table[] = { +static const struct dmi_system_id __initconst apm_dmi_table[] = { { print_if_true, KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.", @@ -2352,12 +2366,15 @@ static int __init apm_init(void) * code to that CPU. */ gdt = get_cpu_gdt_table(0); + + pax_open_kernel(); set_desc_base(&gdt[APM_CS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); set_desc_base(&gdt[APM_CS_16 >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); set_desc_base(&gdt[APM_DS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); + pax_close_kernel(); proc_create("apm", 0, NULL, &apm_file_ops); diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index c62e015b1..2abc97cf3 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -30,6 +30,9 @@ void common(void) { BLANK(); OFFSET(TASK_threadsp, task_struct, thread.sp); +#ifdef CONFIG_PAX_RAP + OFFSET(TASK_stack, task_struct, stack); +#endif #ifdef CONFIG_CC_STACKPROTECTOR OFFSET(TASK_stack_canary, task_struct, stack_canary); #endif @@ -37,6 +40,8 @@ void common(void) { BLANK(); OFFSET(TASK_TI_flags, task_struct, thread_info.flags); OFFSET(TASK_addr_limit, task_struct, thread.addr_limit); + OFFSET(TASK_lowest_stack, task_struct, thread.lowest_stack); + OFFSET(TASK_thread_sp0, task_struct, thread.sp0); BLANK(); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); @@ -71,8 +76,26 @@ void common(void) { OFFSET(PV_CPU_iret, pv_cpu_ops, iret); OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); + +#ifdef CONFIG_PAX_KERNEXEC + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3); + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3); +#ifdef CONFIG_X86_64 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched); +#endif #endif +#endif + + BLANK(); + DEFINE(PAGE_SIZE_asm, PAGE_SIZE); + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); + DEFINE(THREAD_SIZE_asm, THREAD_SIZE); + #ifdef CONFIG_XEN BLANK(); OFFSET(XEN_vcpu_info_mask, vcpu_info, 
evtchn_upcall_mask); @@ -91,4 +114,5 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); + DEFINE(TSS_size, sizeof(struct tss_struct)); } diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 4a8697f7d..8a1342851 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -12,10 +12,6 @@ endif KCOV_INSTRUMENT_common.o := n KCOV_INSTRUMENT_perf_event.o := n -# Make sure load_percpu_segment has no stackprotector -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_common.o := $(nostackp) - obj-y := intel_cacheinfo.o scattered.o topology.o obj-y += common.o obj-y += rdrand.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 2b4cf0423..c73b25e4f 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -808,7 +808,7 @@ static void init_amd(struct cpuinfo_x86 *c) static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* AMD errata T13 (order #21922) */ - if ((c->x86 == 6)) { + if (c->x86 == 6) { /* Duron Rev A0 */ if (c->x86_model == 3 && c->x86_mask == 0) size = 64; diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c index a972ac4c7..938c16341 100644 --- a/arch/x86/kernel/cpu/bugs_64.c +++ b/arch/x86/kernel/cpu/bugs_64.c @@ -10,6 +10,7 @@ #include #include #include +#include void __init check_bugs(void) { @@ -18,6 +19,7 @@ void __init check_bugs(void) pr_info("CPU: "); print_cpu_info(&boot_cpu_data); #endif + set_memory_nx((unsigned long)_sinitdata, (__START_KERNEL_map + KERNEL_IMAGE_SIZE - (unsigned long)_sinitdata) >> PAGE_SHIFT); alternative_instructions(); /* diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4eece91ad..9eeb03bc4 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -93,60 +93,6 @@ static const struct cpu_dev default_cpu = { static const struct cpu_dev *this_cpu = &default_cpu; -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { -#ifdef CONFIG_X86_64 - /* - * We need valid kernel segments for data and code in long mode too - * IRET will check the segment types kkeil 2000/10/28 - * Also sysret mandates a special GDT layout - * - * TLS descriptors are currently at a different place compared to i386. - * Hopefully nobody expects them at a fixed place (Wine?) - */ - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), -#else - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), - /* - * Segments used for calling PnP BIOS have byte granularity. - * They code segments and data segments have fixed 64k limits, - * the transfer segment sizes are set at run time. 
- */ - /* 32-bit code */ - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), - /* 16-bit code */ - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), - /* - * The APM segments have byte granularity and their bases - * are set at run time. All have 64k limits. - */ - /* 32-bit code */ - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), - /* 16-bit code */ - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), - /* data */ - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), - - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - GDT_STACK_CANARY_INIT -#endif -} }; -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); - static int __init x86_mpx_setup(char *s) { /* require an exact match without trailing characters */ @@ -281,6 +227,10 @@ static __always_inline void setup_smep(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_SMEP)) cr4_set_bits(X86_CR4_SMEP); +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_NONE + else + panic("PAX: this KERNEXEC configuration requires SMEP support\n"); +#endif } static __init int setup_disable_smap(char *arg) @@ -306,6 +256,109 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) } } +#ifdef CONFIG_PAX_MEMORY_UDEREF +#ifdef CONFIG_X86_64 +static bool uderef_enabled __read_only = true; +unsigned long pax_user_shadow_base __read_only; +EXPORT_SYMBOL(pax_user_shadow_base); +extern char patch_pax_enter_kernel_user[]; +extern char patch_pax_exit_kernel_user[]; + +static int __init setup_pax_weakuderef(char *str) +{ + if (uderef_enabled) + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT; + return 1; +} +__setup("pax_weakuderef", setup_pax_weakuderef); +#endif + +static int __init setup_pax_nouderef(char *str) +{ +#ifdef CONFIG_X86_32 + unsigned int cpu; + struct desc_struct *gdt; + + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { + gdt = get_cpu_gdt_table(cpu); + gdt[GDT_ENTRY_KERNEL_DS].type = 3; + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf; + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf; + } + loadsegment(ds, __KERNEL_DS); + loadsegment(es, __KERNEL_DS); + loadsegment(ss, __KERNEL_DS); +#else + memcpy(patch_pax_enter_kernel_user, (unsigned char []){0xc3}, 1); + memcpy(patch_pax_exit_kernel_user, (unsigned char []){0xc3}, 1); + clone_pgd_mask = ~(pgdval_t)0UL; + pax_user_shadow_base = 0UL; + setup_clear_cpu_cap(X86_FEATURE_PCIDUDEREF); + uderef_enabled = false; +#endif + + return 0; +} +early_param("pax_nouderef", setup_pax_nouderef); +#endif + +#ifdef CONFIG_X86_64 +static __init int setup_disable_pcid(char *arg) +{ + setup_clear_cpu_cap(X86_FEATURE_PCID); + setup_clear_cpu_cap(X86_FEATURE_INVPCID); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (uderef_enabled) + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT; +#endif + + return 1; +} +__setup("nopcid", setup_disable_pcid); + +static void setup_pcid(struct cpuinfo_x86 *c) +{ + if (cpu_has(c, X86_FEATURE_PCID)) { + printk("PAX: PCID detected\n"); + cr4_set_bits(X86_CR4_PCIDE); + } else + clear_cpu_cap(c, X86_FEATURE_INVPCID); + + if (cpu_has(c, X86_FEATURE_INVPCID)) + printk("PAX: INVPCID detected\n"); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!uderef_enabled) { + printk("PAX: UDEREF disabled\n"); + return; + } + + if (!cpu_has(c, 
X86_FEATURE_PCID)) { + pax_open_kernel(); + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT; + pax_close_kernel(); + printk("PAX: slow and weak UDEREF enabled\n"); + return; + } + + set_cpu_cap(c, X86_FEATURE_PCIDUDEREF); + + pax_open_kernel(); + clone_pgd_mask = ~(pgdval_t)0UL; + pax_close_kernel(); + if (pax_user_shadow_base) + printk("PAX: weak UDEREF enabled\n"); + else { + set_cpu_cap(c, X86_FEATURE_STRONGUDEREF); + printk("PAX: strong UDEREF enabled\n"); + } +#endif + +} +#endif + /* * Protection Keys are not available in 32-bit mode. */ @@ -451,7 +504,7 @@ void switch_to_new_gdt(int cpu) { struct desc_ptr gdt_descr; - gdt_descr.address = (long)get_cpu_gdt_table(cpu); + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); /* Reload the per-cpu base */ @@ -973,9 +1026,11 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c) * in case CQM bits really aren't there in this CPU. */ if (c != &boot_cpu_data) { + pax_open_kernel(); boot_cpu_data.x86_cache_max_rmid = min(boot_cpu_data.x86_cache_max_rmid, c->x86_cache_max_rmid); + pax_close_kernel(); } } @@ -1064,6 +1119,20 @@ static void identify_cpu(struct cpuinfo_x86 *c) setup_smep(c); setup_smap(c); +#ifdef CONFIG_X86_32 +#ifdef CONFIG_PAX_PAGEEXEC + if (!(__supported_pte_mask & _PAGE_NX)) + clear_cpu_cap(c, X86_FEATURE_PSE); +#endif +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + clear_cpu_cap(c, X86_FEATURE_SEP); +#endif +#endif + +#ifdef CONFIG_X86_64 + setup_pcid(c); +#endif + /* * The vendor-specific functions might have changed features. * Now we do "generic changes." @@ -1109,10 +1178,14 @@ static void identify_cpu(struct cpuinfo_x86 *c) * executed, c == &boot_cpu_data. */ if (c != &boot_cpu_data) { + pax_open_kernel(); + /* AND the already accumulated flags with these */ for (i = 0; i < NCAPINTS; i++) boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; + pax_close_kernel(); + /* OR, i.e. replicate the bug flags */ for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; @@ -1142,7 +1215,7 @@ void enable_sep_cpu(void) return; cpu = get_cpu(); - tss = &per_cpu(cpu_tss, cpu); + tss = cpu_tss + cpu; /* * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- @@ -1308,6 +1381,8 @@ EXPORT_PER_CPU_SYMBOL(current_task); DEFINE_PER_CPU(char *, irq_stack_ptr) = init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE; +DEFINE_PER_CPU(char *, irq_stack_ptr_lowmem) = + init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE; DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; @@ -1400,21 +1475,21 @@ EXPORT_PER_CPU_SYMBOL(current_task); DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; EXPORT_PER_CPU_SYMBOL(__preempt_count); +#ifdef CONFIG_CC_STACKPROTECTOR +DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); +#endif + +#endif /* CONFIG_X86_64 */ + /* * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find * the top of the kernel stack. Use an extra percpu variable to track the * top of the kernel stack directly. 
*/ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = - (unsigned long)&init_thread_union + THREAD_SIZE; + (unsigned long)&init_thread_union - 16 + THREAD_SIZE; EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); -#ifdef CONFIG_CC_STACKPROTECTOR -DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); -#endif - -#endif /* CONFIG_X86_64 */ - /* * Clear all 6 debug registers: */ @@ -1490,7 +1565,7 @@ void cpu_init(void) */ load_ucode_ap(); - t = &per_cpu(cpu_tss, cpu); + t = cpu_tss + cpu; oist = &per_cpu(orig_ist, cpu); #ifdef CONFIG_NUMA @@ -1522,7 +1597,6 @@ void cpu_init(void) wrmsrl(MSR_KERNEL_GS_BASE, 0); barrier(); - x86_configure_nx(); x2apic_setup(); /* @@ -1574,7 +1648,7 @@ void cpu_init(void) { int cpu = smp_processor_id(); struct task_struct *curr = current; - struct tss_struct *t = &per_cpu(cpu_tss, cpu); + struct tss_struct *t = cpu_tss + cpu; struct thread_struct *thread = &curr->thread; wait_for_master_cpu(cpu); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fcd484d2b..8a466c149 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -56,7 +56,9 @@ void check_mpx_erratum(struct cpuinfo_x86 *c) * is no such hardware known at the moment. */ if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) { + pax_open_kernel(); setup_clear_cpu_cap(X86_FEATURE_MPX); + pax_close_kernel(); pr_warn("x86/mpx: Disabling MPX since SMEP not present\n"); } } @@ -177,8 +179,10 @@ static void early_init_intel(struct cpuinfo_x86 *c) rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { pr_info("Disabled fast string operations\n"); + pax_open_kernel(); setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); setup_clear_cpu_cap(X86_FEATURE_ERMS); + pax_close_kernel(); } } @@ -194,7 +198,9 @@ static void early_init_intel(struct cpuinfo_x86 *c) */ if (c->x86 == 5 && c->x86_model == 9) { pr_info("Disabling PGE capability bit\n"); + pax_open_kernel(); setup_clear_cpu_cap(X86_FEATURE_PGE); + pax_close_kernel(); } if (c->cpuid_level >= 0x00000001) { diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index de6626c18..c84e8c157 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -519,25 +519,23 @@ cache_private_attrs_is_visible(struct kobject *kobj, return 0; } +static struct attribute *amd_l3_attrs[4]; + static struct attribute_group cache_private_group = { .is_visible = cache_private_attrs_is_visible, + .attrs = amd_l3_attrs, }; static void init_amd_l3_attrs(void) { int n = 1; - static struct attribute **amd_l3_attrs; - - if (amd_l3_attrs) /* already initialized */ - return; if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) n += 2; if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) n += 1; - amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL); - if (!amd_l3_attrs) + if (n > 1 && amd_l3_attrs[0]) /* already initialized */ return; n = 0; @@ -547,8 +545,6 @@ static void init_amd_l3_attrs(void) } if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) amd_l3_attrs[n++] = &dev_attr_subcaches.attr; - - cache_private_group.attrs = amd_l3_attrs; } const struct attribute_group * @@ -559,7 +555,7 @@ cache_get_priv_group(struct cacheinfo *this_leaf) if (this_leaf->level < 3 || !nb) return NULL; - if (nb && nb->l3_cache.indices) + if (nb->l3_cache.indices) init_amd_l3_attrs(); return &cache_private_group; diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index a7fdf453d..87c1a3106 100644 --- 
a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "mce-internal.h" @@ -210,8 +211,7 @@ static struct notifier_block mce_srao_nb; void mce_register_decode_chain(struct notifier_block *nb) { /* Ensure SRAO notifier has the highest priority in the decode chain. */ - if (nb != &mce_srao_nb && nb->priority == INT_MAX) - nb->priority -= 1; + BUG_ON(nb != &mce_srao_nb && nb->priority == INT_MAX); atomic_notifier_chain_register(&x86_mce_decoder_chain, nb); } @@ -263,7 +263,7 @@ static inline u32 smca_misc_reg(int bank) return MSR_AMD64_SMCA_MCx_MISC(bank); } -struct mca_msr_regs msr_ops = { +struct mca_msr_regs msr_ops __read_only = { .ctl = ctl_reg, .status = status_reg, .addr = addr_reg, @@ -282,7 +282,7 @@ static void print_mce(struct mce *m) !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", m->cs, m->ip); - if (m->cs == __KERNEL_CS) + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS) print_symbol("{%s}", m->ip); pr_cont("\n"); } @@ -322,10 +322,10 @@ static void print_mce(struct mce *m) #define PANIC_TIMEOUT 5 /* 5 seconds */ -static atomic_t mce_panicked; +static atomic_unchecked_t mce_panicked; static int fake_panic; -static atomic_t mce_fake_panicked; +static atomic_unchecked_t mce_fake_panicked; /* Panic in progress. Enable interrupts and wait for final IPI */ static void wait_for_panic(void) @@ -351,7 +351,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) /* * Make sure only one CPU runs in machine check panic */ - if (atomic_inc_return(&mce_panicked) > 1) + if (atomic_inc_return_unchecked(&mce_panicked) > 1) wait_for_panic(); barrier(); @@ -359,7 +359,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) console_verbose(); } else { /* Don't log too much for fake panic */ - if (atomic_inc_return(&mce_fake_panicked) > 1) + if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1) return; } pending = mce_gen_pool_prepare_records(); @@ -395,7 +395,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) if (!fake_panic) { if (panic_timeout == 0) panic_timeout = mca_cfg.panic_timeout; - panic(msg); + panic("%s", msg); } else pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); } @@ -787,7 +787,7 @@ static int mce_timed_out(u64 *t, const char *msg) * might have been modified by someone else. */ rmb(); - if (atomic_read(&mce_panicked)) + if (atomic_read_unchecked(&mce_panicked)) wait_for_panic(); if (!mca_cfg.monarch_timeout) goto out; @@ -1706,10 +1706,12 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) * Install proper ops for Scalable MCA enabled processors */ if (mce_flags.smca) { + pax_open_kernel(); msr_ops.ctl = smca_ctl_reg; msr_ops.status = smca_status_reg; msr_ops.addr = smca_addr_reg; msr_ops.misc = smca_misc_reg; + pax_close_kernel(); } mce_amd_feature_init(c); @@ -1762,7 +1764,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) } /* Call the installed machine check handler for this CPU setup. 
*/ -void (*machine_check_vector)(struct pt_regs *, long error_code) = +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only = unexpected_machine_check; /* @@ -1791,7 +1793,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) return; } + pax_open_kernel(); machine_check_vector = do_machine_check; + pax_close_kernel(); __mcheck_cpu_init_generic(); __mcheck_cpu_init_vendor(c); @@ -1823,7 +1827,7 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c) */ static DEFINE_SPINLOCK(mce_chrdev_state_lock); -static int mce_chrdev_open_count; /* #times opened */ +static local_t mce_chrdev_open_count; /* #times opened */ static int mce_chrdev_open_exclu; /* already open exclusive? */ static int mce_chrdev_open(struct inode *inode, struct file *file) @@ -1831,7 +1835,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) spin_lock(&mce_chrdev_state_lock); if (mce_chrdev_open_exclu || - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) { + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) { spin_unlock(&mce_chrdev_state_lock); return -EBUSY; @@ -1839,7 +1843,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) if (file->f_flags & O_EXCL) mce_chrdev_open_exclu = 1; - mce_chrdev_open_count++; + local_inc(&mce_chrdev_open_count); spin_unlock(&mce_chrdev_state_lock); @@ -1850,7 +1854,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file) { spin_lock(&mce_chrdev_state_lock); - mce_chrdev_open_count--; + local_dec(&mce_chrdev_open_count); mce_chrdev_open_exclu = 0; spin_unlock(&mce_chrdev_state_lock); @@ -2545,7 +2549,7 @@ static __init void mce_init_banks(void) for (i = 0; i < mca_cfg.banks; i++) { struct mce_bank *b = &mce_banks[i]; - struct device_attribute *a = &b->attr; + device_attribute_no_const *a = &b->attr; sysfs_attr_init(&a->attr); a->attr.name = b->attrname; @@ -2652,7 +2656,7 @@ struct dentry *mce_get_debugfs_dir(void) static void mce_reset(void) { cpu_missing = 0; - atomic_set(&mce_fake_panicked, 0); + atomic_set_unchecked(&mce_fake_panicked, 0); atomic_set(&mce_executing, 0); atomic_set(&mce_callin, 0); atomic_set(&global_nwo, 0); diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index 2a0717bf8..7fbc6417d 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c @@ -12,6 +12,7 @@ #include #include #include +#include /* By default disabled */ int mce_p5_enabled __read_mostly; @@ -52,7 +53,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) if (!cpu_has(c, X86_FEATURE_MCE)) return; + pax_open_kernel(); machine_check_vector = pentium_machine_check; + pax_close_kernel(); /* Make sure the vector pointer is visible before we enable MCEs: */ wmb(); diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index c6a722e1d..40161401f 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c @@ -11,6 +11,7 @@ #include #include #include +#include /* Machine check handler for WinChip C6: */ static void winchip_machine_check(struct pt_regs *regs, long error_code) @@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c) { u32 lo, hi; + pax_open_kernel(); machine_check_vector = winchip_machine_check; + pax_close_kernel(); /* Make sure the vector pointer is visible before we enable MCEs: */ wmb(); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 8c88845b9..83b75d5be 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ 
b/arch/x86/kernel/cpu/microcode/intel.c @@ -1072,13 +1072,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, static int get_ucode_user(void *to, const void *from, size_t n) { - return copy_from_user(to, from, n); + return copy_from_user(to, (const void __force_user *)from, n); } static enum ucode_state request_microcode_user(int cpu, const void __user *buf, size_t size) { - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user); } static void microcode_fini_cpu(int cpu) diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 8f44c5a50..ed71f8c76 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -206,7 +206,7 @@ static void __init ms_hyperv_init_platform(void) x86_platform.get_nmi_reason = hv_get_nmi_reason; } -const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { +const struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft HyperV", .detect = ms_hyperv_platform, .init_platform = ms_hyperv_init_platform, diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index fdc55215d..d31149c08 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -726,7 +726,8 @@ static DEFINE_RAW_SPINLOCK(set_atomicity_lock); * The caller must ensure that local interrupts are disabled and * are reenabled after post_set() has been called. */ -static void prepare_set(void) __acquires(set_atomicity_lock) +static void prepare_set(void) __acquires(&set_atomicity_lock); +static void prepare_set(void) { unsigned long cr0; @@ -762,7 +763,8 @@ static void prepare_set(void) __acquires(set_atomicity_lock) wbinvd(); } -static void post_set(void) __releases(set_atomicity_lock) +static void post_set(void) __releases(&set_atomicity_lock); +static void post_set(void) { /* Flush TLBs (no need to flush caches - they are disabled) */ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index ad8bd763e..e51b46f31 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h @@ -25,7 +25,7 @@ struct mtrr_ops { int (*validate_add_page)(unsigned long base, unsigned long size, unsigned int type); int (*have_wrcomb)(void); -}; +} __do_const; extern int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg); diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 5130985b7..6067a9fd9 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -150,7 +150,7 @@ static bool __init vmware_legacy_x2apic_available(void) (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0; } -const __refconst struct hypervisor_x86 x86_hyper_vmware = { +const struct hypervisor_x86 x86_hyper_vmware = { .name = "VMware", .detect = vmware_platform, .set_cpu_features = vmware_set_cpu_features, diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index afa64adb7..dce67dd30 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, return -ENOMEM; if (userbuf) { - if (copy_to_user(buf, vaddr + offset, csize)) { + if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) { iounmap(vaddr); return -EFAULT; } diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index f6dfd9334..892ade462 
100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -12,7 +12,7 @@ #define DOUBLEFAULT_STACKSIZE (1024) static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) @@ -22,7 +22,7 @@ static void doublefault_fn(void) unsigned long gdt, tss; native_store_gdt(&gdt_desc); - gdt = gdt_desc.address; + gdt = (unsigned long)gdt_desc.address; printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); @@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = { /* 0x2 bit is always set */ .flags = X86_EFLAGS_SF | 0x2, .sp = STACK_START, - .es = __USER_DS, + .es = __KERNEL_DS, .cs = __KERNEL_CS, .ss = __KERNEL_DS, - .ds = __USER_DS, + .ds = __KERNEL_DS, .fs = __KERNEL_PERCPU, .__cr3 = __pa_nodebug(swapper_pg_dir), diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 85f854b98..78ddb7ecd 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -2,6 +2,9 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ +#ifdef CONFIG_GRKERNSEC_HIDESYM +#define __INCLUDED_BY_HIDESYM 1 +#endif #include #include #include @@ -53,7 +56,7 @@ static void printk_stack_address(unsigned long address, int reliable, void printk_address(unsigned long address) { - pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address); + pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address); } void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, @@ -202,6 +205,7 @@ EXPORT_SYMBOL_GPL(oops_begin); NOKPROBE_SYMBOL(oops_begin); void __noreturn rewind_stack_do_exit(int signr); +extern void gr_handle_kernel_exploit(void); void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { @@ -225,6 +229,8 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) if (panic_on_oops) panic("Fatal exception"); + gr_handle_kernel_exploit(); + /* * We're not going to return, but we might be on an IST stack or * have very little stack space left. 
Rewind the stack and kill diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 06eb322b5..ca545b8e2 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -15,6 +15,7 @@ #include #include +#include void stack_type_str(enum stack_type type, const char **begin, const char **end) { @@ -167,16 +168,17 @@ void show_regs(struct pt_regs *regs) unsigned int code_len = code_bytes; unsigned char c; u8 *ip; + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]); pr_emerg("Stack:\n"); show_stack_log_lvl(current, regs, NULL, KERN_EMERG); pr_emerg("Code:"); - ip = (u8 *)regs->ip - code_prologue; + ip = (u8 *)regs->ip - code_prologue + cs_base; if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { /* try starting at IP */ - ip = (u8 *)regs->ip; + ip = (u8 *)regs->ip + cs_base; code_len = code_len - code_prologue + 1; } for (i = 0; i < code_len; i++, ip++) { @@ -185,7 +187,7 @@ void show_regs(struct pt_regs *regs) pr_cont(" Bad EIP value."); break; } - if (ip == (u8 *)regs->ip) + if (ip == (u8 *)regs->ip + cs_base) pr_cont(" <%02x>", c); else pr_cont(" %02x", c); @@ -198,6 +200,7 @@ int is_valid_bugaddr(unsigned long ip) { unsigned short ud2; + ip = ktla_ktva(ip); if (ip < PAGE_OFFSET) return 0; if (probe_kernel_address((unsigned short *)ip, ud2)) @@ -205,3 +208,15 @@ int is_valid_bugaddr(unsigned long ip) return ud2 == 0x0b0f; } + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +void __used pax_check_alloca(unsigned long size) +{ + unsigned long sp = (unsigned long)&sp, stack_left; + + /* all kernel stacks are of the same size */ + stack_left = sp & (THREAD_SIZE - 1); + BUG_ON(stack_left < 256 || size >= stack_left - 256); +} +EXPORT_SYMBOL(pax_check_alloca); +#endif diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 36cf1a498..046b56f73 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -237,8 +237,42 @@ int is_valid_bugaddr(unsigned long ip) { unsigned short ud2; - if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2))) + if (probe_kernel_address((unsigned short *)ip, ud2)) return 0; return ud2 == 0x0b0f; } + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +void __used pax_check_alloca(unsigned long size) +{ + struct stack_info stack_info = {0}; + unsigned long visit_mask = 0; + unsigned long sp = (unsigned long)&sp; + unsigned long stack_left; + + BUG_ON(get_stack_info(&sp, current, &stack_info, &visit_mask)); + + switch (stack_info.type) { + case STACK_TYPE_TASK: + stack_left = sp & (THREAD_SIZE - 1); + break; + + case STACK_TYPE_IRQ: + stack_left = sp & (IRQ_STACK_SIZE - 1); + put_cpu(); + break; + + case STACK_TYPE_EXCEPTION ... 
STACK_TYPE_EXCEPTION_LAST: + stack_left = sp & (EXCEPTION_STKSZ - 1); + break; + + case STACK_TYPE_SOFTIRQ: + default: + BUG(); + } + + BUG_ON(stack_left < 256 || size >= stack_left - 256); +} +EXPORT_SYMBOL(pax_check_alloca); +#endif diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 90e8dde3e..50b64a0ae 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -829,8 +829,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void) static void __init early_panic(char *msg) { - early_printk(msg); - panic(msg); + early_printk("%s", msg); + panic("%s", msg); } static int userdef __initdata; diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 8a121991e..e63bebfd7 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 04f89caef..43ad7de00 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -41,6 +41,7 @@ #include #include #include +#include /* * Note: we only need 6*8 = 48 bytes for the espfix stack, but round @@ -70,8 +71,10 @@ static DEFINE_MUTEX(espfix_init_mutex); #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE) static void *espfix_pages[ESPFIX_MAX_PAGES]; -static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD] - __aligned(PAGE_SIZE); +static __page_aligned_rodata pud_t espfix_pud_page[PTRS_PER_PUD]; +static __page_aligned_rodata pmd_t espfix_pmd_page[PTRS_PER_PMD]; +static __page_aligned_rodata pte_t espfix_pte_page[PTRS_PER_PTE]; +static __page_aligned_rodata char espfix_stack_page[ESPFIX_MAX_PAGES][PAGE_SIZE]; static unsigned int page_random, slot_random; @@ -122,10 +125,19 @@ static void init_espfix_random(void) void __init init_espfix_bsp(void) { pgd_t *pgd_p; + pud_t *pud_p; + unsigned long index = pgd_index(ESPFIX_BASE_ADDR); /* Install the espfix pud into the kernel page directory */ - pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)]; - pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page); + pgd_p = &init_level4_pgt[index]; + pud_p = espfix_pud_page; + paravirt_alloc_pud(&init_mm, __pa(pud_p) >> PAGE_SHIFT); + set_pgd(pgd_p, __pgd(PGTABLE_PROT | __pa(pud_p))); + +#ifdef CONFIG_PAX_PER_CPU_PGD + clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1); + clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1); +#endif /* Randomize the locations */ init_espfix_random(); @@ -170,35 +182,39 @@ void init_espfix_ap(int cpu) pud_p = &espfix_pud_page[pud_index(addr)]; pud = *pud_p; if (!pud_present(pud)) { - struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); - - pmd_p = (pmd_t *)page_address(page); + if (cpu) + pmd_p = page_address(alloc_pages_node(node, PGALLOC_GFP, 0)); + else + pmd_p = espfix_pmd_page; pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PUD_CLONES; n++) set_pud(&pud_p[n], pud); - } + } else + BUG_ON(!cpu); pmd_p = pmd_offset(&pud, addr); pmd = *pmd_p; if (!pmd_present(pmd)) { - struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); - - pte_p = (pte_t *)page_address(page); + if (cpu) + pte_p = page_address(alloc_pages_node(node, PGALLOC_GFP, 0)); + else + pte_p = espfix_pte_page; pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PMD_CLONES; n++) 
set_pmd(&pmd_p[n], pmd); - } + } else + BUG_ON(!cpu); pte_p = pte_offset_kernel(&pmd, addr); - stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0)); + stack_page = espfix_stack_page[page]; pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); for (n = 0; n < ESPFIX_PTE_CLONES; n++) set_pte(&pte_p[n*PTE_STRIDE], pte); /* Job is done for this CPU and any CPU which shares this page */ - ACCESS_ONCE(espfix_pages[page]) = stack_page; + ACCESS_ONCE_RW(espfix_pages[page]) = stack_page; unlock_done: mutex_unlock(&espfix_init_mutex); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index ebb4e95fb..37e51387c 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -136,7 +136,7 @@ void __kernel_fpu_end(void) struct fpu *fpu = &current->thread.fpu; if (fpu->fpregs_active) - copy_kernel_to_fpregs(&fpu->state); + copy_kernel_to_fpregs(fpu->state); else __fpregs_deactivate_hw(); @@ -201,7 +201,7 @@ void fpu__save(struct fpu *fpu) if (fpu->fpregs_active) { if (!copy_fpregs_to_fpstate(fpu)) { if (use_eager_fpu()) - copy_kernel_to_fpregs(&fpu->state); + copy_kernel_to_fpregs(fpu->state); else fpregs_deactivate(fpu); } @@ -261,7 +261,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) * leak into the child task: */ if (use_eager_fpu()) - memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size); + memset(&dst_fpu->state->xsave, 0, fpu_kernel_xstate_size); /* * Save current FPU registers directly into the child @@ -280,11 +280,10 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) */ preempt_disable(); if (!copy_fpregs_to_fpstate(dst_fpu)) { - memcpy(&src_fpu->state, &dst_fpu->state, - fpu_kernel_xstate_size); + memcpy(src_fpu->state, dst_fpu->state, fpu_kernel_xstate_size); if (use_eager_fpu()) - copy_kernel_to_fpregs(&src_fpu->state); + copy_kernel_to_fpregs(src_fpu->state); else fpregs_deactivate(src_fpu); } @@ -305,7 +304,7 @@ void fpu__activate_curr(struct fpu *fpu) WARN_ON_FPU(fpu != &current->thread.fpu); if (!fpu->fpstate_active) { - fpstate_init(&fpu->state); + fpstate_init(fpu->state); trace_x86_fpu_init_state(fpu); trace_x86_fpu_activate_state(fpu); @@ -333,7 +332,7 @@ void fpu__activate_fpstate_read(struct fpu *fpu) fpu__save(fpu); } else { if (!fpu->fpstate_active) { - fpstate_init(&fpu->state); + fpstate_init(fpu->state); trace_x86_fpu_init_state(fpu); trace_x86_fpu_activate_state(fpu); @@ -368,7 +367,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu) /* Invalidate any lazy state: */ fpu->last_cpu = -1; } else { - fpstate_init(&fpu->state); + fpstate_init(fpu->state); trace_x86_fpu_init_state(fpu); trace_x86_fpu_activate_state(fpu); @@ -431,7 +430,7 @@ void fpu__current_fpstate_write_end(void) * an XRSTOR if they are active. */ if (fpregs_active()) - copy_kernel_to_fpregs(&fpu->state); + copy_kernel_to_fpregs(fpu->state); /* * Our update is done and the fpregs/fpstate are in sync @@ -458,7 +457,7 @@ void fpu__restore(struct fpu *fpu) kernel_fpu_disable(); trace_x86_fpu_before_restore(fpu); fpregs_activate(fpu); - copy_kernel_to_fpregs(&fpu->state); + copy_kernel_to_fpregs(fpu->state); fpu->counter++; trace_x86_fpu_after_restore(fpu); kernel_fpu_enable(); @@ -554,11 +553,11 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr) * fully reproduce the context of the exception.
*/ if (boot_cpu_has(X86_FEATURE_FXSR)) { - cwd = fpu->state.fxsave.cwd; - swd = fpu->state.fxsave.swd; + cwd = fpu->state->fxsave.cwd; + swd = fpu->state->fxsave.swd; } else { - cwd = (unsigned short)fpu->state.fsave.cwd; - swd = (unsigned short)fpu->state.fsave.swd; + cwd = (unsigned short)fpu->state->fsave.cwd; + swd = (unsigned short)fpu->state->fsave.swd; } err = swd & ~cwd; @@ -572,7 +571,7 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr) unsigned short mxcsr = MXCSR_DEFAULT; if (boot_cpu_has(X86_FEATURE_XMM)) - mxcsr = fpu->state.fxsave.mxcsr; + mxcsr = fpu->state->fxsave.mxcsr; err = ~(mxcsr >> 7) & mxcsr; } diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 2f2b8c7cc..9aad66541 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -45,7 +45,7 @@ static void fpu__init_cpu_generic(void) /* Flush out any pending x87 state: */ #ifdef CONFIG_MATH_EMULATION if (!boot_cpu_has(X86_FEATURE_FPU)) - fpstate_init_soft(&current->thread.fpu.state.soft); + fpstate_init_soft(&current->thread.fpu.state->soft); else #endif asm volatile ("fninit"); @@ -148,51 +148,7 @@ static void __init fpu__init_system_generic(void) unsigned int fpu_kernel_xstate_size; EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size); -/* Get alignment of the TYPE. */ -#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test) - -/* - * Enforce that 'MEMBER' is the last field of 'TYPE'. - * - * Align the computed size with alignment of the TYPE, - * because that's how C aligns structs. - */ -#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \ - BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \ - TYPE_ALIGN(TYPE))) - -/* - * We append the 'struct fpu' to the task_struct: - */ -static void __init fpu__init_task_struct_size(void) -{ - int task_size = sizeof(struct task_struct); - - /* - * Subtract off the static size of the register state. - * It potentially has a bunch of padding. - */ - task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state); - - /* - * Add back the dynamically-calculated register state - * size. - */ - task_size += fpu_kernel_xstate_size; - - /* - * We dynamically size 'struct fpu', so we require that - * it be at the end of 'thread_struct' and that - * 'thread_struct' be at the end of 'task_struct'. If - * you hit a compile error here, check the structure to - * see if something got added to the end. - */ - CHECK_MEMBER_AT_END_OF(struct fpu, state); - CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu); - CHECK_MEMBER_AT_END_OF(struct task_struct, thread); - - arch_task_struct_size = task_size; -} +union fpregs_state init_fpregs_state; /* * Set up the user and kernel xstate sizes based on the legacy FPU context size.
@@ -386,7 +342,6 @@ void __init fpu__init_system(struct cpuinfo_x86 *c) fpu__init_system_generic(); fpu__init_system_xstate_size_legacy(); fpu__init_system_xstate(); - fpu__init_task_struct_size(); fpu__init_system_ctx_switch(); } diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index c114b132d..0b0d95943 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c @@ -41,7 +41,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, fpstate_sanitize_xstate(fpu); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &fpu->state.fxsave, 0, -1); + &fpu->state->fxsave, 0, -1); } int xfpregs_set(struct task_struct *target, const struct user_regset *regset, @@ -58,19 +58,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, fpstate_sanitize_xstate(fpu); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &fpu->state.fxsave, 0, -1); + &fpu->state->fxsave, 0, -1); /* * mxcsr reserved bits must be masked to zero for security reasons. */ - fpu->state.fxsave.mxcsr &= mxcsr_feature_mask; + fpu->state->fxsave.mxcsr &= mxcsr_feature_mask; /* * update the header bits in the xsave header, indicating the * presence of FP and SSE state. */ if (boot_cpu_has(X86_FEATURE_XSAVE)) - fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE; + fpu->state->xsave.header.xfeatures |= XFEATURE_MASK_FPSSE; return ret; } @@ -86,7 +86,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, if (!boot_cpu_has(X86_FEATURE_XSAVE)) return -ENODEV; - xsave = &fpu->state.xsave; + xsave = &fpu->state->xsave; fpu__activate_fpstate_read(fpu); @@ -126,7 +126,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, if ((pos != 0) || (count < fpu_user_xstate_size)) return -EFAULT; - xsave = &fpu->state.xsave; + xsave = &fpu->state->xsave; fpu__activate_fpstate_write(fpu); @@ -139,7 +139,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, * In case of failure, mark all states as init: */ if (ret) - fpstate_init(&fpu->state); + fpstate_init(fpu->state); /* * mxcsr reserved bits must be masked to zero for security reasons. 
@@ -229,7 +229,7 @@ static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave) void convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) { - struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave; + struct fxregs_state *fxsave = &tsk->thread.fpu.state->fxsave; struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0]; int i; @@ -267,7 +267,7 @@ void convert_to_fxsr(struct task_struct *tsk, const struct user_i387_ia32_struct *env) { - struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave; + struct fxregs_state *fxsave = &tsk->thread.fpu.state->fxsave; struct _fpreg *from = (struct _fpreg *) &env->st_space[0]; struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0]; int i; @@ -305,7 +305,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, if (!boot_cpu_has(X86_FEATURE_FXSR)) return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &fpu->state.fsave, 0, + &fpu->state->fsave, 0, -1); fpstate_sanitize_xstate(fpu); @@ -336,7 +336,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, if (!boot_cpu_has(X86_FEATURE_FXSR)) return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &fpu->state.fsave, 0, + &fpu->state->fsave, 0, -1); if (pos > 0 || count < sizeof(env)) @@ -351,7 +351,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, * presence of FP. */ if (boot_cpu_has(X86_FEATURE_XSAVE)) - fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP; + fpu->state->xsave.header.xfeatures |= XFEATURE_MASK_FP; return ret; } diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index a184c210e..a1731b7ec 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -56,7 +56,7 @@ static inline int check_for_xstate(struct fxregs_state __user *buf, static inline int save_fsave_header(struct task_struct *tsk, void __user *buf) { if (use_fxsr()) { - struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; + struct xregs_state *xsave = &tsk->thread.fpu.state->xsave; struct user_i387_ia32_struct env; struct _fpstate_32 __user *fp = buf; @@ -85,19 +85,19 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame) /* Setup the bytes not touched by the [f]xsave and reserved for SW. */ sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved; - err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes)); + err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes)); if (!use_xsave()) return err; err |= __put_user(FP_XSTATE_MAGIC2, - (__u32 *)(buf + fpu_user_xstate_size)); + (__u32 __user *)(buf + fpu_user_xstate_size)); /* * Read the xfeatures which we copied (directly from the cpu or * from the state in task struct) to the user buffers. 
*/ - err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures); + err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures); /* * For legacy compatible, we always set FP/SSE bits in the bit @@ -112,7 +112,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame) */ xfeatures |= XFEATURE_MASK_FPSSE; - err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures); + err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures); return err; } @@ -121,6 +121,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) { int err; + buf = (struct xregs_state __user *)____m(buf); if (use_xsave()) err = copy_xregs_to_user(buf); else if (use_fxsr()) @@ -155,7 +156,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) */ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) { - struct xregs_state *xsave = &current->thread.fpu.state.xsave; + struct xregs_state *xsave = &current->thread.fpu.state->xsave; struct task_struct *tsk = current; int ia32_fxstate = (buf != buf_fx); @@ -209,7 +210,7 @@ sanitize_restored_xstate(struct task_struct *tsk, struct user_i387_ia32_struct *ia32_env, u64 xfeatures, int fx_only) { - struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; + struct xregs_state *xsave = &tsk->thread.fpu.state->xsave; struct xstate_header *header = &xsave->header; if (use_xsave()) { @@ -242,6 +243,7 @@ sanitize_restored_xstate(struct task_struct *tsk, */ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only) { + buf = (void __user *)____m(buf); if (use_xsave()) { if ((unsigned long)buf % 64 || fx_only) { u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE; @@ -325,14 +327,14 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) if (using_compacted_format()) { err = copyin_to_xsaves(NULL, buf_fx, - &fpu->state.xsave); + &fpu->state->xsave); } else { - err = __copy_from_user(&fpu->state.xsave, + err = __copy_from_user(&fpu->state->xsave, buf_fx, state_size); } if (err || __copy_from_user(&env, buf, sizeof(env))) { - fpstate_init(&fpu->state); + fpstate_init(fpu->state); trace_x86_fpu_init_state(fpu); err = -1; } else { diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 095ef7ddd..f6c3ca649 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -154,14 +154,14 @@ static int xfeature_is_user(int xfeature_nr) */ void fpstate_sanitize_xstate(struct fpu *fpu) { - struct fxregs_state *fx = &fpu->state.fxsave; + struct fxregs_state *fx = &fpu->state->fxsave; int feature_bit; u64 xfeatures; if (!use_xsaveopt()) return; - xfeatures = fpu->state.xsave.header.xfeatures; + xfeatures = fpu->state->xsave.header.xfeatures; /* * None of the feature bits are in init state. So nothing else @@ -866,7 +866,7 @@ const void *get_xsave_field_ptr(int xsave_state) */ fpu__save(fpu); - return get_xsave_addr(&fpu->state.xsave, xsave_state); + return get_xsave_addr(&fpu->state->xsave, xsave_state); } #ifdef CONFIG_ARCH_HAS_PKEYS diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 8639bb2ae..aaa97aefc 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip) * kernel identity mapping to modify code.
*/ if (within(ip, (unsigned long)_text, (unsigned long)_etext)) - ip = (unsigned long)__va(__pa_symbol(ip)); + ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip))); return ip; } @@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, { unsigned char replaced[MCOUNT_INSN_SIZE]; + ip = ktla_ktva(ip); + ftrace_expected = old_code; /* @@ -233,7 +235,7 @@ static int update_ftrace_func(unsigned long ip, void *new) unsigned char old[MCOUNT_INSN_SIZE]; int ret; - memcpy(old, (void *)ip, MCOUNT_INSN_SIZE); + memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE); ftrace_update_func = ip; /* Make sure the breakpoints see the ftrace_update_func update */ @@ -314,7 +316,7 @@ static int add_break(unsigned long ip, const char *old) unsigned char replaced[MCOUNT_INSN_SIZE]; unsigned char brk = BREAKPOINT_INSTRUCTION; - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) + if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE)) return -EFAULT; ftrace_expected = old; @@ -681,11 +683,11 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) /* Module allocation simplifies allocating memory for code */ static inline void *alloc_tramp(unsigned long size) { - return module_alloc(size); + return module_alloc_exec(size); } static inline void tramp_free(void *tramp) { - module_memfree(tramp); + module_memfree_exec(tramp); } #else /* Trampolines can only be created if modules are supported */ @@ -763,7 +765,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *); /* Copy ftrace_caller onto the trampoline memory */ + pax_open_kernel(); ret = probe_kernel_read(trampoline, (void *)start_offset, size); + pax_close_kernel(); if (WARN_ON(ret < 0)) { tramp_free(trampoline); return 0; @@ -773,6 +777,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* The trampoline ends with a jmp to ftrace_epilogue */ jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue); + pax_open_kernel(); memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE); /* @@ -785,6 +790,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE); *ptr = (unsigned long)ops; + pax_close_kernel(); op_offset -= start_offset; memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE); @@ -802,7 +808,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) op_ptr.offset = offset; /* put in the new offset to the ftrace_ops */ + pax_open_kernel(); memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE); + pax_close_kernel(); /* ALLOC_TRAMP flags lets us know we created it */ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 54a2372f5..46504a418 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -62,12 +62,12 @@ int __init early_make_pgtable(unsigned long address) pgd = *pgd_p; /* - * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is - * critical -- __PAGE_OFFSET would point us back into the dynamic + * The use of __early_va rather than __va here is critical: + * __va would point us back into the dynamic * range and we might end up looping forever... 
*/ if (pgd) - pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); + pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK)); else { if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { reset_early_page_tables(); @@ -76,13 +76,13 @@ int __init early_make_pgtable(unsigned long address) pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++]; memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; + *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE; } pud_p += pud_index(address); pud = *pud_p; if (pud) - pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); + pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK)); else { if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { reset_early_page_tables(); @@ -91,7 +91,7 @@ int __init early_make_pgtable(unsigned long address) pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++]; memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); - *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; + *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE; } pmd = (physaddr & PMD_MASK) + early_pmd_flags; pmd_p[pmd_index(address)] = pmd; @@ -155,8 +155,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) clear_bss(); - clear_page(init_level4_pgt); - kasan_early_init(); for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 2dabea46f..e97ebe8ca 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -24,10 +24,17 @@ #include #include #include +#include /* Physical address */ #define pa(X) ((X) - __PAGE_OFFSET) +#ifdef CONFIG_PAX_KERNEXEC +#define ta(X) (X) +#else +#define ta(X) ((X) - __PAGE_OFFSET) +#endif + /* * References to members of the new_cpu_data structure. */ @@ -57,11 +64,7 @@ * and small than max_low_pfn, otherwise will waste some page table entries */ -#if PTRS_PER_PMD > 1 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) -#else -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) -#endif +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE) /* * Number of possible pages in the lowmem region. @@ -87,6 +90,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE RESERVE_BRK(pagetables, INIT_MAP_SIZE) /* + * Real beginning of normal "text" segment + */ +ENTRY(stext) +ENTRY(_stext) + +/* * 32-bit kernel entrypoint; only used by the boot CPU. On entry, * %esi points to the real-mode code as a 32-bit pointer. * CS and DS must be 4 GB flat segments, but we don't depend on @@ -94,6 +103,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) * can. 
*/ __HEAD + +#ifdef CONFIG_PAX_KERNEXEC + jmp startup_32 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */ +.fill PAGE_SIZE-5,1,0xcc +#endif + ENTRY(startup_32) movl pa(initial_stack),%ecx @@ -115,6 +131,66 @@ ENTRY(startup_32) 2: leal -__PAGE_OFFSET(%ecx),%esp +#ifdef CONFIG_SMP + movl $pa(cpu_gdt_table),%edi + movl $__per_cpu_load,%eax + movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi) + rorl $16,%eax + movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi) + movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi) + movl $__per_cpu_end - 1,%eax + subl $__per_cpu_start,%eax + cmpl $0x100000,%eax + jb 1f + shrl $PAGE_SHIFT,%eax + orb $0x80,GDT_ENTRY_PERCPU * 8 + 6(%edi) +1: + movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi) + shrl $16,%eax + orb %al,GDT_ENTRY_PERCPU * 8 + 6(%edi) +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + movl $NR_CPUS,%ecx + movl $pa(cpu_gdt_table),%edi +1: + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi) + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi) + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi) + addl $PAGE_SIZE_asm,%edi + loop 1b +#endif + +#ifdef CONFIG_PAX_KERNEXEC + movl $pa(boot_gdt),%edi + movl $__LOAD_PHYSICAL_ADDR,%eax + movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi) + rorl $16,%eax + movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi) + movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi) + rorl $16,%eax + + ljmp $(__BOOT_CS),$1f +1: + + movl $NR_CPUS,%ecx + movl $pa(cpu_gdt_table),%edi + addl $__PAGE_OFFSET,%eax +1: + movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi) + movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi) + movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi) + movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi) + rorl $16,%eax + movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi) + movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi) + movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi) + movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi) + rorl $16,%eax + addl $PAGE_SIZE_asm,%edi + loop 1b +#endif + /* * Clear BSS first so that there are no surprises... */ @@ -155,7 +231,7 @@ ENTRY(startup_32) #ifdef CONFIG_MICROCODE /* Early load ucode on BSP. 
*/ - call load_ucode_bsp + pax_direct_call load_ucode_bsp #endif /* @@ -210,8 +286,11 @@ ENTRY(startup_32) movl %eax, pa(max_pfn_mapped) /* Do early initialization of the fixmap area */ - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) +#ifdef CONFIG_COMPAT_VDSO + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8) +#else + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8) +#endif #else /* Not PAE */ page_pde_offset = (__PAGE_OFFSET >> 20); @@ -241,8 +320,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20); movl %eax, pa(max_pfn_mapped) /* Do early initialization of the fixmap area */ - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax - movl %eax,pa(initial_page_table+0xffc) +#ifdef CONFIG_COMPAT_VDSO + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc) +#else + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc) +#endif #endif #ifdef CONFIG_PARAVIRT @@ -256,9 +338,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20); cmpl $num_subarch_entries, %eax jae bad_subarch - movl pa(subarch_entries)(,%eax,4), %eax - subl $__PAGE_OFFSET, %eax - jmp *%eax + jmp *pa(subarch_entries)(,%eax,4) bad_subarch: WEAK(lguest_entry) @@ -270,10 +350,10 @@ WEAK(xen_entry) __INITDATA subarch_entries: - .long default_entry /* normal x86/PC */ - .long lguest_entry /* lguest hypervisor */ - .long xen_entry /* Xen hypervisor */ - .long default_entry /* Moorestown MID */ + .long ta(default_entry) /* normal x86/PC */ + .long ta(lguest_entry) /* lguest hypervisor */ + .long ta(xen_entry) /* Xen hypervisor */ + .long ta(default_entry) /* Moorestown MID */ num_subarch_entries = (. - subarch_entries) / 4 .previous #else @@ -314,7 +394,7 @@ ENTRY(startup_32_smp) #ifdef CONFIG_MICROCODE /* Early load ucode on AP. */ - call load_ucode_ap + pax_direct_call load_ucode_ap #endif default_entry: @@ -362,6 +442,7 @@ default_entry: movl pa(mmu_cr4_features),%eax movl %eax,%cr4 +#ifdef CONFIG_X86_PAE testb $X86_CR4_PAE, %al # check if PAE is enabled jz enable_paging @@ -390,6 +471,9 @@ default_entry: /* Make changes effective */ wrmsr + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4) +#endif + enable_paging: /* @@ -457,14 +541,20 @@ is486: 1: movl $(__KERNEL_DS),%eax # reload all the segment registers movl %eax,%ss # after changing gdt. - movl $(__USER_DS),%eax # DS/ES contains default USER segment +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment movl %eax,%ds movl %eax,%es movl $(__KERNEL_PERCPU), %eax movl %eax,%fs # set this cpu's percpu +#ifdef CONFIG_CC_STACKPROTECTOR movl $(__KERNEL_STACK_CANARY),%eax +#elif defined(CONFIG_PAX_MEMORY_UDEREF) + movl $(__USER_DS),%eax +#else + xorl %eax,%eax +#endif movl %eax,%gs xorl %eax,%eax # Clear LDT @@ -521,8 +611,11 @@ setup_once: * relocation. Manually set base address in stack canary * segment descriptor. 
*/ - movl $gdt_page,%eax + movl $cpu_gdt_table,%eax movl $stack_canary,%ecx +#ifdef CONFIG_SMP + addl $__per_cpu_load,%ecx +#endif movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) shrl $16, %ecx movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) @@ -588,7 +681,7 @@ early_idt_handler_common: movw $0, PT_GS+2(%esp) movl %esp, %eax /* args are pt_regs (EAX), trapnr (EDX) */ - call early_fixup_exception + pax_direct_call early_fixup_exception popl %ebx /* pt_regs->bx */ popl %ecx /* pt_regs->cx */ @@ -609,8 +702,11 @@ ENDPROC(early_idt_handler_common) /* This is the default interrupt "handler" :-) */ ALIGN ignore_int: - cld #ifdef CONFIG_PRINTK + cmpl $2,%ss:early_recursion_flag + je hlt_loop + incl %ss:early_recursion_flag + cld pushl %eax pushl %ecx pushl %edx @@ -619,17 +715,14 @@ ignore_int: movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es - cmpl $2,early_recursion_flag - je hlt_loop - incl early_recursion_flag pushl 16(%esp) pushl 24(%esp) pushl 32(%esp) pushl 40(%esp) pushl $int_msg - call printk + pax_direct_call printk - call dump_stack + pax_direct_call dump_stack addl $(5*4),%esp popl %ds @@ -656,11 +749,8 @@ ENTRY(initial_code) ENTRY(setup_once_ref) .long setup_once -/* - * BSS section - */ -__PAGE_ALIGNED_BSS - .align PAGE_SIZE +__READ_ONLY + .balign PAGE_SIZE #ifdef CONFIG_X86_PAE initial_pg_pmd: .fill 1024*KPMDS,4,0 @@ -676,16 +766,19 @@ empty_zero_page: .fill 4096,1,0 .globl swapper_pg_dir swapper_pg_dir: - .fill 1024,4,0 +#ifdef CONFIG_X86_PAE + .fill PTRS_PER_PGD,8,0 +#else + .fill PTRS_PER_PGD,4,0 +#endif EXPORT_SYMBOL(empty_zero_page) /* * This starts the data section. */ #ifdef CONFIG_X86_PAE -__PAGE_ALIGNED_DATA - /* Page-aligned for the benefit of paravirt? */ - .align PAGE_SIZE +__READ_ONLY + .balign PAGE_SIZE ENTRY(initial_page_table) .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 @@ -703,13 +796,22 @@ ENTRY(initial_page_table) # else # error "Kernel PMDs should be 1, 2 or 3" # endif - .align PAGE_SIZE /* needs to be page-sized too */ + .balign PAGE_SIZE /* needs to be page-sized too */ + +# ifdef CONFIG_PAX_PER_CPU_PGD +ENTRY(cpu_pgd) + .rept 2*NR_CPUS + .fill PTRS_PER_PGD,8,0 + .endr +EXPORT_SYMBOL(cpu_pgd) +# endif + #endif .data .balign 4 ENTRY(initial_stack) - .long init_thread_union+THREAD_SIZE + .long init_thread_union+THREAD_SIZE-8 __INITRODATA int_msg: @@ -724,7 +826,7 @@ int_msg: * segment size, and 32-bit linear address value: */ - .data +__READ_ONLY .globl boot_gdt_descr .globl idt_descr @@ -733,7 +835,7 @@ int_msg: .word 0 # 32 bit align gdt_desc.address boot_gdt_descr: .word __BOOT_DS+7 - .long boot_gdt - __PAGE_OFFSET + .long pa(boot_gdt) .word 0 # 32-bit align idt_desc.address idt_descr: @@ -744,7 +846,7 @@ idt_descr: .word 0 # 32 bit align gdt_desc.address ENTRY(early_gdt_descr) .word GDT_ENTRIES*8-1 - .long gdt_page /* Overwritten for secondary CPUs */ + .long cpu_gdt_table /* Overwritten for secondary CPUs */ /* * The boot_gdt must mirror the equivalent in setup.S and is @@ -753,5 +855,67 @@ ENTRY(early_gdt_descr) .align L1_CACHE_BYTES ENTRY(boot_gdt) .fill GDT_ENTRY_BOOT_CS,8,0 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */ + + .align PAGE_SIZE_asm +ENTRY(cpu_gdt_table) + .rept NR_CPUS + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x0000000000000000 /* 0x0b reserved */ + .quad 0x0000000000000000 /* 0x13 
reserved */ + .quad 0x0000000000000000 /* 0x1b reserved */ + +#ifdef CONFIG_PAX_KERNEXEC + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */ +#else + .quad 0x0000000000000000 /* 0x20 unused */ +#endif + + .quad 0x0000000000000000 /* 0x28 unused */ + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ + .quad 0x0000000000000000 /* 0x4b reserved */ + .quad 0x0000000000000000 /* 0x53 reserved */ + .quad 0x0000000000000000 /* 0x5b reserved */ + + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */ + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */ + + .quad 0x0000000000000000 /* 0x80 TSS descriptor */ + .quad 0x0000000000000000 /* 0x88 LDT descriptor */ + + /* + * Segments used for calling PnP BIOS have byte granularity. + * The code segments and data segments have fixed 64k limits, + * the transfer segment sizes are set at run time. + */ + .quad 0x00409b000000ffff /* 0x90 32-bit code */ + .quad 0x00009b000000ffff /* 0x98 16-bit code */ + .quad 0x000093000000ffff /* 0xa0 16-bit data */ + .quad 0x0000930000000000 /* 0xa8 16-bit data */ + .quad 0x0000930000000000 /* 0xb0 16-bit data */ + + /* + * The APM segments have byte granularity and their bases + * are set at run time. All have 64k limits. + */ + .quad 0x00409b000000ffff /* 0xb8 APM CS code */ + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */ + .quad 0x004093000000ffff /* 0xc8 APM DS data */ + + .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */ + .quad 0x0040930000000000 /* 0xd8 - PERCPU */ + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */ + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */ + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */ + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ + + /* Be sure this is zeroed to avoid false validations in Xen */ + .fill PAGE_SIZE_asm - GDT_SIZE,1,0 + .endr + +EXPORT_SYMBOL_GPL(cpu_gdt_table) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index b4421cc19..8326c6945 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include "../entry/calling.h" #include @@ -42,6 +44,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE) L4_START_KERNEL = pgd_index(__START_KERNEL_map) L3_START_KERNEL = pud_index(__START_KERNEL_map) +L4_VMALLOC_START = pgd_index(VMALLOC_START) +L3_VMALLOC_START = pud_index(VMALLOC_START) +L4_VMALLOC_END = pgd_index(VMALLOC_END) +L3_VMALLOC_END = pud_index(VMALLOC_END) +L4_VMEMMAP_START = pgd_index(VMEMMAP_START) +L3_VMEMMAP_START = pud_index(VMEMMAP_START) .text __HEAD @@ -99,11 +107,36 @@ startup_64: * Fixup the physical addresses in the page table */ addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip) + addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8) + 8(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8) + 16(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8) + 24(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip) + addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) + + addq %rbp, level3_ident_pgt + (0*8)(%rip) +#ifndef CONFIG_XEN + addq %rbp, level3_ident_pgt + 
(1*8)(%rip) +#endif + + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) + + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) + addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip) + + addq %rbp, level2_ident_pgt + (0*8)(%rip) - addq %rbp, level3_kernel_pgt + (510*8)(%rip) - addq %rbp, level3_kernel_pgt + (511*8)(%rip) + addq %rbp, level2_fixmap_pgt + (0*8)(%rip) + addq %rbp, level2_fixmap_pgt + (1*8)(%rip) + addq %rbp, level2_fixmap_pgt + (2*8)(%rip) + addq %rbp, level2_fixmap_pgt + (3*8)(%rip) + addq %rbp, level2_fixmap_pgt + (504*8)(%rip) + addq %rbp, level2_fixmap_pgt + (505*8)(%rip) addq %rbp, level2_fixmap_pgt + (506*8)(%rip) + addq %rbp, level2_fixmap_pgt + (507*8)(%rip) /* * Set up the identity mapping for the switchover. These @@ -187,11 +220,12 @@ ENTRY(secondary_startup_64) /* Sanitize CPU configuration */ call verify_cpu + orq $-1, %rbp movq $(init_level4_pgt - __START_KERNEL_map), %rax 1: - /* Enable PAE mode and PGE */ - movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx + /* Enable PAE mode and PSE/PGE */ + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx movq %rcx, %cr4 /* Setup early boot stage 4 level pagetables. */ @@ -212,10 +246,24 @@ ENTRY(secondary_startup_64) movl $MSR_EFER, %ecx rdmsr btsl $_EFER_SCE, %eax /* Enable System Call */ - btl $20,%edi /* No Execute supported? */ + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */ jnc 1f btsl $_EFER_NX, %eax + cmpq $-1, %rbp + je 1f btsq $_PAGE_BIT_NX,early_pmd_flags(%rip) + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip) + btsq $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START)(%rip) + btsq $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 8(%rip) + btsq $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 16(%rip) + btsq $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 24(%rip) + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip) + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip) + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip) + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip) + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip) + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip) + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip) 1: wrmsr /* Make changes effective */ /* Setup cr0 */ @@ -295,10 +343,10 @@ ENTRY(secondary_startup_64) * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect, * address given in m16:64. */ - movq initial_code(%rip),%rax + pax_set_fptr_mask pushq $0 # fake return address to stop unwinder pushq $__KERNEL_CS # set correct cs - pushq %rax # target address in negative space + pushq initial_code(%rip) # target address in negative space lretq ENDPROC(secondary_startup_64) @@ -312,10 +360,9 @@ ENDPROC(secondary_startup_64) */ ENTRY(start_cpu0) movq initial_stack(%rip),%rsp - movq initial_code(%rip),%rax pushq $0 # fake return address to stop unwinder pushq $__KERNEL_CS # set correct cs - pushq %rax # target address in negative space + pushq initial_code(%rip) # target address in negative space lretq ENDPROC(start_cpu0) #endif @@ -328,7 +375,7 @@ ENDPROC(start_cpu0) GLOBAL(initial_gs) .quad INIT_PER_CPU_VAR(irq_stack_union) GLOBAL(initial_stack) - .quad init_thread_union+THREAD_SIZE-8 + .quad init_thread_union+THREAD_SIZE-16 __FINITDATA bad_address: @@ -382,13 +429,13 @@ early_idt_handler_common: cmpq $14,%rsi /* Page fault? 
*/ jnz 10f GET_CR2_INTO(%rdi) /* Can clobber any volatile register if pv */ - call early_make_pgtable + pax_direct_call early_make_pgtable andl %eax,%eax jz 20f /* All good */ 10: movq %rsp,%rdi /* RDI = pt_regs; RSI is already trapnr */ - call early_fixup_exception + pax_direct_call early_fixup_exception 20: decl early_recursion_flag(%rip) @@ -416,40 +463,71 @@ GLOBAL(name) __INITDATA NEXT_PAGE(early_level4_pgt) .fill 511,8,0 - .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE + .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE NEXT_PAGE(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 - .data + __READ_ONLY -#ifndef CONFIG_XEN NEXT_PAGE(init_level4_pgt) - .fill 512,8,0 -#else -NEXT_PAGE(init_level4_pgt) - .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_VMALLOC_START*8, 0 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + PAGE_SIZE*0 + _KERNPG_TABLE + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + PAGE_SIZE*1 + _KERNPG_TABLE + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + PAGE_SIZE*2 + _KERNPG_TABLE + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + PAGE_SIZE*3 + _KERNPG_TABLE + .org init_level4_pgt + L4_VMALLOC_END*8, 0 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_VMEMMAP_START*8, 0 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE .org init_level4_pgt + L4_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ - .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE + .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE + +#ifdef CONFIG_PAX_PER_CPU_PGD +NEXT_PAGE(cpu_pgd) + .rept 2*NR_CPUS + .fill 512,8,0 + .endr +EXPORT_SYMBOL(cpu_pgd) +#endif NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE +#ifdef CONFIG_XEN .fill 511, 8, 0 +#else + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE + .fill 510,8,0 +#endif + +NEXT_PAGE(level3_vmalloc_start_pgt) + .fill 4*512,8,0 + +NEXT_PAGE(level3_vmalloc_end_pgt) + .fill 512,8,0 + +NEXT_PAGE(level3_vmemmap_pgt) + .fill L3_VMEMMAP_START,8,0 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE + NEXT_PAGE(level2_ident_pgt) - /* Since I easily can, map the first 1G. + .quad level1_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE + /* Since I easily can, map the first 2G. * Don't set NX because code runs from these pages. 
*/ - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) -#endif + PMDS(PMD_SIZE, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD - 1) NEXT_PAGE(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE - .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE + .quad level2_fixmap_pgt - __START_KERNEL_map + _KERNPG_TABLE + +NEXT_PAGE(level2_vmemmap_pgt) + .fill 512,8,0 NEXT_PAGE(level2_kernel_pgt) /* @@ -466,23 +544,73 @@ NEXT_PAGE(level2_kernel_pgt) KERNEL_IMAGE_SIZE/PMD_SIZE) NEXT_PAGE(level2_fixmap_pgt) - .fill 506,8,0 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ - .fill 5,8,0 + .quad level1_modules_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _KERNPG_TABLE + .quad level1_modules_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _KERNPG_TABLE + .quad level1_modules_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _KERNPG_TABLE + .quad level1_modules_pgt - __START_KERNEL_map + 3 * PAGE_SIZE + _KERNPG_TABLE + .fill 500,8,0 + .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _KERNPG_TABLE + .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _KERNPG_TABLE + .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _KERNPG_TABLE + .quad level1_vsyscall_pgt - __START_KERNEL_map + _KERNPG_TABLE + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */ + .fill 4,8,0 + +NEXT_PAGE(level1_ident_pgt) + .fill 512,8,0 + +NEXT_PAGE(level1_modules_pgt) + .fill 4*512,8,0 NEXT_PAGE(level1_fixmap_pgt) + .fill 3*512,8,0 + +NEXT_PAGE(level1_vsyscall_pgt) .fill 512,8,0 #undef PMDS - .data + .align PAGE_SIZE +ENTRY(cpu_gdt_table) + .rept NR_CPUS + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */ + .quad 0x00af9b000000ffff /* __KERNEL_CS */ + .quad 0x00cf93000000ffff /* __KERNEL_DS */ + .quad 0x00cffb000000ffff /* __USER32_CS */ + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */ + .quad 0x00affb000000ffff /* __USER_CS */ + +#ifdef CONFIG_PAX_KERNEXEC + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */ +#else + .quad 0x0 /* unused */ +#endif + + .quad 0,0 /* TSS */ + .quad 0,0 /* LDT */ + .quad 0,0,0 /* three TLS descriptors */ + .quad 0x0000f40000000000 /* node/CPU stored in limit */ + /* asm/segment.h:GDT_ENTRIES must match this */ + +#ifdef CONFIG_PAX_MEMORY_UDEREF + .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */ +#else + .quad 0x0 /* unused */ +#endif + + /* zero the remaining page */ + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0 + .endr + +EXPORT_SYMBOL_GPL(cpu_gdt_table) + .align 16 .globl early_gdt_descr early_gdt_descr: .word GDT_ENTRIES*8-1 early_gdt_descr_base: - .quad INIT_PER_CPU_VAR(gdt_page) + .quad cpu_gdt_table ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ @@ -490,8 +618,8 @@ ENTRY(phys_base) EXPORT_SYMBOL(phys_base) #include "../../x86/xen/xen-head.S" - - __PAGE_ALIGNED_BSS + + .section .rodata,"a",@progbits NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE EXPORT_SYMBOL(empty_zero_page) diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 932348fbb..57fc0d495 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -136,7 +136,7 @@ int is_hpet_enabled(void) } EXPORT_SYMBOL_GPL(is_hpet_enabled); -static void _hpet_print_config(const char *function, int line) +static void __nocapture(1) _hpet_print_config(const char *function, int line) { u32 i, timers, l, h; printk(KERN_INFO "hpet: 
%s(%d):\n", function, line); @@ -593,7 +593,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) #define RESERVE_TIMERS 0 #endif -static void hpet_msi_capability_lookup(unsigned int start_timer) +static __init void hpet_msi_capability_lookup(unsigned int start_timer) { unsigned int id; unsigned int num_timers; diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index be22f5a21..a04fa1406 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq) static void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); - io_apic_irqs &= ~(1< #include #include +#include #include #include #include @@ -20,7 +21,7 @@ /* * this changes the io permissions bitmap in the current task. */ -asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) +SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on) { struct thread_struct *t = ¤t->thread; struct tss_struct *tss; @@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) return -EINVAL; if (turn_on && !capable(CAP_SYS_RAWIO)) return -EPERM; +#ifdef CONFIG_GRKERNSEC_IO + if (turn_on && grsec_disable_privio) { + gr_handle_ioperm(); + return -ENODEV; + } +#endif /* * If it's the first ioperm() call in this thread's lifetime, set the @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) * because the ->io_bitmap_max value must match the bitmap * contents: */ - tss = &per_cpu(cpu_tss, get_cpu()); + tss = cpu_tss + get_cpu(); if (turn_on) bitmap_clear(t->io_bitmap_ptr, from, num); @@ -110,6 +117,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level) if (level > old) { if (!capable(CAP_SYS_RAWIO)) return -EPERM; +#ifdef CONFIG_GRKERNSEC_IO + if (grsec_disable_privio) { + gr_handle_iopl(); + return -ENODEV; + } +#endif } regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << X86_EFLAGS_IOPL_BIT); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 9f669fdd2..00354af57 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -28,7 +28,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat); DEFINE_PER_CPU(struct pt_regs *, irq_regs); EXPORT_PER_CPU_SYMBOL(irq_regs); -atomic_t irq_err_count; +atomic_unchecked_t irq_err_count; /* Function pointer for generic interrupt vector handling */ void (*x86_platform_ipi_callback)(void) = NULL; @@ -146,9 +146,9 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_puts(p, " Hypervisor callback interrupts\n"); } #endif - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count)); #endif #ifdef CONFIG_HAVE_KVM seq_printf(p, "%*s: ", prec, "PIN"); @@ -200,7 +200,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) u64 arch_irq_stat(void) { - u64 sum = atomic_read(&irq_err_count); + u64 sum = atomic_read_unchecked(&irq_err_count); return sum; } diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 1f38d9a4d..65b5e986f 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -22,6 +22,8 @@ #ifdef CONFIG_DEBUG_STACKOVERFLOW +extern void gr_handle_kernel_exploit(void); + int sysctl_panic_on_stackoverflow __read_mostly; /* Debugging check for stack overflow: is there less than 1KB 
free? */ @@ -32,29 +34,30 @@ static int check_stack_overflow(void) __asm__ __volatile__("andl %%esp,%0" : "=r" (sp) : "0" (THREAD_SIZE - 1)); - return sp < (sizeof(struct thread_info) + STACK_WARN); + return sp < STACK_WARN; } -static void print_stack_overflow(void) +static asmlinkage void print_stack_overflow(void) { printk(KERN_WARNING "low stack detected by irq handler\n"); dump_stack(); + gr_handle_kernel_exploit(); if (sysctl_panic_on_stackoverflow) panic("low stack detected by irq handler - check messages\n"); } #else static inline int check_stack_overflow(void) { return 0; } -static inline void print_stack_overflow(void) { } +static asmlinkage void print_stack_overflow(void) { } #endif DEFINE_PER_CPU(struct irq_stack *, hardirq_stack); DEFINE_PER_CPU(struct irq_stack *, softirq_stack); -static void call_on_stack(void *func, void *stack) +static void call_on_stack(void (asmlinkage *func)(void), void *stack) { asm volatile("xchgl %%ebx,%%esp \n" - "call *%%edi \n" + PAX_INDIRECT_CALL("*%%edi", "__do_softirq") "\n" "movl %%ebx,%%esp \n" : "=b" (stack) : "0" (stack), @@ -69,10 +72,9 @@ static inline void *current_stack(void) static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) { - struct irq_stack *curstk, *irqstk; + struct irq_stack *irqstk; u32 *isp, *prev_esp, arg1; - curstk = (struct irq_stack *) current_stack(); irqstk = __this_cpu_read(hardirq_stack); /* @@ -81,25 +83,34 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) * handler) we can't do that and just have to keep using the * current stack (which is the irq stack already after all) */ - if (unlikely(curstk == irqstk)) + if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE)) return 0; - isp = (u32 *) ((char *)irqstk + sizeof(*irqstk)); + isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8); /* Save the next esp at the bottom of the stack */ prev_esp = (u32 *)irqstk; *prev_esp = current_stack_pointer(); +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(MAKE_MM_SEG(0)); +#endif + if (unlikely(overflow)) call_on_stack(print_stack_overflow, isp); asm volatile("xchgl %%ebx,%%esp \n" - "call *%%edi \n" + PAX_INDIRECT_CALL("*%%edi", "handle_bad_irq") "\n" "movl %%ebx,%%esp \n" : "=a" (arg1), "=b" (isp) : "0" (desc), "1" (isp), "D" (desc->handle_irq) : "memory", "cc", "ecx"); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(current->thread.addr_limit); +#endif + return 1; } @@ -108,23 +119,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) */ void irq_ctx_init(int cpu) { - struct irq_stack *irqstk; - if (per_cpu(hardirq_stack, cpu)) return; - irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), - THREADINFO_GFP, - THREAD_SIZE_ORDER)); - per_cpu(hardirq_stack, cpu) = irqstk; - - irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), - THREADINFO_GFP, - THREAD_SIZE_ORDER)); - per_cpu(softirq_stack, cpu) = irqstk; - - printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", - cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); + per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER)); + per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER)); } void do_softirq_own_stack(void) @@ -141,7 +140,16 @@ void do_softirq_own_stack(void) prev_esp = (u32 *)irqstk; *prev_esp = current_stack_pointer(); +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(MAKE_MM_SEG(0)); +#endif + call_on_stack(__do_softirq, isp); + +#ifdef 
CONFIG_PAX_MEMORY_UDEREF + __set_fs(current->thread.addr_limit); +#endif + } bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 9ebd0b0e7..685de4ae6 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -19,6 +19,8 @@ #include #include +extern void gr_handle_kernel_exploit(void); + int sysctl_panic_on_stackoverflow; /* @@ -44,9 +46,8 @@ static inline void stack_overflow_check(struct pt_regs *regs) regs->sp <= curbase + THREAD_SIZE) return; - irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) + - STACK_TOP_MARGIN; irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); + irq_stack_top = irq_stack_bottom - IRQ_STACK_SIZE + STACK_TOP_MARGIN; if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) return; @@ -61,6 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs) irq_stack_top, irq_stack_bottom, estack_top, estack_bottom); + gr_handle_kernel_exploit(); + if (sysctl_panic_on_stackoverflow) panic("low stack detected by irq handler - check messages\n"); #endif diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index fc25f698d..d31d60cc3 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -32,6 +32,8 @@ static void bug_at(unsigned char *ip, int line) * Something went wrong. Crash the box, as something could be * corrupting the kernel. */ + ip = (unsigned char *)ktla_ktva((unsigned long)ip); + pr_warning("Unexpected op at %pS [%p] %s:%d\n", ip, ip, __FILE__, line); pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n", ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line); BUG(); @@ -52,7 +54,7 @@ static void __jump_label_transform(struct jump_entry *entry, * Jump label is enabled for the first time. * So we expect a default_nop... */ - if (unlikely(memcmp((void *)entry->code, default_nop, 5) + if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0)) bug_at((void *)entry->code, __LINE__); } else { @@ -60,7 +62,7 @@ static void __jump_label_transform(struct jump_entry *entry, * ...otherwise expect an ideal_nop. Otherwise * something went horribly wrong. */ - if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) + if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5) != 0)) bug_at((void *)entry->code, __LINE__); } @@ -76,13 +78,13 @@ static void __jump_label_transform(struct jump_entry *entry, * are converting the default nop to the ideal nop. 
*/ if (init) { - if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0)) + if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0)) bug_at((void *)entry->code, __LINE__); } else { code.jump = 0xe9; code.offset = entry->target - (entry->code + JUMP_LABEL_NOP_SIZE); - if (unlikely(memcmp((void *)entry->code, &code, 5) != 0)) + if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0)) bug_at((void *)entry->code, __LINE__); } memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 8e36f2496..dd9064b75 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void) bp->attr.bp_addr = breakinfo[breakno].addr; bp->attr.bp_len = breakinfo[breakno].len; bp->attr.bp_type = breakinfo[breakno].type; - info->address = breakinfo[breakno].addr; + if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE) + info->address = ktla_ktva(breakinfo[breakno].addr); + else + info->address = breakinfo[breakno].addr; info->len = breakinfo[breakno].len; info->type = breakinfo[breakno].type; val = arch_install_hw_breakpoint(bp); @@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, case 'k': /* clear the trace bit */ linux_regs->flags &= ~X86_EFLAGS_TF; - atomic_set(&kgdb_cpu_doing_single_step, -1); + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1); /* set the trace bit if we're stepping */ if (remcomInBuffer[0] == 's') { linux_regs->flags |= X86_EFLAGS_TF; - atomic_set(&kgdb_cpu_doing_single_step, + atomic_set_unchecked(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } @@ -551,7 +554,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) switch (cmd) { case DIE_DEBUG: - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) { if (user_mode(regs)) return single_step_cont(regs, args); break; @@ -754,11 +757,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) char opc[BREAK_INSTR_SIZE]; bpt->type = BP_BREAKPOINT; - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, + err = probe_kernel_read(bpt->saved_instr, (const void *)ktla_ktva(bpt->bpt_addr), BREAK_INSTR_SIZE); if (err) return err; - err = probe_kernel_write((char *)bpt->bpt_addr, + err = probe_kernel_write((void *)ktla_ktva(bpt->bpt_addr), arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); if (!err) return err; @@ -770,7 +773,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) return -EBUSY; text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); + err = probe_kernel_read(opc, (const void *)ktla_ktva(bpt->bpt_addr), BREAK_INSTR_SIZE); if (err) return err; if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE)) @@ -794,13 +797,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) if (mutex_is_locked(&text_mutex)) goto knl_write; text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); + err = probe_kernel_read(opc, (const void *)ktla_ktva(bpt->bpt_addr), BREAK_INSTR_SIZE); if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) goto knl_write; return err; knl_write: - return probe_kernel_write((char *)bpt->bpt_addr, + return probe_kernel_write((void *)ktla_ktva(bpt->bpt_addr), (char *)bpt->saved_instr, BREAK_INSTR_SIZE); } diff --git 
a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index d9d8d16b6..139e5a04d 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -123,9 +123,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op) s32 raddr; } __packed *insn; - insn = (struct __arch_relative_insn *)from; + insn = (struct __arch_relative_insn *)ktla_ktva((unsigned long)from); + + pax_open_kernel(); insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); insn->op = op; + pax_close_kernel(); } /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ @@ -171,7 +174,7 @@ int can_boost(kprobe_opcode_t *opcodes) kprobe_opcode_t opcode; kprobe_opcode_t *orig_opcodes = opcodes; - if (search_exception_tables((unsigned long)opcodes)) + if (search_exception_tables(ktva_ktla((unsigned long)opcodes))) return 0; /* Page fault may occur on this address. */ retry: @@ -263,12 +266,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) * Fortunately, we know that the original code is the ideal 5-byte * long NOP. */ - memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); if (faddr) memcpy(buf, ideal_nops[NOP_ATOMIC5], 5); else buf[0] = kp->opcode; - return (unsigned long)buf; + return ktva_ktla((unsigned long)buf); } /* @@ -370,7 +373,9 @@ int __copy_instruction(u8 *dest, u8 *src) /* Another subsystem puts a breakpoint, failed to recover */ if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) return 0; + pax_open_kernel(); memcpy(dest, insn.kaddr, length); + pax_close_kernel(); #ifdef CONFIG_X86_64 if (insn_rip_relative(&insn)) { @@ -397,7 +402,9 @@ int __copy_instruction(u8 *dest, u8 *src) return 0; } disp = (u8 *) dest + insn_offset_displacement(&insn); + pax_open_kernel(); *(s32 *) disp = (s32) newdisp; + pax_close_kernel(); } #endif return length; @@ -512,6 +519,7 @@ static nokprobe_inline void restore_btf(void) } } +#ifdef CONFIG_KRETPROBES void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long *sara = stack_addr(regs); @@ -522,6 +530,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) *sara = (unsigned long) &kretprobe_trampoline; } NOKPROBE_SYMBOL(arch_prepare_kretprobe); +#endif static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) @@ -539,7 +548,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, * nor set current_kprobe, because it doesn't use single * stepping. */ - regs->ip = (unsigned long)p->ainsn.insn; + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); preempt_enable_no_resched(); return; } @@ -556,9 +565,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, regs->flags &= ~X86_EFLAGS_IF; /* single step inline if the instruction is an int3 */ if (p->opcode == BREAKPOINT_INSTRUCTION) - regs->ip = (unsigned long)p->addr; + regs->ip = ktla_ktva((unsigned long)p->addr); else - regs->ip = (unsigned long)p->ainsn.insn; + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); } NOKPROBE_SYMBOL(setup_singlestep); @@ -643,7 +652,7 @@ int kprobe_int3_handler(struct pt_regs *regs) setup_singlestep(p, regs, kcb, 0); return 1; } - } else if (*addr != BREAKPOINT_INSTRUCTION) { + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) { /* * The breakpoint instruction was removed right * after we hit it. 
Another cpu has removed @@ -677,6 +686,9 @@ NOKPROBE_SYMBOL(kprobe_int3_handler); asm( ".global kretprobe_trampoline\n" ".type kretprobe_trampoline, @function\n" +#ifdef CONFIG_PAX_RAP + ".quad __rap_hash_ret_kretprobe_trampoline\n" +#endif "kretprobe_trampoline:\n" #ifdef CONFIG_X86_64 /* We don't bother saving the ss register */ @@ -684,16 +696,19 @@ asm( " pushfq\n" SAVE_REGS_STRING " movq %rsp, %rdi\n" - " call trampoline_handler\n" + " "PAX_DIRECT_CALL("trampoline_handler") "\n" /* Replace saved sp with true return address. */ " movq %rax, 152(%rsp)\n" RESTORE_REGS_STRING " popfq\n" +#ifdef KERNEXEC_PLUGIN + " btsq $63,(%rsp)\n" +#endif #else " pushf\n" SAVE_REGS_STRING " movl %esp, %eax\n" - " call trampoline_handler\n" + " "PAX_DIRECT_CALL("trampoline_handler") "\n" /* Move flags to cs */ " movl 56(%esp), %edx\n" " movl %edx, 52(%esp)\n" @@ -830,7 +845,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { unsigned long *tos = stack_addr(regs); - unsigned long copy_ip = (unsigned long)p->ainsn.insn; + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn); unsigned long orig_ip = (unsigned long)p->addr; kprobe_opcode_t *insn = p->ainsn.insn; @@ -1144,7 +1159,9 @@ int __init arch_init_kprobes(void) return 0; } +#ifdef CONFIG_KRETPROBES int arch_trampoline_kprobe(struct kprobe *p) { return 0; } +#endif diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 3bb4c5f02..657db4e4d 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -80,6 +80,7 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) { + pax_open_kernel(); #ifdef CONFIG_X86_64 *addr++ = 0x48; *addr++ = 0xbf; @@ -87,6 +88,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) *addr++ = 0xb8; #endif *(unsigned long *)addr = val; + pax_close_kernel(); } asm ( @@ -343,7 +345,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, * Verify if the address gap is in 2GB range, because this uses * a relative jump. 
*/ - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE; if (abs(rel) > 0x7fffffff) { __arch_remove_optimized_kprobe(op, 0); return -ERANGE; @@ -360,16 +362,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, op->optinsn.size = ret; /* Copy arch-dep-instance from template */ - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); + pax_open_kernel(); + memcpy(buf, (u8 *)ktla_ktva((unsigned long)&optprobe_template_entry), TMPL_END_IDX); + pax_close_kernel(); /* Set probe information */ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); /* Set probe function call */ - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); + synthesize_relcall((u8 *)ktva_ktla((unsigned long)buf) + TMPL_CALL_IDX, optimized_callback); /* Set returning jmp instruction at the tail of out-of-line buffer */ - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, + synthesize_reljump((u8 *)ktva_ktla((unsigned long)buf) + TMPL_END_IDX + op->optinsn.size, (u8 *)op->kp.addr + op->optinsn.size); flush_icache_range((unsigned long) buf, @@ -394,7 +398,7 @@ void arch_optimize_kprobes(struct list_head *oplist) WARN_ON(kprobe_disabled(&op->kp)); /* Backup instructions which will be replaced by jump address */ - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, + memcpy(op->optinsn.copied_insn, (u8 *)ktla_ktva((unsigned long)op->kp.addr) + INT3_SIZE, RELATIVE_ADDR_SIZE); insn_buf[0] = RELATIVEJUMP_OPCODE; @@ -442,7 +446,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) /* This kprobe is really able to run optimized path. */ op = container_of(p, struct optimized_kprobe, kp); /* Detour through copied instructions */ - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; + regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX; if (!reenter) reset_current_kprobe(); preempt_enable_no_resched(); diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c index 4afc67f5f..f00d132fc 100644 --- a/arch/x86/kernel/ksysfs.c +++ b/arch/x86/kernel/ksysfs.c @@ -184,7 +184,7 @@ static ssize_t setup_data_data_read(struct file *fp, static struct kobj_attribute type_attr = __ATTR_RO(type); -static struct bin_attribute data_attr __ro_after_init = { +static bin_attribute_no_const data_attr __ro_after_init = { .attr = { .name = "data", .mode = S_IRUGO, diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index edbbfc854..d4d75ef1d 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -533,7 +533,7 @@ static uint32_t __init kvm_detect(void) return kvm_cpuid_base(); } -const struct hypervisor_x86 x86_hyper_kvm __refconst = { +const struct hypervisor_x86 x86_hyper_kvm = { .name = "KVM", .detect = kvm_detect, .x2apic_available = kvm_para_available, @@ -604,10 +604,12 @@ void __init kvm_spinlock_init(void) return; __pv_init_lock_hash(); + pax_open_kernel(); pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; - pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock); + pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(queued_spin_unlock, __pv_queued_spin_unlock); pv_lock_ops.wait = kvm_wait; pv_lock_ops.kick = kvm_kick_cpu; + pax_close_kernel(); } static __init int kvm_spinlock_init_jump(void) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 60b9949f1..5b52343fe 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -42,7 +42,7 @@ static int 
parse_no_kvmclock(char *arg) early_param("no-kvmclock", parse_no_kvmclock); /* The hypervisor will put information about time periodically here */ -static struct pvclock_vsyscall_time_info *hv_clock; +static struct pvclock_vsyscall_time_info hv_clock[NR_CPUS] __page_aligned_bss; static struct pvclock_wall_clock wall_clock; struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void) @@ -161,7 +161,7 @@ bool kvm_check_and_clear_guest_paused(void) struct pvclock_vcpu_time_info *src; int cpu = smp_processor_id(); - if (!hv_clock) + if (!kvmclock) return ret; src = &hv_clock[cpu].pvti; @@ -188,7 +188,7 @@ int kvm_register_clock(char *txt) int low, high, ret; struct pvclock_vcpu_time_info *src; - if (!hv_clock) + if (!kvmclock) return 0; src = &hv_clock[cpu].pvti; @@ -248,7 +248,6 @@ static void kvm_shutdown(void) void __init kvmclock_init(void) { struct pvclock_vcpu_time_info *vcpu_time; - unsigned long mem; int size, cpu; u8 flags; @@ -266,15 +265,8 @@ void __init kvmclock_init(void) printk(KERN_INFO "kvm-clock: Using msrs %x and %x", msr_kvm_system_time, msr_kvm_wall_clock); - mem = memblock_alloc(size, PAGE_SIZE); - if (!mem) - return; - hv_clock = __va(mem); - memset(hv_clock, 0, size); - if (kvm_register_clock("primary cpu clock")) { - hv_clock = NULL; - memblock_free(mem, size); + kvmclock = 0; return; } @@ -315,7 +307,7 @@ int __init kvm_setup_vsyscall_timeinfo(void) struct pvclock_vcpu_time_info *vcpu_time; unsigned int size; - if (!hv_clock) + if (!kvmclock) return 0; size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS); diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 6707039b9..cf6479123 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -11,16 +11,26 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include #include +#ifdef CONFIG_GRKERNSEC +int sysctl_modify_ldt __read_only = 0; +#elif defined(CONFIG_DEFAULT_MODIFY_LDT_SYSCALL) +int sysctl_modify_ldt __read_only = 1; +#else +int sysctl_modify_ldt __read_only = 0; +#endif + /* context.lock is held for us, so we don't need any locking. */ static void flush_ldt(void *current_mm) { @@ -109,6 +119,23 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) struct mm_struct *old_mm; int retval = 0; + if (tsk == current) { + mm->context.vdso = 0; + +#ifdef CONFIG_X86_32 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + mm->context.user_cs_base = 0UL; + mm->context.user_cs_limit = ~0UL; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + cpumask_clear(&mm->context.cpu_user_cs_mask); +#endif + +#endif +#endif + + } + mutex_init(&mm->context.lock); old_mm = current->mm; if (!old_mm) { @@ -235,6 +262,14 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) /* The user wants to clear the entry. 
*/ memset(&ldt, 0, sizeof(ldt)); } else { + +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) { + error = -EINVAL; + goto out; + } +#endif + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { error = -EINVAL; goto out; @@ -271,11 +306,19 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) return error; } -asmlinkage int sys_modify_ldt(int func, void __user *ptr, - unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int, func, void __user *, ptr, unsigned long, bytecount) { int ret = -ENOSYS; + if (!sysctl_modify_ldt) { + printk_ratelimited(KERN_INFO + "Denied a call to modify_ldt() from %s[%d] (uid: %d)." + " Adjust sysctl if this was not an exploit attempt.\n", + current->comm, task_pid_nr(current), + from_kuid_munged(current_user_ns(), current_uid())); + return ret; + } + switch (func) { case 0: ret = read_ldt(ptr, bytecount); diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 469b23d6a..5449cfed1 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -26,7 +26,7 @@ #include #include -static void set_idt(void *newidt, __u16 limit) +static void set_idt(struct desc_struct *newidt, __u16 limit) { struct desc_ptr curidt; @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit) } -static void set_gdt(void *newgdt, __u16 limit) +static void set_gdt(struct desc_struct *newgdt, __u16 limit) { struct desc_ptr curgdt; @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image) } control_page = page_address(image->control_code_page); - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE); relocate_kernel_ptr = control_page; page_list[PA_CONTROL_PAGE] = __pa(control_page); diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index 7b0d3da52..e4ff6df80 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S @@ -8,7 +8,7 @@ #include #include #include - +#include .code64 .section .entry.text, "ax" @@ -151,8 +151,8 @@ EXPORT_SYMBOL(mcount) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(function_hook) - retq -END(function_hook) + pax_ret mcount +ENDPROC(function_hook) ENTRY(ftrace_caller) /* save_mcount_regs fills in first two parameters */ @@ -165,8 +165,7 @@ GLOBAL(ftrace_caller_op_ptr) /* regs go into 4th parameter (but make it NULL) */ movq $0, %rcx -GLOBAL(ftrace_call) - call ftrace_stub + pax_direct_call_global ftrace_stub ftrace_call restore_mcount_regs @@ -182,13 +181,13 @@ GLOBAL(ftrace_epilogue) #ifdef CONFIG_FUNCTION_GRAPH_TRACER GLOBAL(ftrace_graph_call) - jmp ftrace_stub #endif + jmp ftrace_stub /* This is weak to keep gas from relaxing the jumps */ -WEAK(ftrace_stub) - retq -END(ftrace_caller) +RAP_WEAK(ftrace_stub) + pax_ret ftrace_stub +ENDPROC(ftrace_caller) ENTRY(ftrace_regs_caller) /* Save the current flags before any operations that can change them */ @@ -225,8 +224,7 @@ GLOBAL(ftrace_regs_caller_op_ptr) /* regs go into 4th parameter */ leaq (%rsp), %rcx -GLOBAL(ftrace_regs_call) - call ftrace_stub + pax_direct_call_global ftrace_stub ftrace_regs_call /* Copy flags back to SS, to restore them */ movq EFLAGS(%rsp), %rax @@ -259,7 +257,7 @@ GLOBAL(ftrace_regs_caller_end) jmp ftrace_epilogue -END(ftrace_regs_caller) +ENDPROC(ftrace_regs_caller) #else /* ! 
CONFIG_DYNAMIC_FTRACE */ @@ -278,7 +276,7 @@ fgraph_trace: #endif GLOBAL(ftrace_stub) - retq + pax_ret ftrace_stub trace: /* save_mcount_regs fills in first two parameters */ @@ -290,12 +288,13 @@ trace: * ip and parent ip are used and the list function is called when * function tracing is enabled. */ - call *ftrace_trace_function + pax_force_fptr ftrace_trace_function + pax_indirect_call "ftrace_trace_function", ftrace_stub restore_mcount_regs jmp fgraph_trace -END(function_hook) +ENDPROC(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_TRACER */ @@ -313,13 +312,14 @@ ENTRY(ftrace_graph_caller) /* ftrace does sanity checks against frame pointers */ movq (%rdx), %rdx #endif - call prepare_ftrace_return + pax_direct_call prepare_ftrace_return restore_mcount_regs - retq -END(ftrace_graph_caller) + pax_ret ftrace_graph_caller +ENDPROC(ftrace_graph_caller) + pax_retloc return_to_handler GLOBAL(return_to_handler) subq $24, %rsp @@ -328,11 +328,13 @@ GLOBAL(return_to_handler) movq %rdx, 8(%rsp) movq %rbp, %rdi - call ftrace_return_to_handler + pax_direct_call ftrace_return_to_handler movq %rax, %rdi movq 8(%rsp), %rdx movq (%rsp), %rax addq $24, %rsp + pax_force_fptr %rdi jmp *%rdi +ENDPROC(return_to_handler) #endif diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 477ae806c..a280c6743 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -76,17 +76,17 @@ static unsigned long int get_module_load_offset(void) } #endif -void *module_alloc(unsigned long size) +static inline void *__module_alloc(unsigned long size, pgprot_t prot) { void *p; - if (PAGE_ALIGN(size) > MODULES_LEN) + if (!size || PAGE_ALIGN(size) > MODULES_LEN) return NULL; p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR + get_module_load_offset(), - MODULES_END, GFP_KERNEL | __GFP_HIGHMEM, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, + prot, 0, NUMA_NO_NODE, __builtin_return_address(0)); if (p && (kasan_module_alloc(p, size) < 0)) { vfree(p); @@ -96,6 +96,51 @@ void *module_alloc(unsigned long size) return p; } +void *module_alloc(unsigned long size) +{ + +#ifdef CONFIG_PAX_KERNEXEC + return __module_alloc(size, PAGE_KERNEL); +#else + return __module_alloc(size, PAGE_KERNEL_EXEC); +#endif + +} + +#ifdef CONFIG_PAX_KERNEXEC +#ifdef CONFIG_X86_32 +void *module_alloc_exec(unsigned long size) +{ + struct vm_struct *area; + + if (size == 0) + return NULL; + + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END); + return area ? 
area->addr : NULL; +} +EXPORT_SYMBOL(module_alloc_exec); + +void module_memfree_exec(void *module_region) +{ + vunmap(module_region); +} +EXPORT_SYMBOL(module_memfree_exec); +#else +void module_memfree_exec(void *module_region) +{ + module_memfree(module_region); +} +EXPORT_SYMBOL(module_memfree_exec); + +void *module_alloc_exec(unsigned long size) +{ + return __module_alloc(size, PAGE_KERNEL_RX); +} +EXPORT_SYMBOL(module_alloc_exec); +#endif +#endif + #ifdef CONFIG_X86_32 int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, @@ -106,14 +151,16 @@ int apply_relocate(Elf32_Shdr *sechdrs, unsigned int i; Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; - uint32_t *location; + uint32_t *plocation, location; DEBUGP("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[i].r_offset; + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; + location = (uint32_t)plocation; + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR) + plocation = (uint32_t *)ktla_ktva((unsigned long)plocation); /* This is the symbol it is referring to. Note that all undefined symbols have been resolved. */ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr @@ -122,11 +169,15 @@ int apply_relocate(Elf32_Shdr *sechdrs, switch (ELF32_R_TYPE(rel[i].r_info)) { case R_386_32: /* We add the value into the location given */ - *location += sym->st_value; + pax_open_kernel(); + *plocation += sym->st_value; + pax_close_kernel(); break; case R_386_PC32: /* Add the value, subtract its position */ - *location += sym->st_value - (uint32_t)location; + pax_open_kernel(); + *plocation += sym->st_value - location; + pax_close_kernel(); break; default: pr_err("%s: Unknown relocation: %u\n", @@ -171,21 +222,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_X86_64_NONE: break; case R_X86_64_64: + pax_open_kernel(); *(u64 *)loc = val; + pax_close_kernel(); break; case R_X86_64_32: + pax_open_kernel(); *(u32 *)loc = val; + pax_close_kernel(); if (val != *(u32 *)loc) goto overflow; break; case R_X86_64_32S: + pax_open_kernel(); *(s32 *)loc = val; + pax_close_kernel(); if ((s64)val != *(s32 *)loc) goto overflow; break; case R_X86_64_PC32: val -= (u64)loc; + pax_open_kernel(); *(u32 *)loc = val; + pax_close_kernel(); + #if 0 if ((s64)val != *(s32 *)loc) goto overflow; diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 7f3550acd..e53578342 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -83,6 +84,13 @@ static ssize_t msr_write(struct file *file, const char __user *buf, int err = 0; ssize_t bytes = 0; +#ifdef CONFIG_GRKERNSEC_KMEM + if (reg != MSR_IA32_ENERGY_PERF_BIAS) { + gr_handle_msr_write(); + return -EPERM; + } +#endif + if (count % 8) return -EINVAL; /* Invalid chunk size */ @@ -130,6 +138,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) err = -EBADF; break; } +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_msr_write(); + return -EPERM; +#endif if (copy_from_user(&regs, uregs, sizeof regs)) { err = -EFAULT; break; @@ -213,7 +225,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, return notifier_from_errno(err); } -static struct notifier_block __refdata msr_class_cpu_notifier = { +static struct notifier_block msr_class_cpu_notifier = { .notifier_call
= msr_class_cpu_callback, }; diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index bfe4d6c96..1c3f03ca6 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -101,16 +101,16 @@ fs_initcall(nmi_warning_debugfs); static void nmi_max_handler(struct irq_work *w) { - struct nmiaction *a = container_of(w, struct nmiaction, irq_work); + struct nmiwork *n = container_of(w, struct nmiwork, irq_work); int remainder_ns, decimal_msecs; - u64 whole_msecs = ACCESS_ONCE(a->max_duration); + u64 whole_msecs = ACCESS_ONCE(n->max_duration); remainder_ns = do_div(whole_msecs, (1000 * 1000)); decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", - a->handler, whole_msecs, decimal_msecs); + n->action->handler, whole_msecs, decimal_msecs); } static int nmi_handle(unsigned int type, struct pt_regs *regs) @@ -137,11 +137,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs) delta = sched_clock() - delta; trace_nmi_handler(a->handler, (int)delta, thishandled); - if (delta < nmi_longest_ns || delta < a->max_duration) + if (delta < nmi_longest_ns || delta < a->work->max_duration) continue; - a->max_duration = delta; - irq_work_queue(&a->irq_work); + a->work->max_duration = delta; + irq_work_queue(&a->work->irq_work); } rcu_read_unlock(); @@ -151,7 +151,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs) } NOKPROBE_SYMBOL(nmi_handle); -int __register_nmi_handler(unsigned int type, struct nmiaction *action) +int __register_nmi_handler(unsigned int type, const struct nmiaction *action) { struct nmi_desc *desc = nmi_to_desc(type); unsigned long flags; @@ -159,7 +159,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action) if (!action->handler) return -EINVAL; - init_irq_work(&action->irq_work, nmi_max_handler); + action->work->action = action; + init_irq_work(&action->work->irq_work, nmi_max_handler); spin_lock_irqsave(&desc->lock, flags); @@ -177,9 +178,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action) * event confuses some handlers (kdump uses this flag) */ if (action->flags & NMI_FLAG_FIRST) - list_add_rcu(&action->list, &desc->head); + pax_list_add_rcu((struct list_head *)&action->list, &desc->head); else - list_add_tail_rcu(&action->list, &desc->head); + pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head); spin_unlock_irqrestore(&desc->lock, flags); return 0; @@ -202,7 +203,7 @@ void unregister_nmi_handler(unsigned int type, const char *name) if (!strcmp(n->name, name)) { WARN(in_nmi(), "Trying to free NMI (%s) from NMI context!\n", n->name); - list_del_rcu(&n->list); + pax_list_del_rcu((struct list_head *)&n->list); break; } } @@ -503,6 +504,17 @@ static DEFINE_PER_CPU(int, update_debug_stack); dotraplinkage notrace void do_nmi(struct pt_regs *regs, long error_code) { + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if (!user_mode(regs)) { + unsigned long cs = regs->cs & 0xFFFF; + unsigned long ip = ktva_ktla(regs->ip); + + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext) + regs->ip = ip; + } +#endif + if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { this_cpu_write(nmi_state, NMI_LATCHED); return; diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c index 6d9582ec0..f74628792 100644 --- a/arch/x86/kernel/nmi_selftest.c +++ b/arch/x86/kernel/nmi_selftest.c @@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void) { /* trap all the unknown 
NMIs we may generate */ register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk", - __initdata); + __initconst); } static void __init cleanup_nmi_testsuite(void) @@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask) unsigned long timeout; if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, - NMI_FLAG_FIRST, "nmi_selftest", __initdata)) { + NMI_FLAG_FIRST, "nmi_selftest", __initconst)) { nmi_fail = FAILURE; return; } diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 2c55a003b..1528b2557 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -17,16 +17,26 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock); bool pv_is_native_spin_unlock(void) { - return pv_lock_ops.queued_spin_unlock.func == + return pv_lock_ops.queued_spin_unlock.queued_spin_unlock == __raw_callee_save___native_queued_spin_unlock; } -struct pv_lock_ops pv_lock_ops = { +#ifdef CONFIG_SMP +static void native_wait(u8 *ptr, u8 val) +{ +} + +static void native_kick(int cpu) +{ +} +#endif /* SMP */ + +struct pv_lock_ops pv_lock_ops __read_only = { #ifdef CONFIG_SMP .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath, - .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock), - .wait = paravirt_nop, - .kick = paravirt_nop, + .queued_spin_unlock = PV_CALLEE_SAVE(queued_spin_unlock, __native_queued_spin_unlock), + .wait = native_wait, + .kick = native_kick, #endif /* SMP */ }; EXPORT_SYMBOL(pv_lock_ops); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index bbf3d5933..904b63737 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -65,6 +65,9 @@ u64 notrace _paravirt_ident_64(u64 x) { return x; } +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64); +#endif void __init default_banner(void) { @@ -75,17 +78,25 @@ void __init default_banner(void) /* Undefined instruction for dealing with missing ops pointers. 
*/ static const unsigned char ud2a[] = { 0x0f, 0x0b }; -struct branch { +struct longbranch { unsigned char opcode; u32 delta; } __attribute__((packed)); +struct shortbranch { + unsigned char opcode; + signed char delta; +}; + unsigned paravirt_patch_call(void *insnbuf, const void *target, u16 tgt_clobbers, unsigned long addr, u16 site_clobbers, unsigned len) { - struct branch *b = insnbuf; + struct longbranch *b = insnbuf; +#ifdef CONFIG_PAX_RAP + struct shortbranch *hashb = insnbuf; +#endif unsigned long delta = (unsigned long)target - (addr+5); if (tgt_clobbers & ~site_clobbers) @@ -93,17 +104,29 @@ unsigned paravirt_patch_call(void *insnbuf, if (len < 5) return len; /* call too long for patch site */ +#ifdef CONFIG_PAX_RAP + if (hashb->opcode != 0xeb) + return len; + hashb->delta = len - sizeof(*b) - sizeof(*hashb); + b = insnbuf + len - sizeof(*b); + delta = (unsigned long)target - (addr + len); +#endif + b->opcode = 0xe8; /* call */ b->delta = delta; BUILD_BUG_ON(sizeof(*b) != 5); +#ifdef CONFIG_PAX_RAP + return len; +#else return 5; +#endif } unsigned paravirt_patch_jmp(void *insnbuf, const void *target, unsigned long addr, unsigned len) { - struct branch *b = insnbuf; + struct longbranch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); if (len < 5) @@ -132,6 +155,38 @@ static void *get_call_destination(u8 type) return *((void **)&tmpl + type); } +#if (defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)) || defined(CONFIG_PAX_RAP) +#if CONFIG_PGTABLE_LEVELS >= 3 +PV_CALLEE_SAVE_REGS_THUNK(native_pmd_val); +PV_CALLEE_SAVE_REGS_THUNK(native_make_pmd); +#if CONFIG_PGTABLE_LEVELS == 4 +PV_CALLEE_SAVE_REGS_THUNK(native_pud_val); +PV_CALLEE_SAVE_REGS_THUNK(native_make_pud); +#endif +#endif +PV_CALLEE_SAVE_REGS_THUNK(native_pte_val); +PV_CALLEE_SAVE_REGS_THUNK(native_pgd_val); +PV_CALLEE_SAVE_REGS_THUNK(native_make_pte); +PV_CALLEE_SAVE_REGS_THUNK(native_make_pgd); + +const struct pv_mmu_ops rap_pv_mmu_ops __initconst = { +#if CONFIG_PGTABLE_LEVELS >= 3 + .pmd_val = (union paravirt_callee_save) { .pmd_val = native_pmd_val }, + .make_pmd = (union paravirt_callee_save) { .make_pmd = native_make_pmd }, + +#if CONFIG_PGTABLE_LEVELS == 4 + .pud_val = (union paravirt_callee_save) { .pud_val = native_pud_val }, + .make_pud = (union paravirt_callee_save) { .make_pud = native_make_pud }, +#endif +#endif /* CONFIG_PGTABLE_LEVELS >= 3 */ + .pte_val = (union paravirt_callee_save) { .pte_val = native_pte_val }, + .pgd_val = (union paravirt_callee_save) { .pgd_val = native_pgd_val }, + + .make_pte = (union paravirt_callee_save) { .make_pte = native_make_pte }, + .make_pgd = (union paravirt_callee_save) { .make_pgd = native_make_pgd }, +}; +#endif + unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, unsigned long addr, unsigned len) { @@ -140,15 +195,49 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, if (opfunc == NULL) /* If there's no function, patch it with a ud2a (BUG) */ - ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); - else if (opfunc == _paravirt_nop) + ret = paravirt_patch_insns(insnbuf, len, (const char *)ktva_ktla((unsigned long)ud2a), ud2a+sizeof(ud2a)); + else if (opfunc == (void *)_paravirt_nop) ret = 0; /* identity functions just return their single argument */ - else if (opfunc == _paravirt_ident_32) +#ifdef CONFIG_PAX_RAP + else if ( +#if CONFIG_PGTABLE_LEVELS >= 3 + opfunc == (void *)__raw_callee_save_native_pmd_val || + opfunc == (void *)__raw_callee_save_native_make_pmd || +#if CONFIG_PGTABLE_LEVELS == 4 
+ opfunc == (void *)__raw_callee_save_native_pud_val || + opfunc == (void *)__raw_callee_save_native_make_pud || +#endif +#endif + opfunc == (void *)__raw_callee_save_native_pte_val || + opfunc == (void *)__raw_callee_save_native_pgd_val || + opfunc == (void *)__raw_callee_save_native_make_pte || + opfunc == (void *)__raw_callee_save_native_make_pgd) +#else + else if ( +#if CONFIG_PGTABLE_LEVELS >= 3 + opfunc == (void *)native_pmd_val || + opfunc == (void *)native_make_pmd || +#if CONFIG_PGTABLE_LEVELS == 4 + opfunc == (void *)native_pud_val || + opfunc == (void *)native_make_pud || +#endif +#endif + opfunc == (void *)native_pte_val || + opfunc == (void *)native_pgd_val || + opfunc == (void *)native_make_pte || + opfunc == (void *)native_make_pgd) +#endif +#ifdef CONFIG_X86_32 +#ifdef CONFIG_X86_PAE + ret = paravirt_patch_ident_64(insnbuf, len); +#else ret = paravirt_patch_ident_32(insnbuf, len); - else if (opfunc == _paravirt_ident_64) +#endif +#else ret = paravirt_patch_ident_64(insnbuf, len); +#endif else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64)) @@ -171,7 +260,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len, if (insn_len > len || start == NULL) insn_len = len; else - memcpy(insnbuf, start, insn_len); + memcpy(insnbuf, (const char *)ktla_ktva((unsigned long)start), insn_len); return insn_len; } @@ -293,7 +382,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return this_cpu_read(paravirt_lazy_mode); } -struct pv_info pv_info = { +struct pv_info pv_info __read_only = { .name = "bare hardware", .kernel_rpl = 0, .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ @@ -303,20 +392,35 @@ struct pv_info pv_info = { #endif }; -struct pv_init_ops pv_init_ops = { +struct pv_init_ops pv_init_ops __read_only = { .patch = native_patch, }; -struct pv_time_ops pv_time_ops = { +struct pv_time_ops pv_time_ops __read_only = { .sched_clock = native_sched_clock, .steal_clock = native_steal_clock, }; -__visible struct pv_irq_ops pv_irq_ops = { - .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), - .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), - .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), - .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), + +#ifdef CONFIG_PAX_RAP +PV_CALLEE_SAVE_REGS_THUNK(native_save_fl); +PV_CALLEE_SAVE_REGS_THUNK(native_restore_fl); +PV_CALLEE_SAVE_REGS_THUNK(native_irq_disable); +PV_CALLEE_SAVE_REGS_THUNK(native_irq_enable); + +const struct pv_irq_ops rap_pv_irq_ops __initconst = { + .save_fl = (union paravirt_callee_save) { .save_fl = native_save_fl }, + .restore_fl = (union paravirt_callee_save) { .restore_fl = native_restore_fl }, + .irq_disable = (union paravirt_callee_save) { .irq_disable = native_irq_disable }, + .irq_enable = (union paravirt_callee_save) { .irq_enable = native_irq_enable }, +}; +#endif + +__visible struct pv_irq_ops pv_irq_ops __read_only = { + .save_fl = __PV_IS_CALLEE_SAVE(save_fl, native_save_fl), + .restore_fl = __PV_IS_CALLEE_SAVE(restore_fl, native_restore_fl), + .irq_disable = __PV_IS_CALLEE_SAVE(irq_disable, native_irq_disable), + .irq_enable = __PV_IS_CALLEE_SAVE(irq_enable, native_irq_enable), .safe_halt = native_safe_halt, .halt = native_halt, #ifdef CONFIG_X86_64 @@ -324,7 +428,23 @@ __visible struct pv_irq_ops pv_irq_ops = { #endif }; -__visible struct pv_cpu_ops pv_cpu_ops = { +static void native_alloc_ldt(struct desc_struct *ldt, unsigned entries) +{ +} + +static void native_free_ldt(struct desc_struct *ldt, unsigned entries) +{ +} + 
+static void native_start_context_switch(struct task_struct *prev) +{ +} + +static void native_end_context_switch(struct task_struct *next) +{ +} + +__visible struct pv_cpu_ops pv_cpu_ops __read_only = { .cpuid = native_cpuid, .get_debugreg = native_get_debugreg, .set_debugreg = native_set_debugreg, @@ -357,8 +477,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .write_gdt_entry = native_write_gdt_entry, .write_idt_entry = native_write_idt_entry, - .alloc_ldt = paravirt_nop, - .free_ldt = paravirt_nop, + .alloc_ldt = native_alloc_ldt, + .free_ldt = native_free_ldt, .load_sp0 = native_load_sp0, @@ -371,8 +491,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .set_iopl_mask = native_set_iopl_mask, .io_delay = native_io_delay, - .start_context_switch = paravirt_nop, - .end_context_switch = paravirt_nop, + .start_context_switch = native_start_context_switch, + .end_context_switch = native_end_context_switch, }; /* At this point, native_get/set_debugreg has real function entries */ @@ -380,14 +500,63 @@ NOKPROBE_SYMBOL(native_get_debugreg); NOKPROBE_SYMBOL(native_set_debugreg); NOKPROBE_SYMBOL(native_load_idt); -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) +#ifdef CONFIG_X86_32 +#ifdef CONFIG_X86_PAE +/* 64-bit pagetable entries */ +#define PTE_IDENT(field, op) PV_CALLEE_SAVE(field, op) +#else /* 32-bit pagetable entries */ -#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) +#define PTE_IDENT(field, op) __PV_IS_CALLEE_SAVE(field, op) +#endif #else /* 64-bit pagetable entries */ -#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) +#define PTE_IDENT(field, op) __PV_IS_CALLEE_SAVE(field, op) #endif +static void native_pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ +} + +static void native_alloc_pte(struct mm_struct *mm, unsigned long pfn) +{ +} + +static void native_alloc_pmd(struct mm_struct *mm, unsigned long pfn) +{ +} + +static void native_alloc_pud(struct mm_struct *mm, unsigned long pfn) +{ +} + +static void native_release_pte(unsigned long pfn) +{ +} + +static void native_release_pmd(unsigned long pfn) +{ +} + +static void native_release_pud(unsigned long pfn) +{ +} + +static void native_pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ +} + +static void native_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +{ +} + +static void native_exit_mmap(struct mm_struct *mm) +{ +} + +static void native_activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ +} + struct pv_mmu_ops pv_mmu_ops __ro_after_init = { .read_cr2 = native_read_cr2, @@ -401,20 +570,20 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { .flush_tlb_others = native_flush_tlb_others, .pgd_alloc = __paravirt_pgd_alloc, - .pgd_free = paravirt_nop, + .pgd_free = native_pgd_free, - .alloc_pte = paravirt_nop, - .alloc_pmd = paravirt_nop, - .alloc_pud = paravirt_nop, - .release_pte = paravirt_nop, - .release_pmd = paravirt_nop, - .release_pud = paravirt_nop, + .alloc_pte = native_alloc_pte, + .alloc_pmd = native_alloc_pmd, + .alloc_pud = native_alloc_pud, + .release_pte = native_release_pte, + .release_pmd = native_release_pmd, + .release_pud = native_release_pud, .set_pte = native_set_pte, .set_pte_at = native_set_pte_at, .set_pmd = native_set_pmd, .set_pmd_at = native_set_pmd_at, - .pte_update = paravirt_nop, + .pte_update = native_pte_update, .ptep_modify_prot_start = __ptep_modify_prot_start, .ptep_modify_prot_commit = __ptep_modify_prot_commit, @@ -427,26 +596,27 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { #endif .set_pud = native_set_pud, - .pmd_val = PTE_IDENT, - 
.make_pmd = PTE_IDENT, + .pmd_val = PTE_IDENT(pmd_val, native_pmd_val), + .make_pmd = PTE_IDENT(make_pmd, native_make_pmd), #if CONFIG_PGTABLE_LEVELS == 4 - .pud_val = PTE_IDENT, - .make_pud = PTE_IDENT, + .pud_val = PTE_IDENT(pud_val, native_pud_val), + .make_pud = PTE_IDENT(make_pud, native_make_pud), .set_pgd = native_set_pgd, + .set_pgd_batched = native_set_pgd_batched, #endif #endif /* CONFIG_PGTABLE_LEVELS >= 3 */ - .pte_val = PTE_IDENT, - .pgd_val = PTE_IDENT, + .pte_val = PTE_IDENT(pte_val, native_pte_val), + .pgd_val = PTE_IDENT(pgd_val, native_pgd_val), - .make_pte = PTE_IDENT, - .make_pgd = PTE_IDENT, + .make_pte = PTE_IDENT(make_pte, native_make_pte), + .make_pgd = PTE_IDENT(make_pgd, native_make_pgd), - .dup_mmap = paravirt_nop, - .exit_mmap = paravirt_nop, - .activate_mm = paravirt_nop, + .dup_mmap = native_dup_mmap, + .exit_mmap = native_exit_mmap, + .activate_mm = native_activate_mm, .lazy_mode = { .enter = paravirt_nop, @@ -455,6 +625,12 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { }, .set_fixmap = native_set_fixmap, + +#ifdef CONFIG_PAX_KERNEXEC + .pax_open_kernel = native_pax_open_kernel, + .pax_close_kernel = native_pax_close_kernel, +#endif + }; EXPORT_SYMBOL_GPL(pv_time_ops); diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index bb3840ced..2fe40f3d9 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); + +#ifndef CONFIG_PAX_MEMORY_UDEREF DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); +#endif + DEF_NATIVE(pv_cpu_ops, clts, "clts"); DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); @@ -59,7 +63,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, PATCH_SITE(pv_mmu_ops, read_cr3); PATCH_SITE(pv_mmu_ops, write_cr3); PATCH_SITE(pv_cpu_ops, clts); + +#ifndef CONFIG_PAX_MEMORY_UDEREF PATCH_SITE(pv_mmu_ops, flush_tlb_single); +#endif + PATCH_SITE(pv_cpu_ops, wbinvd); #if defined(CONFIG_PARAVIRT_SPINLOCKS) case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 5d400ba13..eaad6f6a0 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void) tce_space = be64_to_cpu(readq(target)); tce_space = tce_space & TAR_SW_BITS; - tce_space = tce_space & (~specified_table_size); + tce_space = tce_space & (~(unsigned long)specified_table_size); info->tce_space = (u64 *)__va(tce_space); } } diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c index f712dfdf1..0172a75cf 100644 --- a/arch/x86/kernel/pci-iommu_table.c +++ b/arch/x86/kernel/pci-iommu_table.c @@ -2,7 +2,7 @@ #include #include #include - +#include #define DEBUG 1 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 8e10e72bf..fdd36da1f 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -41,7 +42,8 @@ * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. 
*/ -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { +struct tss_struct cpu_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { + [0 ... NR_CPUS-1] = { .x86_tss = { .sp0 = TOP_OF_INIT_STACK, #ifdef CONFIG_X86_32 @@ -62,8 +64,9 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { #ifdef CONFIG_X86_32 .SYSENTER_stack_canary = STACK_END_MAGIC, #endif +} }; -EXPORT_PER_CPU_SYMBOL(cpu_tss); +EXPORT_SYMBOL(cpu_tss); #ifdef CONFIG_X86_64 static DEFINE_PER_CPU(unsigned char, is_idle); @@ -82,13 +85,26 @@ void idle_notifier_unregister(struct notifier_block *n) EXPORT_SYMBOL_GPL(idle_notifier_unregister); #endif +struct kmem_cache *fpregs_state_cachep; +EXPORT_SYMBOL(fpregs_state_cachep); + +void __init arch_task_cache_init(void) +{ + /* create a slab on which task_structs can be allocated */ + fpregs_state_cachep = + kmem_cache_create_usercopy("fpregs_state", fpu_kernel_xstate_size, + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, 0, fpu_kernel_xstate_size, NULL); +} + /* * this gets called so that we can store lazy state into memory and copy the * current task into the new thread. */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - memcpy(dst, src, arch_task_struct_size); + *dst = *src; + dst->thread.fpu.state = kmem_cache_alloc_node(fpregs_state_cachep, GFP_KERNEL, tsk_fork_get_node(src)); + memcpy(dst->thread.fpu.state, src->thread.fpu.state, fpu_kernel_xstate_size); #ifdef CONFIG_VM86 dst->thread.vm86 = NULL; #endif @@ -96,6 +112,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return fpu__copy(&dst->thread.fpu, &src->thread.fpu); } +void arch_release_task_struct(struct task_struct *tsk) +{ + kmem_cache_free(fpregs_state_cachep, tsk->thread.fpu.state); + tsk->thread.fpu.state = NULL; +} + /* * Free current thread data structures etc.. 
*/ @@ -106,7 +128,7 @@ void exit_thread(struct task_struct *tsk) struct fpu *fpu = &t->fpu; if (bp) { - struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu()); + struct tss_struct *tss = cpu_tss + get_cpu(); t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); @@ -128,6 +150,9 @@ void flush_thread(void) { struct task_struct *tsk = current; +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF) + loadsegment(gs, 0); +#endif flush_ptrace_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); @@ -269,7 +294,7 @@ static void __exit_idle(void) void exit_idle(void) { /* idle loop has pid 0 */ - if (current->pid) + if (task_pid_nr(current)) return; __exit_idle(); } @@ -322,7 +347,7 @@ bool xen_set_default_idle(void) return ret; } #endif -void stop_this_cpu(void *dummy) +__noreturn void stop_this_cpu(void *dummy) { local_irq_disable(); /* @@ -499,13 +524,6 @@ static int __init idle_setup(char *str) } early_param("idle", idle_setup); -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() % 8192; - return sp & ~0xf; -} - unsigned long arch_randomize_brk(struct mm_struct *mm) { return randomize_page(mm->brk, 0x02000000); @@ -583,3 +601,35 @@ unsigned long get_wchan(struct task_struct *p) put_task_stack(p); return ret; } + +#ifdef CONFIG_PAX_RANDKSTACK +void pax_randomize_kstack(struct pt_regs *regs) +{ + struct thread_struct *thread = &current->thread; + unsigned long time; + + if (!randomize_va_space) + return; + + if (v8086_mode(regs)) + return; + + time = rdtsc(); + + /* P4 seems to return a 0 LSB, ignore it */ +#ifdef CONFIG_MPENTIUM4 + time &= 0x3EUL; + time <<= 2; +#elif defined(CONFIG_X86_64) + time &= 0xFUL; + time <<= 4; +#else + time &= 0x1FUL; + time <<= 3; +#endif + + thread->sp0 ^= time; + load_sp0(cpu_tss + smp_processor_id(), thread); + this_cpu_write(cpu_current_top_of_stack, thread->sp0); +} +#endif diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index bd7be8efd..631798b1a 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -65,16 +65,15 @@ void __show_regs(struct pt_regs *regs, int all) if (user_mode(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; - gs = get_user_gs(regs); } else { sp = kernel_stack_pointer(regs); savesegment(ss, ss); - savesegment(gs, gs); } + gs = get_user_gs(regs); printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", (u16)regs->cs, regs->ip, regs->flags, - smp_processor_id()); + raw_smp_processor_id()); print_symbol("EIP is at %s\n", regs->ip); printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", @@ -121,7 +120,7 @@ void release_thread(struct task_struct *dead_task) int copy_thread_tls(unsigned long clone_flags, unsigned long sp, unsigned long arg, struct task_struct *p, unsigned long tls) { - struct pt_regs *childregs = task_pt_regs(p); + struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8; struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs); struct inactive_task_frame *frame = &fork_frame->frame; struct task_struct *tsk; @@ -131,6 +130,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; p->thread.sp0 = (unsigned long) (childregs+1); + p->thread.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(p->flags & PF_KTHREAD)) { @@ -231,7 +231,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss, cpu); + struct tss_struct *tss = cpu_tss + cpu; fpu_switch_t fpu_switch; /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -250,6 +250,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ lazy_save_gs(prev->gs); +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(next_p->thread.addr_limit); +#endif + /* * Load the per-thread Thread-Local Storage descriptor. */ @@ -285,9 +289,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) * current_thread_info(). */ load_sp0(tss, next); - this_cpu_write(cpu_current_top_of_stack, - (unsigned long)task_stack_page(next_p) + - THREAD_SIZE); + this_cpu_write(current_task, next_p); + this_cpu_write(cpu_current_top_of_stack, next->sp0); /* * Restore %gs if needed (which is common) @@ -297,7 +300,5 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(next_fpu, fpu_switch); - this_cpu_write(current_task, next_p); - return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index b3760b3c1..cb95ed8c6 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -145,13 +146,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, struct inactive_task_frame *frame; struct task_struct *me = current; - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE; + p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16; childregs = task_pt_regs(p); fork_frame = container_of(childregs, struct fork_frame, regs); frame = &fork_frame->frame; frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; + p->thread.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long); p->thread.io_bitmap_ptr = NULL; savesegment(gs, p->thread.gsindex); @@ -160,13 +162,15 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase; savesegment(es, p->thread.es); savesegment(ds, p->thread.ds); + savesegment(ss, p->thread.ss); + BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS); memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(p->flags & PF_KTHREAD)) { /* kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); frame->bx = sp; /* function */ - frame->r12 = arg; + frame->r13 = arg; return 0; } frame->bx = 0; @@ -263,7 +267,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss, cpu); + struct tss_struct *tss = cpu_tss + cpu; unsigned prev_fsindex, prev_gsindex; fpu_switch_t fpu_switch; @@ -314,6 +318,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (unlikely(next->ds | prev->ds)) loadsegment(ds, next->ds); + savesegment(ss, prev->ss); + if (unlikely(next->ss != prev->ss)) + loadsegment(ss, next->ss); + /* * Switch FS and GS. * @@ -427,6 +435,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* Reload esp0 and ss1. 
This changes current_thread_info(). */ load_sp0(tss, next); + this_cpu_write(cpu_current_top_of_stack, next->sp0); + /* * Now maybe reload the debug registers and handle I/O bitmaps */ @@ -612,7 +622,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) return ret; } -long sys_arch_prctl(int code, unsigned long addr) +SYSCALL_DEFINE2(arch_prctl, int, code, unsigned long, addr) { return do_arch_prctl(current, code, addr); } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 0e63c0267..9ee59fb1c 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -169,7 +169,7 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs) unsigned long sp = (unsigned long)®s->sp; u32 *prev_esp; - if (context == (sp & ~(THREAD_SIZE - 1))) + if (context == ((sp + 8) & ~(THREAD_SIZE - 1))) return sp; prev_esp = (u32 *)(context); @@ -411,6 +411,20 @@ static int putreg(struct task_struct *child, if (child->thread.gsbase != value) return do_arch_prctl(child, ARCH_SET_GS, value); return 0; + + case offsetof(struct user_regs_struct,ip): + /* + * Protect against any attempt to set ip to an + * impossible address. There are dragons lurking if the + * address is noncanonical. (This explicitly allows + * setting ip to TASK_SIZE_MAX, because user code can do + * that all by itself by running off the end of its + * address space. + */ + if (value > TASK_SIZE_MAX) + return -EIO; + break; + #endif } @@ -533,7 +547,7 @@ static void ptrace_triggered(struct perf_event *bp, static unsigned long ptrace_get_dr7(struct perf_event *bp[]) { int i; - int dr7 = 0; + unsigned long dr7 = 0; struct arch_hw_breakpoint *info; for (i = 0; i < HBP_NUM; i++) { @@ -767,7 +781,7 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; - unsigned long __user *datap = (unsigned long __user *)data; + unsigned long __user *datap = (__force unsigned long __user *)data; switch (request) { /* read the word at location addr in the USER area. */ @@ -852,14 +866,14 @@ long arch_ptrace(struct task_struct *child, long request, if ((int) addr < 0) return -EIO; ret = do_get_thread_area(child, addr, - (struct user_desc __user *)data); + (__force struct user_desc __user *) data); break; case PTRACE_SET_THREAD_AREA: if ((int) addr < 0) return -EIO; ret = do_set_thread_area(child, addr, - (struct user_desc __user *)data, 0); + (__force struct user_desc __user *) data, 0); break; #endif @@ -1250,7 +1264,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, #ifdef CONFIG_X86_64 -static struct user_regset x86_64_regsets[] __ro_after_init = { +static user_regset_no_const x86_64_regsets[] __ro_after_init = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = sizeof(struct user_regs_struct) / sizeof(long), @@ -1291,7 +1305,7 @@ static const struct user_regset_view user_x86_64_view = { #endif /* CONFIG_X86_64 */ #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION -static struct user_regset x86_32_regsets[] __ro_after_init = { +static user_regset_no_const x86_32_regsets[] __ro_after_init = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = sizeof(struct user_regs_struct32) / sizeof(u32), @@ -1379,7 +1393,7 @@ static void fill_sigtrap_info(struct task_struct *tsk, memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = si_code; - info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL; + info->si_addr = user_mode(regs) ? 
(__force void __user *)regs->ip : NULL; } void user_single_step_siginfo(struct task_struct *tsk, diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 5b2cc889c..bdbf72f23 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void) reset_hung_task_detector(); } -static atomic64_t last_value = ATOMIC64_INIT(0); +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0); void pvclock_resume(void) { - atomic64_set(&last_value, 0); + atomic64_set_unchecked(&last_value, 0); } u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src) @@ -107,11 +107,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) * updating at the same time, and one of them could be slightly behind, * making the assumption that last_value always go forward fail to hold. */ - last = atomic64_read(&last_value); + last = atomic64_read_unchecked(&last_value); do { if (ret < last) return last; - last = atomic64_cmpxchg(&last_value, last, ret); + last = atomic64_cmpxchg_unchecked(&last_value, last, ret); } while (unlikely(last != ret)); return ret; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e244c19a2..2cf3a2667 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -83,6 +83,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d) void __noreturn machine_real_restart(unsigned int type) { + +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) + struct desc_struct *gdt; +#endif + local_irq_disable(); /* @@ -110,7 +115,29 @@ void __noreturn machine_real_restart(unsigned int type) /* Jump to the identity-mapped low memory code */ #ifdef CONFIG_X86_32 - asm volatile("jmpl *%0" : : + +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + gdt = get_cpu_gdt_table(smp_processor_id()); + pax_open_kernel(); +#ifdef CONFIG_PAX_MEMORY_UDEREF + gdt[GDT_ENTRY_KERNEL_DS].type = 3; + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; + loadsegment(ds, __KERNEL_DS); + loadsegment(es, __KERNEL_DS); + loadsegment(ss, __KERNEL_DS); +#endif +#ifdef CONFIG_PAX_KERNEXEC + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0; + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0; + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0; + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff; + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf; + gdt[GDT_ENTRY_KERNEL_CS].g = 1; +#endif + pax_close_kernel(); +#endif + + asm volatile("ljmpl *%0" : : "rm" (real_mode_header->machine_real_restart_asm), "a" (type)); #else @@ -150,7 +177,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d) /* * This is a single dmi_table handling all reboot quirks. */ -static struct dmi_system_id __initdata reboot_dmi_table[] = { +static const struct dmi_system_id __initconst reboot_dmi_table[] = { /* Acer */ { /* Handle reboot issue on Acer Aspire one */ @@ -540,7 +567,7 @@ void __attribute__((weak)) mach_reboot_fixups(void) * This means that this function can never return, it can misbehave * by not rebooting properly and hanging. 
*/ -static void native_machine_emergency_restart(void) +static void __noreturn native_machine_emergency_restart(void) { int i; int attempt = 0; @@ -669,13 +696,13 @@ void native_machine_shutdown(void) #endif } -static void __machine_emergency_restart(int emergency) +static void __noreturn __machine_emergency_restart(int emergency) { reboot_emergency = emergency; machine_ops.emergency_restart(); } -static void native_machine_restart(char *__unused) +static void __noreturn native_machine_restart(char *__unused) { pr_notice("machine restart\n"); @@ -684,7 +711,7 @@ static void native_machine_restart(char *__unused) __machine_emergency_restart(0); } -static void native_machine_halt(void) +static void __noreturn native_machine_halt(void) { /* Stop other cpus and apics */ machine_shutdown(); @@ -694,7 +721,7 @@ static void native_machine_halt(void) stop_this_cpu(NULL); } -static void native_machine_power_off(void) +static void __noreturn native_machine_power_off(void) { if (pm_power_off) { if (!reboot_force) @@ -703,6 +730,7 @@ static void native_machine_power_off(void) } /* A fallback in case there is no PM info available */ tboot_shutdown(TB_SHUTDOWN_HALT); + unreachable(); } struct machine_ops machine_ops __ro_after_init = { diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c index c8e41e90f..64049ef37 100644 --- a/arch/x86/kernel/reboot_fixups_32.c +++ b/arch/x86/kernel/reboot_fixups_32.c @@ -57,7 +57,7 @@ struct device_fixup { unsigned int vendor; unsigned int device; void (*reboot_fixup)(struct pci_dev *); -}; +} __do_const; /* * PCI ids solely used for fixups_table go here diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 98111b38e..73ca12504 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -96,8 +96,7 @@ relocate_kernel: /* jump to identity mapped page */ addq $(identity_mapped - relocate_kernel), %r8 - pushq %r8 - ret + jmp *%r8 identity_mapped: /* set return address to 0 if not preserving context */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 9c337b0e8..16b315ad7 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -114,6 +114,7 @@ #include #include #include +#include /* * max_low_pfn_mapped: highest direct mapped pfn under 4GB @@ -178,7 +179,7 @@ struct cpuinfo_x86 new_cpu_data = { .wp_works_ok = -1, }; /* common cpu data for all cpus */ -struct cpuinfo_x86 boot_cpu_data __read_mostly = { +struct cpuinfo_x86 boot_cpu_data __read_only = { .wp_works_ok = -1, }; EXPORT_SYMBOL(boot_cpu_data); @@ -202,17 +203,19 @@ struct ist_info ist_info; #endif #else -struct cpuinfo_x86 boot_cpu_data __read_mostly = { +struct cpuinfo_x86 boot_cpu_data __read_only = { .x86_phys_bits = MAX_PHYSMEM_BITS, }; EXPORT_SYMBOL(boot_cpu_data); #endif -#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) -__visible unsigned long mmu_cr4_features __ro_after_init; -#else +#ifdef CONFIG_X86_64 +__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE; +#elif defined(CONFIG_X86_PAE) __visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE; +#else +__visible unsigned long mmu_cr4_features __ro_after_init; #endif /* Boot loader ID and version as integers, for the benefit of proc_dointvec */ @@ -761,7 +764,7 @@ static void __init trim_bios_range(void) * area (640->1Mb) as ram even though it is not. * take them out. 
*/ - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1); sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); } @@ -769,7 +772,7 @@ static void __init trim_bios_range(void) /* called before trim_bios_range() to spare extra sanitize */ static void __init e820_add_kernel_range(void) { - u64 start = __pa_symbol(_text); + u64 start = __pa_symbol(ktla_ktva((unsigned long)_text)); u64 size = __pa_symbol(_end) - start; /* @@ -850,8 +853,8 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) void __init setup_arch(char **cmdline_p) { - memblock_reserve(__pa_symbol(_text), - (unsigned long)__bss_stop - (unsigned long)_text); + memblock_reserve(__pa_symbol(ktla_ktva((unsigned long)_text)), + (unsigned long)__bss_stop - ktla_ktva((unsigned long)_text)); early_reserve_initrd(); @@ -944,16 +947,16 @@ void __init setup_arch(char **cmdline_p) if (!boot_params.hdr.root_flags) root_mountflags &= ~MS_RDONLY; - init_mm.start_code = (unsigned long) _text; - init_mm.end_code = (unsigned long) _etext; - init_mm.end_data = (unsigned long) _edata; + init_mm.start_code = ktla_ktva((unsigned long)_text); + init_mm.end_code = ktla_ktva((unsigned long)_etext); + init_mm.end_data = (unsigned long)_edata; init_mm.brk = _brk_end; mpx_mm_init(&init_mm); - code_resource.start = __pa_symbol(_text); - code_resource.end = __pa_symbol(_etext)-1; - data_resource.start = __pa_symbol(_etext); + code_resource.start = __pa_symbol(ktla_ktva((unsigned long)_text)); + code_resource.end = __pa_symbol(ktla_ktva((unsigned long)_etext))-1; + data_resource.start = __pa_symbol(_sdata); data_resource.end = __pa_symbol(_edata)-1; bss_resource.start = __pa_symbol(__bss_start); bss_resource.end = __pa_symbol(__bss_stop)-1; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 2bbd27f89..99987a323 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -21,14 +21,12 @@ #include #include -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number); +#ifdef CONFIG_SMP +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number); EXPORT_PER_CPU_SYMBOL(cpu_number); +#endif -#ifdef CONFIG_X86_64 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) -#else -#define BOOT_PERCPU_OFFSET 0 -#endif DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; EXPORT_PER_CPU_SYMBOL(this_cpu_off); @@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void) { #ifdef CONFIG_NEED_MULTIPLE_NODES pg_data_t *last = NULL; - unsigned int cpu; + int cpu; for_each_possible_cpu(cpu) { int node = early_cpu_to_node(cpu); @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu) { #ifdef CONFIG_X86_32 struct desc_struct gdt; + unsigned long base = per_cpu_offset(cpu); - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, - 0x2 | DESCTYPE_S, 0x8); - gdt.s = 1; + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT, + 0x83 | DESCTYPE_S, 0xC); write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); #endif @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void) /* alrighty, percpu areas up and running */ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { +#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_X86_32 + unsigned long canary = per_cpu(stack_canary.canary, cpu); +#endif +#endif per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; per_cpu(this_cpu_off, cpu) = 
per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; @@ -244,7 +247,7 @@ void __init setup_per_cpu_areas(void) early_per_cpu_map(x86_cpu_to_logical_apicid, cpu); #endif #ifdef CONFIG_X86_64 - per_cpu(irq_stack_ptr, cpu) = + per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack_ptr_lowmem, cpu) = per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE; #endif @@ -261,6 +264,12 @@ void __init setup_per_cpu_areas(void) */ set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); #endif +#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_X86_32 + if (!cpu) + per_cpu(stack_canary.canary, cpu) = canary; +#endif +#endif /* * Up to this point, the boot CPU has been using .init.data * area. Reload any changed state for the boot CPU. diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 763af1d0d..178b2d690 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -227,7 +228,7 @@ static unsigned long align_sigframe(unsigned long sp) * Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ - sp = ((sp + 4) & -16ul) - 4; + sp = ((sp - 12) & -16ul) - 4; #else /* !CONFIG_X86_32 */ sp = round_down(sp, 16) - 8; #endif @@ -335,10 +336,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set, } if (current->mm->context.vdso) - restorer = current->mm->context.vdso + - vdso_image_32.sym___kernel_sigreturn; + restorer = (void __force_user *)(current->mm->context.vdso + vdso_image_32.sym___kernel_sigreturn); else - restorer = &frame->retcode; + restorer = frame->retcode; if (ksig->ka.sa.sa_flags & SA_RESTORER) restorer = ksig->ka.sa.sa_restorer; @@ -352,7 +352,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set, * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode); if (err) return -EFAULT; @@ -399,8 +399,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, save_altstack_ex(&frame->uc.uc_stack, regs->sp); /* Set up to return from userspace. */ - restorer = current->mm->context.vdso + - vdso_image_32.sym___kernel_rt_sigreturn; + if (current->mm->context.vdso) + restorer = (void __force_user *)(current->mm->context.vdso + vdso_image_32.sym___kernel_rt_sigreturn); + else + restorer = (void __user *)&frame->retcode; if (ksig->ka.sa.sa_flags & SA_RESTORER) restorer = ksig->ka.sa.sa_restorer; put_user_ex(restorer, &frame->pretcode); @@ -412,7 +414,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode); } put_user_catch(err); err |= copy_siginfo_to_user(&frame->info, &ksig->info); @@ -599,7 +601,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig, * Do a signal return; undo the signal stack. 
*/ #ifdef CONFIG_X86_32 -asmlinkage unsigned long sys_sigreturn(void) +SYSCALL_DEFINE0(sigreturn) { struct pt_regs *regs = current_pt_regs(); struct sigframe __user *frame; @@ -631,7 +633,7 @@ asmlinkage unsigned long sys_sigreturn(void) } #endif /* CONFIG_X86_32 */ -asmlinkage long sys_rt_sigreturn(void) +SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; @@ -853,7 +855,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where) } #ifdef CONFIG_X86_X32_ABI -asmlinkage long sys32_x32_rt_sigreturn(void) +SYS32_SYSCALL_DEFINE0(x32_rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe_x32 __user *frame; diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index c00cb64bc..73f6b30f9 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -340,7 +340,7 @@ static int __init nonmi_ipi_setup(char *str) __setup("nonmi_ipi", nonmi_ipi_setup); -struct smp_ops smp_ops = { +struct smp_ops smp_ops __read_only = { .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, .smp_prepare_cpus = native_smp_prepare_cpus, .smp_cpus_done = native_smp_cpus_done, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 36171bcd9..e32a454dc 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -94,7 +94,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); /* Per CPU bogomips and other parameters */ -DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); +DEFINE_PER_CPU_READ_ONLY(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); /* Logical package management. We might want to allocate that dynamically */ @@ -224,14 +224,17 @@ static void notrace start_secondary(void *unused) enable_start_cpu0 = 0; -#ifdef CONFIG_X86_32 + /* otherwise gcc will move up smp_processor_id before the cpu_init */ + barrier(); + /* switch away from the initial page table */ +#ifdef CONFIG_PAX_PER_CPU_PGD + load_cr3(get_cpu_pgd(smp_processor_id(), kernel)); +#else load_cr3(swapper_pg_dir); - __flush_tlb_all(); #endif + __flush_tlb_all(); - /* otherwise gcc will move up smp_processor_id before the cpu_init */ - barrier(); /* * Check TSC synchronization with the BP: */ @@ -931,13 +934,11 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle) per_cpu(current_task, cpu) = idle; #ifdef CONFIG_X86_32 - /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); - per_cpu(cpu_current_top_of_stack, cpu) = - (unsigned long)task_stack_page(idle) + THREAD_SIZE; #else initial_gs = per_cpu_offset(cpu); #endif + per_cpu(cpu_current_top_of_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE; } /* @@ -958,9 +959,11 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) unsigned long timeout; idle->thread.sp = (unsigned long) (((struct pt_regs *) - (THREAD_SIZE + task_stack_page(idle))) - 1); + (THREAD_SIZE - 16 + task_stack_page(idle))) - 1); + pax_open_kernel(); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + pax_close_kernel(); initial_code = (unsigned long)start_secondary; initial_stack = idle->thread.sp; @@ -1108,6 +1111,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) common_cpu_up(cpu, tidle); +#ifdef CONFIG_PAX_PER_CPU_PGD + clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); + clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + 
KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); +#endif + err = do_boot_cpu(apicid, cpu, tidle); if (err) { pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index a23ce84a3..88541d527 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -45,7 +45,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re addr += base; } mutex_unlock(&child->mm->context.lock); - } + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS) + addr = ktla_ktva(addr); #endif return addr; @@ -57,6 +58,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) unsigned char opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); + if (addr == -1L) + return 0; + copied = access_process_vm(child, addr, opcode, sizeof(opcode), FOLL_FORCE); for (i = 0; i < copied; i++) { diff --git b/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c new file mode 100644 index 000000000..920e41328 --- /dev/null +++ b/arch/x86/kernel/sys_i386_32.c @@ -0,0 +1,189 @@ +/* + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/i386 + * platform. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) +{ + unsigned long pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (flags & MAP_FIXED) + if (len > pax_task_size || addr > pax_task_size - len) + return -EINVAL; + + return 0; +} + +/* + * Align a virtual address to avoid aliasing in the I$ on AMD F15h. + */ +static unsigned long get_align_mask(void) +{ + if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32)) + return 0; + + if (!(current->flags & PF_RANDOMIZE)) + return 0; + + return va_align.mask; +} + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long pax_task_size = TASK_SIZE; + struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + + if (addr) { + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + } + + info.flags = 0; + info.length = len; + info.align_mask = filp ? 
get_align_mask() : 0; + info.align_offset = pgoff << PAGE_SHIFT; + info.threadstack_offset = offset; + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) { + info.low_limit = 0x00110000UL; + info.high_limit = mm->start_code; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + info.low_limit += mm->delta_mmap & 0x03FFF000UL; +#endif + + if (info.low_limit < info.high_limit) { + addr = vm_unmapped_area(&info); + if (!IS_ERR_VALUE(addr)) + return addr; + } + } else +#endif + + info.low_limit = mm->mmap_base; + info.high_limit = pax_task_size; + + return vm_unmapped_area(&info); +} + +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0, pax_task_size = TASK_SIZE; + struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + /* requested length too big for entire address space */ + if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) + goto bottomup; +#endif + +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + } + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + info.align_mask = filp ? get_align_mask() : 0; + info.align_offset = pgoff << PAGE_SHIFT; + info.threadstack_offset = offset; + + addr = vm_unmapped_area(&info); + if (!(addr & ~PAGE_MASK)) + return addr; + VM_BUG_ON(addr != -ENOMEM); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
+ */ + return arch_get_unmapped_area(filp, addr0, len, pgoff, flags); +} diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index a55ed63b9..665be0a8f 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -97,8 +97,8 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, return error; } -static void find_start_end(unsigned long flags, unsigned long *begin, - unsigned long *end) +static void find_start_end(struct mm_struct *mm, unsigned long flags, + unsigned long *begin, unsigned long *end) { if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) { /* This is usually used needed to map code in small @@ -114,7 +114,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = randomize_page(*begin, 0x02000000); } } else { - *begin = current->mm->mmap_legacy_base; + *begin = mm->mmap_legacy_base; *end = TASK_SIZE; } } @@ -127,20 +127,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, struct vm_area_struct *vma; struct vm_unmapped_area_info info; unsigned long begin, end; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); if (flags & MAP_FIXED) return addr; - find_start_end(flags, &begin, &end); + find_start_end(mm, flags, &begin, &end); if (len > end) return -ENOMEM; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } @@ -154,18 +158,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.align_mask = get_align_mask(); info.align_offset += get_align_bits(); } + info.threadstack_offset = offset; return vm_unmapped_area(&info); } unsigned long -arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, + unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); /* requested length too big for entire address space */ if (len > TASK_SIZE) @@ -178,12 +184,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) goto bottomup; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } @@ -197,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.align_mask = get_align_mask(); info.align_offset += get_align_bits(); } + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); if (!(addr & ~PAGE_MASK)) return addr; diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 840290782..b0e4a7244 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "../realmode/rm/wakeup.h" @@ -145,6 +146,10 @@ static int map_tboot_pages(unsigned long vaddr, unsigned long 
start_pfn, if (!tboot_pg_dir) return -1; + clone_pgd_range(tboot_pg_dir + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); + for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) { if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC)) return -1; @@ -215,8 +220,6 @@ static int tboot_setup_sleep(void) void tboot_shutdown(u32 shutdown_type) { - void (*shutdown)(void); - if (!tboot_enabled()) return; @@ -236,9 +239,12 @@ void tboot_shutdown(u32 shutdown_type) tboot->shutdown_type = shutdown_type; switch_to_tboot_pt(); + __write_cr4(__read_cr4() & ~X86_CR4_PCIDE); - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry; - shutdown(); + /* + * PaX: can't be a C indirect function call due to KERNEXEC + */ + asm volatile("jmp *%0" : : "r"((unsigned long)tboot->shutdown_entry)); /* should not reach here */ while (1) @@ -304,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b) return -ENODEV; } -static atomic_t ap_wfs_count; +static atomic_unchecked_t ap_wfs_count; static int tboot_wait_for_aps(int num_aps) { @@ -325,9 +331,9 @@ static int tboot_wait_for_aps(int num_aps) static int tboot_dying_cpu(unsigned int cpu) { - atomic_inc(&ap_wfs_count); + atomic_inc_unchecked(&ap_wfs_count); if (num_online_cpus() == 1) { - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count))) return -EBUSY; } return 0; @@ -407,7 +413,7 @@ static __init int tboot_late_init(void) tboot_create_trampoline(); - atomic_set(&ap_wfs_count, 0); + atomic_set_unchecked(&ap_wfs_count, 0); cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL, tboot_dying_cpu); #ifdef CONFIG_DEBUG_FS diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index d39c09119..1df434906 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -32,7 +32,7 @@ unsigned long profile_pc(struct pt_regs *regs) if (!user_mode(regs) && in_lock_functions(pc)) { #ifdef CONFIG_FRAME_POINTER - return *(unsigned long *)(regs->bp + sizeof(long)); + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long))); #else unsigned long *sp = (unsigned long *)kernel_stack_pointer(regs); @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs) * or above a saved flags. Eflags has bits 22-31 zero, * kernel addresses don't. 
*/ + +#ifdef CONFIG_PAX_KERNEXEC + return ktla_ktva(sp[0]); +#else if (sp[0] >> 22) return sp[0]; if (sp[1] >> 22) return sp[1]; #endif + +#endif } return pc; } diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 9692a5e9f..aea9fa5f8 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -140,6 +140,11 @@ int do_set_thread_area(struct task_struct *p, int idx, if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; +#ifdef CONFIG_PAX_SEGMEXEC + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) + return -EINVAL; +#endif + set_tls_desc(p, idx, &info, 1); /* @@ -298,7 +303,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, if (kbuf) info = kbuf; - else if (__copy_from_user(infobuf, ubuf, count)) + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count)) return -EFAULT; else info = infobuf; diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c index 1c113db9e..287b42e46 100644 --- a/arch/x86/kernel/tracepoint.c +++ b/arch/x86/kernel/tracepoint.c @@ -9,11 +9,11 @@ #include atomic_t trace_idt_ctr = ATOMIC_INIT(0); -struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1, +const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) trace_idt_table }; /* No need to be aligned, but done to keep all IDTs defined the same way. */ -gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss; +gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata; static int trace_irq_vector_refcount; static DEFINE_MUTEX(irq_vector_mutex); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index bd4e3d4d3..3e938e369 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -71,7 +71,7 @@ #include /* No need to be aligned, but done to keep all IDTs defined the same way. */ -gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss; +gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata; #else #include #include @@ -79,7 +79,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss; #endif /* Must be page-aligned because the real IDT is used in a fixmap. 
*/ -gate_desc idt_table[NR_VECTORS] __page_aligned_bss; +gate_desc idt_table[NR_VECTORS] __page_aligned_rodata; DECLARE_BITMAP(used_vectors, NR_VECTORS); EXPORT_SYMBOL_GPL(used_vectors); @@ -169,7 +169,7 @@ void ist_end_non_atomic(void) } static nokprobe_inline int -do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, +do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str, struct pt_regs *regs, long error_code) { if (v8086_mode(regs)) { @@ -189,8 +189,32 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, if (!fixup_exception(regs, trapnr)) { tsk->thread.error_code = error_code; tsk->thread.trap_nr = trapnr; + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)) + str = "PAX: suspicious stack segment fault"; +#endif + +#ifdef CONFIG_PAX_RAP + if (trapnr == X86_RAP_CALL_VECTOR) { + str = "PAX: overwritten function pointer detected"; + regs->ip -= 2; // sizeof int $xx + } else if (trapnr == X86_RAP_RET_VECTOR) { + str = "PAX: overwritten return address detected"; + regs->ip -= 2; // sizeof int $xx + } +#endif + die(str, regs, error_code); } + +#ifdef CONFIG_PAX_REFCOUNT + if (trapnr == X86_REFCOUNT_VECTOR) { + regs->ip -= 2; // sizeof int $xx + pax_report_refcount_error(regs, str); + } +#endif + return 0; } @@ -229,7 +253,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr, } static void -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs, long error_code, siginfo_t *info) { struct task_struct *tsk = current; @@ -252,7 +276,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, if (show_unhandled_signals && unhandled_signal(tsk, signr) && printk_ratelimit()) { pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", - tsk->comm, tsk->pid, str, + tsk->comm, task_pid_nr(tsk), str, regs->ip, regs->sp, error_code); print_vma_addr(" in ", regs->ip); pr_cont("\n"); @@ -262,7 +286,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, } NOKPROBE_SYMBOL(do_trap); -static void do_error_trap(struct pt_regs *regs, long error_code, char *str, +static void do_error_trap(struct pt_regs *regs, long error_code, const char *str, unsigned long trapnr, int signr) { siginfo_t info; @@ -292,7 +316,7 @@ DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) -#ifdef CONFIG_VMAP_STACK +#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) __visible void __noreturn handle_stack_overflow(const char *message, struct pt_regs *regs, unsigned long fault_address) @@ -307,13 +331,49 @@ __visible void __noreturn handle_stack_overflow(const char *message, } #endif +#ifdef CONFIG_PAX_REFCOUNT +extern char __refcount_overflow_start[], __refcount_overflow_end[]; +extern char __refcount64_overflow_start[], __refcount64_overflow_end[]; +extern char __refcount_underflow_start[], __refcount_underflow_end[]; +extern char __refcount64_underflow_start[], __refcount64_underflow_end[]; + +dotraplinkage void do_refcount_error(struct pt_regs *regs, long error_code) +{ + const char *str = NULL; + + BUG_ON(!(regs->flags & X86_EFLAGS_OF)); + +#define range_check(size, direction, type, value) \ + if ((unsigned long)__##size##_##direction##_start <= regs->ip && \ + regs->ip < 
(unsigned long)__##size##_##direction##_end) { \ + *(type *)regs->cx = value; \ + str = #size " " #direction; \ + } + + range_check(refcount, overflow, int, INT_MAX) + range_check(refcount64, overflow, long long, LLONG_MAX) + range_check(refcount, underflow, int, INT_MIN) + range_check(refcount64, underflow, long long, LLONG_MIN) + +#undef range_check + + BUG_ON(!str); + do_error_trap(regs, error_code, str, X86_REFCOUNT_VECTOR, SIGILL); +} +#endif + +#ifdef CONFIG_PAX_RAP +DO_ERROR(X86_RAP_CALL_VECTOR, SIGILL, "RAP call violation", rap_call_error) +DO_ERROR(X86_RAP_RET_VECTOR, SIGILL, "RAP return violation", rap_ret_error) +#endif + #ifdef CONFIG_X86_64 /* Runs on IST stack */ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) { static const char str[] = "double fault"; struct task_struct *tsk = current; -#ifdef CONFIG_VMAP_STACK +#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) unsigned long cr2; #endif @@ -350,7 +410,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_DF; -#ifdef CONFIG_VMAP_STACK +#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) /* * If we overflow the stack into a guard page, the CPU will fail * to deliver #PF and will send #DF instead. Similarly, if we @@ -390,8 +450,12 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) */ cr2 = read_cr2(); if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE) +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW + handle_stack_overflow("grsec: kernel stack overflow detected", regs, cr2); +#else handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2); #endif +#endif #ifdef CONFIG_DOUBLEFAULT df_debug(regs, error_code); @@ -505,11 +569,35 @@ do_general_protection(struct pt_regs *regs, long error_code) tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; if (notify_die(DIE_GPF, "general protection fault", regs, error_code, - X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) + X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) { + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS) + die("PAX: suspicious general protection fault", regs, error_code); + else +#endif + die("general protection fault", regs, error_code); + } return; } +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) { + struct mm_struct *mm = tsk->mm; + unsigned long limit; + + down_write(&mm->mmap_sem); + limit = mm->context.user_cs_limit; + if (limit < TASK_SIZE) { + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC); + up_write(&mm->mmap_sem); + return; + } + up_write(&mm->mmap_sem); + } +#endif + tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; @@ -607,6 +695,9 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) container_of(task_pt_regs(current), struct bad_iret_stack, regs); + if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE) + new_stack = s; + /* Copy the IRET target to the new stack. */ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); @@ -778,7 +869,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) * This is the most likely code path that involves non-trivial use * of the SYSENTER stack. Check that we haven't overrun it. 
*/ - WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC, + WARN(cpu_tss[raw_smp_processor_id()].SYSENTER_stack_canary != STACK_END_MAGIC, "Overran or corrupted SYSENTER stack\n"); #endif ist_exit(regs); @@ -908,7 +999,7 @@ void __init early_trap_init(void) * since we don't have trace_debug and it will be reset to * 'debug' in trap_init() by set_intr_gate_ist(). */ - set_intr_gate_notrace(X86_TRAP_DB, debug); + set_intr_gate_notrace(X86_TRAP_DB, int1); /* int3 can be called from all */ set_system_intr_gate(X86_TRAP_BP, &int3); #ifdef CONFIG_X86_32 @@ -975,6 +1066,19 @@ void __init trap_init(void) set_bit(IA32_SYSCALL_VECTOR, used_vectors); #endif +#ifdef CONFIG_PAX_REFCOUNT + set_intr_gate(X86_REFCOUNT_VECTOR, refcount_error); + set_bit(X86_REFCOUNT_VECTOR, used_vectors); +#endif + +#ifdef CONFIG_PAX_RAP + set_intr_gate(X86_RAP_CALL_VECTOR, rap_call_error); + set_bit(X86_RAP_CALL_VECTOR, used_vectors); + + set_intr_gate(X86_RAP_RET_VECTOR, rap_ret_error); + set_bit(X86_RAP_RET_VECTOR, used_vectors); +#endif + /* * Set the IDT descriptor to a fixed read-only location, so that the * "sidt" instruction will not leak the location of the kernel, and @@ -993,7 +1097,7 @@ void __init trap_init(void) * in early_trap_init(). However, ITS works only after * cpu_init() loads TSS. See comments in early_trap_init(). */ - set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); + set_intr_gate_ist(X86_TRAP_DB, &int1, DEBUG_STACK); /* int3 can be called from all */ set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); @@ -1001,7 +1105,7 @@ void __init trap_init(void) #ifdef CONFIG_X86_64 memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16); - set_nmi_gate(X86_TRAP_DB, &debug); + set_nmi_gate(X86_TRAP_DB, &int1); set_nmi_gate(X86_TRAP_BP, &int3); #endif } diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 46b2f41f8..666b35b84 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -24,6 +24,7 @@ #include #include #include +#include unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); @@ -158,7 +159,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data) */ smp_wmb(); - ACCESS_ONCE(c2n->head) = data; + ACCESS_ONCE_RW(c2n->head) = data; } /* @@ -289,7 +290,7 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu) /* * Scheduler clock - returns current time in nanosec units. 
*/ -u64 native_sched_clock(void) +unsigned long long native_sched_clock(void) { if (static_branch_likely(&__use_tsc)) { u64 tsc_now = rdtsc(); @@ -998,7 +999,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, } if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { + pax_open_kernel(); *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); + pax_close_kernel(); tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); if (!(freq->flags & CPUFREQ_CONST_LOOPS)) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 495c776de..c0427ef8f 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -287,7 +287,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool { u32 volatile *good_insns; - insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64); + insn_init(insn, (void *)ktva_ktla((unsigned long)auprobe->insn), sizeof(auprobe->insn), x86_64); /* has the side-effect of processing the entire instruction */ insn_get_length(insn); if (WARN_ON_ONCE(!insn_complete(insn))) @@ -978,7 +978,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs if (nleft != rasize) { pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, " - "%%ip=%#lx\n", current->pid, regs->sp, regs->ip); + "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip); force_sig_info(SIGSEGV, SEND_SIG_FORCED, current); } diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S index 014ea59aa..03cfe40e4 100644 --- a/arch/x86/kernel/verify_cpu.S +++ b/arch/x86/kernel/verify_cpu.S @@ -20,6 +20,7 @@ * arch/x86/boot/compressed/head_64.S: Boot cpu verification * arch/x86/kernel/trampoline_64.S: secondary processor verification * arch/x86/kernel/head_32.S: processor startup + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume * * verify_cpu, returns the status of longmode and SSE in register %eax. 
* 0: Success 1: Failure diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 01f30e56f..a304a4c76 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -144,7 +144,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) do_exit(SIGSEGV); } - tss = &per_cpu(cpu_tss, get_cpu()); + tss = cpu_tss + get_cpu(); tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; load_sp0(tss, &tsk->thread); @@ -176,10 +176,8 @@ static void mark_screen_rdonly(struct mm_struct *mm) goto out; pmd = pmd_offset(pud, 0xA0000); - if (pmd_trans_huge(*pmd)) { - struct vm_area_struct *vma = find_vma(mm, 0xA0000); - split_huge_pmd(vma, pmd, 0xA0000); - } + if (pmd_trans_huge(*pmd)) + split_huge_pmd(find_vma(mm, 0xA0000), pmd, 0xA0000); if (pmd_none_or_clear_bad(pmd)) goto out; pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); @@ -263,6 +261,13 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) return -EPERM; } +#ifdef CONFIG_GRKERNSEC_VM86 + if (!capable(CAP_SYS_RAWIO)) { + gr_handle_vm86(); + return -EPERM; + } +#endif + if (!vm86) { if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) return -ENOMEM; @@ -358,7 +363,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) vm86->saved_sp0 = tsk->thread.sp0; lazy_save_gs(vm86->regs32.gs); - tss = &per_cpu(cpu_tss, get_cpu()); + tss = cpu_tss + get_cpu(); /* make room for real-mode segments */ tsk->thread.sp0 += 16; @@ -538,7 +543,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i, goto cannot_handle; if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored)) goto cannot_handle; - intr_ptr = (unsigned long __user *) (i << 2); + intr_ptr = (unsigned long __force_user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) @@ -831,6 +836,14 @@ static inline int get_and_reset_irq(int irqnumber) static int do_vm86_irq_handling(int subfunction, int irqnumber) { int ret; + +#ifdef CONFIG_GRKERNSEC_VM86 + if (!capable(CAP_SYS_RAWIO)) { + gr_handle_vm86(); + return -EPERM; + } +#endif + switch (subfunction) { case VM86_GET_AND_RESET_IRQ: { return get_and_reset_irq(irqnumber); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index dbf67f64d..283643198 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -26,6 +26,13 @@ #include #include #include +#include + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) +#else +#define __KERNEL_TEXT_OFFSET 0 +#endif #undef i386 /* in case the preprocessor is a 32bit one */ @@ -68,30 +75,44 @@ jiffies_64 = jiffies; PHDRS { text PT_LOAD FLAGS(5); /* R_E */ +#ifdef CONFIG_X86_32 + module PT_LOAD FLAGS(5); /* R_E */ +#endif +#ifdef CONFIG_XEN + rodata PT_LOAD FLAGS(5); /* R_E */ +#else + rodata PT_LOAD FLAGS(4); /* R__ */ +#endif data PT_LOAD FLAGS(6); /* RW_ */ -#ifdef CONFIG_X86_64 + init.begin PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_SMP percpu PT_LOAD FLAGS(6); /* RW_ */ #endif - init PT_LOAD FLAGS(7); /* RWE */ -#endif + text.init PT_LOAD FLAGS(5); /* R_E */ + text.exit PT_LOAD FLAGS(5); /* R_E */ + init PT_LOAD FLAGS(6); /* RW_ */ note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { #ifdef CONFIG_X86_32 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; - phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET); + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR; #else . 
= __START_KERNEL; - phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET); #endif /* Text and read-only data */ - .text : AT(ADDR(.text) - LOAD_OFFSET) { - _text = .; + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { /* bootstrapping code */ +#ifdef CONFIG_X86_32 + phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET); + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; +#else + phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET); + __LOAD_PHYSICAL_ADDR = ABSOLUTE(. - LOAD_OFFSET + __KERNEL_TEXT_OFFSET); +#endif + _text = .; HEAD_TEXT . = ALIGN(8); _stext = .; @@ -105,13 +126,35 @@ SECTIONS SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) - /* End of text section */ - _etext = .; } :text = 0x9090 - NOTES :text :note + . += __KERNEL_TEXT_OFFSET; - EXCEPTION_TABLE(16) :text = 0x9090 +#ifdef CONFIG_X86_32 + . = ALIGN(PAGE_SIZE); + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) { + +#ifdef CONFIG_PAX_KERNEXEC + MODULES_EXEC_VADDR = .; + BYTE(0) + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024); + . = ALIGN(HPAGE_SIZE) - 1; + MODULES_EXEC_END = .; +#endif + + } :module +#endif + + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) { + /* End of text section */ + BYTE(0) + _etext = . - __KERNEL_TEXT_OFFSET; + } + + . = ALIGN(PAGE_SIZE); + NOTES :rodata :note + + EXCEPTION_TABLE(16) :rodata /* .text should occupy whole number of pages */ . = ALIGN(PAGE_SIZE); @@ -121,16 +164,20 @@ SECTIONS /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { + +#ifdef CONFIG_PAX_KERNEXEC + . = ALIGN(HPAGE_SIZE); +#else + . = ALIGN(PAGE_SIZE); +#endif + /* Start of data section */ _sdata = .; /* init_task */ INIT_TASK_DATA(THREAD_SIZE) -#ifdef CONFIG_X86_32 - /* 32 bit has nosave before _edata */ NOSAVE_DATA -#endif PAGE_ALIGNED_DATA(PAGE_SIZE) @@ -173,12 +220,19 @@ SECTIONS . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); /* Init code and data - will be freed after init */ - . = ALIGN(PAGE_SIZE); .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { + BYTE(0) + +#ifdef CONFIG_PAX_KERNEXEC + . = ALIGN(HPAGE_SIZE); +#else + . = ALIGN(PAGE_SIZE); +#endif + __init_begin = .; /* paired with __init_end */ - } + } :init.begin -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) +#ifdef CONFIG_SMP /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - .init.text - should @@ -189,10 +243,13 @@ SECTIONS "per-CPU data too large - increase CONFIG_PHYSICAL_START") #endif - INIT_TEXT_SECTION(PAGE_SIZE) -#ifdef CONFIG_X86_64 - :init -#endif + . = ALIGN(PAGE_SIZE); + init_begin = .; + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) { + VMLINUX_SYMBOL(_sinittext) = .; + INIT_TEXT + . = ALIGN(PAGE_SIZE); + } :text.init /* * Section for code used exclusively before alternatives are run. All @@ -201,11 +258,29 @@ SECTIONS * * See static_cpu_has() for an example. */ - .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) { + .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { *(.altinstr_aux) } - INIT_DATA_SECTION(16) + /* + * .exit.text is discard at runtime, not link time, to deal with + * references from .altinstructions and .eh_frame + */ + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + EXIT_TEXT + VMLINUX_SYMBOL(_einittext) = .; + +#ifdef CONFIG_PAX_KERNEXEC + . = ALIGN(HPAGE_SIZE); +#else + . = ALIGN(16); +#endif + + } :text.exit + . 
= init_begin + SIZEOF(.init.text) + SIZEOF(.altinstr_aux) + SIZEOF(.exit.text); + + . = ALIGN(PAGE_SIZE); + INIT_DATA_SECTION(16) :init .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { __x86_cpu_dev_start = .; @@ -276,19 +351,12 @@ SECTIONS } . = ALIGN(8); - /* - * .exit.text is discard at runtime, not link time, to deal with - * references from .altinstructions and .eh_frame - */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { - EXIT_TEXT - } .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) +#ifndef CONFIG_SMP PERCPU_SECTION(INTERNODE_CACHE_BYTES) #endif @@ -307,16 +375,10 @@ SECTIONS .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { __smp_locks = .; *(.smp_locks) - . = ALIGN(PAGE_SIZE); __smp_locks_end = .; + . = ALIGN(PAGE_SIZE); } -#ifdef CONFIG_X86_64 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { - NOSAVE_DATA - } -#endif - /* BSS */ . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { @@ -332,6 +394,7 @@ SECTIONS __brk_base = .; . += 64 * 1024; /* 64k alignment slop space */ *(.brk_reservation) /* areas brk users have reserved */ + . = ALIGN(HPAGE_SIZE); __brk_limit = .; } @@ -362,13 +425,12 @@ SECTIONS * for the boot processor. */ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load -INIT_PER_CPU(gdt_page); INIT_PER_CPU(irq_stack_union); /* * Build-time check on the image size: */ -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); #ifdef CONFIG_SMP diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index b034b1b14..61b3eefce 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -117,11 +117,13 @@ static void __init set_vsmp_pv_ops(void) if (cap & ctl & (1 << 4)) { /* Setup irq ops and turn on vSMP IRQ fastpath handling */ - pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable); - pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable); - pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl); - pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl); + pax_open_kernel(); + pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable, vsmp_irq_disable); + pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable, vsmp_irq_enable); + pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl, vsmp_save_fl); + pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl, vsmp_restore_fl); pv_init_ops.patch = vsmp_patch; + pax_close_kernel(); ctl &= ~(1 << 4); } writel(ctl, address + 4); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 9f676adcd..7024de419 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "x86.h" #include "tss.h" @@ -220,7 +221,7 @@ struct opcode { void (*fastop)(struct fastop *fake); } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); -}; +} __rap_hash; struct group_dual { struct opcode mod012[8]; @@ -425,25 +426,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); asm(".global kvm_fastop_exception \n" "kvm_fastop_exception: xor %esi, %esi; ret"); -FOP_START(setcc) -FOP_SETCC(seto) -FOP_SETCC(setno) -FOP_SETCC(setc) -FOP_SETCC(setnc) -FOP_SETCC(setz) -FOP_SETCC(setnz) -FOP_SETCC(setbe) -FOP_SETCC(setnbe) -FOP_SETCC(sets) -FOP_SETCC(setns) -FOP_SETCC(setp) -FOP_SETCC(setnp) -FOP_SETCC(setl) -FOP_SETCC(setnl) -FOP_SETCC(setle) -FOP_SETCC(setnle) -FOP_END; - FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET FOP_END; @@ 
-1006,14 +988,82 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt) return fastop(ctxt, em_bsr); } -static __always_inline u8 test_cc(unsigned int condition, unsigned long flags) +static unsigned int test_cc(unsigned int condition, unsigned int flags) { - u8 rc; - void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); + unsigned int rc; + + switch (condition & 0xf) { + case 0: + rc = flags & X86_EFLAGS_OF; + break; + + case 1: + rc = ~flags & X86_EFLAGS_OF; + break; + + case 2: + rc = flags & X86_EFLAGS_CF; + break; + + case 3: + rc = ~flags & X86_EFLAGS_CF; + break; + + case 4: + rc = flags & X86_EFLAGS_ZF; + break; + + case 5: + rc = ~flags & X86_EFLAGS_ZF; + break; + + case 6: + rc = flags & (X86_EFLAGS_CF | X86_EFLAGS_ZF); + break; + + case 7: + rc = (flags & (X86_EFLAGS_CF | X86_EFLAGS_ZF)) ? 0 : 1; + break; + + case 8: + rc = flags & X86_EFLAGS_SF; + break; + + case 9: + rc = ~flags & X86_EFLAGS_SF; + break; + + case 10: + rc = flags & X86_EFLAGS_PF; + break; + + case 11: + rc = ~flags & X86_EFLAGS_PF; + break; + + case 12: + rc = flags & X86_EFLAGS_SF; + rc ^= (flags & X86_EFLAGS_OF) >> (X86_EFLAGS_OF_BIT - X86_EFLAGS_SF_BIT); + break; + + case 13: + rc = ~flags & X86_EFLAGS_SF; + rc ^= (flags & X86_EFLAGS_OF) >> (X86_EFLAGS_OF_BIT - X86_EFLAGS_SF_BIT); + break; + + case 14: + rc = flags & X86_EFLAGS_SF; + rc ^= (flags & X86_EFLAGS_OF) >> (X86_EFLAGS_OF_BIT - X86_EFLAGS_SF_BIT); + rc |= flags & X86_EFLAGS_ZF; + break; + + case 15: + rc = ~flags & X86_EFLAGS_SF; + rc ^= (flags & X86_EFLAGS_OF) >> (X86_EFLAGS_OF_BIT - X86_EFLAGS_SF_BIT); + rc &= (~flags & X86_EFLAGS_ZF) << (X86_EFLAGS_SF_BIT - X86_EFLAGS_ZF_BIT); + break; + } - flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; - asm("push %[flags]; popf; call *%[fastop]" - : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); return rc; } @@ -1959,7 +2009,7 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt) static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; - unsigned long selector; + u16 selector; int rc; rc = emulate_pop(ctxt, &selector, 2); @@ -1971,7 +2021,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) if (ctxt->op_bytes > 2) rsp_increment(ctxt, ctxt->op_bytes - 2); - rc = load_segment_descriptor(ctxt, (u16)selector, seg); + rc = load_segment_descriptor(ctxt, selector, seg); return rc; } @@ -4059,7 +4109,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) int cr = ctxt->modrm_reg; u64 efer = 0; - static u64 cr_reserved_bits[] = { + static const u64 cr_reserved_bits[] = { 0xffffffff00000000ULL, 0, 0, 0, /* CR3 checked later */ CR4_RESERVED_BITS, @@ -5147,7 +5197,10 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) if (ctxt->d == 0) return EMULATION_FAILED; - ctxt->execute = opcode.u.execute; + if (ctxt->d & Fastop) + ctxt->u.fastop = opcode.u.fastop; + else + ctxt->u.execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; @@ -5286,7 +5339,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; - asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" + asm("push %[flags]; popf;" PAX_INDIRECT_CALL("*%[fastop]", "opcode.u.fastop") "; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), [fastop]"+S"(fop), "+r"(__sp) : "c"(ctxt->src2.val)); @@ -5454,15 +5507,14 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) else ctxt->eflags &= 
~X86_EFLAGS_RF; - if (ctxt->execute) { + if (ctxt->u.execute) { if (ctxt->d & Fastop) { - void (*fop)(struct fastop *) = (void *)ctxt->execute; - rc = fastop(ctxt, fop); + rc = fastop(ctxt, ctxt->u.fastop); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } - rc = ctxt->execute(ctxt); + rc = ctxt->u.execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 7cc2360f1..6ae123647 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -39,14 +39,14 @@ static void pic_irq_request(struct kvm *kvm, int level); +static void pic_lock(struct kvm_pic *s) __acquires(&s->lock); static void pic_lock(struct kvm_pic *s) - __acquires(&s->lock) { spin_lock(&s->lock); } +static void pic_unlock(struct kvm_pic *s) __releases(&s->lock); static void pic_unlock(struct kvm_pic *s) - __releases(&s->lock) { bool wakeup = s->wakeup_needed; struct kvm_vcpu *vcpu, *found = NULL; @@ -72,6 +72,7 @@ static void pic_unlock(struct kvm_pic *s) } } +static void pic_clear_isr(struct kvm_kpic_state *s, int irq) __must_hold(s->pics_state); static void pic_clear_isr(struct kvm_kpic_state *s, int irq) { s->isr &= ~(1 << irq); @@ -219,6 +220,7 @@ void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id) /* * acknowledge interrupt 'irq' */ +static inline void pic_intack(struct kvm_kpic_state *s, int irq) __must_hold(s); static inline void pic_intack(struct kvm_kpic_state *s, int irq) { s->isr |= 1 << irq; @@ -273,6 +275,7 @@ int kvm_pic_read_irq(struct kvm *kvm) return intno; } +void kvm_pic_reset(struct kvm_kpic_state *s) __must_hold(s); void kvm_pic_reset(struct kvm_kpic_state *s) { int irq, i; @@ -307,6 +310,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s) pic_clear_isr(s, irq); } +static void pic_ioport_write(void *opaque, u32 addr, u32 val) __must_hold(opaque); static void pic_ioport_write(void *opaque, u32 addr, u32 val) { struct kvm_kpic_state *s = opaque; @@ -400,6 +404,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) } } +static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1) __must_hold(s); static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1) { int ret; @@ -422,6 +427,7 @@ static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1) return ret; } +static u32 pic_ioport_read(void *opaque, u32 addr1) __must_hold(opaque); static u32 pic_ioport_read(void *opaque, u32 addr1) { struct kvm_kpic_state *s = opaque; diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 6e219e5c0..ccaf11560 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -415,6 +415,8 @@ static void kvm_ioapic_eoi_inject_work(struct work_struct *work) #define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000 static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, + struct kvm_ioapic *ioapic, int vector, int trigger_mode) __must_hold(&ioapic->lock); +static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, struct kvm_ioapic *ioapic, int vector, int trigger_mode) { struct dest_map *dest_map = &ioapic->rtc_status.dest_map; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3f05c0447..b79e6eaba 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -57,7 +57,7 @@ #define APIC_BUS_CYCLE_NS 1 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ -#define apic_debug(fmt, arg...) +#define apic_debug(fmt, arg...) 
do {} while (0) /* 14 is the version for Xeon and Pentium 8.4.8*/ #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16)) diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index a01105485..da14b4785 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -355,7 +355,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, if (unlikely(kvm_is_error_hva(host_addr))) goto error; - ptep_user = (pt_element_t __user *)((void *)host_addr + offset); + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset); if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) goto error; walker->ptep_user[walker->level - 1] = ptep_user; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 8ca1eca50..fff3699bb 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4253,7 +4253,11 @@ static void reload_tss(struct kvm_vcpu *vcpu) int cpu = raw_smp_processor_id(); struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + + pax_open_kernel(); sd->tss_desc->type = 9; /* available 32/64-bit TSS */ + pax_close_kernel(); + load_TR_desc(); } @@ -4895,6 +4899,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif #endif +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + __set_fs(current->thread.addr_limit); +#endif + reload_tss(vcpu); local_irq_disable(); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 64774f419..4888061e9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1675,14 +1675,14 @@ static __always_inline void vmcs_writel(unsigned long field, unsigned long value __vmcs_writel(field, value); } -static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask) +static __always_inline void vmcs_clear_bits(unsigned long field, unsigned long mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_clear_bits does not support 64-bit fields"); __vmcs_writel(field, __vmcs_readl(field) & ~mask); } -static __always_inline void vmcs_set_bits(unsigned long field, u32 mask) +static __always_inline void vmcs_set_bits(unsigned long field, unsigned long mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_set_bits does not support 64-bit fields"); @@ -1961,7 +1961,11 @@ static void reload_tss(void) struct desc_struct *descs; descs = (void *)gdt->address; + + pax_open_kernel(); descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ + pax_close_kernel(); + load_TR_desc(); } @@ -2265,6 +2269,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */ +#ifdef CONFIG_PAX_PER_CPU_PGD + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ +#endif + rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ @@ -2596,7 +2604,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 */ -static u64 guest_read_tsc(struct kvm_vcpu *vcpu) +static u64 __intentional_overflow(-1) guest_read_tsc(struct kvm_vcpu *vcpu) { u64 host_tsc, tsc_offset; @@ -4848,7 +4856,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) unsigned long cr4; vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ + +#ifndef CONFIG_PAX_PER_CPU_PGD vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ +#endif /* Save the most likely value 
for this task's CR4 in the VMCS. */ cr4 = cr4_read_shadow(); @@ -4875,7 +4886,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ vmx->host_idt_base = dt.address; - vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */ + vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32); @@ -6447,11 +6458,17 @@ static __init int hardware_setup(void) * page upon invalidation. No need to do anything if not * using the APIC_ACCESS_ADDR VMCS field. */ - if (!flexpriority_enabled) + if (!flexpriority_enabled) { + pax_open_kernel(); kvm_x86_ops->set_apic_access_page_addr = NULL; + pax_close_kernel(); + } - if (!cpu_has_vmx_tpr_shadow()) + if (!cpu_has_vmx_tpr_shadow()) { + pax_open_kernel(); kvm_x86_ops->update_cr8_intercept = NULL; + pax_close_kernel(); + } if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); @@ -6532,10 +6549,12 @@ static __init int hardware_setup(void) enable_pml = 0; if (!enable_pml) { + pax_open_kernel(); kvm_x86_ops->slot_enable_log_dirty = NULL; kvm_x86_ops->slot_disable_log_dirty = NULL; kvm_x86_ops->flush_log_dirty = NULL; kvm_x86_ops->enable_log_dirty_pt_masked = NULL; + pax_close_kernel(); } if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { @@ -8928,6 +8947,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "jmp 2f \n\t" "1: " __ex(ASM_VMX_VMRESUME) "\n\t" "2: " + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + "ljmp %[cs],$3f\n\t" + "3: " +#endif + /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" @@ -8980,6 +9005,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), [wordsize]"i"(sizeof(ulong)) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + ,[cs]"i"(__KERNEL_CS) +#endif + : "cc", "memory" #ifdef CONFIG_X86_64 , "rax", "rbx", "rdi", "rsi" @@ -8993,7 +9023,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) if (debugctlmsr) update_debugctlmsr(debugctlmsr); -#ifndef CONFIG_X86_64 +#ifdef CONFIG_X86_32 /* * The sysexit path does not restore ds/es, so we must set them to * a reasonable value ourselves. @@ -9002,8 +9032,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) * may be executed in interrupt context, which saves and restore segments * around it, nullifying its effect. */ - loadsegment(ds, __USER_DS); - loadsegment(es, __USER_DS); + loadsegment(ds, __KERNEL_DS); + loadsegment(es, __KERNEL_DS); + loadsegment(ss, __KERNEL_DS); + +#ifdef CONFIG_PAX_KERNEXEC + loadsegment(fs, __KERNEL_PERCPU); +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(current->thread.addr_limit); +#endif + #endif vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 731044efb..399463dbd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2005,8 +2005,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; @@ -2716,6 +2716,8 @@ long kvm_arch_dev_ioctl(struct file *filp, if (n < msr_list.nmsrs) goto out; r = -EFAULT; + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save)) + goto out; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; @@ -3137,7 +3139,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) { - struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; + struct xregs_state *xsave = &vcpu->arch.guest_fpu.state->xsave; u64 xstate_bv = xsave->header.xfeatures; u64 valid; @@ -3174,7 +3176,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) { - struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; + struct xregs_state *xsave = &vcpu->arch.guest_fpu.state->xsave; u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET); u64 valid; @@ -3218,7 +3220,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, fill_xsave((u8 *) guest_xsave->region, vcpu); } else { memcpy(guest_xsave->region, - &vcpu->arch.guest_fpu.state.fxsave, + &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct fxregs_state)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XFEATURE_MASK_FPSSE; @@ -3243,7 +3245,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, } else { if (xstate_bv & ~XFEATURE_MASK_FPSSE) return -EINVAL; - memcpy(&vcpu->arch.guest_fpu.state.fxsave, + memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct fxregs_state)); } return 0; @@ -5807,7 +5809,7 @@ static unsigned long kvm_get_guest_ip(void) unsigned long ip = 0; if (__this_cpu_read(current_vcpu)) - ip = kvm_rip_read(__this_cpu_read(current_vcpu)); + ip = kvm_get_linear_rip(__this_cpu_read(current_vcpu)); return ip; } @@ -6531,6 +6533,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, * exiting to the userspace. Otherwise, the value will be returned to the * userspace. 
*/ +static int vcpu_enter_guest(struct kvm_vcpu *vcpu) __must_hold(&vcpu->kvm->srcu); static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; @@ -6805,6 +6808,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) return r; } +static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) __must_hold(&kvm->srcu); static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) { if (!kvm_arch_vcpu_runnable(vcpu) && @@ -7352,7 +7356,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct fxregs_state *fxsave = - &vcpu->arch.guest_fpu.state.fxsave; + &vcpu->arch.guest_fpu.state->fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; @@ -7369,7 +7373,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct fxregs_state *fxsave = - &vcpu->arch.guest_fpu.state.fxsave; + &vcpu->arch.guest_fpu.state->fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; @@ -7385,9 +7389,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) static void fx_init(struct kvm_vcpu *vcpu) { - fpstate_init(&vcpu->arch.guest_fpu.state); + fpstate_init(vcpu->arch.guest_fpu.state); if (boot_cpu_has(X86_FEATURE_XSAVES)) - vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv = + vcpu->arch.guest_fpu.state->xsave.header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; /* @@ -7410,7 +7414,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) */ vcpu->guest_fpu_loaded = 1; __kernel_fpu_begin(); - __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); + __copy_kernel_to_fpregs(vcpu->arch.guest_fpu.state); trace_kvm_fpu(1); } @@ -7710,6 +7714,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) struct static_key kvm_no_apic_vcpu __read_mostly; EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu); +extern struct kmem_cache *fpregs_state_cachep; + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; @@ -7727,11 +7733,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) { - r = -ENOMEM; + r = -ENOMEM; + vcpu->arch.guest_fpu.state = kmem_cache_alloc(fpregs_state_cachep, GFP_KERNEL); + if (!vcpu->arch.guest_fpu.state) goto fail; - } + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto fail_free_fpregs; vcpu->arch.pio_data = page_address(page); kvm_set_tsc_khz(vcpu, max_tsc_khz); @@ -7789,6 +7798,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); +fail_free_fpregs: + kmem_cache_free(fpregs_state_cachep, vcpu->arch.guest_fpu.state); + vcpu->arch.guest_fpu.state = NULL; fail: return r; } @@ -7807,6 +7819,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->arch.pio_data); if (!lapic_in_kernel(vcpu)) static_key_slow_dec(&kvm_no_apic_vcpu); + kmem_cache_free(fpregs_state_cachep, vcpu->arch.guest_fpu.state); + vcpu->arch.guest_fpu.state = NULL; } void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 25da5bc8d..235cb8819 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -262,7 +262,7 @@ PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable); /*:*/ /* These are in head_32.S */ -extern void lg_irq_enable(void); +extern asmlinkage void lg_irq_enable(void); extern 
void lg_restore_fl(unsigned long flags); /*M:003 @@ -1329,9 +1329,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) * Rebooting also tells the Host we're finished, but the RESTART flag tells the * Launcher to reboot us. */ -static void lguest_restart(char *reason) +static __noreturn void lguest_restart(char *reason) { hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0); + BUG(); } /*G:050 @@ -1392,6 +1393,11 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, return insn_len; } +#ifdef CONFIG_PAX_RAP +PV_CALLEE_SAVE_REGS_THUNK(lg_restore_fl); +PV_CALLEE_SAVE_REGS_THUNK(lg_irq_enable); +#endif + /*G:029 * Once we get to lguest_init(), we know we're a Guest. The various * pv_ops structures in the kernel provide points for (almost) every routine we @@ -1412,10 +1418,10 @@ __init void lguest_init(void) */ /* Interrupt-related operations */ - pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl); - pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl); - pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable); - pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable); + pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl, lguest_save_fl); + pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(restore_fl, lg_restore_fl); + pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable, lguest_irq_disable); + pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(irq_enable, lg_irq_enable); pv_irq_ops.safe_halt = lguest_safe_halt; /* Setup operations */ diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 34a74131a..375b9b1f3 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -29,6 +29,10 @@ lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o ifeq ($(CONFIG_X86_32),y) + GCC_PLUGINS_CFLAGS_strstr_32.o += $(INITIFY_DISABLE_VERIFY_NOCAPTURE_FUNCTIONS) + GCC_PLUGINS_CFLAGS_string_32.o += $(INITIFY_DISABLE_VERIFY_NOCAPTURE_FUNCTIONS) + GCC_PLUGINS_CFLAGS_memcpy_32.o += $(INITIFY_DISABLE_VERIFY_NOCAPTURE_FUNCTIONS) + obj-y += atomic64_32.o lib-y += atomic64_cx8_32.o lib-y += checksum_32.o diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S index 9b0ca8fe8..390af89f7 100644 --- a/arch/x86/lib/atomic64_386_32.S +++ b/arch/x86/lib/atomic64_386_32.S @@ -10,6 +10,7 @@ */ #include +#include #include /* if you want SMP support, implement these with real spinlocks */ @@ -32,26 +33,34 @@ ENTRY(atomic64_##op##_386); \ #define ENDP endp -#define RET \ +#define RET(op) \ UNLOCK v; \ - ret + pax_ret atomic64_##op##_386 -#define RET_ENDP \ - RET; \ +#define RET_ENDP(op) \ + RET(op); \ ENDP #define v %ecx BEGIN(read) movl (v), %eax movl 4(v), %edx -RET_ENDP +RET_ENDP(read) +BEGIN(read_unchecked) + movl (v), %eax + movl 4(v), %edx +RET_ENDP(read_unchecked) #undef v #define v %esi BEGIN(set) movl %ebx, (v) movl %ecx, 4(v) -RET_ENDP +RET_ENDP(set) +BEGIN(set_unchecked) + movl %ebx, (v) + movl %ecx, 4(v) +RET_ENDP(set_unchecked) #undef v #define v %esi @@ -60,30 +69,51 @@ BEGIN(xchg) movl 4(v), %edx movl %ebx, (v) movl %ecx, 4(v) -RET_ENDP +RET_ENDP(xchg) #undef v #define v %ecx BEGIN(add) addl %eax, (v) adcl %edx, 4(v) -RET_ENDP + + PAX_REFCOUNT64_OVERFLOW (v) +RET_ENDP(add) +BEGIN(add_unchecked) + addl %eax, (v) + adcl %edx, 4(v) +RET_ENDP(add_unchecked) #undef v #define v %ecx BEGIN(add_return) addl (v), %eax adcl 4(v), %edx + movl %eax, (v) movl %edx, 4(v) -RET_ENDP + + PAX_REFCOUNT64_OVERFLOW (v) +RET_ENDP(add_return) +BEGIN(add_return_unchecked) + addl (v), %eax + adcl 4(v), %edx + movl %eax, 
(v) + movl %edx, 4(v) +RET_ENDP(add_return_unchecked) #undef v #define v %ecx BEGIN(sub) subl %eax, (v) sbbl %edx, 4(v) -RET_ENDP + + PAX_REFCOUNT64_UNDERFLOW (v) +RET_ENDP(sub) +BEGIN(sub_unchecked) + subl %eax, (v) + sbbl %edx, 4(v) +RET_ENDP(sub_unchecked) #undef v #define v %ecx @@ -93,16 +123,34 @@ BEGIN(sub_return) sbbl $0, %edx addl (v), %eax adcl 4(v), %edx + movl %eax, (v) movl %edx, 4(v) -RET_ENDP + + PAX_REFCOUNT64_UNDERFLOW (v) +RET_ENDP(sub_return) +BEGIN(sub_return_unchecked) + negl %edx + negl %eax + sbbl $0, %edx + addl (v), %eax + adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) +RET_ENDP(sub_return_unchecked) #undef v #define v %esi BEGIN(inc) addl $1, (v) adcl $0, 4(v) -RET_ENDP + + PAX_REFCOUNT64_OVERFLOW (v) +RET_ENDP(inc) +BEGIN(inc_unchecked) + addl $1, (v) + adcl $0, 4(v) +RET_ENDP(inc_unchecked) #undef v #define v %esi @@ -111,16 +159,33 @@ BEGIN(inc_return) movl 4(v), %edx addl $1, %eax adcl $0, %edx + movl %eax, (v) movl %edx, 4(v) -RET_ENDP + + PAX_REFCOUNT64_OVERFLOW (v) +RET_ENDP(inc_return) +BEGIN(inc_return_unchecked) + movl (v), %eax + movl 4(v), %edx + addl $1, %eax + adcl $0, %edx + movl %eax, (v) + movl %edx, 4(v) +RET_ENDP(inc_return_unchecked) #undef v #define v %esi BEGIN(dec) subl $1, (v) sbbl $0, 4(v) -RET_ENDP + + PAX_REFCOUNT64_UNDERFLOW (v) +RET_ENDP(dec) +BEGIN(dec_unchecked) + subl $1, (v) + sbbl $0, 4(v) +RET_ENDP(dec_unchecked) #undef v #define v %esi @@ -129,9 +194,20 @@ BEGIN(dec_return) movl 4(v), %edx subl $1, %eax sbbl $0, %edx + + movl %eax, (v) + movl %edx, 4(v) + + PAX_REFCOUNT64_UNDERFLOW (v) +RET_ENDP(dec_return) +BEGIN(dec_return_unchecked) + movl (v), %eax + movl 4(v), %edx + subl $1, %eax + sbbl $0, %edx movl %eax, (v) movl %edx, 4(v) -RET_ENDP +RET_ENDP(dec_return_unchecked) #undef v #define v %esi @@ -140,6 +216,9 @@ BEGIN(add_unless) adcl %edx, %edi addl (v), %eax adcl 4(v), %edx + + PAX_REFCOUNT64_OVERFLOW (v) + cmpl %eax, %ecx je 3f 1: @@ -147,7 +226,7 @@ BEGIN(add_unless) movl %edx, 4(v) movl $1, %eax 2: - RET + RET(add_unless) 3: cmpl %edx, %edi jne 1b @@ -165,11 +244,14 @@ BEGIN(inc_not_zero) 1: addl $1, %eax adcl $0, %edx + + PAX_REFCOUNT64_OVERFLOW (v) + movl %eax, (v) movl %edx, 4(v) movl $1, %eax 2: - RET + RET(inc_not_zero) 3: testl %edx, %edx jne 1b @@ -183,9 +265,12 @@ BEGIN(dec_if_positive) movl 4(v), %edx subl $1, %eax sbbl $0, %edx + + PAX_REFCOUNT64_UNDERFLOW (v) + js 1f movl %eax, (v) movl %edx, 4(v) 1: -RET_ENDP +RET_ENDP(dec_if_positive) #undef v diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index db3ae8544..5e266efa3 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -10,6 +10,7 @@ */ #include +#include #include .macro read64 reg @@ -22,9 +23,14 @@ ENTRY(atomic64_read_cx8) read64 %ecx - ret + pax_ret atomic64_read ENDPROC(atomic64_read_cx8) +ENTRY(atomic64_read_unchecked_cx8) + read64 %ecx + pax_ret atomic64_read_unchecked +ENDPROC(atomic64_read_unchecked_cx8) + ENTRY(atomic64_set_cx8) 1: /* we don't need LOCK_PREFIX since aligned 64-bit writes @@ -32,20 +38,30 @@ ENTRY(atomic64_set_cx8) cmpxchg8b (%esi) jne 1b - ret + pax_ret atomic64_set ENDPROC(atomic64_set_cx8) +ENTRY(atomic64_set_unchecked_cx8) +1: +/* we don't need LOCK_PREFIX since aligned 64-bit writes + * are atomic on 586 and newer */ + cmpxchg8b (%esi) + jne 1b + + pax_ret atomic64_set_unchecked +ENDPROC(atomic64_set_unchecked_cx8) + ENTRY(atomic64_xchg_cx8) 1: LOCK_PREFIX cmpxchg8b (%esi) jne 1b - ret + pax_ret atomic64_xchg ENDPROC(atomic64_xchg_cx8) -.macro addsub_return func 
ins insc -ENTRY(atomic64_\func\()_return_cx8) +.macro addsub_return func ins insc unchecked="" +ENTRY(atomic64_\func\()_return\unchecked\()_cx8) pushl %ebp pushl %ebx pushl %esi @@ -61,26 +77,36 @@ ENTRY(atomic64_\func\()_return_cx8) movl %edx, %ecx \ins\()l %esi, %ebx \insc\()l %edi, %ecx + +.ifb \unchecked +.if \func == add + PAX_REFCOUNT64_OVERFLOW (%ebp) +.else + PAX_REFCOUNT64_UNDERFLOW (%ebp) +.endif +.endif + LOCK_PREFIX cmpxchg8b (%ebp) jne 1b - -10: movl %ebx, %eax movl %ecx, %edx + popl %edi popl %esi popl %ebx popl %ebp - ret -ENDPROC(atomic64_\func\()_return_cx8) + pax_ret atomic64_\func\()_return\unchecked +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8) .endm addsub_return add add adc addsub_return sub sub sbb +addsub_return add add adc _unchecked +addsub_return sub sub sbb _unchecked -.macro incdec_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) +.macro incdec_return func ins insc unchecked="" +ENTRY(atomic64_\func\()_return\unchecked\()_cx8) pushl %ebx read64 %esi @@ -89,20 +115,30 @@ ENTRY(atomic64_\func\()_return_cx8) movl %edx, %ecx \ins\()l $1, %ebx \insc\()l $0, %ecx + +.ifb \unchecked +.if \func == inc + PAX_REFCOUNT64_OVERFLOW (%esi) +.else + PAX_REFCOUNT64_UNDERFLOW (%esi) +.endif +.endif + LOCK_PREFIX cmpxchg8b (%esi) jne 1b - -10: movl %ebx, %eax movl %ecx, %edx + popl %ebx - ret -ENDPROC(atomic64_\func\()_return_cx8) + pax_ret atomic64_\func\()_return\unchecked +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8) .endm incdec_return inc add adc incdec_return dec sub sbb +incdec_return inc add adc _unchecked +incdec_return dec sub sbb _unchecked ENTRY(atomic64_dec_if_positive_cx8) pushl %ebx @@ -113,6 +149,9 @@ ENTRY(atomic64_dec_if_positive_cx8) movl %edx, %ecx subl $1, %ebx sbb $0, %ecx + + PAX_REFCOUNT64_UNDERFLOW (%esi) + js 2f LOCK_PREFIX cmpxchg8b (%esi) @@ -122,7 +161,7 @@ ENTRY(atomic64_dec_if_positive_cx8) movl %ebx, %eax movl %ecx, %edx popl %ebx - ret + pax_ret atomic64_dec_if_positive ENDPROC(atomic64_dec_if_positive_cx8) ENTRY(atomic64_add_unless_cx8) @@ -144,6 +183,9 @@ ENTRY(atomic64_add_unless_cx8) movl %edx, %ecx addl %ebp, %ebx adcl %edi, %ecx + + PAX_REFCOUNT64_OVERFLOW (%esi) + LOCK_PREFIX cmpxchg8b (%esi) jne 1b @@ -153,7 +195,7 @@ ENTRY(atomic64_add_unless_cx8) addl $8, %esp popl %ebx popl %ebp - ret + pax_ret atomic64_add_unless 4: cmpl %edx, 4(%esp) jne 2b @@ -173,6 +215,9 @@ ENTRY(atomic64_inc_not_zero_cx8) xorl %ecx, %ecx addl $1, %ebx adcl %edx, %ecx + + PAX_REFCOUNT64_OVERFLOW (%esi) + LOCK_PREFIX cmpxchg8b (%esi) jne 1b @@ -180,5 +225,5 @@ ENTRY(atomic64_inc_not_zero_cx8) movl $1, %eax 3: popl %ebx - ret + pax_ret atomic64_inc_not_zero ENDPROC(atomic64_inc_not_zero_cx8) diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 4d34bb548..bccd7fc1f 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -29,7 +29,9 @@ #include #include #include - +#include +#include + /* * computes a partial checksum, e.g. 
for TCP/UDP fragments
 */
@@ -130,7 +132,7 @@ ENTRY(csum_partial)
 8:
 popl %ebx
 popl %esi
- ret
+ pax_ret csum_partial
 ENDPROC(csum_partial)
 #else
@@ -248,7 +250,7 @@ ENTRY(csum_partial)
 90:
 popl %ebx
 popl %esi
- ret
+ pax_ret csum_partial
 ENDPROC(csum_partial)
 #endif
@@ -282,7 +284,22 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 #define ARGBASE 16
 #define FP 12
-
+
+ENTRY(csum_partial_copy_generic_to_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ pushl %gs
+ popl %es
+ jmp csum_partial_copy_generic
+#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ pushl %gs
+ popl %ds
+#endif
+
 ENTRY(csum_partial_copy_generic)
 subl $4,%esp
 pushl %edi
@@ -301,7 +318,7 @@ ENTRY(csum_partial_copy_generic)
 jmp 4f
 SRC(1: movw (%esi), %bx )
 addl $2, %esi
-DST( movw %bx, (%edi) )
+DST( movw %bx, %es:(%edi) )
 addl $2, %edi
 addw %bx, %ax
 adcl $0, %eax
@@ -313,30 +330,30 @@ DST( movw %bx, (%edi) )
 SRC(1: movl (%esi), %ebx )
 SRC( movl 4(%esi), %edx )
 adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
+DST( movl %ebx, %es:(%edi) )
 adcl %edx, %eax
-DST( movl %edx, 4(%edi) )
+DST( movl %edx, %es:4(%edi) )
 SRC( movl 8(%esi), %ebx )
 SRC( movl 12(%esi), %edx )
 adcl %ebx, %eax
-DST( movl %ebx, 8(%edi) )
+DST( movl %ebx, %es:8(%edi) )
 adcl %edx, %eax
-DST( movl %edx, 12(%edi) )
+DST( movl %edx, %es:12(%edi) )
 SRC( movl 16(%esi), %ebx )
 SRC( movl 20(%esi), %edx )
 adcl %ebx, %eax
-DST( movl %ebx, 16(%edi) )
+DST( movl %ebx, %es:16(%edi) )
 adcl %edx, %eax
-DST( movl %edx, 20(%edi) )
+DST( movl %edx, %es:20(%edi) )
 SRC( movl 24(%esi), %ebx )
 SRC( movl 28(%esi), %edx )
 adcl %ebx, %eax
-DST( movl %ebx, 24(%edi) )
+DST( movl %ebx, %es:24(%edi) )
 adcl %edx, %eax
-DST( movl %edx, 28(%edi) )
+DST( movl %edx, %es:28(%edi) )
 lea 32(%esi), %esi
 lea 32(%edi), %edi
@@ -350,7 +367,7 @@ DST( movl %edx, 28(%edi) )
 shrl $2, %edx # This clears CF
 SRC(3: movl (%esi), %ebx )
 adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
+DST( movl %ebx, %es:(%edi) )
 lea 4(%esi), %esi
 lea 4(%edi), %edi
 dec %edx
@@ -362,12 +379,12 @@ DST( movl %ebx, (%edi) )
 jb 5f
 SRC( movw (%esi), %cx )
 leal 2(%esi), %esi
-DST( movw %cx, (%edi) )
+DST( movw %cx, %es:(%edi) )
 leal 2(%edi), %edi
 je 6f
 shll $16,%ecx
 SRC(5: movb (%esi), %cl )
-DST( movb %cl, (%edi) )
+DST( movb %cl, %es:(%edi) )
 6: addl %ecx, %eax
 adcl $0, %eax
 7:
@@ -378,7 +395,7 @@ DST( movb %cl, (%edi) )
 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
+ movl $-EFAULT, %ss:(%ebx)
 # zero the complete destination - computing the rest
 # is too much work
@@ -391,34 +408,58 @@ DST( movb %cl, (%edi) )
 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT,(%ebx)
+ movl $-EFAULT,%ss:(%ebx)
 jmp 5000b
 .previous
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ pushl %ss
+ popl %ds
+ pushl %ss
+ popl %es
+#endif
+
 popl %ebx
 popl %esi
 popl %edi
 popl %ecx # equivalent to addl $4,%esp
- ret
-ENDPROC(csum_partial_copy_generic)
+ pax_ret csum_partial_copy_generic_to_user
+ENDPROC(csum_partial_copy_generic_to_user)
 #else
 /* Version for PentiumII/PPro */
 #define ROUND1(x) \
+ nop; nop; nop; \
 SRC(movl x(%esi), %ebx ) ; \
 addl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
+ DST(movl %ebx, %es:x(%edi)) ;
 #define ROUND(x) \
+ nop; nop; nop; \
 SRC(movl x(%esi), %ebx ) ; \
 adcl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
+ DST(movl %ebx, %es:x(%edi)) ;
 #define ARGBASE 12
-
+
+ENTRY(csum_partial_copy_generic_to_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ pushl %gs
+ popl %es
+ jmp csum_partial_copy_generic
+#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ pushl %gs
+ popl %ds
+#endif
+
 ENTRY(csum_partial_copy_generic)
 pushl %ebx
 pushl %edi
@@ -437,7 +478,7 @@ ENTRY(csum_partial_copy_generic)
 subl %ebx, %edi
 lea -1(%esi),%edx
 andl $-32,%edx
- lea 3f(%ebx,%ebx), %ebx
+ lea 3f(%ebx,%ebx,2), %ebx
 testl %esi, %esi
 jmp *%ebx
 1: addl $64,%esi
@@ -458,19 +499,19 @@ ENTRY(csum_partial_copy_generic)
 jb 5f
 SRC( movw (%esi), %dx )
 leal 2(%esi), %esi
-DST( movw %dx, (%edi) )
+DST( movw %dx, %es:(%edi) )
 leal 2(%edi), %edi
 je 6f
 shll $16,%edx
 5: SRC( movb (%esi), %dl )
-DST( movb %dl, (%edi) )
+DST( movb %dl, %es:(%edi) )
 6: addl %edx, %eax
 adcl $0, %eax
 7:
 .section .fixup, "ax"
 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
+ movl $-EFAULT, %ss:(%ebx)
 # zero the complete destination (computing the rest is too much work)
 movl ARGBASE+8(%esp),%edi # dst
 movl ARGBASE+12(%esp),%ecx # len
@@ -478,18 +519,27 @@ DST( movb %dl, (%edi) )
 rep; stosb
 jmp 7b
 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT, (%ebx)
+ movl $-EFAULT, %ss:(%ebx)
 jmp 7b
 .previous
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ pushl %ss
+ popl %ds
+ pushl %ss
+ popl %es
+#endif
+
 popl %esi
 popl %edi
 popl %ebx
- ret
-ENDPROC(csum_partial_copy_generic)
+ pax_ret csum_partial_copy_generic_to_user
+ENDPROC(csum_partial_copy_generic_to_user)
 #undef ROUND
 #undef ROUND1
 #endif
 EXPORT_SYMBOL(csum_partial_copy_generic)
+EXPORT_SYMBOL(csum_partial_copy_generic_to_user)
+EXPORT_SYMBOL(csum_partial_copy_generic_from_user)
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 5e2af3a88..5fb2fdb3d 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -22,7 +22,7 @@ ENTRY(clear_page)
 movl $4096/8,%ecx
 xorl %eax,%eax
 rep stosq
- ret
+ pax_ret clear_page
 ENDPROC(clear_page)
 EXPORT_SYMBOL(clear_page)
@@ -45,12 +45,12 @@ ENTRY(clear_page_orig)
 leaq 64(%rdi),%rdi
 jnz .Lloop
 nop
- ret
+ pax_ret clear_page
 ENDPROC(clear_page_orig)
 ENTRY(clear_page_c_e)
 movl $4096,%ecx
 xorl %eax,%eax
 rep stosb
- ret
+ pax_ret clear_page
 ENDPROC(clear_page_c_e)
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 9b330242e..cdbc7251b 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -7,6 +7,7 @@
 */
 #include
 #include
+#include
 .text
@@ -43,11 +44,11 @@ ENTRY(this_cpu_cmpxchg16b_emu)
 popfq
 mov $1, %al
- ret
+ pax_ret this_cpu_cmpxchg16b_emu
 .Lnot_same:
 popfq
 xor %al,%al
- ret
+ pax_ret this_cpu_cmpxchg16b_emu
 ENDPROC(this_cpu_cmpxchg16b_emu)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index 03a186fc0..6793bf299 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -8,6 +8,7 @@
 #include
 #include
+#include
 .text
@@ -38,7 +39,7 @@ ENTRY(cmpxchg8b_emu)
 movl %ecx, 4(%esi)
 popfl
- ret
+ pax_ret cmpxchg8b_emu
 .Lnot_same:
 movl (%esi), %eax
@@ -46,7 +47,7 @@ ENTRY(cmpxchg8b_emu)
 movl 4(%esi), %edx
 popfl
- ret
+ pax_ret cmpxchg8b_emu
 ENDPROC(cmpxchg8b_emu)
 EXPORT_SYMBOL(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index e8508156c..65619e558 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -16,14 +16,14 @@ ENTRY(copy_page)
 ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
 movl $4096/8, %ecx
 rep movsq
- ret
+ pax_ret copy_page
 ENDPROC(copy_page)
 EXPORT_SYMBOL(copy_page)
 ENTRY(copy_page_regs)
 subq $2*8, %rsp
 movq %rbx, (%rsp)
- movq %r12, 1*8(%rsp)
+ movq %r13, 1*8(%rsp)
 movl $(4096/64)-5, %ecx
 .p2align 4
@@ -36,7 +36,7 @@ ENTRY(copy_page_regs)
movq 0x8*4(%rsi), %r9 movq 0x8*5(%rsi), %r10 movq 0x8*6(%rsi), %r11 - movq 0x8*7(%rsi), %r12 + movq 0x8*7(%rsi), %r13 prefetcht0 5*64(%rsi) @@ -47,7 +47,7 @@ ENTRY(copy_page_regs) movq %r9, 0x8*4(%rdi) movq %r10, 0x8*5(%rdi) movq %r11, 0x8*6(%rdi) - movq %r12, 0x8*7(%rdi) + movq %r13, 0x8*7(%rdi) leaq 64 (%rsi), %rsi leaq 64 (%rdi), %rdi @@ -66,7 +66,7 @@ ENTRY(copy_page_regs) movq 0x8*4(%rsi), %r9 movq 0x8*5(%rsi), %r10 movq 0x8*6(%rsi), %r11 - movq 0x8*7(%rsi), %r12 + movq 0x8*7(%rsi), %r13 movq %rax, 0x8*0(%rdi) movq %rbx, 0x8*1(%rdi) @@ -75,14 +75,14 @@ ENTRY(copy_page_regs) movq %r9, 0x8*4(%rdi) movq %r10, 0x8*5(%rdi) movq %r11, 0x8*6(%rdi) - movq %r12, 0x8*7(%rdi) + movq %r13, 0x8*7(%rdi) leaq 64(%rdi), %rdi leaq 64(%rsi), %rsi jnz .Loop2 movq (%rsp), %rbx - movq 1*8(%rsp), %r12 + movq 1*8(%rsp), %r13 addq $2*8, %rsp - ret + pax_ret copy_page ENDPROC(copy_page_regs) diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index d376e4b48..8e52373db 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -15,54 +15,35 @@ #include #include #include +#include +#include -/* Standard copy_to_user with segment limit checking */ -ENTRY(_copy_to_user) - mov PER_CPU_VAR(current_task), %rax - movq %rdi,%rcx - addq %rdx,%rcx - jc bad_to_user - cmpq TASK_addr_limit(%rax),%rcx - ja bad_to_user - ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \ - "jmp copy_user_generic_string", \ - X86_FEATURE_REP_GOOD, \ - "jmp copy_user_enhanced_fast_string", \ - X86_FEATURE_ERMS -ENDPROC(_copy_to_user) -EXPORT_SYMBOL(_copy_to_user) - -/* Standard copy_from_user with segment limit checking */ -ENTRY(_copy_from_user) - mov PER_CPU_VAR(current_task), %rax - movq %rsi,%rcx - addq %rdx,%rcx - jc bad_from_user - cmpq TASK_addr_limit(%rax),%rcx - ja bad_from_user - ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \ - "jmp copy_user_generic_string", \ - X86_FEATURE_REP_GOOD, \ - "jmp copy_user_enhanced_fast_string", \ - X86_FEATURE_ERMS -ENDPROC(_copy_from_user) -EXPORT_SYMBOL(_copy_from_user) - +.macro ALIGN_DESTINATION + /* check for bad alignment of destination */ + movl %edi,%ecx + andl $7,%ecx + jz 102f /* already aligned */ + subl $8,%ecx + negl %ecx + subl %ecx,%edx +100: movb (%rsi),%al +101: movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 100b +102: .section .fixup,"ax" - /* must zero dest */ -ENTRY(bad_from_user) -bad_from_user: - movl %edx,%ecx - xorl %eax,%eax - rep - stosb -bad_to_user: - movl %edx,%eax - ret -ENDPROC(bad_from_user) +103: addl %ecx,%edx /* ecx is zerorest also */ + FRAME_END + jmp copy_user_handle_tail .previous + _ASM_EXTABLE(100b,103b) + _ASM_EXTABLE(101b,103b) +.endm + /* * copy_user_generic_unrolled - memory copy with exception handling. * This version is for CPUs like P4 that don't have efficient micro @@ -77,7 +58,8 @@ ENDPROC(bad_from_user) * eax uncopied bytes or 0 if successful. 
*/ ENTRY(copy_user_generic_unrolled) - ASM_STAC + FRAME_BEGIN + ASM_USER_ACCESS_BEGIN cmpl $8,%edx jb 20f /* less then 8 bytes, go to byte copy loop */ ALIGN_DESTINATION @@ -125,8 +107,9 @@ ENTRY(copy_user_generic_unrolled) decl %ecx jnz 21b 23: xor %eax,%eax - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret copy_user_generic_unrolled .section .fixup,"ax" 30: shll $6,%ecx @@ -135,7 +118,8 @@ ENTRY(copy_user_generic_unrolled) 40: leal (%rdx,%rcx,8),%edx jmp 60f 50: movl %ecx,%edx -60: jmp copy_user_handle_tail /* ecx is zerorest also */ +60: FRAME_END + jmp copy_user_handle_tail /* ecx is zerorest also */ .previous _ASM_EXTABLE(1b,30b) @@ -180,7 +164,8 @@ EXPORT_SYMBOL(copy_user_generic_unrolled) * eax uncopied bytes or 0 if successful. */ ENTRY(copy_user_generic_string) - ASM_STAC + FRAME_BEGIN + ASM_USER_ACCESS_BEGIN cmpl $8,%edx jb 2f /* less than 8 bytes, go to byte copy loop */ ALIGN_DESTINATION @@ -193,12 +178,14 @@ ENTRY(copy_user_generic_string) 3: rep movsb xorl %eax,%eax - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret copy_user_generic_string .section .fixup,"ax" 11: leal (%rdx,%rcx,8),%ecx 12: movl %ecx,%edx /* ecx is zerorest also */ + FRAME_END jmp copy_user_handle_tail .previous @@ -220,16 +207,19 @@ EXPORT_SYMBOL(copy_user_generic_string) * eax uncopied bytes or 0 if successful. */ ENTRY(copy_user_enhanced_fast_string) - ASM_STAC + FRAME_BEGIN + ASM_USER_ACCESS_BEGIN movl %edx,%ecx 1: rep movsb xorl %eax,%eax - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret copy_user_enhanced_fast_string .section .fixup,"ax" 12: movl %ecx,%edx /* ecx is zerorest also */ + FRAME_END jmp copy_user_handle_tail .previous @@ -247,7 +237,17 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) * - Require 4-byte alignment when size is 4 bytes. */ ENTRY(__copy_user_nocache) - ASM_STAC + FRAME_BEGIN + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov pax_user_shadow_base,%rcx + cmp %rcx,%rsi + jae 1f + add %rcx,%rsi +1: +#endif + + ASM_USER_ACCESS_BEGIN /* If size is less than 8 bytes, go to 4-byte copy */ cmpl $8,%edx @@ -341,9 +341,10 @@ ENTRY(__copy_user_nocache) /* Finished copying; fence the prior stores */ .L_finish_copy: xorl %eax,%eax - ASM_CLAC + ASM_USER_ACCESS_END sfence - ret + FRAME_END + pax_ret __copy_user_nocache .section .fixup,"ax" .L_fixup_4x8b_copy: @@ -360,6 +361,7 @@ ENTRY(__copy_user_nocache) movl %ecx,%edx .L_fixup_handle_tail: sfence + FRAME_END jmp copy_user_handle_tail .previous diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S index 7e48807b2..627b0036e 100644 --- a/arch/x86/lib/csum-copy_64.S +++ b/arch/x86/lib/csum-copy_64.S @@ -8,6 +8,7 @@ #include #include #include +#include /* * Checksum copy with exception handling. @@ -52,7 +53,7 @@ ENTRY(csum_partial_copy_generic) .Lignore: subq $7*8, %rsp movq %rbx, 2*8(%rsp) - movq %r12, 3*8(%rsp) + movq %r15, 3*8(%rsp) movq %r14, 4*8(%rsp) movq %r13, 5*8(%rsp) movq %rbp, 6*8(%rsp) @@ -64,16 +65,16 @@ ENTRY(csum_partial_copy_generic) movl %edx, %ecx xorl %r9d, %r9d - movq %rcx, %r12 + movq %rcx, %r15 - shrq $6, %r12 + shrq $6, %r15 jz .Lhandle_tail /* < 64 */ clc /* main loop. 
clear in 64 byte blocks */ /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */ - /* r11: temp3, rdx: temp4, r12 loopcnt */ + /* r11: temp3, rdx: temp4, r15 loopcnt */ /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */ .p2align 4 .Lloop: @@ -107,7 +108,7 @@ ENTRY(csum_partial_copy_generic) adcq %r14, %rax adcq %r13, %rax - decl %r12d + decl %r15d dest movq %rbx, (%rsi) @@ -200,12 +201,12 @@ ENTRY(csum_partial_copy_generic) .Lende: movq 2*8(%rsp), %rbx - movq 3*8(%rsp), %r12 + movq 3*8(%rsp), %r15 movq 4*8(%rsp), %r14 movq 5*8(%rsp), %r13 movq 6*8(%rsp), %rbp addq $7*8, %rsp - ret + pax_ret csum_partial_copy_generic /* Exception handlers. Very simple, zeroing is done in the wrappers */ .Lbad_source: diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 8bd53589e..a6c9102fd 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -53,10 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst, len -= 2; } } - stac(); - isum = csum_partial_copy_generic((__force const void *)src, + user_access_begin(); + isum = csum_partial_copy_generic((const void __force_kernel *)____m(src), dst, len, isum, errp, NULL); - clac(); + user_access_end(); if (unlikely(*errp)) goto out_err; @@ -110,10 +110,10 @@ csum_partial_copy_to_user(const void *src, void __user *dst, } *errp = 0; - stac(); - ret = csum_partial_copy_generic(src, (void __force *)dst, + user_access_begin(); + ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst), len, isum, NULL, errp); - clac(); + user_access_end(); return ret; } EXPORT_SYMBOL(csum_partial_copy_to_user); diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 37b62d412..07a73dc4b 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -33,72 +33,140 @@ #include #include #include +#include +#include +#include +#include + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __copyuser_seg gs; +#else +#define __copyuser_seg +#endif .text ENTRY(__get_user_1) + FRAME_BEGIN + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user - ASM_STAC -1: movzbl (%_ASM_AX),%edx + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov pax_user_shadow_base,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + + ASM_USER_ACCESS_BEGIN +1: __copyuser_seg movzbl (%_ASM_AX),%edx xor %eax,%eax - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret __get_user_1 ENDPROC(__get_user_1) EXPORT_SYMBOL(__get_user_1) ENTRY(__get_user_2) + FRAME_BEGIN add $1,%_ASM_AX + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user - ASM_STAC -2: movzwl -1(%_ASM_AX),%edx + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov pax_user_shadow_base,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + + ASM_USER_ACCESS_BEGIN +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx xor %eax,%eax - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret __get_user_2 ENDPROC(__get_user_2) EXPORT_SYMBOL(__get_user_2) ENTRY(__get_user_4) + FRAME_BEGIN add $3,%_ASM_AX + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae 
bad_get_user - ASM_STAC -3: movl -3(%_ASM_AX),%edx + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov pax_user_shadow_base,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + + ASM_USER_ACCESS_BEGIN +3: __copyuser_seg movl -3(%_ASM_AX),%edx xor %eax,%eax - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret __get_user_4 ENDPROC(__get_user_4) EXPORT_SYMBOL(__get_user_4) ENTRY(__get_user_8) + FRAME_BEGIN + #ifdef CONFIG_X86_64 add $7,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user - ASM_STAC + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov pax_user_shadow_base,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + + ASM_USER_ACCESS_BEGIN 4: movq -7(%_ASM_AX),%rdx xor %eax,%eax - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret __get_user_8 #else add $7,%_ASM_AX jc bad_get_user_8 mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user_8 - ASM_STAC -4: movl -7(%_ASM_AX),%edx -5: movl -3(%_ASM_AX),%ecx + ASM_USER_ACCESS_BEGIN +4: __copyuser_seg movl -7(%_ASM_AX),%edx +5: __copyuser_seg movl -3(%_ASM_AX),%ecx xor %eax,%eax - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret __get_user_8 #endif ENDPROC(__get_user_8) EXPORT_SYMBOL(__get_user_8) @@ -107,8 +175,9 @@ EXPORT_SYMBOL(__get_user_8) bad_get_user: xor %edx,%edx mov $(-EFAULT),%_ASM_AX - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret __get_user_bad END(bad_get_user) #ifdef CONFIG_X86_32 @@ -116,8 +185,9 @@ bad_get_user_8: xor %edx,%edx xor %ecx,%ecx mov $(-EFAULT),%_ASM_AX - ASM_CLAC - ret + ASM_USER_ACCESS_END + FRAME_END + pax_ret __get_user_bad END(bad_get_user_8) #endif diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S index 23d893cbc..688a0aac4 100644 --- a/arch/x86/lib/hweight.S +++ b/arch/x86/lib/hweight.S @@ -1,7 +1,7 @@ #include #include -#include +#include /* * unsigned int __sw_hweight32(unsigned int w) @@ -31,7 +31,7 @@ ENTRY(__sw_hweight32) imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101 shrl $24, %eax # w = w_tmp >> 24 __ASM_SIZE(pop,) %__ASM_REG(dx) - ret + pax_ret __sw_hweight32 ENDPROC(__sw_hweight32) EXPORT_SYMBOL(__sw_hweight32) @@ -64,19 +64,19 @@ ENTRY(__sw_hweight64) popq %rdx popq %rdi - ret + pax_ret __sw_hweight64 #else /* CONFIG_X86_32 */ /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */ pushl %ecx - call __sw_hweight32 + pax_direct_call __sw_hweight32 movl %eax, %ecx # stash away result movl %edx, %eax # second part of input - call __sw_hweight32 + pax_direct_call __sw_hweight32 addl %ecx, %eax # result popl %ecx - ret + pax_ret __sw_hweight64 #endif ENDPROC(__sw_hweight64) EXPORT_SYMBOL(__sw_hweight64) diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 1088eb8f3..fac84689c 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -20,8 +20,10 @@ #ifdef __KERNEL__ #include +#include #else #include +#define ktla_ktva(addr) addr #endif #include #include @@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64) buf_len = MAX_INSN_SIZE; memset(insn, 0, sizeof(*insn)); - insn->kaddr = kaddr; - insn->end_kaddr = kaddr + buf_len; - insn->next_byte = kaddr; + insn->kaddr = (void *)ktla_ktva((unsigned long)kaddr); + insn->end_kaddr = insn->kaddr + buf_len; + insn->next_byte = insn->kaddr; insn->x86_64 = x86_64 ? 
1 : 0;
 insn->opnd_bytes = 4;
 if (x86_64)
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
index 33147fef3..fb022d080 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
@@ -16,6 +16,7 @@
 */
 #include
+#include
 /*
 * override generic version in lib/iomap_copy.c
@@ -23,5 +24,5 @@ ENTRY(__iowrite32_copy)
 movl %edx,%ecx
 rep movsd
- ret
+ pax_ret __iowrite32_copy
 ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f58..7aef7b9a9 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -38,7 +38,7 @@ ENTRY(memcpy)
 rep movsq
 movl %edx, %ecx
 rep movsb
- ret
+ pax_ret memcpy
 ENDPROC(memcpy)
 ENDPROC(__memcpy)
 EXPORT_SYMBOL(memcpy)
@@ -52,7 +52,7 @@ ENTRY(memcpy_erms)
 movq %rdi, %rax
 movq %rdx, %rcx
 rep movsb
- ret
+ pax_ret memcpy
 ENDPROC(memcpy_erms)
 ENTRY(memcpy_orig)
@@ -136,7 +136,7 @@ ENTRY(memcpy_orig)
 movq %r9, 1*8(%rdi)
 movq %r10, -2*8(%rdi, %rdx)
 movq %r11, -1*8(%rdi, %rdx)
- retq
+ pax_ret memcpy
 .p2align 4
 .Lless_16bytes:
 cmpl $8, %edx
@@ -148,7 +148,7 @@ ENTRY(memcpy_orig)
 movq -1*8(%rsi, %rdx), %r9
 movq %r8, 0*8(%rdi)
 movq %r9, -1*8(%rdi, %rdx)
- retq
+ pax_ret memcpy
 .p2align 4
 .Lless_8bytes:
 cmpl $4, %edx
@@ -161,7 +161,7 @@ ENTRY(memcpy_orig)
 movl -4(%rsi, %rdx), %r8d
 movl %ecx, (%rdi)
 movl %r8d, -4(%rdi, %rdx)
- retq
+ pax_ret memcpy
 .p2align 4
 .Lless_3bytes:
 subl $1, %edx
@@ -179,7 +179,7 @@ ENTRY(memcpy_orig)
 movb %cl, (%rdi)
 .Lend:
- retq
+ pax_ret memcpy
 ENDPROC(memcpy_orig)
 #ifndef CONFIG_UML
@@ -275,7 +275,7 @@ ENTRY(memcpy_mcsafe_unrolled)
 /* Copy successful. Return zero */
 .L_done_memcpy_trap:
 xorq %rax, %rax
- ret
+ pax_ret memcpy
 ENDPROC(memcpy_mcsafe_unrolled)
 EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
@@ -283,7 +283,7 @@ EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
 /* Return -EFAULT for any failure */
 .L_memcpy_mcsafe_fail:
 mov $-EFAULT, %rax
- ret
+ pax_ret memcpy
 .previous
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 15de86cd1..cf8817617 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -42,7 +42,7 @@ ENTRY(__memmove)
 jg 2f
 .Lmemmove_begin_forward:
- ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
+ ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; pax_ret memmove", X86_FEATURE_ERMS
 /*
 * movsq instruction have many startup latency
@@ -205,7 +205,7 @@ ENTRY(__memmove)
 movb (%rsi), %r11b
 movb %r11b, (%rdi)
 13:
- retq
+ pax_ret memmove
 ENDPROC(__memmove)
 ENDPROC(memmove)
 EXPORT_SYMBOL(__memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 55b95db30..7b106268d 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -41,7 +41,7 @@ ENTRY(__memset)
 movl %edx,%ecx
 rep stosb
 movq %r9,%rax
- ret
+ pax_ret memset
 ENDPROC(memset)
 ENDPROC(__memset)
 EXPORT_SYMBOL(memset)
@@ -64,7 +64,7 @@ ENTRY(memset_erms)
 movq %rdx,%rcx
 rep stosb
 movq %r9,%rax
- ret
+ pax_ret memset
 ENDPROC(memset_erms)
 ENTRY(memset_orig)
@@ -126,7 +126,7 @@ ENTRY(memset_orig)
 .Lende:
 movq %r10,%rax
- ret
+ pax_ret memset
 .Lbad_alignment:
 cmpq $7,%rdx
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index c2311a678..3b01ad91b 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
 void *p;
 int i;
+ unsigned long cr0;
 if (unlikely(in_interrupt()))
 return __memcpy(to, from, len);
@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 kernel_fpu_begin();
 __asm__ __volatile__ (
- "1:
prefetch (%0)\n" /* This set is 28 bytes */ - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" /* This set is 28 bytes */ + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from)); + : "=&r" (cr0) : "r" (from) : "ax"); for ( ; i > 5; i--) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movq 8(%0), %%mm1\n" - " movq 16(%0), %%mm2\n" - " movq 24(%0), %%mm3\n" - " movq %%mm0, (%1)\n" - " movq %%mm1, 8(%1)\n" - " movq %%mm2, 16(%1)\n" - " movq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm0\n" - " movq 40(%0), %%mm1\n" - " movq 48(%0), %%mm2\n" - " movq 56(%0), %%mm3\n" - " movq %%mm0, 32(%1)\n" - " movq %%mm1, 40(%1)\n" - " movq %%mm2, 48(%1)\n" - " movq %%mm3, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movq 8(%1), %%mm1\n" + " movq 16(%1), %%mm2\n" + " movq 24(%1), %%mm3\n" + " movq %%mm0, (%2)\n" + " movq %%mm1, 8(%2)\n" + " movq %%mm2, 16(%2)\n" + " movq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm0\n" + " movq 40(%1), %%mm1\n" + " movq 48(%1), %%mm2\n" + " movq 56(%1), %%mm3\n" + " movq %%mm0, 32(%2)\n" + " movq %%mm1, 40(%2)\n" + " movq %%mm2, 48(%2)\n" + " movq %%mm3, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; @@ -158,6 +187,7 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; + unsigned long cr0; kernel_fpu_begin(); @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from) * but that is for later. 
-AV */ __asm__ __volatile__( - "1: prefetch (%0)\n" - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from)); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); for (i = 0; i < (4096-320)/64; i++) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movntq %%mm0, (%1)\n" - " movq 8(%0), %%mm1\n" - " movntq %%mm1, 8(%1)\n" - " movq 16(%0), %%mm2\n" - " movntq %%mm2, 16(%1)\n" - " movq 24(%0), %%mm3\n" - " movntq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm4\n" - " movntq %%mm4, 32(%1)\n" - " movq 40(%0), %%mm5\n" - " movntq %%mm5, 40(%1)\n" - " movq 48(%0), %%mm6\n" - " movntq %%mm6, 48(%1)\n" - " movq 56(%0), %%mm7\n" - " movntq %%mm7, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movntq %%mm0, (%2)\n" + " movq 8(%1), %%mm1\n" + " movntq %%mm1, 8(%2)\n" + " movq 16(%1), %%mm2\n" + " movntq %%mm2, 16(%2)\n" + " movq 24(%1), %%mm3\n" + " movntq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm4\n" + " movntq %%mm4, 32(%2)\n" + " movq 40(%1), %%mm5\n" + " movntq %%mm5, 40(%2)\n" + " movq 48(%1), %%mm6\n" + " movntq %%mm6, 48(%2)\n" + " movq 56(%1), %%mm7\n" + " movntq %%mm7, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; @@ -280,47 +338,76 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; + unsigned long cr0; kernel_fpu_begin(); __asm__ __volatile__ ( - "1: prefetch (%0)\n" - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from)); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movq 8(%0), %%mm1\n" - " movq 16(%0), %%mm2\n" - " movq 24(%0), %%mm3\n" - " movq %%mm0, (%1)\n" - " movq %%mm1, 8(%1)\n" - " movq %%mm2, 16(%1)\n" - " movq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm0\n" - " movq 40(%0), %%mm1\n" - " movq 48(%0), %%mm2\n" - " movq 56(%0), %%mm3\n" - " movq %%mm0, 32(%1)\n" - " movq %%mm1, 40(%1)\n" - " 
movq %%mm2, 48(%1)\n" - " movq %%mm3, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movq 8(%1), %%mm1\n" + " movq 16(%1), %%mm2\n" + " movq 24(%1), %%mm3\n" + " movq %%mm0, (%2)\n" + " movq %%mm1, 8(%2)\n" + " movq %%mm2, 16(%2)\n" + " movq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm0\n" + " movq 40(%1), %%mm1\n" + " movq 48(%1), %%mm2\n" + " movq 56(%1), %%mm3\n" + " movq %%mm0, 32(%2)\n" + " movq %%mm1, 40(%2)\n" + " movq %%mm2, 48(%2)\n" + " movq %%mm3, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S index c81556409..345ff9120 100644 --- a/arch/x86/lib/msr-reg.S +++ b/arch/x86/lib/msr-reg.S @@ -2,6 +2,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 /* @@ -34,7 +35,7 @@ ENTRY(\op\()_safe_regs) movl %edi, 28(%r10) popq %rbp popq %rbx - ret + pax_ret \op\()_safe_regs 3: movl $-EIO, %r11d jmp 2b @@ -76,7 +77,7 @@ ENTRY(\op\()_safe_regs) popl %esi popl %ebp popl %ebx - ret + pax_ret \op\()_safe_regs 3: movl $-EIO, 4(%esp) jmp 2b diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index cd5d716d2..146f98162 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -16,7 +16,10 @@ #include #include #include - +#include +#include +#include +#include /* * __put_user_X @@ -30,17 +33,44 @@ * as they get called from within inline assembly. 
*/ -#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX -#define EXIT ASM_CLAC ; \ - ret +#define ENTER FRAME_BEGIN +#define EXIT ASM_USER_ACCESS_END ; \ + FRAME_END ; \ + pax_ret __put_user_1 + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define _DEST %_ASM_CX,%_ASM_BX +#else +#define _DEST %_ASM_CX +#endif + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __copyuser_seg gs; +#else +#define __copyuser_seg +#endif .text ENTRY(__put_user_1) ENTER + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + mov PER_CPU_VAR(current_task), %_ASM_BX cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX jae bad_put_user - ASM_STAC -1: movb %al,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov pax_user_shadow_base,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + + ASM_USER_ACCESS_BEGIN +1: __copyuser_seg movb %al,(_DEST) xor %eax,%eax EXIT ENDPROC(__put_user_1) @@ -48,12 +78,26 @@ EXPORT_SYMBOL(__put_user_1) ENTRY(__put_user_2) ENTER + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + mov PER_CPU_VAR(current_task), %_ASM_BX mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user - ASM_STAC -2: movw %ax,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov pax_user_shadow_base,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + + ASM_USER_ACCESS_BEGIN +2: __copyuser_seg movw %ax,(_DEST) xor %eax,%eax EXIT ENDPROC(__put_user_2) @@ -61,12 +105,26 @@ EXPORT_SYMBOL(__put_user_2) ENTRY(__put_user_4) ENTER + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + mov PER_CPU_VAR(current_task), %_ASM_BX mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user - ASM_STAC -3: movl %eax,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov pax_user_shadow_base,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + + ASM_USER_ACCESS_BEGIN +3: __copyuser_seg movl %eax,(_DEST) xor %eax,%eax EXIT ENDPROC(__put_user_4) @@ -74,14 +132,28 @@ EXPORT_SYMBOL(__put_user_4) ENTRY(__put_user_8) ENTER + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + mov PER_CPU_VAR(current_task), %_ASM_BX mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user - ASM_STAC -4: mov %_ASM_AX,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov pax_user_shadow_base,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + + ASM_USER_ACCESS_BEGIN +4: __copyuser_seg mov %_ASM_AX,(_DEST) #ifdef CONFIG_X86_32 -5: movl %edx,4(%_ASM_CX) +5: __copyuser_seg movl %edx,4(_DEST) #endif xor %eax,%eax EXIT diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S index bf2c6074e..3ca297db8 100644 --- a/arch/x86/lib/rwsem.S +++ b/arch/x86/lib/rwsem.S @@ -91,31 +91,31 @@ ENTRY(call_rwsem_down_read_failed) save_common_regs __ASM_SIZE(push,) %__ASM_REG(dx) movq %rax,%rdi - call rwsem_down_read_failed + pax_direct_call rwsem_down_read_failed __ASM_SIZE(pop,) %__ASM_REG(dx) restore_common_regs FRAME_END - ret + pax_ret rwsem_down_read_failed ENDPROC(call_rwsem_down_read_failed) ENTRY(call_rwsem_down_write_failed) FRAME_BEGIN save_common_regs movq %rax,%rdi - call rwsem_down_write_failed + pax_direct_call rwsem_down_write_failed restore_common_regs FRAME_END - ret + pax_ret rwsem_down_write_failed 
ENDPROC(call_rwsem_down_write_failed) ENTRY(call_rwsem_down_write_failed_killable) FRAME_BEGIN save_common_regs movq %rax,%rdi - call rwsem_down_write_failed_killable + pax_direct_call rwsem_down_write_failed_killable restore_common_regs FRAME_END - ret + pax_ret rwsem_down_write_failed_killable ENDPROC(call_rwsem_down_write_failed_killable) ENTRY(call_rwsem_wake) @@ -125,10 +125,10 @@ ENTRY(call_rwsem_wake) jnz 1f save_common_regs movq %rax,%rdi - call rwsem_wake + pax_direct_call rwsem_wake restore_common_regs 1: FRAME_END - ret + pax_ret rwsem_wake ENDPROC(call_rwsem_wake) ENTRY(call_rwsem_downgrade_wake) @@ -136,9 +136,9 @@ ENTRY(call_rwsem_downgrade_wake) save_common_regs __ASM_SIZE(push,) %__ASM_REG(dx) movq %rax,%rdi - call rwsem_downgrade_wake + pax_direct_call rwsem_downgrade_wake __ASM_SIZE(pop,) %__ASM_REG(dx) restore_common_regs FRAME_END - ret + pax_ret rwsem_downgrade_wake ENDPROC(call_rwsem_downgrade_wake) diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 3bc7baf2a..aa7d985e8 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -42,11 +42,13 @@ do { \ int __d0; \ might_fault(); \ __asm__ __volatile__( \ + __COPYUSER_SET_ES \ ASM_STAC "\n" \ "0: rep; stosl\n" \ " movl %2,%0\n" \ "1: rep; stosb\n" \ "2: " ASM_CLAC "\n" \ + __COPYUSER_RESTORE_ES \ ".section .fixup,\"ax\"\n" \ "3: lea 0(%2,%0,4),%0\n" \ " jmp 2b\n" \ @@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user); #ifdef CONFIG_X86_INTEL_USERCOPY static unsigned long -__copy_user_intel(void __user *to, const void *from, unsigned long size) +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size) { int d0, d1; __asm__ __volatile__( @@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) " .align 2,0x90\n" "3: movl 0(%4), %%eax\n" "4: movl 4(%4), %%edx\n" - "5: movl %%eax, 0(%3)\n" - "6: movl %%edx, 4(%3)\n" + "5: "__copyuser_seg" movl %%eax, 0(%3)\n" + "6: "__copyuser_seg" movl %%edx, 4(%3)\n" "7: movl 8(%4), %%eax\n" "8: movl 12(%4),%%edx\n" - "9: movl %%eax, 8(%3)\n" - "10: movl %%edx, 12(%3)\n" + "9: "__copyuser_seg" movl %%eax, 8(%3)\n" + "10: "__copyuser_seg" movl %%edx, 12(%3)\n" "11: movl 16(%4), %%eax\n" "12: movl 20(%4), %%edx\n" - "13: movl %%eax, 16(%3)\n" - "14: movl %%edx, 20(%3)\n" + "13: "__copyuser_seg" movl %%eax, 16(%3)\n" + "14: "__copyuser_seg" movl %%edx, 20(%3)\n" "15: movl 24(%4), %%eax\n" "16: movl 28(%4), %%edx\n" - "17: movl %%eax, 24(%3)\n" - "18: movl %%edx, 28(%3)\n" + "17: "__copyuser_seg" movl %%eax, 24(%3)\n" + "18: "__copyuser_seg" movl %%edx, 28(%3)\n" "19: movl 32(%4), %%eax\n" "20: movl 36(%4), %%edx\n" - "21: movl %%eax, 32(%3)\n" - "22: movl %%edx, 36(%3)\n" + "21: "__copyuser_seg" movl %%eax, 32(%3)\n" + "22: "__copyuser_seg" movl %%edx, 36(%3)\n" "23: movl 40(%4), %%eax\n" "24: movl 44(%4), %%edx\n" - "25: movl %%eax, 40(%3)\n" - "26: movl %%edx, 44(%3)\n" + "25: "__copyuser_seg" movl %%eax, 40(%3)\n" + "26: "__copyuser_seg" movl %%edx, 44(%3)\n" "27: movl 48(%4), %%eax\n" "28: movl 52(%4), %%edx\n" - "29: movl %%eax, 48(%3)\n" - "30: movl %%edx, 52(%3)\n" + "29: "__copyuser_seg" movl %%eax, 48(%3)\n" + "30: "__copyuser_seg" movl %%edx, 52(%3)\n" "31: movl 56(%4), %%eax\n" "32: movl 60(%4), %%edx\n" - "33: movl %%eax, 56(%3)\n" - "34: movl %%edx, 60(%3)\n" + "33: "__copyuser_seg" movl %%eax, 56(%3)\n" + "34: "__copyuser_seg" movl %%edx, 60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, 
unsigned long size) " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" + __COPYUSER_SET_ES "99: rep; movsl\n" "36: movl %%eax, %0\n" "37: rep; movsb\n" "100:\n" + __COPYUSER_RESTORE_ES + ".section .fixup,\"ax\"\n" + "101: lea 0(%%eax,%0,4),%0\n" + " jmp 100b\n" + ".previous\n" + _ASM_EXTABLE(1b,100b) + _ASM_EXTABLE(2b,100b) + _ASM_EXTABLE(3b,100b) + _ASM_EXTABLE(4b,100b) + _ASM_EXTABLE(5b,100b) + _ASM_EXTABLE(6b,100b) + _ASM_EXTABLE(7b,100b) + _ASM_EXTABLE(8b,100b) + _ASM_EXTABLE(9b,100b) + _ASM_EXTABLE(10b,100b) + _ASM_EXTABLE(11b,100b) + _ASM_EXTABLE(12b,100b) + _ASM_EXTABLE(13b,100b) + _ASM_EXTABLE(14b,100b) + _ASM_EXTABLE(15b,100b) + _ASM_EXTABLE(16b,100b) + _ASM_EXTABLE(17b,100b) + _ASM_EXTABLE(18b,100b) + _ASM_EXTABLE(19b,100b) + _ASM_EXTABLE(20b,100b) + _ASM_EXTABLE(21b,100b) + _ASM_EXTABLE(22b,100b) + _ASM_EXTABLE(23b,100b) + _ASM_EXTABLE(24b,100b) + _ASM_EXTABLE(25b,100b) + _ASM_EXTABLE(26b,100b) + _ASM_EXTABLE(27b,100b) + _ASM_EXTABLE(28b,100b) + _ASM_EXTABLE(29b,100b) + _ASM_EXTABLE(30b,100b) + _ASM_EXTABLE(31b,100b) + _ASM_EXTABLE(32b,100b) + _ASM_EXTABLE(33b,100b) + _ASM_EXTABLE(34b,100b) + _ASM_EXTABLE(35b,100b) + _ASM_EXTABLE(36b,100b) + _ASM_EXTABLE(37b,100b) + _ASM_EXTABLE(99b,101b) + : "=&c"(size), "=&D" (d0), "=&S" (d1) + : "1"(to), "2"(from), "0"(size) + : "eax", "edx", "memory"); + return size; +} + +static unsigned long +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size) +{ + int d0, d1; + __asm__ __volatile__( + " .align 2,0x90\n" + "1: "__copyuser_seg" movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 3f\n" + "2: "__copyuser_seg" movl 64(%4), %%eax\n" + " .align 2,0x90\n" + "3: "__copyuser_seg" movl 0(%4), %%eax\n" + "4: "__copyuser_seg" movl 4(%4), %%edx\n" + "5: movl %%eax, 0(%3)\n" + "6: movl %%edx, 4(%3)\n" + "7: "__copyuser_seg" movl 8(%4), %%eax\n" + "8: "__copyuser_seg" movl 12(%4),%%edx\n" + "9: movl %%eax, 8(%3)\n" + "10: movl %%edx, 12(%3)\n" + "11: "__copyuser_seg" movl 16(%4), %%eax\n" + "12: "__copyuser_seg" movl 20(%4), %%edx\n" + "13: movl %%eax, 16(%3)\n" + "14: movl %%edx, 20(%3)\n" + "15: "__copyuser_seg" movl 24(%4), %%eax\n" + "16: "__copyuser_seg" movl 28(%4), %%edx\n" + "17: movl %%eax, 24(%3)\n" + "18: movl %%edx, 28(%3)\n" + "19: "__copyuser_seg" movl 32(%4), %%eax\n" + "20: "__copyuser_seg" movl 36(%4), %%edx\n" + "21: movl %%eax, 32(%3)\n" + "22: movl %%edx, 36(%3)\n" + "23: "__copyuser_seg" movl 40(%4), %%eax\n" + "24: "__copyuser_seg" movl 44(%4), %%edx\n" + "25: movl %%eax, 40(%3)\n" + "26: movl %%edx, 44(%3)\n" + "27: "__copyuser_seg" movl 48(%4), %%eax\n" + "28: "__copyuser_seg" movl 52(%4), %%edx\n" + "29: movl %%eax, 48(%3)\n" + "30: movl %%edx, 52(%3)\n" + "31: "__copyuser_seg" movl 56(%4), %%eax\n" + "32: "__copyuser_seg" movl 60(%4), %%edx\n" + "33: movl %%eax, 56(%3)\n" + "34: movl %%edx, 60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" + " cmpl $63, %0\n" + " ja 1b\n" + "35: movl %0, %%eax\n" + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" + "99: rep; "__copyuser_seg" movsl\n" + "36: movl %%eax, %0\n" + "37: rep; "__copyuser_seg" movsb\n" + "100:\n" ".section .fixup,\"ax\"\n" "101: lea 0(%%eax,%0,4),%0\n" " jmp 100b\n" @@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" - "0: movl 32(%4), %%eax\n" + "0: "__copyuser_seg" movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" - "1: movl 64(%4), %%eax\n" + "1: "__copyuser_seg" movl 64(%4), %%eax\n" " .align 2,0x90\n" - "2: movl 
0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" + "2: "__copyuser_seg" movl 0(%4), %%eax\n" + "21: "__copyuser_seg" movl 4(%4), %%edx\n" " movl %%eax, 0(%3)\n" " movl %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" + "3: "__copyuser_seg" movl 8(%4), %%eax\n" + "31: "__copyuser_seg" movl 12(%4),%%edx\n" " movl %%eax, 8(%3)\n" " movl %%edx, 12(%3)\n" - "4: movl 16(%4), %%eax\n" - "41: movl 20(%4), %%edx\n" + "4: "__copyuser_seg" movl 16(%4), %%eax\n" + "41: "__copyuser_seg" movl 20(%4), %%edx\n" " movl %%eax, 16(%3)\n" " movl %%edx, 20(%3)\n" - "10: movl 24(%4), %%eax\n" - "51: movl 28(%4), %%edx\n" + "10: "__copyuser_seg" movl 24(%4), %%eax\n" + "51: "__copyuser_seg" movl 28(%4), %%edx\n" " movl %%eax, 24(%3)\n" " movl %%edx, 28(%3)\n" - "11: movl 32(%4), %%eax\n" - "61: movl 36(%4), %%edx\n" + "11: "__copyuser_seg" movl 32(%4), %%eax\n" + "61: "__copyuser_seg" movl 36(%4), %%edx\n" " movl %%eax, 32(%3)\n" " movl %%edx, 36(%3)\n" - "12: movl 40(%4), %%eax\n" - "71: movl 44(%4), %%edx\n" + "12: "__copyuser_seg" movl 40(%4), %%eax\n" + "71: "__copyuser_seg" movl 44(%4), %%edx\n" " movl %%eax, 40(%3)\n" " movl %%edx, 44(%3)\n" - "13: movl 48(%4), %%eax\n" - "81: movl 52(%4), %%edx\n" + "13: "__copyuser_seg" movl 48(%4), %%eax\n" + "81: "__copyuser_seg" movl 52(%4), %%edx\n" " movl %%eax, 48(%3)\n" " movl %%edx, 52(%3)\n" - "14: movl 56(%4), %%eax\n" - "91: movl 60(%4), %%edx\n" + "14: "__copyuser_seg" movl 56(%4), %%eax\n" + "91: "__copyuser_seg" movl 60(%4), %%edx\n" " movl %%eax, 56(%3)\n" " movl %%edx, 60(%3)\n" " addl $-64, %0\n" @@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" - "6: rep; movsl\n" + "6: rep; "__copyuser_seg" movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" + "7: rep; "__copyuser_seg" movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" @@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, __asm__ __volatile__( " .align 2,0x90\n" - "0: movl 32(%4), %%eax\n" + "0: "__copyuser_seg" movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" - "1: movl 64(%4), %%eax\n" + "1: "__copyuser_seg" movl 64(%4), %%eax\n" " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" + "2: "__copyuser_seg" movl 0(%4), %%eax\n" + "21: "__copyuser_seg" movl 4(%4), %%edx\n" " movnti %%eax, 0(%3)\n" " movnti %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" + "3: "__copyuser_seg" movl 8(%4), %%eax\n" + "31: "__copyuser_seg" movl 12(%4),%%edx\n" " movnti %%eax, 8(%3)\n" " movnti %%edx, 12(%3)\n" - "4: movl 16(%4), %%eax\n" - "41: movl 20(%4), %%edx\n" + "4: "__copyuser_seg" movl 16(%4), %%eax\n" + "41: "__copyuser_seg" movl 20(%4), %%edx\n" " movnti %%eax, 16(%3)\n" " movnti %%edx, 20(%3)\n" - "10: movl 24(%4), %%eax\n" - "51: movl 28(%4), %%edx\n" + "10: "__copyuser_seg" movl 24(%4), %%eax\n" + "51: "__copyuser_seg" movl 28(%4), %%edx\n" " movnti %%eax, 24(%3)\n" " movnti %%edx, 28(%3)\n" - "11: movl 32(%4), %%eax\n" - "61: movl 36(%4), %%edx\n" + "11: "__copyuser_seg" movl 32(%4), %%eax\n" + "61: "__copyuser_seg" movl 36(%4), %%edx\n" " movnti %%eax, 32(%3)\n" " movnti %%edx, 36(%3)\n" - "12: movl 40(%4), %%eax\n" - "71: movl 44(%4), %%edx\n" + "12: "__copyuser_seg" movl 40(%4), %%eax\n" + "71: "__copyuser_seg" movl 44(%4), %%edx\n" " movnti %%eax, 40(%3)\n" " movnti %%edx, 44(%3)\n" - "13: movl 48(%4), %%eax\n" - "81: movl 52(%4), %%edx\n" + "13: "__copyuser_seg" movl 48(%4), %%eax\n" + "81: 
"__copyuser_seg" movl 52(%4), %%edx\n" " movnti %%eax, 48(%3)\n" " movnti %%edx, 52(%3)\n" - "14: movl 56(%4), %%eax\n" - "91: movl 60(%4), %%edx\n" + "14: "__copyuser_seg" movl 56(%4), %%eax\n" + "91: "__copyuser_seg" movl 60(%4), %%edx\n" " movnti %%eax, 56(%3)\n" " movnti %%edx, 60(%3)\n" " addl $-64, %0\n" @@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" - "6: rep; movsl\n" + "6: rep; "__copyuser_seg" movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" + "7: rep; "__copyuser_seg" movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" @@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to, __asm__ __volatile__( " .align 2,0x90\n" - "0: movl 32(%4), %%eax\n" + "0: "__copyuser_seg" movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" - "1: movl 64(%4), %%eax\n" + "1: "__copyuser_seg" movl 64(%4), %%eax\n" " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" + "2: "__copyuser_seg" movl 0(%4), %%eax\n" + "21: "__copyuser_seg" movl 4(%4), %%edx\n" " movnti %%eax, 0(%3)\n" " movnti %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" + "3: "__copyuser_seg" movl 8(%4), %%eax\n" + "31: "__copyuser_seg" movl 12(%4),%%edx\n" " movnti %%eax, 8(%3)\n" " movnti %%edx, 12(%3)\n" - "4: movl 16(%4), %%eax\n" - "41: movl 20(%4), %%edx\n" + "4: "__copyuser_seg" movl 16(%4), %%eax\n" + "41: "__copyuser_seg" movl 20(%4), %%edx\n" " movnti %%eax, 16(%3)\n" " movnti %%edx, 20(%3)\n" - "10: movl 24(%4), %%eax\n" - "51: movl 28(%4), %%edx\n" + "10: "__copyuser_seg" movl 24(%4), %%eax\n" + "51: "__copyuser_seg" movl 28(%4), %%edx\n" " movnti %%eax, 24(%3)\n" " movnti %%edx, 28(%3)\n" - "11: movl 32(%4), %%eax\n" - "61: movl 36(%4), %%edx\n" + "11: "__copyuser_seg" movl 32(%4), %%eax\n" + "61: "__copyuser_seg" movl 36(%4), %%edx\n" " movnti %%eax, 32(%3)\n" " movnti %%edx, 36(%3)\n" - "12: movl 40(%4), %%eax\n" - "71: movl 44(%4), %%edx\n" + "12: "__copyuser_seg" movl 40(%4), %%eax\n" + "71: "__copyuser_seg" movl 44(%4), %%edx\n" " movnti %%eax, 40(%3)\n" " movnti %%edx, 44(%3)\n" - "13: movl 48(%4), %%eax\n" - "81: movl 52(%4), %%edx\n" + "13: "__copyuser_seg" movl 48(%4), %%eax\n" + "81: "__copyuser_seg" movl 52(%4), %%edx\n" " movnti %%eax, 48(%3)\n" " movnti %%edx, 52(%3)\n" - "14: movl 56(%4), %%eax\n" - "91: movl 60(%4), %%edx\n" + "14: "__copyuser_seg" movl 56(%4), %%eax\n" + "91: "__copyuser_seg" movl 60(%4), %%edx\n" " movnti %%eax, 56(%3)\n" " movnti %%edx, 60(%3)\n" " addl $-64, %0\n" @@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to, " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" - "6: rep; movsl\n" + "6: rep; "__copyuser_seg" movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" + "7: rep; "__copyuser_seg" movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" @@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to, */ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size); -unsigned long __copy_user_intel(void __user *to, const void *from, +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from, + unsigned long size); +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size); unsigned long __copy_user_zeroing_intel_nocache(void *to, const void __user *from, unsigned long size); #endif /* CONFIG_X86_INTEL_USERCOPY */ /* Generic arbitrary sized copy. 
*/ -#define __copy_user(to, from, size) \ +#define __copy_user(to, from, size, prefix, set, restore) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ + set \ " cmp $7,%0\n" \ " jbe 1f\n" \ " movl %1,%0\n" \ " negl %0\n" \ " andl $7,%0\n" \ " subl %0,%3\n" \ - "4: rep; movsb\n" \ + "4: rep; "prefix"movsb\n" \ " movl %3,%0\n" \ " shrl $2,%0\n" \ " andl $3,%3\n" \ " .align 2,0x90\n" \ - "0: rep; movsl\n" \ + "0: rep; "prefix"movsl\n" \ " movl %3,%0\n" \ - "1: rep; movsb\n" \ + "1: rep; "prefix"movsb\n" \ "2:\n" \ + restore \ ".section .fixup,\"ax\"\n" \ "5: addl %3,%0\n" \ " jmp 2b\n" \ @@ -538,14 +650,14 @@ do { \ " negl %0\n" \ " andl $7,%0\n" \ " subl %0,%3\n" \ - "4: rep; movsb\n" \ + "4: rep; "__copyuser_seg"movsb\n" \ " movl %3,%0\n" \ " shrl $2,%0\n" \ " andl $3,%3\n" \ " .align 2,0x90\n" \ - "0: rep; movsl\n" \ + "0: rep; "__copyuser_seg"movsl\n" \ " movl %3,%0\n" \ - "1: rep; movsb\n" \ + "1: rep; "__copyuser_seg"movsb\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "5: addl %3,%0\n" \ @@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, { stac(); if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES); else - n = __copy_user_intel(to, from, n); + n = __generic_copy_to_user_intel(to, from, n); clac(); return n; } @@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, { stac(); if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); + __copy_user(to, from, n, __copyuser_seg, "", ""); else - n = __copy_user_intel((void __user *)to, - (const void *)from, n); + n = __generic_copy_from_user_intel(to, from, n); clac(); return n; } @@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) n = __copy_user_intel_nocache(to, from, n); else - __copy_user(to, from, n); + __copy_user(to, from, n, __copyuser_seg, "", ""); #else - __copy_user(to, from, n); + __copy_user(to, from, n, __copyuser_seg, "", ""); #endif clac(); return n; } EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); -/** - * copy_to_user: - Copy a block of data into user space. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -unsigned long _copy_to_user(void __user *to, const void *from, unsigned n) +#ifdef CONFIG_PAX_MEMORY_UDEREF +void __set_fs(mm_segment_t x) { - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); - return n; + switch (x.seg) { + case 0: + loadsegment(gs, 0); + break; + case TASK_SIZE_MAX: + loadsegment(gs, __USER_DS); + break; + case -1UL: + loadsegment(gs, __KERNEL_DS); + break; + default: + BUG(); + } } -EXPORT_SYMBOL(_copy_to_user); +EXPORT_SYMBOL(__set_fs); -/** - * copy_from_user: - Copy a block of data from user space. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. 
- * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -unsigned long _copy_from_user(void *to, const void __user *from, unsigned n) +void set_fs(mm_segment_t x) { - if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; + current->thread.addr_limit = x; + __set_fs(x); } -EXPORT_SYMBOL(_copy_from_user); +EXPORT_SYMBOL(set_fs); +#endif diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 69873589c..14d476915 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -18,7 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) might_fault(); /* no memory constraint because it doesn't change any memory gcc knows about */ - stac(); + user_access_begin(); asm volatile( " testq %[size8],%[size8]\n" " jz 4f\n" @@ -39,9 +39,9 @@ unsigned long __clear_user(void __user *addr, unsigned long size) _ASM_EXTABLE(0b,3b) _ASM_EXTABLE(1b,2b) : [size8] "=&c"(size), [dst] "=&D" (__d0) - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)), [zero] "r" (0UL), [eight] "r" (8UL)); - clac(); + user_access_end(); return size; } EXPORT_SYMBOL(__clear_user); @@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n) } EXPORT_SYMBOL(clear_user); -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len) { - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { - return copy_user_generic((__force void *)to, (__force void *)from, len); - } - return len; + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) + return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len); + return len; } EXPORT_SYMBOL(copy_in_user); @@ -69,20 +68,20 @@ EXPORT_SYMBOL(copy_in_user); * it is not necessary to optimize tail handling. 
*/ __visible unsigned long -copy_user_handle_tail(char *to, char *from, unsigned len) +copy_user_handle_tail(void __user *to, const void __user *from, unsigned long len) { + user_access_end(); for (; len; --len, to++) { char c; - if (__get_user_nocheck(c, from++, sizeof(char))) + if (__get_user_nocheck(c, (const char *)from++, sizeof(char))) break; - if (__put_user_nocheck(c, to, sizeof(char))) + if (__put_user_nocheck(c, (char *)to, sizeof(char))) break; } - clac(); /* If the destination is a kernel buffer, we always clear the end */ - if (!__addr_ok(to)) - memset(to, 0, len); + if (!__addr_ok(to) && (unsigned long)to >= TASK_SIZE_MAX + pax_user_shadow_base) + memset((void __force_kernel *)to, 0, len); return len; } diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S index f77ba3058..8ba0e95ef 100644 --- a/arch/x86/math-emu/div_Xsig.S +++ b/arch/x86/math-emu/div_Xsig.S @@ -340,26 +340,26 @@ L_exit: popl %esi leave - ret + pax_ret div_Xsig #ifdef PARANOID /* The logic is wrong if we got here */ L_bugged: pushl EX_INTERNAL|0x240 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_exit L_bugged_1: pushl EX_INTERNAL|0x241 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_exit L_bugged_2: pushl EX_INTERNAL|0x242 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_exit #endif /* PARANOID */ diff --git a/arch/x86/math-emu/div_small.S b/arch/x86/math-emu/div_small.S index 47099628f..f9f329aa1 100644 --- a/arch/x86/math-emu/div_small.S +++ b/arch/x86/math-emu/div_small.S @@ -43,5 +43,5 @@ ENTRY(FPU_div_small) popl %esi leave - ret + pax_ret FPU_div_small diff --git a/arch/x86/math-emu/fpu_asm.h b/arch/x86/math-emu/fpu_asm.h index 955b93273..32dc22330 100644 --- a/arch/x86/math-emu/fpu_asm.h +++ b/arch/x86/math-emu/fpu_asm.h @@ -11,6 +11,7 @@ #define _FPU_ASM_H_ #include +#include #define EXCEPTION FPU_exception diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c index 024f6e971..308f1b093 100644 --- a/arch/x86/math-emu/fpu_aux.c +++ b/arch/x86/math-emu/fpu_aux.c @@ -52,7 +52,7 @@ void fpstate_init_soft(struct swregs_state *soft) void finit(void) { - fpstate_init_soft(&current->thread.fpu.state.soft); + fpstate_init_soft(&current->thread.fpu.state->soft); } /* diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index e945fedf1..bffe686ab 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c @@ -643,7 +643,7 @@ int fpregs_soft_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct swregs_state *s387 = &target->thread.fpu.state.soft; + struct swregs_state *s387 = &target->thread.fpu.state->soft; void *space = s387->st_space; int ret; int offset, other, i, tags, regnr, tag, newtop; @@ -695,7 +695,7 @@ int fpregs_soft_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - struct swregs_state *s387 = &target->thread.fpu.state.soft; + struct swregs_state *s387 = &target->thread.fpu.state->soft; const void *space = s387->st_space; int ret; int offset = (S387->ftop & 7) * 10, other = 80 - offset; diff --git a/arch/x86/math-emu/fpu_etc.c b/arch/x86/math-emu/fpu_etc.c index 233e5af56..dd82ff07a 100644 --- a/arch/x86/math-emu/fpu_etc.c +++ b/arch/x86/math-emu/fpu_etc.c @@ -119,9 +119,14 @@ static void fxam(FPU_REG *st0_ptr, u_char st0tag) setcc(c); } +static void FPU_ST0_illegal(FPU_REG *st0_ptr, u_char st0_tag) +{ + FPU_illegal(); +} + static FUNC_ST0 const fp_etc_table[] = { - fchs, fabs, 
(FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal, - ftst_, fxam, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal + fchs, fabs, FPU_ST0_illegal, FPU_ST0_illegal, + ftst_, fxam, FPU_ST0_illegal, FPU_ST0_illegal }; void FPU_etc(void) diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h index 5e044d506..d342fce49 100644 --- a/arch/x86/math-emu/fpu_system.h +++ b/arch/x86/math-emu/fpu_system.h @@ -46,7 +46,7 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg) #define SEG_EXPAND_DOWN(s) (((s).b & ((1 << 11) | (1 << 10))) \ == (1 << 10)) -#define I387 (&current->thread.fpu.state) +#define I387 (current->thread.fpu.state) #define FPU_info (I387->soft.info) #define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs)) diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c index ecd066805..4b4c6646a 100644 --- a/arch/x86/math-emu/fpu_trig.c +++ b/arch/x86/math-emu/fpu_trig.c @@ -432,13 +432,13 @@ static void fxtract(FPU_REG *st0_ptr, u_char st0_tag) #endif /* PARANOID */ } -static void fdecstp(void) +static void fdecstp(FPU_REG *st0_ptr, u_char st0_tag) { clear_C1(); top--; } -static void fincstp(void) +static void fincstp(FPU_REG *st0_ptr, u_char st0_tag) { clear_C1(); top++; @@ -607,6 +607,11 @@ static int fsin(FPU_REG *st0_ptr, u_char tag) } } +static void _fsin(FPU_REG *st0_ptr, u_char tag) +{ + fsin(st0_ptr, tag); +} + static int f_cos(FPU_REG *st0_ptr, u_char tag) { u_char st0_sign; @@ -1625,7 +1630,7 @@ static void fscale(FPU_REG *st0_ptr, u_char st0_tag) static FUNC_ST0 const trig_table_a[] = { f2xm1, fyl2x, fptan, fpatan, - fxtract, fprem1, (FUNC_ST0) fdecstp, (FUNC_ST0) fincstp + fxtract, fprem1, fdecstp, fincstp }; void FPU_triga(void) @@ -1634,7 +1639,7 @@ void FPU_triga(void) } static FUNC_ST0 const trig_table_b[] = { - fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0) fsin, fcos + fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, _fsin, fcos }; void FPU_trigb(void) diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S index 717785a53..36e00653d 100644 --- a/arch/x86/math-emu/mul_Xsig.S +++ b/arch/x86/math-emu/mul_Xsig.S @@ -61,7 +61,7 @@ ENTRY(mul32_Xsig) popl %esi leave - ret + pax_ret mul32_Xsig ENTRY(mul64_Xsig) @@ -113,7 +113,7 @@ ENTRY(mul64_Xsig) popl %esi leave - ret + pax_ret mul64_Xsig @@ -172,5 +172,5 @@ ENTRY(mul_Xsig_Xsig) popl %esi leave - ret + pax_ret mul_Xsig_Xsig diff --git a/arch/x86/math-emu/polynom_Xsig.S b/arch/x86/math-emu/polynom_Xsig.S index 17315c89f..b132abb06 100644 --- a/arch/x86/math-emu/polynom_Xsig.S +++ b/arch/x86/math-emu/polynom_Xsig.S @@ -132,4 +132,4 @@ L_accum_done: popl %edi popl %esi leave - ret + pax_ret polynomial_Xsig diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c index 005483549..a3bd67152 100644 --- a/arch/x86/math-emu/reg_constant.c +++ b/arch/x86/math-emu/reg_constant.c @@ -107,8 +107,13 @@ static void fldz(int rc) typedef void (*FUNC_RC) (int); +static void FPU_RC_illegal(int rc) +{ + FPU_illegal(); +} + static FUNC_RC constants_table[] = { - fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC) FPU_illegal + fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, FPU_RC_illegal }; void fconst(void) diff --git a/arch/x86/math-emu/reg_norm.S b/arch/x86/math-emu/reg_norm.S index 8b6352efc..484e7742f 100644 --- a/arch/x86/math-emu/reg_norm.S +++ b/arch/x86/math-emu/reg_norm.S @@ -71,7 +71,7 @@ L_exit_valid: L_exit: popl %ebx leave - ret + pax_ret FPU_normalize L_zero: @@ -83,7 +83,7 @@ L_underflow: /* Convert the exponent to 
80x87 form. */ addw EXTENDED_Ebias,EXP(%ebx) push %ebx - call arith_underflow + pax_direct_call arith_underflow pop %ebx jmp L_exit @@ -91,7 +91,7 @@ L_overflow: /* Convert the exponent to 80x87 form. */ addw EXTENDED_Ebias,EXP(%ebx) push %ebx - call arith_overflow + pax_direct_call arith_overflow pop %ebx jmp L_exit @@ -136,7 +136,7 @@ L_exit_nuo_valid: popl %ebx leave - ret + pax_ret FPU_normalize_nuo L_exit_nuo_zero: movl TAG_Zero,%eax @@ -144,4 +144,4 @@ L_exit_nuo_zero: popl %ebx leave - ret + pax_ret FPU_normalize_nuo diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S index d1d4e48b4..003208e62 100644 --- a/arch/x86/math-emu/reg_round.S +++ b/arch/x86/math-emu/reg_round.S @@ -436,7 +436,7 @@ fpu_Arith_exit: popl %edi popl %esi leave - ret + pax_ret FPU_round /* @@ -446,7 +446,7 @@ fpu_Arith_exit: L_precision_lost_up: push %edx push %eax - call set_precision_flag_up + pax_direct_call set_precision_flag_up popl %eax popl %edx jmp L_no_precision_loss @@ -458,7 +458,7 @@ L_precision_lost_up: L_precision_lost_down: push %edx push %eax - call set_precision_flag_down + pax_direct_call set_precision_flag_down popl %eax popl %edx jmp L_no_precision_loss @@ -603,7 +603,7 @@ LPseudoDenormal: /* There must be a masked underflow */ push %eax pushl EX_Underflow - call EXCEPTION + pax_direct_call EXCEPTION popl %eax popl %eax movl TAG_Special,%edx @@ -616,12 +616,12 @@ LPseudoDenormal: */ L_underflow_to_zero: push %eax - call set_precision_flag_down + pax_direct_call set_precision_flag_down popl %eax push %eax pushl EX_Underflow - call EXCEPTION + pax_direct_call EXCEPTION popl %eax popl %eax @@ -635,7 +635,7 @@ L_underflow_to_zero: L_overflow: addw EXTENDED_Ebias,EXP(%edi) /* Set for unmasked response. */ push %edi - call arith_overflow + pax_direct_call arith_overflow pop %edi jmp fpu_reg_round_signed_special_exit @@ -653,7 +653,7 @@ Do_unmasked_underflow: addw $(3*(1<<13)),EXP(%edi) push %eax pushl EX_Underflow - call EXCEPTION + pax_direct_call EXCEPTION popl %eax popl %eax jmp L_Normalised @@ -663,44 +663,44 @@ Do_unmasked_underflow: #ifdef PECULIAR_486 L_bugged_denorm_486: pushl EX_INTERNAL|0x236 - call EXCEPTION + pax_direct_call EXCEPTION popl %ebx jmp L_exception_exit #else L_bugged_denorm: pushl EX_INTERNAL|0x230 - call EXCEPTION + pax_direct_call EXCEPTION popl %ebx jmp L_exception_exit #endif /* PECULIAR_486 */ L_bugged_round24: pushl EX_INTERNAL|0x231 - call EXCEPTION + pax_direct_call EXCEPTION popl %ebx jmp L_exception_exit L_bugged_round53: pushl EX_INTERNAL|0x232 - call EXCEPTION + pax_direct_call EXCEPTION popl %ebx jmp L_exception_exit L_bugged_round64: pushl EX_INTERNAL|0x233 - call EXCEPTION + pax_direct_call EXCEPTION popl %ebx jmp L_exception_exit L_norm_bugged: pushl EX_INTERNAL|0x234 - call EXCEPTION + pax_direct_call EXCEPTION popl %ebx jmp L_exception_exit L_entry_bugged: pushl EX_INTERNAL|0x235 - call EXCEPTION + pax_direct_call EXCEPTION popl %ebx L_exception_exit: mov $-1,%eax diff --git a/arch/x86/math-emu/reg_u_add.S b/arch/x86/math-emu/reg_u_add.S index 47c4c2434..2349fceda 100644 --- a/arch/x86/math-emu/reg_u_add.S +++ b/arch/x86/math-emu/reg_u_add.S @@ -153,7 +153,7 @@ L_round_the_result: /* If we ever get here then we have problems! 
*/ L_bugged: pushl EX_INTERNAL|0x201 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx movl $-1,%eax jmp L_exit @@ -163,5 +163,5 @@ L_exit: popl %edi popl %esi leave - ret + pax_ret FPU_u_add #endif /* PARANOID */ diff --git a/arch/x86/math-emu/reg_u_div.S b/arch/x86/math-emu/reg_u_div.S index cc00654b6..f628ba9d6 100644 --- a/arch/x86/math-emu/reg_u_div.S +++ b/arch/x86/math-emu/reg_u_div.S @@ -444,19 +444,19 @@ LRound_precision: /* The logic is wrong if we got here */ L_bugged: pushl EX_INTERNAL|0x202 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_exit L_bugged_1: pushl EX_INTERNAL|0x203 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_exit L_bugged_2: pushl EX_INTERNAL|0x204 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_exit @@ -467,5 +467,5 @@ L_exit: popl %esi leave - ret + pax_ret FPU_u_div #endif /* PARANOID */ diff --git a/arch/x86/math-emu/reg_u_mul.S b/arch/x86/math-emu/reg_u_mul.S index 973f12af9..34675c5ca 100644 --- a/arch/x86/math-emu/reg_u_mul.S +++ b/arch/x86/math-emu/reg_u_mul.S @@ -134,7 +134,7 @@ L_extent_zero: #ifdef PARANOID L_bugged: pushl EX_INTERNAL|0x205 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_exit @@ -143,6 +143,6 @@ L_exit: popl %edi popl %esi leave - ret + pax_ret FPU_u_mul #endif /* PARANOID */ diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S index 1b6c24801..5359e936f 100644 --- a/arch/x86/math-emu/reg_u_sub.S +++ b/arch/x86/math-emu/reg_u_sub.S @@ -231,31 +231,31 @@ L_round: #ifdef PARANOID L_bugged_1: pushl EX_INTERNAL|0x206 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_error_exit L_bugged_2: pushl EX_INTERNAL|0x209 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_error_exit L_bugged_3: pushl EX_INTERNAL|0x210 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_error_exit L_bugged_4: pushl EX_INTERNAL|0x211 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_error_exit L_bugged: pushl EX_INTERNAL|0x212 - call EXCEPTION + pax_direct_call EXCEPTION pop %ebx jmp L_error_exit @@ -269,4 +269,4 @@ L_exit: popl %edi popl %esi leave - ret + pax_ret FPU_u_sub diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S index bbe0e8771..e8328172b 100644 --- a/arch/x86/math-emu/round_Xsig.S +++ b/arch/x86/math-emu/round_Xsig.S @@ -77,7 +77,7 @@ L_exit: popl %esi popl %ebx leave - ret + pax_ret round_Xsig @@ -137,5 +137,5 @@ L_n_exit: popl %esi popl %ebx leave - ret + pax_ret norm_Xsig diff --git a/arch/x86/math-emu/shr_Xsig.S b/arch/x86/math-emu/shr_Xsig.S index 31cdd118e..61a03ecad 100644 --- a/arch/x86/math-emu/shr_Xsig.S +++ b/arch/x86/math-emu/shr_Xsig.S @@ -44,7 +44,7 @@ ENTRY(shr_Xsig) popl %ebx popl %esi leave - ret + pax_ret shr_Xsig L_more_than_31: cmpl $64,%ecx @@ -60,7 +60,7 @@ L_more_than_31: movl $0,8(%esi) popl %esi leave - ret + pax_ret shr_Xsig L_more_than_63: cmpl $96,%ecx @@ -75,7 +75,7 @@ L_more_than_63: movl %edx,8(%esi) popl %esi leave - ret + pax_ret shr_Xsig L_more_than_95: xorl %eax,%eax @@ -84,4 +84,4 @@ L_more_than_95: movl %eax,8(%esi) popl %esi leave - ret + pax_ret shr_Xsig diff --git a/arch/x86/math-emu/wm_shrx.S b/arch/x86/math-emu/wm_shrx.S index 518428317..6267001fd 100644 --- a/arch/x86/math-emu/wm_shrx.S +++ b/arch/x86/math-emu/wm_shrx.S @@ -54,7 +54,7 @@ ENTRY(FPU_shrx) popl %ebx popl %esi leave - ret + pax_ret FPU_shrx L_more_than_31: cmpl $64,%ecx @@ -69,7 +69,7 @@ L_more_than_31: movl $0,4(%esi) popl %esi leave - ret + pax_ret FPU_shrx L_more_than_63: cmpl $96,%ecx @@ -83,7 +83,7 @@ 
L_more_than_63: movl %edx,4(%esi) popl %esi leave - ret + pax_ret FPU_shrx L_more_than_95: xorl %eax,%eax @@ -91,7 +91,7 @@ L_more_than_95: movl %eax,4(%esi) popl %esi leave - ret + pax_ret FPU_shrx /*---------------------------------------------------------------------------+ @@ -144,7 +144,7 @@ ENTRY(FPU_shrxs) popl %ebx popl %esi leave - ret + pax_ret FPU_shrxs /* Shift by [0..31] bits */ Ls_less_than_32: @@ -161,7 +161,7 @@ Ls_less_than_32: popl %ebx popl %esi leave - ret + pax_ret FPU_shrxs /* Shift by [64..95] bits */ Ls_more_than_63: @@ -187,7 +187,7 @@ Ls_more_than_63: popl %ebx popl %esi leave - ret + pax_ret FPU_shrxs Ls_more_than_95: /* Shift by [96..inf) bits */ @@ -201,4 +201,4 @@ Ls_more_than_95: popl %ebx popl %esi leave - ret + pax_ret FPU_shrxs diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S index d258f5956..c5255d6d9 100644 --- a/arch/x86/math-emu/wm_sqrt.S +++ b/arch/x86/math-emu/wm_sqrt.S @@ -223,7 +223,7 @@ sqrt_stage_2_finish: #ifdef PARANOID sqrt_stage_2_error: pushl EX_INTERNAL|0x213 - call EXCEPTION + pax_direct_call EXCEPTION #endif /* PARANOID */ sqrt_stage_2_done: @@ -276,7 +276,7 @@ sqrt_stage_2_done: sqrt_stage_3_error: pushl EX_INTERNAL|0x207 - call EXCEPTION + pax_direct_call EXCEPTION sqrt_stage_3_no_error: #endif /* PARANOID */ @@ -382,7 +382,7 @@ sqrt_near_exact: ja sqrt_near_exact_ok pushl EX_INTERNAL|0x214 - call EXCEPTION + pax_direct_call EXCEPTION sqrt_near_exact_ok: #endif /* PARANOID */ @@ -442,7 +442,7 @@ sqrt_get_more_precision: ja sqrt_more_prec_ok pushl EX_INTERNAL|0x215 - call EXCEPTION + pax_direct_call EXCEPTION sqrt_more_prec_ok: #endif /* PARANOID */ diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 96d2b847e..b3db3803f 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -39,3 +39,7 @@ obj-$(CONFIG_X86_INTEL_MPX) += mpx.o obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +quote:=" +obj-$(CONFIG_X86_64) += uderef_64.o +CFLAGS_uderef_64.o := -fcall-saved-rax -fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11 + diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 8aa6bea1c..1181e441a 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -28,6 +28,7 @@ struct pg_state { int level; pgprot_t current_prot; + pgprot_t current_prots[5]; unsigned long start_address; unsigned long current_address; const struct addr_marker *marker; @@ -185,6 +186,23 @@ static unsigned long normalize_addr(unsigned long u) #endif } +static pgprot_t merge_prot(pgprot_t old_prot, pgprot_t new_prot) +{ + if (!(pgprot_val(new_prot) & _PAGE_PRESENT)) + return new_prot; + + if (!(pgprot_val(old_prot) & _PAGE_PRESENT)) + return new_prot; + + if (pgprot_val(old_prot) & _PAGE_NX) + pgprot_val(new_prot) |= _PAGE_NX; + + if (!(pgprot_val(old_prot) & _PAGE_RW)) + pgprot_val(new_prot) &= ~_PAGE_RW; + + return new_prot; +} + /* * This function gets called on a break in a continuous series * of PTE entries; the next one is different so we need to @@ -201,11 +219,13 @@ static void note_page(struct seq_file *m, struct pg_state *st, * we have now. "break" is either changing perms, levels or * address space marker. 
*/ + new_prot = merge_prot(st->current_prots[level - 1], new_prot); prot = pgprot_val(new_prot); cur = pgprot_val(st->current_prot); if (!st->level) { /* First entry */ + st->current_prots[0] = __pgprot(_PAGE_RW); st->current_prot = new_prot; st->level = level; st->marker = address_markers; @@ -217,9 +237,8 @@ static void note_page(struct seq_file *m, struct pg_state *st, const char *unit = units; unsigned long delta; int width = sizeof(unsigned long) * 2; - pgprotval_t pr = pgprot_val(st->current_prot); - if (st->check_wx && (pr & _PAGE_RW) && !(pr & _PAGE_NX)) { + if (st->check_wx && (cur & _PAGE_RW) && !(cur & _PAGE_NX)) { WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %p/%pS\n", (void *)st->start_address, @@ -305,9 +324,10 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, start = (pmd_t *) pud_page_vaddr(addr); for (i = 0; i < PTRS_PER_PMD; i++) { st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT); + prot = pmd_flags(*start); + st->current_prots[3] = merge_prot(st->current_prots[2], __pgprot(prot)); if (!pmd_none(*start)) { if (pmd_large(*start) || !pmd_present(*start)) { - prot = pmd_flags(*start); note_page(m, st, __pgprot(prot), 3); } else { walk_pte_level(m, st, *start, @@ -338,9 +358,10 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr, for (i = 0; i < PTRS_PER_PUD; i++) { st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT); + prot = pud_flags(*start); + st->current_prots[2] = merge_prot(st->current_prots[1], __pgprot(start->pud)); if (!pud_none(*start)) { if (pud_large(*start) || !pud_present(*start)) { - prot = pud_flags(*start); note_page(m, st, __pgprot(prot), 2); } else { walk_pmd_level(m, st, *start, @@ -396,9 +417,10 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, for (i = 0; i < PTRS_PER_PGD; i++) { st.current_address = normalize_addr(i * PGD_LEVEL_MULT); + prot = pgd_flags(*start); + st.current_prots[1] = __pgprot(prot); if (!pgd_none(*start) && !is_hypervisor_range(i)) { if (pgd_large(*start) || !pgd_present(*start)) { - prot = pgd_flags(*start); note_page(m, &st, __pgprot(prot), 1); } else { walk_pud_level(m, &st, *start, diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index fcd06f752..b5d8f5bed 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -102,7 +102,7 @@ int fixup_exception(struct pt_regs *regs, int trapnr) ex_handler_t handler; #ifdef CONFIG_PNPBIOS - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) { extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; extern u32 pnp_bios_is_utter_crap; pnp_bios_is_utter_crap = 1; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9f72ca3b2..a076e4a2c 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -14,6 +14,8 @@ #include /* prefetchw */ #include /* exception_enter(), ... */ #include /* faulthandler_disabled() */ +#include +#include #include /* boot_cpu_has, ... */ #include /* dotraplinkage, ... 
*/ @@ -23,6 +25,11 @@ #include /* emulate_vsyscall */ #include /* struct vm86 */ #include /* vma_pkey() */ +#include + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#include +#endif #define CREATE_TRACE_POINTS #include @@ -126,7 +133,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, return !instr_lo || (instr_lo>>1) == 1; case 0x00: /* Prefetch instruction is 0x0F0D or 0x0F18 */ - if (probe_kernel_address(instr, opcode)) + if (user_mode(regs)) { + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1)) + return 0; + } else if (probe_kernel_address(instr, opcode)) return 0; *prefetch = (instr_lo == 0xF) && @@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) if (error_code & PF_INSTR) return 0; - instr = (void *)convert_ip_to_linear(current, regs); + addr = convert_ip_to_linear(current, regs); + if (addr == -1L) + return 0; + instr = (void *)addr; max_instr = instr + 15; if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX) @@ -160,7 +173,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) while (instr < max_instr) { unsigned char opcode; - if (probe_kernel_address(instr, opcode)) + if (user_mode(regs)) { + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1)) + break; + } else if (probe_kernel_address(instr, opcode)) break; instr++; @@ -244,6 +260,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, force_sig_info(si_signo, &info, tsk); } +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address); +#endif + +#ifdef CONFIG_PAX_EMUTRAMP +static int pax_handle_fetch_fault(struct pt_regs *regs); +#endif + +#ifdef CONFIG_PAX_PAGEEXEC +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + return NULL; + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + return NULL; + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd)) + return NULL; + return pmd; +} +#endif + DEFINE_SPINLOCK(pgd_lock); LIST_HEAD(pgd_list); @@ -294,10 +338,27 @@ void vmalloc_sync_all(void) for (address = VMALLOC_START & PMD_MASK; address >= TASK_SIZE_MAX && address < FIXADDR_TOP; address += PMD_SIZE) { + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif spin_lock(&pgd_lock); + +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { + pgd_t *pgd = get_cpu_pgd(cpu, user); + pmd_t *ret; + + ret = vmalloc_sync_one(pgd, address); + if (!ret) + break; + pgd = get_cpu_pgd(cpu, kernel); +#else list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; spinlock_t *pgt_lock; pmd_t *ret; @@ -305,8 +366,14 @@ void vmalloc_sync_all(void) pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); - ret = vmalloc_sync_one(page_address(page), address); + pgd = page_address(page); +#endif + + ret = vmalloc_sync_one(pgd, address); + +#ifndef CONFIG_PAX_PER_CPU_PGD spin_unlock(pgt_lock); +#endif if (!ret) break; @@ -340,6 +407,12 @@ static noinline int vmalloc_fault(unsigned long address) * an interrupt in the middle of a task switch.. 
*/ pgd_paddr = read_cr3(); + +#ifdef CONFIG_PAX_PER_CPU_PGD + BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK)); + vmalloc_sync_one(__va(pgd_paddr + PTRS_PER_PGD * sizeof(pgd_t)), address); +#endif + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); if (!pmd_k) return -1; @@ -439,11 +512,24 @@ static noinline int vmalloc_fault(unsigned long address) * happen within a race in page table update. In the later * case just flush: */ - pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address); pgd_ref = pgd_offset_k(address); if (pgd_none(*pgd_ref)) return -1; +#ifdef CONFIG_PAX_PER_CPU_PGD + BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK)); + pgd = pgd_offset_cpu(smp_processor_id(), user, address); + if (pgd_none(*pgd)) { + set_pgd(pgd, *pgd_ref); + arch_flush_lazy_mmu_mode(); + } else { + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + } + pgd = pgd_offset_cpu(smp_processor_id(), kernel, address); +#else + pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address); +#endif + if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_ref); arch_flush_lazy_mmu_mode(); @@ -616,7 +702,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address) static int is_errata100(struct pt_regs *regs, unsigned long address) { #ifdef CONFIG_X86_64 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32)) return 1; #endif return 0; @@ -643,9 +729,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) } static const char nx_warning[] = KERN_CRIT -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n"; static const char smep_warning[] = KERN_CRIT -"unable to execute userspace code (SMEP?) (uid: %d)\n"; +"unable to execute userspace code (SMEP?) 
(uid: %d, task: %s, pid: %d)\n"; static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, @@ -654,7 +740,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, if (!oops_may_print()) return; - if (error_code & PF_INSTR) { + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) { unsigned int level; pgd_t *pgd; pte_t *pte; @@ -665,12 +751,24 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, pte = lookup_address_in_pgd(pgd, address, &level); if (pte && pte_present(*pte) && !pte_exec(*pte)) - printk(nx_warning, from_kuid(&init_user_ns, current_uid())); + printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current)); if (pte && pte_present(*pte) && pte_exec(*pte) && (pgd_flags(*pgd) & _PAGE_USER) && (__read_cr4() & X86_CR4_SMEP)) - printk(smep_warning, from_kuid(&init_user_ns, current_uid())); + printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current)); + } + +#ifdef CONFIG_PAX_KERNEXEC + if (init_mm.start_code <= address && address < init_mm.end_code) { + if (current->signal->curr_ip) + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", + &current->signal->curr_ip, current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); + else + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); } +#endif printk(KERN_ALERT "BUG: unable to handle kernel "); if (address < PAGE_SIZE) @@ -775,12 +873,9 @@ no_context(struct pt_regs *regs, unsigned long error_code, * break the console driver and lose most of the stack dump. 
*/ asm volatile ("movq %[stack], %%rsp\n\t" - "call handle_stack_overflow\n\t" - "1: jmp 1b" : "+r" (__sp) - : "D" ("kernel stack overflow (page fault)"), - "S" (regs), "d" (address), - [stack] "rm" (stack)); + : [stack] "rm" (stack)); + handle_stack_overflow("kernel stack overflow (page fault)", regs, address); unreachable(); } #endif @@ -887,6 +982,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, } #endif +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (pax_is_fetch_fault(regs, error_code, address)) { + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return; + } +#endif + + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); + do_group_exit(SIGKILL); + } +#endif + /* * To avoid leaking information about the kernel page table * layout, pretend that user-mode accesses to kernel addresses @@ -998,7 +1108,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { printk(KERN_ERR "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", - tsk->comm, tsk->pid, address); + tsk->comm, task_pid_nr(tsk), address); code = BUS_MCEERR_AR; } #endif @@ -1057,6 +1167,109 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) return 1; } +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) +static inline unsigned long get_limit(unsigned long segment) +{ + unsigned long __limit; + + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); + return __limit + 1; +} + +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code) +{ + pte_t *pte; + pmd_t *pmd; + spinlock_t *ptl; + unsigned char pte_mask; + + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) || + !(mm->pax_flags & MF_PAX_PAGEEXEC)) + return 0; + + /* PaX: it's our fault, let's handle it if we can */ + + /* PaX: take a look at read faults before acquiring any locks */ + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) { + /* instruction fetch attempt from a protected page in user mode */ + up_read(&mm->mmap_sem); + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return 1; + } +#endif + + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); + do_group_exit(SIGKILL); + } + + pmd = pax_get_pmd(mm, address); + if (unlikely(!pmd)) + return 0; + + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) { + pte_unmap_unlock(pte, ptl); + return 0; + } + + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) { + /* write attempt to a protected page in user mode */ + pte_unmap_unlock(pte, ptl); + return 0; + } + +#ifdef CONFIG_SMP + if (likely(address > get_limit(regs->cs) && cpumask_test_cpu(smp_processor_id(), &mm->context.cpu_user_cs_mask))) +#else + if (likely(address > get_limit(regs->cs))) +#endif + { + set_pte(pte, pte_mkread(*pte)); + __flush_tlb_one(address); + pte_unmap_unlock(pte, ptl); + up_read(&mm->mmap_sem); + return 1; + } + + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)); + + /* + * PaX: fill DTLB with user rights and retry + */ + __asm__ __volatile__ ( + "orb %2,(%1)\n" +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) +/* + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's + * (and AMD's) TLBs. 
namely, they do not cache PTEs that would raise *any* + * page fault when examined during a TLB load attempt. this is true not only + * for PTEs holding a non-present entry but also present entries that will + * raise a page fault (such as those set up by PaX, or the copy-on-write + * mechanism). in effect it means that we do *not* need to flush the TLBs + * for our target pages since their PTEs are simply not in the TLBs at all. + + * the best thing in omitting it is that we gain around 15-20% speed in the + * fast path of the page fault handler and can get rid of tracing since we + * can no longer flush unintended entries. + */ + "invlpg (%0)\n" +#endif + ASM_STAC "\n" + __copyuser_seg"testb $0,(%0)\n" + ASM_CLAC "\n" + "xorb %3,(%1)\n" + : + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER) + : "memory", "cc"); + pte_unmap_unlock(pte, ptl); + up_read(&mm->mmap_sem); + return 1; +} +#endif + /* * Handle a spurious fault caused by a stale TLB entry. * @@ -1145,6 +1358,9 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) /* This is only called for the current mm, so: */ bool foreign = false; + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC)) + return 1; + /* * Read or write was blocked by protection keys. This is * always an unconditional error and can never result in @@ -1224,6 +1440,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, tsk = current; mm = tsk->mm; +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (!user_mode(regs) && address < 2 * pax_user_shadow_base) { + if (!search_exception_tables(regs->ip)) { + printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n"); + bad_area_nosemaphore(regs, error_code, address, NULL); + return; + } + if (address < pax_user_shadow_base) { + printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n"); + printk(KERN_EMERG "PAX: faulting IP: %pS\n", (void *)regs->ip); + show_trace_log_lvl(current, regs, (void *)regs->sp, KERN_EMERG); + } else + address -= pax_user_shadow_base; + } +#endif + /* * Detect and handle instructions that would cause a page fault for * both a tracked kernel page and a userspace page. @@ -1350,6 +1582,11 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, might_sleep(); } +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (pax_handle_pageexec_fault(regs, mm, address, error_code)) + return; +#endif + vma = find_vma(mm, address); if (unlikely(!vma)) { bad_area(regs, error_code, address); @@ -1361,18 +1598,24 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, bad_area(regs, error_code, address); return; } - if (error_code & PF_USER) { - /* - * Accessing the stack below %sp is always a bug. - * The large cushion allows instructions like enter - * and pusha to work. ("enter $65535, $31" pushes - * 32 pointers and then decrements %sp by 65535.) - */ - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { - bad_area(regs, error_code, address); - return; - } + /* + * Accessing the stack below %sp is always a bug. + * The large cushion allows instructions like enter + * and pusha to work. ("enter $65535, $31" pushes + * 32 pointers and then decrements %sp by 65535.) 
+ */ + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) { + bad_area(regs, error_code, address); + return; } + +#ifdef CONFIG_PAX_SEGMEXEC + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) { + bad_area(regs, error_code, address); + return; + } +#endif + if (unlikely(expand_stack(vma, address))) { bad_area(regs, error_code, address); return; @@ -1492,3 +1735,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) } NOKPROBE_SYMBOL(trace_do_page_fault); #endif /* CONFIG_TRACING */ + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) +{ + struct mm_struct *mm = current->mm; + unsigned long ip = regs->ip; + + if (v8086_mode(regs)) + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff); + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) + return true; + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address) + return true; + return false; + } +#endif + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) { + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) + return true; + return false; + } +#endif + + return false; +} +#endif + +#ifdef CONFIG_PAX_EMUTRAMP +static int pax_handle_fetch_fault_32(struct pt_regs *regs) +{ + int err; + + do { /* PaX: libffi trampoline emulation */ + unsigned char mov, jmp; + unsigned int addr1, addr2; + +#ifdef CONFIG_X86_64 + if ((regs->ip + 9) >> 32) + break; +#endif + + err = get_user(mov, (unsigned char __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); + + if (err) + break; + + if (mov == 0xB8 && jmp == 0xE9) { + regs->ax = addr1; + regs->ip = (unsigned int)(regs->ip + addr2 + 10); + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #1 */ + unsigned char mov1, mov2; + unsigned short jmp; + unsigned int addr1, addr2; + +#ifdef CONFIG_X86_64 + if ((regs->ip + 11) >> 32) + break; +#endif + + err = get_user(mov1, (unsigned char __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5)); + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10)); + + if (err) + break; + + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) { + regs->cx = addr1; + regs->ax = addr2; + regs->ip = addr2; + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #2 */ + unsigned char mov, jmp; + unsigned int addr1, addr2; + +#ifdef CONFIG_X86_64 + if ((regs->ip + 9) >> 32) + break; +#endif + + err = get_user(mov, (unsigned char __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); + + if (err) + break; + + if (mov == 0xB9 && jmp == 0xE9) { + regs->cx = addr1; + regs->ip = (unsigned int)(regs->ip + addr2 + 10); + return 2; + } + } while (0); + + return 1; /* PaX in action */ +} + +#ifdef CONFIG_X86_64 +static int pax_handle_fetch_fault_64(struct pt_regs *regs) +{ + int err; + + do { /* PaX: libffi 
trampoline emulation */ + unsigned short mov1, mov2, jmp1; + unsigned char stcclc, jmp2; + unsigned long addr1, addr2; + + err = get_user(mov1, (unsigned short __user *)regs->ip); + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20)); + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21)); + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23)); + + if (err) + break; + + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) { + regs->r11 = addr1; + regs->r10 = addr2; + if (stcclc == 0xF8) + regs->flags &= ~X86_EFLAGS_CF; + else + regs->flags |= X86_EFLAGS_CF; + regs->ip = addr1; + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #1 */ + unsigned short mov1, mov2, jmp1; + unsigned char jmp2; + unsigned int addr1; + unsigned long addr2; + + err = get_user(mov1, (unsigned short __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2)); + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6)); + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8)); + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16)); + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18)); + + if (err) + break; + + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { + regs->r11 = addr1; + regs->r10 = addr2; + regs->ip = addr1; + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #2 */ + unsigned short mov1, mov2, jmp1; + unsigned char jmp2; + unsigned long addr1, addr2; + + err = get_user(mov1, (unsigned short __user *)regs->ip); + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20)); + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22)); + + if (err) + break; + + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { + regs->r11 = addr1; + regs->r10 = addr2; + regs->ip = addr1; + return 2; + } + } while (0); + + return 1; /* PaX in action */ +} +#endif + +/* + * PaX: decide what to do with offenders (regs->ip = fault address) + * + * returns 1 when task should be killed + * 2 when gcc trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + if (v8086_mode(regs)) + return 1; + + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) + return 1; + +#ifdef CONFIG_X86_32 + return pax_handle_fetch_fault_32(regs); +#else + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) + return pax_handle_fetch_fault_32(regs); + else + return pax_handle_fetch_fault_64(regs); +#endif +} +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) +{ + long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (unsigned char __force_user *)pc+i)) + printk(KERN_CONT "?? 
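To make the byte patterns above concrete, here is a minimal userspace sketch (illustrative only, not part of the patch) that decodes the 10-byte 32-bit libffi trampoline matched by pax_handle_fetch_fault_32(), opcode 0xB8 (mov $imm32,%eax) followed by 0xE9 (jmp rel32), and applies the same register and ip arithmetic as the emulation; the buffer contents and the starting ip are made-up example values.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical trampoline bytes as they would be read from the faulting ip */
	const uint8_t tramp[10] = {
		0xB8, 0x78, 0x56, 0x34, 0x12,	/* mov $0x12345678,%eax */
		0xE9, 0x00, 0x10, 0x00, 0x00,	/* jmp rel32 (+0x1000)  */
	};
	uint32_t ip = 0x08048000;		/* made-up faulting address */
	uint32_t addr1, addr2;

	if (tramp[0] != 0xB8 || tramp[5] != 0xE9)
		return 1;			/* not the pattern the emulation accepts */

	memcpy(&addr1, tramp + 1, 4);		/* imm32 operand of the mov (little-endian host assumed, as on x86) */
	memcpy(&addr2, tramp + 6, 4);		/* rel32 operand of the jmp */

	/* same arithmetic as the kernel code: eax = imm32, ip = ip + rel32 + 10 */
	printf("eax = %#x, new ip = %#x\n", (unsigned)addr1, (unsigned)(ip + addr2 + 10));
	return 0;
}

The +10 in the last step accounts for the combined length of the two emulated instructions, since a jmp rel32 target is computed relative to the address of the instruction that follows it.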
"); + else + printk(KERN_CONT "%02x ", c); + } + printk("\n"); + + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long)); + for (i = -1; i < 80 / (long)sizeof(long); i++) { + unsigned long c; + if (get_user(c, (unsigned long __force_user *)sp+i)) { +#ifdef CONFIG_X86_32 + printk(KERN_CONT "???????? "); +#else + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) + printk(KERN_CONT "???????? ???????? "); + else + printk(KERN_CONT "???????????????? "); +#endif + } else { +#ifdef CONFIG_X86_64 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) { + printk(KERN_CONT "%08x ", (unsigned int)c); + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32)); + } else +#endif + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c); + } + } + printk("\n"); +} +#endif + +/** + * probe_kernel_write(): safely attempt to write to a location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ +long notrace probe_kernel_write(void *dst, const void *src, size_t size) +{ + long ret; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + pagefault_disable(); + pax_open_kernel(); + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size); + pax_close_kernel(); + pagefault_enable(); + set_fs(old_fs); + + return ret ? -EFAULT : 0; +} diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 0d4fb3ebb..0ae587e31 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -313,7 +313,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, + if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ, (void __user *)start, len))) return 0; @@ -389,6 +389,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, goto slow_irqon; #endif + if (unlikely(!access_ok_noprefault(write ? 
VERIFY_WRITE : VERIFY_READ, + (void __user *)start, len))) + return 0; + /* * XXX: batch / limit 'nr', to avoid large irq off latency * needs some instrumenting to determine the common sizes used by diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 6d18b70ed..9dc249e04 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -35,6 +35,8 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) unsigned long vaddr; int idx, type; + BUG_ON(pgprot_val(prot) & _PAGE_USER); + preempt_disable(); pagefault_disable(); @@ -45,7 +47,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); + + pax_open_kernel(); set_pte(kmap_pte-idx, mk_pte(page, prot)); + pax_close_kernel(); + arch_flush_lazy_mmu_mode(); return (void *)vaddr; diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 2ae8584b4..e8f8f2948 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -74,23 +74,24 @@ int pud_huge(pud_t pud) #ifdef CONFIG_HUGETLB_PAGE static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags) + unsigned long pgoff, unsigned long flags, unsigned long offset) { struct hstate *h = hstate_file(file); struct vm_unmapped_area_info info; - + info.flags = 0; info.length = len; info.low_limit = current->mm->mmap_legacy_base; info.high_limit = TASK_SIZE; info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; + info.threadstack_offset = offset; return vm_unmapped_area(&info); } static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr0, unsigned long len, - unsigned long pgoff, unsigned long flags) + unsigned long pgoff, unsigned long flags, unsigned long offset) { struct hstate *h = hstate_file(file); struct vm_unmapped_area_info info; @@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, info.high_limit = current->mm->mmap_base; info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; + info.threadstack_offset = offset; addr = vm_unmapped_area(&info); /* @@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (current->mm->pax_flags & MF_PAX_RANDMMAP) + info.low_limit += current->mm->delta_mmap; +#endif + info.high_limit = TASK_SIZE; addr = vm_unmapped_area(&info); } @@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + unsigned long pax_task_size = TASK_SIZE; + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags); if (len & ~huge_page_mask(h)) return -EINVAL; - if (len > TASK_SIZE) + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + if (len > pax_task_size) return -ENOMEM; if (flags & MAP_FIXED) { @@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return addr; } +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= 
vma->vm_start)) + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) return hugetlb_get_unmapped_area_bottomup(file, addr, len, - pgoff, flags); + pgoff, flags, offset); else return hugetlb_get_unmapped_area_topdown(file, addr, len, - pgoff, flags); + pgoff, flags, offset); } #endif /* CONFIG_HUGETLB_PAGE */ diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 22af912d6..ac95bc1ce 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -4,6 +4,7 @@ #include #include #include /* for max_low_pfn */ +#include #include #include @@ -18,6 +19,7 @@ #include /* for MAX_DMA_PFN */ #include #include +#include /* * We need to define the tracepoints somewhere, and tlb.c @@ -633,7 +635,18 @@ void __init init_mem_mapping(void) early_ioremap_page_table_range_init(); #endif +#ifdef CONFIG_PAX_PER_CPU_PGD + clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); + clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); + load_cr3(get_cpu_pgd(0, kernel)); +#else load_cr3(swapper_pg_dir); +#endif + __flush_tlb_all(); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); @@ -649,10 +662,34 @@ void __init init_mem_mapping(void) * Access has to be given to non-kernel-ram areas as well, these contain the PCI * mmio resources as well as potential bios/acpi data regions. */ + +#ifdef CONFIG_GRKERNSEC_KMEM +static unsigned int ebda_start __read_only; +static unsigned int ebda_end __read_only; +#endif + int devmem_is_allowed(unsigned long pagenr) { +#ifdef CONFIG_GRKERNSEC_KMEM + /* allow BDA */ + if (!pagenr) + return 1; + /* allow EBDA */ + if (pagenr >= ebda_start && pagenr < ebda_end) + return 1; + /* if tboot is in use, allow access to its hardcoded serial log range */ + if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT))) + return 1; + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT)) + return 1; + /* throw out everything else below 1MB */ + if (pagenr <= 256) + return 0; +#else if (pagenr < 256) return 1; +#endif + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) @@ -699,8 +736,33 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) } } +#ifdef CONFIG_GRKERNSEC_KMEM +static inline void gr_init_ebda(void) +{ + unsigned int ebda_addr; + unsigned int ebda_size = 0; + + ebda_addr = get_bios_ebda(); + if (ebda_addr) { + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr); + ebda_size <<= 10; + } + if (ebda_addr && ebda_size) { + ebda_start = ebda_addr >> PAGE_SHIFT; + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT; + } else { + ebda_start = 0x9f000 >> PAGE_SHIFT; + ebda_end = 0xa0000 >> PAGE_SHIFT; + } +} +#else +static inline void gr_init_ebda(void) { } +#endif + void __ref free_initmem(void) { + gr_init_ebda(); + e820_reallocate_tables(); free_init_pages("unused kernel", diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index cf8059016..de6997132 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -51,6 +51,7 @@ #include #include #include +#include #include "mm_internal.h" @@ -61,33 +62,6 @@ static noinline int do_test_wp_bit(void); bool __read_mostly __vmalloc_start_set = false; /* - * Creates a middle page table and puts a pointer to it in the - * given global 
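Worked out for the default case (illustrative, assuming the EBDA fallback values above and no tboot): with GRKERNSEC_KMEM the first megabyte of /dev/mem collapses to page 0 (the BDA), page 0x9f (the 0x9f000-0xa0000 EBDA fallback) and pages 0xa0 through 0xff (the ISA hole between ISA_START_ADDRESS and ISA_END_ADDRESS); every other page number up to and including 256 is rejected before the generic iomem_is_exclusive() and page_is_ram() checks are even reached.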
directory entry. This only returns the gd entry - * in non-PAE compilation mode, since the middle layer is folded. - */ -static pmd_t * __init one_md_table_init(pgd_t *pgd) -{ - pud_t *pud; - pmd_t *pmd_table; - -#ifdef CONFIG_X86_PAE - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { - pmd_table = (pmd_t *)alloc_low_page(); - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); - pud = pud_offset(pgd, 0); - BUG_ON(pmd_table != pmd_offset(pud, 0)); - - return pmd_table; - } -#endif - pud = pud_offset(pgd, 0); - pmd_table = pmd_offset(pud, 0); - - return pmd_table; -} - -/* * Create a page table and place a pointer to it in a middle page * directory entry: */ @@ -97,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) pte_t *page_table = (pte_t *)alloc_low_page(); paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE)); +#else set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); +#endif BUG_ON(page_table != pte_offset_kernel(pmd, 0)); } return pte_offset_kernel(pmd, 0); } +static pmd_t * __init one_md_table_init(pgd_t *pgd) +{ + pud_t *pud; + pmd_t *pmd_table; + + pud = pud_offset(pgd, 0); + pmd_table = pmd_offset(pud, 0); + + return pmd_table; +} + pmd_t * __init populate_extra_pmd(unsigned long vaddr) { int pgd_idx = pgd_index(vaddr); @@ -208,6 +197,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) int pgd_idx, pmd_idx; unsigned long vaddr; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; unsigned long count = page_table_range_init_count(start, end); @@ -222,8 +212,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) pgd = pgd_base + pgd_idx; for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { - pmd = one_md_table_init(pgd); - pmd = pmd + pmd_index(vaddr); + pud = pud_offset(pgd, vaddr); + pmd = pmd_offset(pud, vaddr); + +#ifdef CONFIG_X86_PAE + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); +#endif + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { pte = page_table_kmap_check(one_page_table_init(pmd), @@ -235,11 +230,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) } } -static inline int is_kernel_text(unsigned long addr) +static inline int is_kernel_text(unsigned long start, unsigned long end) { - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) - return 1; - return 0; + if ((start >= ktla_ktva((unsigned long)_etext) || + end <= ktla_ktva((unsigned long)_stext)) && + (start >= ktla_ktva((unsigned long)_einittext) || + end <= ktla_ktva((unsigned long)_sinittext)) && + +#ifdef CONFIG_ACPI_SLEEP + (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) && +#endif + + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000))) + return 0; + return 1; } /* @@ -256,9 +260,10 @@ kernel_physical_mapping_init(unsigned long start, unsigned long last_map_addr = end; unsigned long start_pfn, end_pfn; pgd_t *pgd_base = swapper_pg_dir; - int pgd_idx, pmd_idx, pte_ofs; + unsigned int pgd_idx, pmd_idx, pte_ofs; unsigned long pfn; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned pages_2m, pages_4k; @@ -291,8 +296,13 @@ kernel_physical_mapping_init(unsigned long start, pfn = start_pfn; pgd_idx = pgd_index((pfn<> PAGE_SHIFT); +#endif if (pfn >= 
end_pfn) continue; @@ -304,14 +314,13 @@ kernel_physical_mapping_init(unsigned long start, #endif for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; pmd++, pmd_idx++) { - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET; /* * Map with big pages if possible, otherwise * create normal page tables: */ if (use_pse) { - unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; /* * first pass will use the same initial @@ -322,11 +331,7 @@ kernel_physical_mapping_init(unsigned long start, _PAGE_PSE); pfn &= PMD_MASK >> PAGE_SHIFT; - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + - PAGE_OFFSET + PAGE_SIZE-1; - - if (is_kernel_text(addr) || - is_kernel_text(addr2)) + if (is_kernel_text(address, address + PMD_SIZE)) prot = PAGE_KERNEL_LARGE_EXEC; pages_2m++; @@ -343,7 +348,7 @@ kernel_physical_mapping_init(unsigned long start, pte_ofs = pte_index((pfn<> 10, - (unsigned long)&_etext, (unsigned long)&_edata, - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, + (unsigned long)&_sdata, (unsigned long)&_edata, + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10, - (unsigned long)&_text, (unsigned long)&_etext, + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext), ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* @@ -867,7 +870,7 @@ static noinline int do_test_wp_bit(void) const int rodata_test_data = 0xC3; EXPORT_SYMBOL_GPL(rodata_test_data); -int kernel_set_to_readonly __read_mostly; +int kernel_set_to_readonly __read_only; void set_kernel_text_rw(void) { @@ -877,6 +880,7 @@ void set_kernel_text_rw(void) if (!kernel_set_to_readonly) return; + start = ktla_ktva(start); pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); @@ -891,6 +895,7 @@ void set_kernel_text_ro(void) if (!kernel_set_to_readonly) return; + start = ktla_ktva(start); pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); @@ -903,7 +908,7 @@ static void mark_nxdata_nx(void) * When this called, init has already been executed and released, * so everything past _etext should be NX. */ - unsigned long start = PFN_ALIGN(_etext); + unsigned long start = ktla_ktva(PFN_ALIGN(_etext)); /* * This comes from is_kernel_text upper limit. Also HPAGE where used: */ @@ -919,26 +924,52 @@ void mark_rodata_ro(void) unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel text: %luk\n", - size >> 10); +#ifdef CONFIG_PAX_KERNEXEC + /* PaX: limit KERNEL_CS to actual size */ + unsigned long limit; + struct desc_struct d; + int cpu; + + limit = get_kernel_rpl() ? 
ktva_ktla(0xffffffff) : (unsigned long)&_etext; + limit = (limit - 1UL) >> PAGE_SHIFT; + + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE); + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S); + } + +#ifdef CONFIG_MODULES + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT); +#endif +#endif + start = ktla_ktva(start); +#ifdef CONFIG_PAX_KERNEXEC + /* PaX: make KERNEL_CS read-only */ + if (!get_kernel_rpl()) { +#endif kernel_set_to_readonly = 1; + set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); + printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); + #ifdef CONFIG_CPA_DEBUG - printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", - start, start+size); + printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", start, start+size); set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); printk(KERN_INFO "Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); #endif +#ifdef CONFIG_PAX_KERNEXEC + } +#endif start += size; - size = (unsigned long)__end_rodata - start; + size = PFN_ALIGN(_sdata) - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", - size >> 10); + printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", size >> 10); rodata_test(); #ifdef CONFIG_CPA_DEBUG @@ -952,3 +983,7 @@ void mark_rodata_ro(void) if (__supported_pte_mask & _PAGE_NX) debug_checkwx(); } + +#ifdef CONFIG_PAX_KERNEXEC +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR); +#endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 14b9dd71d..774d5178c 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -65,7 +65,7 @@ * around without checking the pgd every time. 
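A brief note on the descriptor math in the KERNEXEC branch of mark_rodata_ro() above (background explanation, not something the patch states itself): pack_descriptor() is called with flags 0xC, i.e. the granularity bit set, so the segment limit is expressed in 4 KiB units; computing (limit - 1) >> PAGE_SHIFT therefore truncates KERNEL_CS (and the KERNEXEC alias descriptor) to end at the page holding the last byte of the kernel text, while the get_kernel_rpl() case keeps the full-range limit for paravirtualized kernels that do not run at ring 0.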
*/ -pteval_t __supported_pte_mask __read_mostly = ~0; +pteval_t __supported_pte_mask __read_only = ~_PAGE_NX; EXPORT_SYMBOL_GPL(__supported_pte_mask); int force_personality32; @@ -98,7 +98,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed) for (address = start; address <= end; address += PGDIR_SIZE) { const pgd_t *pgd_ref = pgd_offset_k(address); + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif /* * When it is called after memory hot remove, pgd_none() @@ -109,6 +114,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed) continue; spin_lock(&pgd_lock); + +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { + pgd_t *pgd = pgd_offset_cpu(cpu, user, address); + + if (!pgd_none(*pgd_ref) && !pgd_none(*pgd)) + BUG_ON(pgd_page_vaddr(*pgd) + != pgd_page_vaddr(*pgd_ref)); + + if (removed) { + if (pgd_none(*pgd_ref) && !pgd_none(*pgd)) + pgd_clear(pgd); + } else { + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + } + + pgd = pgd_offset_cpu(cpu, kernel, address); +#else list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; spinlock_t *pgt_lock; @@ -117,6 +141,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed) /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); +#endif if (!pgd_none(*pgd_ref) && !pgd_none(*pgd)) BUG_ON(pgd_page_vaddr(*pgd) @@ -130,7 +155,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed) set_pgd(pgd, *pgd_ref); } +#ifndef CONFIG_PAX_PER_CPU_PGD spin_unlock(pgt_lock); +#endif + } spin_unlock(&pgd_lock); } @@ -163,7 +191,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) { if (pgd_none(*pgd)) { pud_t *pud = (pud_t *)spp_getpage(); - pgd_populate(&init_mm, pgd, pud); + pgd_populate_kernel(&init_mm, pgd, pud); if (pud != pud_offset(pgd, 0)) printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", pud, pud_offset(pgd, 0)); @@ -175,7 +203,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) { if (pud_none(*pud)) { pmd_t *pmd = (pmd_t *) spp_getpage(); - pud_populate(&init_mm, pud, pmd); + pud_populate_kernel(&init_mm, pud, pmd); if (pmd != pmd_offset(pud, 0)) printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0)); @@ -204,7 +232,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) pmd = fill_pmd(pud, vaddr); pte = fill_pte(pmd, vaddr); + pax_open_kernel(); set_pte(pte, new_pte); + pax_close_kernel(); /* * It's enough to flush this one mapping. 
@@ -266,14 +296,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, pgd = pgd_offset_k((unsigned long)__va(phys)); if (pgd_none(*pgd)) { pud = (pud_t *) spp_getpage(); - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | - _PAGE_USER)); + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE)); } pud = pud_offset(pgd, (unsigned long)__va(phys)); if (pud_none(*pud)) { pmd = (pmd_t *) spp_getpage(); - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | - _PAGE_USER)); + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE)); } pmd = pmd_offset(pud, phys); BUG_ON(!pmd_none(*pmd)); @@ -543,7 +571,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, page_size_mask, prot); spin_lock(&init_mm.page_table_lock); - pud_populate(&init_mm, pud, pmd); + pud_populate_kernel(&init_mm, pud, pmd); spin_unlock(&init_mm.page_table_lock); } __flush_tlb_all(); @@ -590,7 +618,7 @@ kernel_physical_mapping_init(unsigned long paddr_start, page_size_mask); spin_lock(&init_mm.page_table_lock); - pgd_populate(&init_mm, pgd, pud); + pgd_populate_kernel(&init_mm, pgd, pud); spin_unlock(&init_mm.page_table_lock); pgd_changed = true; } @@ -1013,7 +1041,7 @@ void __init mem_init(void) const int rodata_test_data = 0xC3; EXPORT_SYMBOL_GPL(rodata_test_data); -int kernel_set_to_readonly; +int kernel_set_to_readonly __read_only; void set_kernel_text_rw(void) { @@ -1042,8 +1070,7 @@ void set_kernel_text_ro(void) if (!kernel_set_to_readonly) return; - pr_debug("Set kernel text: %lx - %lx for read only\n", - start, end); + pr_debug("Set kernel text: %lx - %lx for read only\n", start, end); /* * Set the kernel identity mapping for text RO. @@ -1054,18 +1081,23 @@ void set_kernel_text_ro(void) void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); +#ifdef CONFIG_PAX_KERNEXEC + unsigned long addr; + unsigned long end = PFN_ALIGN(_sdata); + unsigned long text_end = end; +#else unsigned long rodata_start = PFN_ALIGN(__start_rodata); unsigned long end = (unsigned long) &__end_rodata_hpage_align; unsigned long text_end = PFN_ALIGN(&__stop___ex_table); unsigned long rodata_end = PFN_ALIGN(&__end_rodata); +#endif unsigned long all_end; - printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", - (end - start) >> 10); - set_memory_ro(start, (end - start) >> PAGE_SHIFT); - kernel_set_to_readonly = 1; + printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10); + set_memory_ro(start, (end - start) >> PAGE_SHIFT); + /* * The rodata/data/bss/brk section (but not the kernel text!) * should also be not-executable. 
@@ -1091,12 +1123,54 @@ void mark_rodata_ro(void) set_memory_ro(start, (end-start) >> PAGE_SHIFT); #endif +#ifdef CONFIG_PAX_KERNEXEC + /* PaX: ensure that kernel code/rodata is read-only, the rest is non-executable */ + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) { + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + continue; + if (addr >= (unsigned long)_text) + BUG_ON(!pmd_large(*pmd)); + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata) + BUG_ON(pmd_write(*pmd)); +// set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + else + BUG_ON(!(pmd_flags(*pmd) & _PAGE_NX)); +// set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); + } + + addr = (unsigned long)__va(__pa(__START_KERNEL_map)); + end = addr + KERNEL_IMAGE_SIZE; + for (; addr < end; addr += PMD_SIZE) { + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + continue; + if (addr >= (unsigned long)_text) + BUG_ON(!pmd_large(*pmd)); + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata))) + BUG_ON(pmd_write(*pmd)); +// set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + } +#else free_init_pages("unused kernel", (unsigned long) __va(__pa_symbol(text_end)), (unsigned long) __va(__pa_symbol(rodata_start))); free_init_pages("unused kernel", (unsigned long) __va(__pa_symbol(rodata_end)), (unsigned long) __va(__pa_symbol(_sdata))); +#endif debug_checkwx(); } diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index ada98b39b..c812b6228 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + + pax_open_kernel(); set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); + pax_close_kernel(); + arch_flush_lazy_mmu_mode(); return (void *)vaddr; diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 7aaa26358..e77438feb 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -58,8 +58,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, unsigned long i; for (i = 0; i < nr_pages; ++i) - if (pfn_valid(start_pfn + i) && - !PageReserved(pfn_to_page(start_pfn + i))) + if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 || + !PageReserved(pfn_to_page(start_pfn + i)))) return 1; return 0; @@ -80,7 +80,7 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, * caller shouldn't need to know that small detail. */ static void __iomem *__ioremap_caller(resource_size_t phys_addr, - unsigned long size, enum page_cache_mode pcm, void *caller) + resource_size_t size, enum page_cache_mode pcm, void *caller) { unsigned long offset, vaddr; resource_size_t pfn, last_pfn, last_addr; @@ -331,7 +331,7 @@ EXPORT_SYMBOL(ioremap_prot); * * Caller must ensure there is only one unmapping for the same pointer. 
*/ -void iounmap(volatile void __iomem *addr) +void iounmap(const volatile void __iomem *addr) { struct vm_struct *p, *o; @@ -394,31 +394,37 @@ int __init arch_ioremap_pmd_supported(void) */ void *xlate_dev_mem_ptr(phys_addr_t phys) { - unsigned long start = phys & PAGE_MASK; - unsigned long offset = phys & ~PAGE_MASK; - void *vaddr; + phys_addr_t pfn = phys >> PAGE_SHIFT; - /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */ - if (page_is_ram(start >> PAGE_SHIFT)) - return __va(phys); - - vaddr = ioremap_cache(start, PAGE_SIZE); - /* Only add the offset on success and return NULL if the ioremap() failed: */ - if (vaddr) - vaddr += offset; + if (page_is_ram(pfn)) { +#ifdef CONFIG_HIGHMEM + if (pfn >= max_low_pfn) + return kmap_high(pfn_to_page(pfn)); + else +#endif + return __va(phys); + } - return vaddr; + return (void __force *)ioremap_cache(phys, 1); } void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) { - if (page_is_ram(phys >> PAGE_SHIFT)) + phys_addr_t pfn = phys >> PAGE_SHIFT; + + if (page_is_ram(pfn)) { +#ifdef CONFIG_HIGHMEM + if (pfn >= max_low_pfn) + kunmap_high(pfn_to_page(pfn)); +#endif return; + } - iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK)); + iounmap((void __iomem __force *)addr); } -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; +static pte_t __bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_rodata; +static pte_t *bm_pte __read_only = __bm_pte; static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) { @@ -454,8 +460,14 @@ void __init early_ioremap_init(void) early_ioremap_setup(); pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); - memset(bm_pte, 0, sizeof(bm_pte)); - pmd_populate_kernel(&init_mm, pmd, bm_pte); + if (pmd_none(*pmd)) +#ifdef CONFIG_COMPAT_VDSO + pmd_populate_user(&init_mm, pmd, __bm_pte); +#else + pmd_populate_kernel(&init_mm, pmd, __bm_pte); +#endif + else + bm_pte = (pte_t *)pmd_page_vaddr(*pmd); /* * The boot-ioremap range spans multiple pmds, for which diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 4515bae36..e16276443 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -627,9 +627,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, * memory (e.g. tracked pages)? For now, we need this to avoid * invoking kmemcheck for PnP BIOS calls. */ - if (regs->flags & X86_VM_MASK) + if (v8086_mode(regs)) return false; - if (regs->cs != __KERNEL_CS) + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS) return false; pte = kmemcheck_pte_lookup(address); diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index d2dc0438d..41dfc2bf2 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void) * Leave an at least ~128 MB hole with possible stack randomization. 
*/ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size()) -#define MAX_GAP (TASK_SIZE/6*5) +#define MAX_GAP (pax_task_size/6*5) static int mmap_is_legacy(void) { @@ -81,16 +81,31 @@ unsigned long arch_mmap_rnd(void) return rnd << PAGE_SHIFT; } -static unsigned long mmap_base(unsigned long rnd) +static unsigned long mmap_base(struct mm_struct *mm, unsigned long rnd) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; - return PAGE_ALIGN(TASK_SIZE - gap - rnd); + return PAGE_ALIGN(pax_task_size - gap - rnd); +} + +static unsigned long mmap_legacy_base(struct mm_struct *mm, unsigned long rnd) +{ +#ifdef CONFIG_PAX_SEGMEXEC + if (mmap_is_ia32() && (mm->pax_flags & MF_PAX_SEGMEXEC)) + return SEGMEXEC_TASK_UNMAPPED_BASE + rnd; +#endif + return TASK_UNMAPPED_BASE + rnd; } /* @@ -101,18 +116,29 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); - mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor; + mm->mmap_legacy_base = mmap_legacy_base(mm, random_factor); if (mmap_is_legacy()) { mm->mmap_base = mm->mmap_legacy_base; mm->get_unmapped_area = arch_get_unmapped_area; } else { - mm->mmap_base = mmap_base(random_factor); + mm->mmap_base = mmap_base(mm, random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; } + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) { + mm->mmap_legacy_base += mm->delta_mmap; + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; + } +#endif + } const char *arch_vma_name(struct vm_area_struct *vma) diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index bef36622e..c5b252326 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs, break; default: { - unsigned char *ip = (unsigned char *)instptr; + unsigned char *ip = (unsigned char *)ktla_ktva(instptr); my_trace->opcode = MMIO_UNKNOWN_OP; my_trace->width = 0; my_trace->value = (*ip) << 16 | *(ip + 1) << 8 | @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition, static void ioremap_trace_core(resource_size_t offset, unsigned long size, void __iomem *addr) { - static atomic_t next_id; + static atomic_unchecked_t next_id; struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL); /* These are page-unaligned. 
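The gap clamping in mmap_base() above can be worked through with a small standalone sketch (illustrative only; TASK_SIZE, the stack rlimit and the random factor are made-up example values, and stack_maxrandom_size() is ignored here):

#include <stdint.h>
#include <stdio.h>

/* round up to a page boundary, like the kernel's PAGE_ALIGN() */
#define PAGE_ALIGN(x)	(((x) + 0xfffULL) & ~0xfffULL)

int main(void)
{
	uint64_t task_size = 0x7ffffffff000ULL;	/* example 64-bit TASK_SIZE */
	uint64_t min_gap   = 128ULL << 20;		/* MIN_GAP without the random part */
	uint64_t max_gap   = task_size / 6 * 5;		/* MAX_GAP */
	uint64_t gap       = 8ULL << 20;		/* RLIMIT_STACK = 8 MiB */
	uint64_t rnd       = 0x1234000;			/* example arch_mmap_rnd() result */

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	/* with an 8 MiB stack limit the 128 MiB minimum gap wins */
	printf("mmap_base = %#llx\n",
	       (unsigned long long)PAGE_ALIGN(task_size - gap - rnd));
	return 0;
}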
*/ struct mmiotrace_map map = { @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size, .private = trace }, .phys = offset, - .id = atomic_inc_return(&next_id) + .id = atomic_inc_return_unchecked(&next_id) }; map.map_id = trace->id; @@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size, ioremap_trace_core(offset, size, addr); } -static void iounmap_trace_core(volatile void __iomem *addr) +static void iounmap_trace_core(const volatile void __iomem *addr) { struct mmiotrace_map map = { .phys = 0, @@ -328,7 +328,7 @@ static void iounmap_trace_core(volatile void __iomem *addr) } } -void mmiotrace_iounmap(volatile void __iomem *addr) +void mmiotrace_iounmap(const volatile void __iomem *addr) { might_sleep(); if (is_enabled()) /* recheck and proper locking in *_core() */ diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index e4f800999..ab2ba5d07 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -193,7 +193,7 @@ static int mpx_insn_decode(struct insn *insn, */ if (!nr_copied) return -EFAULT; - insn_init(insn, buf, nr_copied, x86_64); + insn_init(insn, (void *)ktva_ktla((unsigned long)buf), nr_copied, x86_64); insn_get_length(insn); /* * copy_from_user() tries to get as many bytes as we could see in @@ -293,11 +293,11 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) * We were not able to extract an address from the instruction, * probably because there was something invalid in it. */ - if (info->si_addr == (void *)-1) { + if (info->si_addr == (void __user *)-1) { err = -EINVAL; goto err_out; } - trace_mpx_bounds_register_exception(info->si_addr, bndreg); + trace_mpx_bounds_register_exception((void __force_kernel *)info->si_addr, bndreg); return info; err_out: /* info might be NULL, but kfree() handles that */ diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 3f35b48d1..a825b33c5 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -528,7 +528,7 @@ static void __init numa_clear_kernel_node_hotplug(void) } } -static int __init numa_register_memblks(struct numa_meminfo *mi) +static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi) { unsigned long uninitialized_var(pfn_align); int i, nid; diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e3353c97d..2a8fbe5bd 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -265,7 +265,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, */ #ifdef CONFIG_PCI_BIOS if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) - pgprot_val(forbidden) |= _PAGE_NX; + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; #endif /* @@ -273,14 +273,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, * Does not cover __inittext since that is gone later on. On * 64bit we do not enforce !NX on the low mapping */ - if (within(address, (unsigned long)_text, (unsigned long)_etext)) - pgprot_val(forbidden) |= _PAGE_NX; + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext))) + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; /* * The .rodata section needs to be read-only. Using the pfn * catches all aliases. 
*/ - if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, + if (kernel_set_to_readonly && within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, __pa_symbol(__end_rodata) >> PAGE_SHIFT)) pgprot_val(forbidden) |= _PAGE_RW; @@ -321,6 +321,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, } #endif +#ifdef CONFIG_PAX_KERNEXEC + if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)) >> PAGE_SHIFT, __pa((unsigned long)&_sdata) >> PAGE_SHIFT)) { + pgprot_val(forbidden) |= _PAGE_RW; + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + } +#endif + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); return prot; @@ -457,23 +464,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys); static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) { /* change init_mm */ + pax_open_kernel(); set_pte_atomic(kpte, pte); + #ifdef CONFIG_X86_32 if (!SHARED_KERNEL_PMD) { + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { + pgd_t *pgd = get_cpu_pgd(cpu, kernel); +#else list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; + pgd_t *pgd = (pgd_t *)page_address(page); +#endif + pud_t *pud; pmd_t *pmd; - pgd = (pgd_t *)page_address(page) + pgd_index(address); + pgd += pgd_index(address); pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); set_pte_atomic((pte_t *)pmd, pte); } } #endif + pax_close_kernel(); } static int @@ -711,6 +732,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, } static int split_large_page(struct cpa_data *cpa, pte_t *kpte, + unsigned long address) __must_hold(&cpa_lock); +static int split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address) { struct page *base; @@ -1153,6 +1176,7 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, } } +static int __change_page_attr(struct cpa_data *cpa, int primary) __must_hold(&cpa_lock); static int __change_page_attr(struct cpa_data *cpa, int primary) { unsigned long address; @@ -1211,7 +1235,9 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) * Do we really change anything ? 
*/ if (pte_val(old_pte) != pte_val(new_pte)) { + pax_open_kernel(); set_pte_atomic(kpte, new_pte); + pax_close_kernel(); cpa->flags |= CPA_FLUSHTLB; } cpa->numpages = 1; diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 83e701f16..a70d92e22 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -632,7 +632,7 @@ int free_memtype(u64 start, u64 end) if (IS_ERR(entry)) { pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n", - current->comm, current->pid, start, end - 1); + current->comm, task_pid_nr(current), start, end - 1); return -EINVAL; } @@ -818,7 +818,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) { pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(pcm), base, (unsigned long long)(base + size-1)); return -EINVAL; @@ -853,7 +853,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, pcm = lookup_memtype(paddr); if (want_pcm != pcm) { pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(want_pcm), (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), @@ -874,7 +874,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { free_memtype(paddr, paddr + size); pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(want_pcm), (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index 159b52ccd..2b68583d8 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c @@ -170,7 +170,7 @@ static int memtype_rb_check_conflict(struct rb_root *root, failure: pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n", - current->comm, current->pid, start, end, + current->comm, task_pid_nr(current), start, end, cattr_name(found_type), cattr_name(match->type)); return -EBUSY; } diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c index a23586953..3aa7bdda9 100644 --- a/arch/x86/mm/pf_in.c +++ b/arch/x86/mm/pf_in.c @@ -147,7 +147,7 @@ enum reason_type get_ins_type(unsigned long ins_addr) int i; enum reason_type rv = OTHERS; - p = (unsigned char *)ins_addr; + p = (unsigned char *)ktla_ktva(ins_addr); p += skip_prefix(p, &prf); p += get_opcode(p, &opcode); @@ -167,7 +167,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr) struct prefix_bits prf; int i; - p = (unsigned char *)ins_addr; + p = (unsigned char *)ktla_ktva(ins_addr); p += skip_prefix(p, &prf); p += get_opcode(p, &opcode); @@ -190,7 +190,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr) struct prefix_bits prf; int i; - p = (unsigned char *)ins_addr; + p = (unsigned char *)ktla_ktva(ins_addr); p += skip_prefix(p, &prf); p += get_opcode(p, &opcode); @@ -414,7 +414,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs) struct prefix_bits prf; int i; - p = (unsigned char *)ins_addr; + p = (unsigned char *)ktla_ktva(ins_addr); p += skip_prefix(p, &prf); p += get_opcode(p, &opcode); for (i = 0; i < ARRAY_SIZE(reg_rop); i++) @@ -469,7 +469,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr) struct prefix_bits prf; int i; - 
p = (unsigned char *)ins_addr; + p = (unsigned char *)ktla_ktva(ins_addr); p += skip_prefix(p, &prf); p += get_opcode(p, &opcode); for (i = 0; i < ARRAY_SIZE(imm_wop); i++) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 3feec5af4..0f77f72b2 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -98,10 +98,75 @@ static inline void pgd_list_del(pgd_t *pgd) list_del(&page->lru); } -#define UNSHARED_PTRS_PER_PGD \ - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT; +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) +{ + unsigned int count = USER_PGD_PTRS; + + if (!pax_user_shadow_base) + return; + + while (count--) + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER); +} +#endif + +#ifdef CONFIG_PAX_PER_CPU_PGD +void __clone_user_pgds(pgd_t *dst, const pgd_t *src) +{ + unsigned int count = USER_PGD_PTRS; + + while (count--) { + pgd_t pgd; + +#ifdef CONFIG_X86_64 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER); +#else + pgd = *src++; +#endif +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask); +#endif + + *dst++ = pgd; + } + +} +#endif + +#ifdef CONFIG_X86_64 +#define pxd_t pud_t +#define pyd_t pgd_t +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn) +#define pgtable_pxd_page_ctor(page) true +#define pgtable_pxd_page_dtor(page) do {} while (0) +#define pxd_free(mm, pud) pud_free((mm), (pud)) +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud)) +#define pyd_offset(mm, address) pgd_offset((mm), (address)) +#define PYD_SIZE PGDIR_SIZE +#define mm_inc_nr_pxds(mm) do {} while (0) +#define mm_dec_nr_pxds(mm) do {} while (0) +#else +#define pxd_t pmd_t +#define pyd_t pud_t +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) +#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page) +#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page) +#define pxd_free(mm, pud) pmd_free((mm), (pud)) +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud)) +#define pyd_offset(mm, address) pud_offset((mm), (address)) +#define PYD_SIZE PUD_SIZE +#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm) +#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm) +#endif + +#ifdef CONFIG_PAX_PER_CPU_PGD +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {} +static inline void pgd_dtor(pgd_t *pgd) {} +#else static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) { BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); @@ -142,6 +207,7 @@ static void pgd_dtor(pgd_t *pgd) pgd_list_del(pgd); spin_unlock(&pgd_lock); } +#endif /* * List of all pgd's needed for non-PAE so it can invalidate entries @@ -154,7 +220,7 @@ static void pgd_dtor(pgd_t *pgd) * -- nyc */ -#ifdef CONFIG_X86_PAE +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) /* * In PAE mode, we need to do a cr3 reload (=tlb flush) when * updating the top-level pagetable entries to guarantee the @@ -166,7 +232,7 @@ static void pgd_dtor(pgd_t *pgd) * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate * and initialize the kernel pmds here. */ -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? 
KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) { @@ -184,26 +250,28 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) */ flush_tlb_mm(mm); } +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD) +#define PREALLOCATED_PXDS USER_PGD_PTRS #else /* !CONFIG_X86_PAE */ /* No need to prepopulate any pagetable entries in non-PAE modes. */ -#define PREALLOCATED_PMDS 0 +#define PREALLOCATED_PXDS 0 #endif /* CONFIG_X86_PAE */ -static void free_pmds(struct mm_struct *mm, pmd_t *pmds[]) +static void free_pxds(struct mm_struct *mm, pxd_t *pxds[]) { int i; - for(i = 0; i < PREALLOCATED_PMDS; i++) - if (pmds[i]) { - pgtable_pmd_page_dtor(virt_to_page(pmds[i])); - free_page((unsigned long)pmds[i]); - mm_dec_nr_pmds(mm); + for(i = 0; i < PREALLOCATED_PXDS; i++) + if (pxds[i]) { + pgtable_pxd_page_dtor(virt_to_page(pxds[i])); + free_page((unsigned long)pxds[i]); + mm_dec_nr_pxds(mm); } } -static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[]) +static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[]) { int i; bool failed = false; @@ -212,22 +280,22 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[]) if (mm == &init_mm) gfp &= ~__GFP_ACCOUNT; - for(i = 0; i < PREALLOCATED_PMDS; i++) { - pmd_t *pmd = (pmd_t *)__get_free_page(gfp); - if (!pmd) + for(i = 0; i < PREALLOCATED_PXDS; i++) { + pxd_t *pxd = (pxd_t *)__get_free_page(gfp); + if (!pxd) failed = true; - if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { - free_page((unsigned long)pmd); - pmd = NULL; + if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) { + free_page((unsigned long)pxd); + pxd = NULL; failed = true; } - if (pmd) - mm_inc_nr_pmds(mm); - pmds[i] = pmd; + if (pxd) + mm_inc_nr_pxds(mm); + pxds[i] = pxd; } if (failed) { - free_pmds(mm, pmds); + free_pxds(mm, pxds); return -ENOMEM; } @@ -240,43 +308,47 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[]) * preallocate which never got a corresponding vma will need to be * freed manually. 
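For readers of the pxd_t/pyd_t helpers above (a descriptive note, not part of the patch): the aliases let one copy of the preallocation code serve both configurations. On x86_64 with PAX_PER_CPU_PGD, pxd/pyd stand for pud/pgd, so whole PUD pages are preallocated for the user portion of each pgd; on 32-bit PAE they stand for pmd/pud, which is the kernel's existing behaviour of preallocating the kernel pmds.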
*/ -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp) { int i; - for(i = 0; i < PREALLOCATED_PMDS; i++) { + for(i = 0; i < PREALLOCATED_PXDS; i++) { pgd_t pgd = pgdp[i]; if (pgd_val(pgd) != 0) { - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd); - pgdp[i] = native_make_pgd(0); + set_pgd(pgdp + i, native_make_pgd(0)); - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); - pmd_free(mm, pmd); - mm_dec_nr_pmds(mm); + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT); + pxd_free(mm, pxd); + mm_dec_nr_pxds(mm); } } } -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[]) { - pud_t *pud; + pyd_t *pyd; int i; - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */ return; - pud = pud_offset(pgd, 0); +#ifdef CONFIG_X86_64 + pyd = pyd_offset(mm, 0L); +#else + pyd = pyd_offset(pgd, 0L); +#endif - for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) { - pmd_t *pmd = pmds[i]; + for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) { + pxd_t *pxd = pxds[i]; if (i >= KERNEL_PGD_BOUNDARY) - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), - sizeof(pmd_t) * PTRS_PER_PMD); + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]), + sizeof(pxd_t) * PTRS_PER_PMD); - pud_populate(mm, pud, pmd); + pyd_populate(mm, pyd, pxd); } } @@ -358,7 +430,7 @@ static inline void _pgd_free(pgd_t *pgd) pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; - pmd_t *pmds[PREALLOCATED_PMDS]; + pxd_t *pxds[PREALLOCATED_PXDS]; pgd = _pgd_alloc(); @@ -367,11 +439,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) mm->pgd = pgd; - if (preallocate_pmds(mm, pmds) != 0) + if (preallocate_pxds(mm, pxds) != 0) goto out_free_pgd; if (paravirt_pgd_alloc(mm) != 0) - goto out_free_pmds; + goto out_free_pxds; /* * Make sure that pre-populating the pmds is atomic with @@ -381,14 +453,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) spin_lock(&pgd_lock); pgd_ctor(mm, pgd); - pgd_prepopulate_pmd(mm, pgd, pmds); + pgd_prepopulate_pxd(mm, pgd, pxds); spin_unlock(&pgd_lock); return pgd; -out_free_pmds: - free_pmds(mm, pmds); +out_free_pxds: + free_pxds(mm, pxds); out_free_pgd: _pgd_free(pgd); out: @@ -397,7 +469,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - pgd_mop_up_pmds(mm, pgd); + pgd_mop_up_pxds(mm, pgd); pgd_dtor(pgd); paravirt_pgd_free(mm, pgd); _pgd_free(pgd); @@ -530,6 +602,50 @@ void __init reserve_top_address(unsigned long reserve) int fixmaps_set; +static void fix_user_fixmap(enum fixed_addresses idx, unsigned long address) +{ +#ifdef CONFIG_X86_64 + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + switch (idx) { + default: + return; + +#ifdef CONFIG_X86_VSYSCALL_EMULATION + case VSYSCALL_PAGE: + break; +#endif + } + + pgd = pgd_offset_k(address); + if (!(pgd_val(*pgd) & _PAGE_USER)) { +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned int cpu; + pgd_t *pgd_cpu; + + for_each_possible_cpu(cpu) { + pgd_cpu = pgd_offset_cpu(cpu, kernel, address); + set_pgd(pgd_cpu, __pgd(pgd_val(*pgd_cpu) | _PAGE_USER)); + + pgd_cpu = pgd_offset_cpu(cpu, user, address); + set_pgd(pgd_cpu, __pgd(pgd_val(*pgd_cpu) | _PAGE_USER)); + } +#endif + set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); + } + + pud = pud_offset(pgd, address); + if (!(pud_val(*pud) & _PAGE_USER)) + set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); + + pmd = pmd_offset(pud, address); 
+ if (!(pmd_val(*pmd) & _PAGE_USER)) + set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER)); +#endif +} + void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) { unsigned long address = __fix_to_virt(idx); @@ -540,9 +656,10 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) } set_pte_vaddr(address, pte); fixmaps_set++; + fix_user_fixmap(idx, address); } -void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, +void native_set_fixmap(unsigned int idx, phys_addr_t phys, pgprot_t flags) { __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); @@ -606,9 +723,11 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) prot = pgprot_4k_2_large(prot); + pax_open_kernel(); set_pte((pte_t *)pmd, pfn_pte( (u64)addr >> PAGE_SHIFT, __pgprot(pgprot_val(prot) | _PAGE_PSE))); + pax_close_kernel(); return 1; } diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 9adce7768..b698e8be0 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -46,10 +46,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) return; } pte = pte_offset_kernel(pmd, vaddr); + + pax_open_kernel(); if (!pte_none(pteval)) set_pte_at(&init_mm, vaddr, pte, pteval); else pte_clear(&init_mm, vaddr, pte); + pax_close_kernel(); /* * It's enough to flush this one mapping. diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c index f65a33f50..f408a9959 100644 --- a/arch/x86/mm/setup_nx.c +++ b/arch/x86/mm/setup_nx.c @@ -6,8 +6,10 @@ #include #include +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) static int disable_nx; +#ifndef CONFIG_PAX_PAGEEXEC /* * noexec = on|off * @@ -29,12 +31,17 @@ static int __init noexec_setup(char *str) return 0; } early_param("noexec", noexec_setup); +#endif + +#endif void x86_configure_nx(void) { +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx) __supported_pte_mask |= _PAGE_NX; else +#endif __supported_pte_mask &= ~_PAGE_NX; } diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index a7655f6ca..895549add 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -47,7 +47,11 @@ void leave_mm(int cpu) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); + +#ifndef CONFIG_PAX_PER_CPU_PGD load_cr3(swapper_pg_dir); +#endif + /* * This gets called in the idle path where RCU * functions differently. 
Tracing normally @@ -61,6 +65,51 @@ EXPORT_SYMBOL_GPL(leave_mm); #endif /* CONFIG_SMP */ +static void pax_switch_mm(struct mm_struct *next, unsigned int cpu) +{ + +#ifdef CONFIG_PAX_PER_CPU_PGD + pax_open_kernel(); + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) + __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd); + else +#endif + + __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd); + + __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd); + + pax_close_kernel(); + + BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK)); + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) { + if (static_cpu_has(X86_FEATURE_INVPCID)) { + u64 descriptor[2]; + descriptor[0] = PCID_USER; + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory"); + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) { + descriptor[0] = PCID_KERNEL; + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory"); + } + } else { + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER); + if (static_cpu_has(X86_FEATURE_STRONGUDEREF)) + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH); + else + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL); + } + } else +#endif + + load_cr3(get_cpu_pgd(cpu, kernel)); +#endif + +} + void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { @@ -75,6 +124,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned cpu = smp_processor_id(); +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) + int tlbstate = TLBSTATE_OK; +#endif if (likely(prev != next)) { if (IS_ENABLED(CONFIG_VMAP_STACK)) { @@ -89,9 +141,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (unlikely(pgd_none(*pgd))) set_pgd(pgd, init_mm.pgd[stack_pgd_index]); + else + BUG_ON(pgd->pgd != init_mm.pgd[stack_pgd_index].pgd); } #ifdef CONFIG_SMP +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) + tlbstate = this_cpu_read(cpu_tlbstate.state); +#endif this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); this_cpu_write(cpu_tlbstate.active_mm, next); #endif @@ -111,7 +168,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * We need to prevent an outcome in which CPU 1 observes * the new PTE value and CPU 0 observes bit 1 clear in * mm_cpumask. (If that occurs, then the IPI will never - * be sent, and CPU 0's TLB will contain a stale entry.) + * be sent, and CPU 1's TLB will contain a stale entry.) * * The bad outcome can occur if either CPU's load is * reordered before that CPU's store, so both CPUs must @@ -126,7 +183,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * ordering guarantee we need. 
* */ +#ifdef CONFIG_PAX_PER_CPU_PGD + pax_switch_mm(next, cpu); +#else load_cr3(next->pgd); +#endif trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); @@ -152,9 +213,31 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (unlikely(prev->context.ldt != next->context.ldt)) load_mm_ldt(next); #endif + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + if (!(__supported_pte_mask & _PAGE_NX)) { + smp_mb__before_atomic(); + cpumask_clear_cpu(cpu, &prev->context.cpu_user_cs_mask); + smp_mb__after_atomic(); + cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask); + } +#endif + +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || + prev->context.user_cs_limit != next->context.user_cs_limit)) + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#ifdef CONFIG_SMP + else if (unlikely(tlbstate != TLBSTATE_OK)) + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#endif +#endif + } + else { + pax_switch_mm(next, cpu); + #ifdef CONFIG_SMP - else { this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); @@ -175,13 +258,30 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * As above, load_cr3() is serializing and orders TLB * fills with respect to the mm_cpumask write. */ + +#ifndef CONFIG_PAX_PER_CPU_PGD load_cr3(next->pgd); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); +#endif + load_mm_cr4(next); load_mm_ldt(next); + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (!(__supported_pte_mask & _PAGE_NX)) + cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask); +#endif + +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) +#ifdef CONFIG_PAX_PAGEEXEC + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX))) +#endif + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#endif + } - } #endif + } } #ifdef CONFIG_SMP diff --git b/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c new file mode 100644 index 000000000..cc849dd82 --- /dev/null +++ b/arch/x86/mm/uderef_64.c @@ -0,0 +1,38 @@ +#include +#include +#include +#include + +#ifdef CONFIG_PAX_MEMORY_UDEREF +/* PaX: due to the special call convention these functions must + * - remain leaf functions under all configurations, + * - never be called directly, only dereferenced from the wrappers. 
+ */ +void __used __pax_open_userland(void) +{ + unsigned int cpu; + + if (unlikely(!segment_eq(get_fs(), USER_DS))) + return; + + cpu = raw_get_cpu(); + BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL); + write_cr3(__pa_nodebug(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH); + raw_put_cpu_no_resched(); +} +EXPORT_SYMBOL(__pax_open_userland); + +void __used __pax_close_userland(void) +{ + unsigned int cpu; + + if (unlikely(!segment_eq(get_fs(), USER_DS))) + return; + + cpu = raw_get_cpu(); + BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER); + write_cr3(__pa_nodebug(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH); + raw_put_cpu_no_resched(); +} +EXPORT_SYMBOL(__pax_close_userland); +#endif diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S index f2a7faf47..4cedb9875 100644 --- a/arch/x86/net/bpf_jit.S +++ b/arch/x86/net/bpf_jit.S @@ -9,6 +9,7 @@ */ #include #include +#include /* * Calling convention : @@ -39,7 +40,7 @@ FUNC(sk_load_word_positive_offset) jle bpf_slow_path_word mov (SKBDATA,%rsi),%eax bswap %eax /* ntohl() */ - ret + pax_ret __bpf_call_base FUNC(sk_load_half) test %esi,%esi @@ -52,7 +53,7 @@ FUNC(sk_load_half_positive_offset) jle bpf_slow_path_half movzwl (SKBDATA,%rsi),%eax rol $8,%ax # ntohs() - ret + pax_ret __bpf_call_base FUNC(sk_load_byte) test %esi,%esi @@ -62,7 +63,7 @@ FUNC(sk_load_byte_positive_offset) cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ jle bpf_slow_path_byte movzbl (SKBDATA,%rsi),%eax - ret + pax_ret __bpf_call_base /* rsi contains offset and can be scratched */ #define bpf_slow_path_common(LEN) \ @@ -73,7 +74,7 @@ FUNC(sk_load_byte_positive_offset) push SKBDATA; \ /* rsi already has offset */ \ mov $LEN,%ecx; /* len */ \ - call skb_copy_bits; \ + pax_direct_call skb_copy_bits; \ test %eax,%eax; \ pop SKBDATA; \ pop %r9; \ @@ -85,7 +86,7 @@ bpf_slow_path_word: js bpf_error mov - MAX_BPF_STACK + 32(%rbp),%eax bswap %eax - ret + pax_ret __bpf_call_base bpf_slow_path_half: bpf_slow_path_common(2) @@ -93,13 +94,13 @@ bpf_slow_path_half: mov - MAX_BPF_STACK + 32(%rbp),%ax rol $8,%ax movzwl %ax,%eax - ret + pax_ret __bpf_call_base bpf_slow_path_byte: bpf_slow_path_common(1) js bpf_error movzbl - MAX_BPF_STACK + 32(%rbp),%eax - ret + pax_ret __bpf_call_base #define sk_negative_common(SIZE) \ FRAME_BEGIN; \ @@ -108,7 +109,7 @@ bpf_slow_path_byte: push SKBDATA; \ /* rsi already has offset */ \ mov $SIZE,%edx; /* size */ \ - call bpf_internal_load_pointer_neg_helper; \ + pax_direct_call bpf_internal_load_pointer_neg_helper; \ test %rax,%rax; \ pop SKBDATA; \ pop %r9; \ @@ -123,7 +124,7 @@ FUNC(sk_load_word_negative_offset) sk_negative_common(4) mov (%rax), %eax bswap %eax - ret + pax_ret __bpf_call_base bpf_slow_path_half_neg: cmp SKF_MAX_NEG_OFF, %esi @@ -134,7 +135,7 @@ FUNC(sk_load_half_negative_offset) mov (%rax),%ax rol $8,%ax movzwl %ax,%eax - ret + pax_ret __bpf_call_base bpf_slow_path_byte_neg: cmp SKF_MAX_NEG_OFF, %esi @@ -143,7 +144,7 @@ bpf_slow_path_byte_neg: FUNC(sk_load_byte_negative_offset) sk_negative_common(1) movzbl (%rax), %eax - ret + pax_ret __bpf_call_base bpf_error: # force a return 0 from jit handler @@ -153,4 +154,4 @@ bpf_error: mov - MAX_BPF_STACK + 16(%rbp),%r14 mov - MAX_BPF_STACK + 24(%rbp),%r15 leaveq - ret + pax_ret __bpf_call_base diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 15f743615..f13aeac7e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -14,7 +14,11 @@ #include #include +#ifdef CONFIG_GRKERNSEC_BPF_HARDEN +int bpf_jit_enable 
__read_only; +#else int bpf_jit_enable __read_mostly; +#endif /* * assembly code in arch/x86/net/bpf_jit.S @@ -183,7 +187,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) static void jit_fill_hole(void *area, unsigned int size) { /* fill whole space with int3 instructions */ + pax_open_kernel(); memset(area, 0xcc, size); + pax_close_kernel(); } struct jit_context { @@ -1076,7 +1082,9 @@ xadd: if (is_imm8(insn->off)) pr_err("bpf_jit_compile fatal error\n"); return -EFAULT; } + pax_open_kernel(); memcpy(image + proglen, temp, ilen); + pax_close_kernel(); } proglen += ilen; addrs[i] = proglen; @@ -1169,7 +1177,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (image) { bpf_flush_icache(header, image + proglen); - set_memory_ro((unsigned long)header, header->pages); prog->bpf_func = (void *)image; prog->jited = 1; } else { @@ -1190,12 +1197,8 @@ void bpf_jit_free(struct bpf_prog *fp) unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; struct bpf_binary_header *header = (void *)addr; - if (!fp->jited) - goto free_filter; + if (fp->jited) + bpf_jit_binary_free(header); - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: bpf_prog_unlock_free(fp); } diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index a2488b6e2..4c2dd840e 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c @@ -27,11 +27,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head) struct stack_frame_ia32 *fp; unsigned long bytes; - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead)); if (bytes != 0) return NULL; - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame); oprofile_add_trace(bufhead[0].return_address); @@ -73,7 +73,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head) struct stack_frame bufhead[2]; unsigned long bytes; - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead)); if (bytes != 0) return NULL; diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 28c04123b..568d0a476 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "op_counter.h" #include "op_x86_model.h" @@ -615,7 +616,7 @@ enum __force_cpu_type { static int force_cpu_type; -static int set_cpu_type(const char *str, struct kernel_param *kp) +static int set_cpu_type(const char *str, const struct kernel_param *kp) { if (!strcmp(str, "timer")) { force_cpu_type = timer; @@ -786,8 +787,11 @@ int __init op_nmi_init(struct oprofile_operations *ops) if (ret) return ret; - if (!model->num_virt_counters) - model->num_virt_counters = model->num_counters; + if (!model->num_virt_counters) { + pax_open_kernel(); + const_cast(model->num_virt_counters) = model->num_counters; + pax_close_kernel(); + } mux_init(ops); diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 660a83c82..6ff762bb2 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -518,9 +518,11 @@ static int op_amd_init(struct oprofile_operations *ops) num_counters = AMD64_NUM_COUNTERS; } - op_amd_spec.num_counters = num_counters; - op_amd_spec.num_controls = num_counters; - op_amd_spec.num_virt_counters = 
max(num_counters, NUM_VIRT_COUNTERS); + pax_open_kernel(); + const_cast(op_amd_spec.num_counters) = num_counters; + const_cast(op_amd_spec.num_controls) = num_counters; + const_cast(op_amd_spec.num_virt_counters) = max(num_counters, NUM_VIRT_COUNTERS); + pax_close_kernel(); return 0; } diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 350f7096b..77882e0a0 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "op_x86_model.h" #include "op_counter.h" @@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void) num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER); - op_arch_perfmon_spec.num_counters = num_counters; - op_arch_perfmon_spec.num_controls = num_counters; + pax_open_kernel(); + const_cast(op_arch_perfmon_spec.num_counters) = num_counters; + const_cast(op_arch_perfmon_spec.num_controls) = num_counters; + pax_close_kernel(); } static int arch_perfmon_init(struct oprofile_operations *ignore) diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 71e8a6733..6a313bb0b 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -52,7 +52,7 @@ struct op_x86_model_spec { void (*switch_ctrl)(struct op_x86_model_spec const *model, struct op_msrs const * const msrs); #endif -}; +} __do_const; struct op_counter_config; diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 5a18aedcb..22eac20fb 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -288,7 +288,7 @@ int __init intel_mid_pci_init(void) pci_mmcfg_late_init(); pcibios_enable_irq = intel_mid_pci_irq_enable; pcibios_disable_irq = intel_mid_pci_irq_disable; - pci_root_ops = intel_mid_pci_ops; + memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops); pci_soc_mode = 1; /* Continue with standard init */ return 1; diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 9bd115484..e9d4656ff 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -51,7 +51,7 @@ struct irq_router { struct irq_router_handler { u16 vendor; int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); -}; +} __do_const; int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq; void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq; @@ -792,7 +792,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router return 0; } -static __initdata struct irq_router_handler pirq_routers[] = { +static __initconst const struct irq_router_handler pirq_routers[] = { { PCI_VENDOR_ID_INTEL, intel_router_probe }, { PCI_VENDOR_ID_AL, ali_router_probe }, { PCI_VENDOR_ID_ITE, ite_router_probe }, @@ -819,7 +819,7 @@ static struct pci_dev *pirq_router_dev; static void __init pirq_find_router(struct irq_router *r) { struct irq_routing_table *rt = pirq_table; - struct irq_router_handler *h; + const struct irq_router_handler *h; #ifdef CONFIG_PCI_BIOS if (!rt->signature) { @@ -1092,7 +1092,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pciirq_dmi_table[] = { +static const struct dmi_system_id __initconst pciirq_dmi_table[] = { { .callback = fix_broken_hp_bios_irq9, .ident = "HP Pavilion N5400 Series Laptop", diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 1d97cea3b..f34bbf28e 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c @@ -79,7 +79,7 
@@ union bios32 { static struct { unsigned long address; unsigned short segment; -} bios32_indirect __initdata = { 0, __KERNEL_CS }; +} bios32_indirect __initdata = { 0, __PCIBIOS_CS }; /* * Returns the entry point for the given service, NULL on error @@ -92,28 +92,71 @@ static unsigned long __init bios32_service(unsigned long service) unsigned long length; /* %ecx */ unsigned long entry; /* %edx */ unsigned long flags; + struct desc_struct d, *gdt; local_irq_save(flags); - __asm__("lcall *(%%edi); cld" + + gdt = get_cpu_gdt_table(smp_processor_id()); + + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); + + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld" : "=a" (return_code), "=b" (address), "=c" (length), "=d" (entry) : "0" (service), "1" (0), - "D" (&bios32_indirect)); + "D" (&bios32_indirect), + "r"(__PCIBIOS_DS) + : "memory"); + + pax_open_kernel(); + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0; + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0; + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0; + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0; + pax_close_kernel(); + local_irq_restore(flags); switch (return_code) { - case 0: - return address + entry; - case 0x80: /* Not present */ - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); - return 0; - default: /* Shouldn't happen */ - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", - service, return_code); + case 0: { + int cpu; + unsigned char flags; + + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry); + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) { + printk(KERN_WARNING "bios32_service: not valid\n"); return 0; + } + address = address + PAGE_OFFSET; + length += 16UL; /* some BIOSs underreport this... 
*/ + flags = 4; + if (length >= 64*1024*1024) { + length >>= PAGE_SHIFT; + flags |= 8; + } + + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { + gdt = get_cpu_gdt_table(cpu); + pack_descriptor(&d, address, length, 0x9b, flags); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); + pack_descriptor(&d, address, length, 0x93, flags); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); + } + return entry; + } + case 0x80: /* Not present */ + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); + return 0; + default: /* Shouldn't happen */ + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", + service, return_code); + return 0; } } @@ -122,7 +165,7 @@ static struct { unsigned short segment; } pci_indirect __ro_after_init = { .address = 0, - .segment = __KERNEL_CS, + .segment = __PCIBIOS_CS, }; static int pci_bios_present __ro_after_init; @@ -134,11 +177,13 @@ static int __init check_pcibios(void) unsigned long flags, pcibios_entry; if ((pcibios_entry = bios32_service(PCI_SERVICE))) { - pci_indirect.address = pcibios_entry + PAGE_OFFSET; + pci_indirect.address = pcibios_entry; local_irq_save(flags); - __asm__( - "lcall *(%%edi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%edi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -147,7 +192,8 @@ static int __init check_pcibios(void) "=b" (ebx), "=c" (ecx) : "1" (PCIBIOS_PCI_BIOS_PRESENT), - "D" (&pci_indirect) + "D" (&pci_indirect), + "r" (__PCIBIOS_DS) : "memory"); local_irq_restore(flags); @@ -205,7 +251,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, break; } - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -214,7 +263,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, : "1" (number), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); /* * Zero-extend the result beyond 8 or 16 bits, do not trust the * BIOS having done it: @@ -253,7 +303,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, break; } - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -262,7 +315,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); raw_spin_unlock_irqrestore(&pci_config_lock, flags); @@ -365,10 +419,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) DBG("PCI: Fetching IRQ routing table... 
"); __asm__("push %%es\n\t" + "movw %w8, %%ds\n\t" "push %%ds\n\t" "pop %%es\n\t" - "lcall *(%%esi); cld\n\t" + "lcall *%%ss:(%%esi); cld\n\t" "pop %%es\n\t" + "push %%ss\n\t" + "pop %%ds\n" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -379,7 +436,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) "1" (0), "D" ((long) &opt), "S" (&pci_indirect), - "m" (opt) + "m" (opt), + "r" (__PCIBIOS_DS) : "memory"); DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); if (ret & 0xff00) @@ -403,7 +461,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) { int ret; - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w5, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -411,7 +472,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) : "0" (PCIBIOS_SET_PCI_HW_INT), "b" ((dev->bus->number << 8) | dev->devfn), "c" ((irq << 8) | (pin + 10)), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); return !(ret & 0xff00); } EXPORT_SYMBOL(pcibios_set_irq_routing); diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index cef39b097..0e5aebe25 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -63,11 +63,27 @@ pgd_t * __init efi_call_phys_prolog(void) struct desc_ptr gdt_descr; pgd_t *save_pgd; +#ifdef CONFIG_PAX_KERNEXEC + struct desc_struct d; +#endif + /* Current pgd is swapper_pg_dir, we'll restore it later: */ +#ifdef CONFIG_PAX_PER_CPU_PGD + save_pgd = get_cpu_pgd(smp_processor_id(), kernel); +#else save_pgd = swapper_pg_dir; +#endif + load_cr3(initial_page_table); __flush_tlb_all(); +#ifdef CONFIG_PAX_KERNEXEC + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC); + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S); + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC); + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S); +#endif + gdt_descr.address = __pa(get_cpu_gdt_table(0)); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); @@ -79,6 +95,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) { struct desc_ptr gdt_descr; +#ifdef CONFIG_PAX_KERNEXEC + struct desc_struct d; + + memset(&d, 0, sizeof d); + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S); + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S); +#endif + gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 2f25a3630..3caf89367 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -93,6 +93,11 @@ pgd_t * __init efi_call_phys_prolog(void) vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); } + +#ifdef CONFIG_PAX_PER_CPU_PGD + load_cr3(swapper_pg_dir); +#endif + out: __flush_tlb_all(); @@ -120,6 +125,10 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) kfree(save_pgd); +#ifdef CONFIG_PAX_PER_CPU_PGD + load_cr3(get_cpu_pgd(smp_processor_id(), kernel)); +#endif + __flush_tlb_all(); early_code_mapping_set_exec(0); } @@ -248,8 +257,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) unsigned npages; pgd_t *pgd; - if (efi_enabled(EFI_OLD_MEMMAP)) + if (efi_enabled(EFI_OLD_MEMMAP)) { + /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be + * able to execute the EFI services. 
+ */ + if (__supported_pte_mask & _PAGE_NX) { + unsigned long addr = (unsigned long) __va(0); + pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX); + + pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n"); +#ifdef CONFIG_PAX_PER_CPU_PGD + set_pgd(pgd_offset_cpu(0, kernel, addr), pe); +#endif + set_pgd(pgd_offset_k(addr), pe); + } + return 0; + } efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd); pgd = efi_pgd; diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index 040192b50..7d3300f76 100644 --- a/arch/x86/platform/efi/efi_stub_32.S +++ b/arch/x86/platform/efi/efi_stub_32.S @@ -6,7 +6,9 @@ */ #include +#include #include +#include /* * efi_call_phys(void *, ...) is a function with variable parameters. @@ -20,7 +22,7 @@ * service functions will comply with gcc calling convention, too. */ -.text +__INIT ENTRY(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been @@ -36,10 +38,24 @@ ENTRY(efi_call_phys) * The mapping of lower virtual memory has been created in prolog and * epilog. */ - movl $1f, %edx - subl $__PAGE_OFFSET, %edx - jmp *%edx +#ifdef CONFIG_PAX_KERNEXEC + movl $(__KERNEXEC_EFI_DS), %edx + mov %edx, %ds + mov %edx, %es + mov %edx, %ss + addl $2f,(1f) + ljmp *(1f) + +__INITDATA +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS +.previous + +2: + subl $2b,(1b) +#else + jmp 1f-__PAGE_OFFSET 1: +#endif /* * 2. Now on the top of stack is the return @@ -47,14 +63,8 @@ ENTRY(efi_call_phys) * parameter 2, ..., param n. To make things easy, we save the return * address of efi_call_phys in a global variable. */ - popl %edx - movl %edx, saved_return_addr - /* get the function pointer into ECX*/ - popl %ecx - movl %ecx, efi_rt_function_ptr - movl $2f, %edx - subl $__PAGE_OFFSET, %edx - pushl %edx + popl (saved_return_addr) + popl (efi_rt_function_ptr) /* * 3. Clear PG bit in %CR0. @@ -73,9 +83,8 @@ ENTRY(efi_call_phys) /* * 5. Call the physical function. */ - jmp *%ecx + call *(efi_rt_function_ptr-__PAGE_OFFSET) -2: /* * 6. After EFI runtime service returns, control will return to * following instruction. We'd better readjust stack pointer first. @@ -88,35 +97,36 @@ ENTRY(efi_call_phys) movl %cr0, %edx orl $0x80000000, %edx movl %edx, %cr0 - jmp 1f -1: + /* * 8. Now restore the virtual mode from flat mode by * adding EIP with PAGE_OFFSET. */ - movl $1f, %edx - jmp *%edx +#ifdef CONFIG_PAX_KERNEXEC + movl $(__KERNEL_DS), %edx + mov %edx, %ds + mov %edx, %es + mov %edx, %ss + ljmp $(__KERNEL_CS),$1f +#else + jmp 1f+__PAGE_OFFSET +#endif 1: /* * 9. Balance the stack. And because EAX contain the return value, * we'd better not clobber it. */ - leal efi_rt_function_ptr, %edx - movl (%edx), %ecx - pushl %ecx + pushl (efi_rt_function_ptr) /* - * 10. Push the saved return address onto the stack and return. + * 10. Return to the saved return address. 
*/ - leal saved_return_addr, %edx - movl (%edx), %ecx - pushl %ecx - ret + jmpl *(saved_return_addr) ENDPROC(efi_call_phys) .previous -.data +__INITDATA saved_return_addr: .long 0 efi_rt_function_ptr: diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index cd9507594..7771479fd 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -11,6 +11,7 @@ #include #include #include +#include #define SAVE_XMM \ mov %rsp, %rax; \ @@ -53,5 +54,9 @@ ENTRY(efi_call) addq $48, %rsp RESTORE_XMM popq %rbp +#ifdef efi_call ret +#else + pax_ret efi_call +#endif ENDPROC(efi_call) diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 7850128f0..bcf03ab77 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -62,9 +62,9 @@ enum intel_mid_timer_options intel_mid_timer_options; /* intel_mid_ops to store sub arch ops */ -static struct intel_mid_ops *intel_mid_ops; +static const struct intel_mid_ops *intel_mid_ops; /* getter function for sub arch ops*/ -static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT; +static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT; enum intel_mid_cpu_type __intel_mid_cpu_chip; EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip); @@ -77,9 +77,10 @@ static void intel_mid_power_off(void) intel_scu_ipc_simple_command(IPCMSG_COLD_OFF, 1); }; -static void intel_mid_reboot(void) +static void __noreturn intel_mid_reboot(void) { intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); + BUG(); } static unsigned long __init intel_mid_calibrate_tsc(void) diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h index 3c1c3866d..59a68ed4c 100644 --- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h +++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h @@ -13,6 +13,6 @@ /* For every CPU addition a new get__ops interface needs * to be added. */ -extern void *get_penwell_ops(void); -extern void *get_cloverview_ops(void); -extern void *get_tangier_ops(void); +extern const void *get_penwell_ops(void); +extern const void *get_cloverview_ops(void); +extern const void *get_tangier_ops(void); diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c index 1eb47b629..dadfb57d9 100644 --- a/arch/x86/platform/intel-mid/mfld.c +++ b/arch/x86/platform/intel-mid/mfld.c @@ -61,12 +61,12 @@ static void __init penwell_arch_setup(void) pm_power_off = mfld_power_off; } -void *get_penwell_ops(void) +const void *get_penwell_ops(void) { return &penwell_ops; } -void *get_cloverview_ops(void) +const void *get_cloverview_ops(void) { return &penwell_ops; } diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c index 59253db41..81bb53462 100644 --- a/arch/x86/platform/intel-mid/mrfld.c +++ b/arch/x86/platform/intel-mid/mrfld.c @@ -94,7 +94,7 @@ static struct intel_mid_ops tangier_ops = { .arch_setup = tangier_arch_setup, }; -void *get_tangier_ops(void) +const void *get_tangier_ops(void) { return &tangier_ops; } diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c index f5bad4093..da1428a9a 100644 --- a/arch/x86/platform/intel-quark/imr_selftest.c +++ b/arch/x86/platform/intel-quark/imr_selftest.c @@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...) 
*/ static void __init imr_self_test(void) { - phys_addr_t base = virt_to_phys(&_text); + phys_addr_t base = virt_to_phys((void *)ktla_ktva((unsigned long)_text)); size_t size = virt_to_phys(&__end_rodata) - base; const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n"; int ret; diff --git a/arch/x86/platform/mellanox/mlx-platform.c b/arch/x86/platform/mellanox/mlx-platform.c index c0355d789..dea97c86b 100644 --- a/arch/x86/platform/mellanox/mlx-platform.c +++ b/arch/x86/platform/mellanox/mlx-platform.c @@ -149,7 +149,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) return 1; }; -static struct dmi_system_id mlxplat_dmi_table[] __initdata = { +static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { { .callback = mlxplat_dmi_default_matched, .matches = { diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index d6ee92986..045432762 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size) return res; } -static struct of_pdt_ops prom_olpc_ops __initdata = { +static const struct of_pdt_ops prom_olpc_ops __initconst = { .nextprop = olpc_dt_nextprop, .getproplen = olpc_dt_getproplen, .getproperty = olpc_dt_getproperty, diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S index 948deb289..16a434a30 100644 --- a/arch/x86/platform/olpc/xo1-wakeup.S +++ b/arch/x86/platform/olpc/xo1-wakeup.S @@ -1,8 +1,10 @@ .text #include +#include #include #include #include +#include .macro writepost,value movb $0x34, %al @@ -76,7 +78,7 @@ save_registers: pushfl popl saved_context_eflags - ret + pax_ret save_registers restore_registers: movl saved_context_ebp, %ebp @@ -87,17 +89,17 @@ restore_registers: pushl saved_context_eflags popfl - ret + pax_ret restore_registers ENTRY(do_olpc_suspend_lowlevel) - call save_processor_state - call save_registers + pax_direct_call save_processor_state + pax_direct_call save_registers # This is the stack context we want to remember movl %esp, saved_context_esp pushl $3 - call xo1_do_sleep + pax_direct_call xo1_do_sleep jmp wakeup_start .p2align 4,,7 @@ -106,9 +108,9 @@ ret_point: writepost 0x32 - call restore_registers - call restore_processor_state - ret + pax_direct_call restore_registers + pax_direct_call restore_processor_state + pax_ret do_olpc_suspend_lowlevel .data saved_gdt: .long 0,0 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 9e42842e9..fad7e1d64 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -24,7 +24,7 @@ #include #include -static struct bau_operations ops; +static struct bau_operations *ops __read_only; static struct bau_operations uv123_bau_ops = { .bau_gpa_to_offset = uv_gpa_to_offset, @@ -239,7 +239,7 @@ static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp, msg = mdp->msg; if (!msg->canceled && do_acknowledge) { dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec; - ops.write_l_sw_ack(dw); + ops->write_l_sw_ack(dw); } msg->replied_to = 1; msg->swack_vec = 0; @@ -275,7 +275,7 @@ static void bau_process_retry_msg(struct msg_desc *mdp, msg->swack_vec) == 0) && (msg2->sending_cpu == msg->sending_cpu) && (msg2->msg_type != MSG_NOOP)) { - mmr = ops.read_l_sw_ack(); + mmr = ops->read_l_sw_ack(); msg_res = msg2->swack_vec; /* * This is a message retry; clear the resources held @@ -293,7 +293,7 @@ static void bau_process_retry_msg(struct msg_desc *mdp, 
stat->d_canceled++; cancel_count++; mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res; - ops.write_l_sw_ack(mr); + ops->write_l_sw_ack(mr); } } } @@ -426,12 +426,12 @@ static void do_reset(void *ptr) /* * only reset the resource if it is still pending */ - mmr = ops.read_l_sw_ack(); + mmr = ops->read_l_sw_ack(); msg_res = msg->swack_vec; mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res; if (mmr & msg_res) { stat->d_rcanceled++; - ops.write_l_sw_ack(mr); + ops->write_l_sw_ack(mr); } } } @@ -1221,7 +1221,7 @@ void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp) struct bau_pq_entry *msg = mdp->msg; struct bau_pq_entry *other_msg; - mmr_image = ops.read_l_sw_ack(); + mmr_image = ops->read_l_sw_ack(); swack_vec = msg->swack_vec; if ((swack_vec & mmr_image) == 0) { @@ -1450,7 +1450,7 @@ static int ptc_seq_show(struct seq_file *file, void *data) /* destination side statistics */ seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", - ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)), + ops->read_g_sw_ack(uv_cpu_to_pnode(cpu)), stat->d_requestee, cycles_2_us(stat->d_time), stat->d_alltlb, stat->d_onetlb, stat->d_multmsg, stat->d_nomsg, stat->d_retries, stat->d_canceled, @@ -1744,7 +1744,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode) gpa = uv_gpa(bau_desc); n = uv_gpa_to_gnode(gpa); - m = ops.bau_gpa_to_offset(gpa); + m = ops->bau_gpa_to_offset(gpa); if (is_uv1_hub()) uv1 = 1; @@ -1831,8 +1831,8 @@ static void pq_init(int node, int pnode) bcp->queue_last = pqp + (DEST_Q_SIZE - 1); } - first = ops.bau_gpa_to_offset(uv_gpa(pqp)); - last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1))); + first = ops->bau_gpa_to_offset(uv_gpa(pqp)); + last = ops->bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1))); /* * Pre UV4, the gnode is required to locate the payload queue @@ -1846,9 +1846,9 @@ static void pq_init(int node, int pnode) write_mmr_payload_tail(pnode, tail); } - ops.write_payload_first(pnode, first); - ops.write_payload_last(pnode, last); - ops.write_g_sw_ack(pnode, 0xffffUL); + ops->write_payload_first(pnode, first); + ops->write_payload_last(pnode, last); + ops->write_g_sw_ack(pnode, 0xffffUL); /* in effect, all msg_type's are set to MSG_NOOP */ memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); @@ -2166,13 +2166,13 @@ static int __init uv_bau_init(void) return 0; if (is_uv4_hub()) - ops = uv4_bau_ops; + ops = &uv4_bau_ops; else if (is_uv3_hub()) - ops = uv123_bau_ops; + ops = &uv123_bau_ops; else if (is_uv2_hub()) - ops = uv123_bau_ops; + ops = &uv123_bau_ops; else if (is_uv1_hub()) - ops = uv123_bau_ops; + ops = &uv123_bau_ops; for_each_possible_cpu(cur_cpu) { mask = &per_cpu(uv_flush_tlb_mask, cur_cpu); diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 53cace2ec..ef55ccaae 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -160,11 +160,8 @@ static void do_fpu_end(void) static void fix_processor_context(void) { int cpu = smp_processor_id(); - struct tss_struct *t = &per_cpu(cpu_tss, cpu); -#ifdef CONFIG_X86_64 - struct desc_struct *desc = get_cpu_gdt_table(cpu); - tss_desc tss; -#endif + struct tss_struct *t = cpu_tss + cpu; + set_tss_desc(cpu, t); /* * This just modifies memory; should not be * necessary. But... 
This is necessary, because @@ -173,10 +170,6 @@ static void fix_processor_context(void) */ #ifdef CONFIG_X86_64 - memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); - tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */ - write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); - syscall_init(); /* This sets MSR_*STAR and related */ #endif load_TR_desc(); /* This does ltr */ @@ -289,9 +282,13 @@ int hibernate_resume_nonboot_cpu_disable(void) * any more at that point (the page tables used by it previously may * have been overwritten by hibernate image data). */ + pax_open_kernel(); smp_ops.play_dead = resume_play_dead; + pax_close_kernel(); ret = disable_nonboot_cpus(); + pax_open_kernel(); smp_ops.play_dead = play_dead; + pax_close_kernel(); return ret; } #endif diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 9634557a5..c280eda03 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -130,15 +130,14 @@ static int relocate_restore_code(void) /* Make the page containing the relocated code executable */ pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code); + set_pgd(pgd, __pgd(pgd_val(*pgd) & ~_PAGE_NX)); pud = pud_offset(pgd, relocated_restore_code); - if (pud_large(*pud)) { - set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX)); - } else { + set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX)); + if (!pud_large(*pud)) { pmd_t *pmd = pmd_offset(pud, relocated_restore_code); - if (pmd_large(*pmd)) { - set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX)); - } else { + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX)); + if (!pmd_large(*pmd)) { pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code); set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX)); @@ -198,7 +197,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size) if (max_size < sizeof(struct restore_data_record)) return -EOVERFLOW; rdr->jump_address = (unsigned long)&restore_registers; - rdr->jump_address_phys = __pa_symbol(&restore_registers); + rdr->jump_address_phys = __pa_symbol(rdr->jump_address); rdr->cr3 = restore_cr3; rdr->magic = RESTORE_MAGIC; return 0; diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index 1d0fa0e24..f18e49812 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -11,6 +11,7 @@ #include #include #include +#include .text @@ -23,8 +24,8 @@ ENTRY(swsusp_arch_suspend) pushfl popl saved_context_eflags - call swsusp_save - ret + pax_direct_call swsusp_save + pax_ret swsusp_arch_suspend ENTRY(restore_image) movl mmu_cr4_features, %ecx @@ -74,6 +75,7 @@ done: pushl saved_context_eflags popfl + ASM_CLAC /* Saved in save_processor_state. */ movl $saved_context, %eax @@ -81,4 +83,4 @@ done: xorl %eax, %eax - ret + pax_ret restore_image diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S index ce8da3a04..2f20c8320 100644 --- a/arch/x86/power/hibernate_asm_64.S +++ b/arch/x86/power/hibernate_asm_64.S @@ -22,6 +22,7 @@ #include #include #include +#include ENTRY(swsusp_arch_suspend) movq $saved_context, %rax @@ -48,9 +49,9 @@ ENTRY(swsusp_arch_suspend) movq %rax, restore_cr3(%rip) FRAME_BEGIN - call swsusp_save + pax_direct_call swsusp_save FRAME_END - ret + pax_ret swsusp_arch_suspend ENDPROC(swsusp_arch_suspend) ENTRY(restore_image) @@ -133,6 +134,7 @@ ENTRY(restore_registers) movq pt_regs_r15(%rax), %r15 pushq pt_regs_flags(%rax) popfq + ASM_CLAC /* Saved in save_processor_state. 
*/ lgdt saved_context_gdt_desc(%rax) @@ -142,5 +144,5 @@ ENTRY(restore_registers) /* tell the hibernation core that we've just restored the memory */ movq %rax, in_suspend(%rip) - ret + pax_ret restore_registers ENDPROC(restore_registers) diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 555b9fa0a..3ae31c35b 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -10,6 +10,8 @@ targets += purgatory.ro KCOV_INSTRUMENT := n +GCC_PLUGINS := n + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That # in turn leaves some undefined symbols like __fentry__ in purgatory and not # sure how to relocate those. Like kexec-tools, use custom flags. diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 5db706f14..267f9070c 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -85,7 +85,13 @@ static void __init setup_real_mode(void) __va(real_mode_header->trampoline_header); #ifdef CONFIG_X86_32 - trampoline_header->start = __pa_symbol(startup_32_smp); + trampoline_header->start = __pa_symbol(ktla_ktva((unsigned long)startup_32_smp)); + +#ifdef CONFIG_PAX_KERNEXEC + trampoline_header->start -= LOAD_PHYSICAL_ADDR; +#endif + + trampoline_header->boot_cs = __BOOT_CS; trampoline_header->gdt_limit = __BOOT_DS + 7; trampoline_header->gdt_base = __pa_symbol(boot_gdt); #else @@ -101,7 +107,7 @@ static void __init setup_real_mode(void) *trampoline_cr4_features = mmu_cr4_features; trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); - trampoline_pgd[0] = trampoline_pgd_entry.pgd; + trampoline_pgd[0] = trampoline_pgd_entry.pgd & ~_PAGE_NX; trampoline_pgd[511] = init_level4_pgt[511].pgd; #endif } diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 25012abc3..424eb9a36 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -8,6 +8,7 @@ # KASAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +GCC_PLUGINS := n # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
KCOV_INSTRUMENT := n diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S index a28221d94..93c40f1b8 100644 --- a/arch/x86/realmode/rm/header.S +++ b/arch/x86/realmode/rm/header.S @@ -30,7 +30,9 @@ GLOBAL(real_mode_header) #endif /* APM/BIOS reboot */ .long pa_machine_real_restart_asm -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_32 + .long __KERNEL_CS +#else .long __KERNEL32_CS #endif END(real_mode_header) diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S index d66c607bd..3def84565 100644 --- a/arch/x86/realmode/rm/reboot.S +++ b/arch/x86/realmode/rm/reboot.S @@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm) lgdtl pa_tr_gdt /* Disable paging to drop us out of long mode */ + movl %cr4, %eax + andl $~X86_CR4_PCIDE, %eax + movl %eax, %cr4 + movl %cr0, %eax andl $~X86_CR0_PG, %eax movl %eax, %cr0 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S index 48ddd76bc..c26749f66 100644 --- a/arch/x86/realmode/rm/trampoline_32.S +++ b/arch/x86/realmode/rm/trampoline_32.S @@ -24,6 +24,12 @@ #include #include "realmode.h" +#ifdef CONFIG_PAX_KERNEXEC +#define ta(X) (X) +#else +#define ta(X) (pa_ ## X) +#endif + .text .code16 @@ -38,8 +44,6 @@ ENTRY(trampoline_start) cli # We should be safe anyway - movl tr_start, %eax # where we need to go - movl $0xA5A5A5A5, trampoline_status # write marker for master knows we're running @@ -55,7 +59,7 @@ ENTRY(trampoline_start) movw $1, %dx # protected mode (PE) bit lmsw %dx # into protected mode - ljmpl $__BOOT_CS, $pa_startup_32 + ljmpl *(trampoline_header) .section ".text32","ax" .code32 @@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S .balign 8 GLOBAL(trampoline_header) tr_start: .space 4 - tr_gdt_pad: .space 2 + tr_boot_cs: .space 2 tr_gdt: .space 6 END(trampoline_header) diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index dac7b20d2..72dbaca0d 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -93,6 +93,7 @@ ENTRY(startup_32) movl %edx, %gs movl pa_tr_cr4, %eax + andl $~X86_CR4_PCIDE, %eax movl %eax, %cr4 # Enable PAE mode # Setup trampoline 4 level pagetables @@ -106,7 +107,7 @@ ENTRY(startup_32) wrmsr # Enable paging and in turn activate Long Mode - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax + movl $(X86_CR0_PG | X86_CR0_PE), %eax movl %eax, %cr0 /* diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S index 9e7e14797..25a4158e3 100644 --- a/arch/x86/realmode/rm/wakeup_asm.S +++ b/arch/x86/realmode/rm/wakeup_asm.S @@ -126,11 +126,10 @@ ENTRY(wakeup_start) lgdtl pmode_gdt /* This really couldn't... 
*/ - movl pmode_entry, %eax movl pmode_cr0, %ecx movl %ecx, %cr0 - ljmpl $__KERNEL_CS, $pa_startup_32 - /* -> jmp *%eax in trampoline_32.S */ + + ljmpl *pmode_entry #else jmp trampoline_start #endif diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 604a37efd..e49702a20 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c -HOST_EXTRACFLAGS += -I$(srctree)/tools/include +HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb hostprogs-y += relocs relocs-objs := relocs_32.o relocs_64.o relocs_common.o PHONY += relocs diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 0c2fae8d9..1d2a07981 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -1,5 +1,7 @@ /* This is included from relocs_32/64.c */ +#include "../../../include/generated/autoconf.h" + #define ElfW(type) _ElfW(ELF_BITS, type) #define _ElfW(bits, type) __ElfW(bits, type) #define __ElfW(bits, type) Elf##bits##_##type @@ -11,6 +13,7 @@ #define Elf_Sym ElfW(Sym) static Elf_Ehdr ehdr; +static Elf_Phdr *phdr; struct relocs { uint32_t *offset; @@ -45,6 +48,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { "^(xen_irq_disable_direct_reloc$|" "xen_save_fl_direct_reloc$|" "VDSO|" + "__rap_hash_|" "__crc_)", /* @@ -386,9 +390,39 @@ static void read_ehdr(FILE *fp) } } +static void read_phdrs(FILE *fp) +{ + unsigned int i; + + phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr)); + if (!phdr) { + die("Unable to allocate %d program headers\n", + ehdr.e_phnum); + } + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) { + die("Seek to %d failed: %s\n", + ehdr.e_phoff, strerror(errno)); + } + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) { + die("Cannot read ELF program headers: %s\n", + strerror(errno)); + } + for(i = 0; i < ehdr.e_phnum; i++) { + phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type); + phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset); + phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr); + phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr); + phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz); + phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz); + phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags); + phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align); + } + +} + static void read_shdrs(FILE *fp) { - int i; + unsigned int i; Elf_Shdr shdr; secs = calloc(ehdr.e_shnum, sizeof(struct section)); @@ -423,7 +457,7 @@ static void read_shdrs(FILE *fp) static void read_strtabs(FILE *fp) { - int i; + unsigned int i; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_STRTAB) { @@ -448,7 +482,7 @@ static void read_strtabs(FILE *fp) static void read_symtabs(FILE *fp) { - int i,j; + unsigned int i,j; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_SYMTAB) { @@ -479,9 +513,11 @@ static void read_symtabs(FILE *fp) } -static void read_relocs(FILE *fp) +static void read_relocs(FILE *fp, int use_real_mode) { - int i,j; + unsigned int i,j; + uint32_t base; + for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_REL_TYPE) { @@ -501,9 +537,22 @@ static void read_relocs(FILE *fp) die("Cannot 
read symbol table: %s\n", strerror(errno)); } + base = 0; + +#ifdef CONFIG_X86_32 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) { + if (phdr[j].p_type != PT_LOAD ) + continue; + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz) + continue; + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr; + break; + } +#endif + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { Elf_Rel *rel = &sec->reltab[j]; - rel->r_offset = elf_addr_to_cpu(rel->r_offset); + rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base; rel->r_info = elf_xword_to_cpu(rel->r_info); #if (SHT_REL_TYPE == SHT_RELA) rel->r_addend = elf_xword_to_cpu(rel->r_addend); @@ -515,7 +564,7 @@ static void read_relocs(FILE *fp) static void print_absolute_symbols(void) { - int i; + unsigned int i; const char *format; if (ELF_BITS == 64) @@ -528,7 +577,7 @@ static void print_absolute_symbols(void) for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; char *sym_strtab; - int j; + unsigned int j; if (sec->shdr.sh_type != SHT_SYMTAB) { continue; @@ -555,7 +604,7 @@ static void print_absolute_symbols(void) static void print_absolute_relocs(void) { - int i, printed = 0; + unsigned int i, printed = 0; const char *format; if (ELF_BITS == 64) @@ -568,7 +617,7 @@ static void print_absolute_relocs(void) struct section *sec_applies, *sec_symtab; char *sym_strtab; Elf_Sym *sh_symtab; - int j; + unsigned int j; if (sec->shdr.sh_type != SHT_REL_TYPE) { continue; } @@ -645,13 +694,13 @@ static void add_reloc(struct relocs *r, uint32_t offset) static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, const char *symname)) { - int i; + unsigned int i; /* Walk through the relocations */ for (i = 0; i < ehdr.e_shnum; i++) { char *sym_strtab; Elf_Sym *sh_symtab; struct section *sec_applies, *sec_symtab; - int j; + unsigned int j; struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_REL_TYPE) { @@ -697,7 +746,7 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, * kernel data and does not require special treatment. 
* */ -static int per_cpu_shndx = -1; +static unsigned int per_cpu_shndx = ~0; static Elf_Addr per_cpu_load_addr; static void percpu_init(void) @@ -830,6 +879,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, { unsigned r_type = ELF32_R_TYPE(rel->r_info); int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname); + char *sym_strtab = sec->link->link->strtab; + + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */ + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load")) + return 0; + +#ifdef CONFIG_PAX_KERNEXEC + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */ + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext")) + return 0; + if (!strcmp(sec_name(sym->st_shndx), ".init.text")) + return 0; + if (!strcmp(sec_name(sym->st_shndx), ".exit.text")) + return 0; + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR")) + return 0; +#endif switch (r_type) { case R_386_NONE: @@ -968,7 +1034,7 @@ static int write32_as_text(uint32_t v, FILE *f) static void emit_relocs(int as_text, int use_real_mode) { - int i; + unsigned int i; int (*write_reloc)(uint32_t, FILE *) = write32; int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, const char *symname); @@ -1078,10 +1144,11 @@ void process(FILE *fp, int use_real_mode, int as_text, { regex_init(use_real_mode); read_ehdr(fp); + read_phdrs(fp); read_shdrs(fp); read_strtabs(fp); read_symtabs(fp); - read_relocs(fp); + read_relocs(fp, use_real_mode); if (ELF_BITS == 64) percpu_init(); if (show_absolute_syms) { diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c index 744afdc18..a0b8a0dd0 100644 --- a/arch/x86/um/mem_32.c +++ b/arch/x86/um/mem_32.c @@ -20,7 +20,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); return 0; } diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c index 48e38584d..ab4458c19 100644 --- a/arch/x86/um/tls_32.c +++ b/arch/x86/um/tls_32.c @@ -261,7 +261,7 @@ static int get_tls_entry(struct task_struct *task, struct user_desc *info, if (unlikely(task == current && !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { printk(KERN_ERR "get_tls_entry: task with pid %d got here " - "without flushed TLS.", current->pid); + "without flushed TLS.", task_pid_nr(current)); } return 0; diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index c7b15f3e2..cc09a659c 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -10,6 +10,7 @@ config XEN select XEN_HAVE_VPMU depends on X86_64 || (X86_32 && X86_PAE) depends on X86_LOCAL_APIC && X86_TSC + depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN help This is the Linux Xen port. 
Enabling this will allow the kernel to boot in a paravirtualized environment under the diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bdd855685..4c0b9c7c6 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -134,8 +134,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); struct shared_info xen_dummy_shared_info; -void *xen_initial_gdt; - RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); static int xen_cpu_up_prepare(unsigned int cpu); @@ -596,8 +594,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr) { unsigned long va = dtr->address; unsigned int size = dtr->size + 1; - unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE); - unsigned long frames[pages]; + unsigned long frames[65536 / PAGE_SIZE]; int f; /* @@ -645,8 +642,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) { unsigned long va = dtr->address; unsigned int size = dtr->size + 1; - unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE); - unsigned long frames[pages]; + unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE]; int f; /* @@ -654,7 +650,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) * 8-byte entries, or 16 4k pages.. */ - BUG_ON(size > 65536); + BUG_ON(size > GDT_SIZE); BUG_ON(va & ~PAGE_MASK); for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { @@ -783,7 +779,7 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val, * so we should never see them. Warn if * there's an unexpected IST-using fault handler. */ - if (addr == (unsigned long)debug) + if (addr == (unsigned long)int1) addr = (unsigned long)xen_debug; else if (addr == (unsigned long)int3) addr = (unsigned long)xen_int3; @@ -1138,6 +1134,13 @@ void xen_setup_shared_info(void) xen_setup_mfn_list_list(); } +#ifdef CONFIG_PAX_RAP +PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl_direct); +PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl_direct); +PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable_direct); +PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable_direct); +#endif + /* This is called once we have the cpu_possible_mask */ void xen_setup_vcpu_info_placement(void) { @@ -1153,10 +1156,10 @@ void xen_setup_vcpu_info_placement(void) * percpu area for all cpus, so make use of it. Note that for * PVH we want to use native IRQ mechanism. 
*/ if (have_vcpu_info_placement && !xen_pvh_domain()) { - pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct); - pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct); - pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct); - pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct); + pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(save_fl, xen_save_fl_direct); + pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(restore_fl, xen_restore_fl_direct); + pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(irq_disable, xen_irq_disable_direct); + pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(irq_enable, xen_irq_enable_direct); pv_mmu_ops.read_cr2 = xen_read_cr2_direct; } } @@ -1291,7 +1294,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .end_context_switch = xen_end_context_switch, }; -static void xen_reboot(int reason) +static __noreturn void xen_reboot(int reason) { struct sched_shutdown r = { .reason = reason }; int cpu; @@ -1299,26 +1302,26 @@ static void xen_reboot(int reason) for_each_online_cpu(cpu) xen_pmu_finish(cpu); - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) - BUG(); + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); + BUG(); } -static void xen_restart(char *msg) +static __noreturn void xen_restart(char *msg) { xen_reboot(SHUTDOWN_reboot); } -static void xen_emergency_restart(void) +static __noreturn void xen_emergency_restart(void) { xen_reboot(SHUTDOWN_reboot); } -static void xen_machine_halt(void) +static __noreturn void xen_machine_halt(void) { xen_reboot(SHUTDOWN_poweroff); } -static void xen_machine_power_off(void) +static __noreturn void xen_machine_power_off(void) { if (pm_power_off) pm_power_off(); @@ -1472,8 +1475,11 @@ static void __ref xen_setup_gdt(int cpu) pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot; pv_cpu_ops.load_gdt = xen_load_gdt_boot; - setup_stack_canary_segment(0); - switch_to_new_gdt(0); + setup_stack_canary_segment(cpu); +#ifdef CONFIG_X86_64 + load_percpu_segment(cpu); +#endif + switch_to_new_gdt(cpu); pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry; pv_cpu_ops.load_gdt = xen_load_gdt; @@ -1606,9 +1612,6 @@ asmlinkage __visible void __init xen_start_kernel(void) */ __userpte_alloc_gfp &= ~__GFP_HIGHMEM; - /* Work out if we support NX */ - x86_configure_nx(); - /* Get mfn list */ xen_build_dynamic_phys_to_machine(); @@ -1618,6 +1621,19 @@ asmlinkage __visible void __init xen_start_kernel(void) */ xen_setup_gdt(0); + /* Work out if we support NX */ +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { + unsigned l, h; + + __supported_pte_mask |= _PAGE_NX; + rdmsr(MSR_EFER, l, h); + l |= EFER_NX; + wrmsr(MSR_EFER, l, h); + } +#endif + xen_init_irq_ops(); xen_init_cpuid_mask(); @@ -1635,13 +1651,6 @@ asmlinkage __visible void __init xen_start_kernel(void) machine_ops = xen_machine_ops; - /* - * The only reliable way to retain the initial address of the - * percpu gdt_page is to remember it here, so we can go and - * mark it RW later, when the initial percpu area is freed. 
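[Note on the xen_reboot() family above: the helpers gain __noreturn because the shutdown hypercall does not come back, so the compiler can check callers and drop dead code after the call. A small userspace illustration using the standard C11 specifier, with exit() standing in for the hypercall:]

#include <stdio.h>
#include <stdlib.h>

static _Noreturn void machine_halt(void)
{
    puts("halting");
    exit(0);    /* never returns, so no fall-through warning in the caller */
}

int main(void)
{
    machine_halt();
    /* unreachable: the compiler knows machine_halt() does not return */
}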
- */ - xen_initial_gdt = &per_cpu(gdt_page, 0); - xen_smp_init(); #ifdef CONFIG_ACPI_NUMA diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 33e92955e..7af61babe 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c @@ -116,10 +116,10 @@ static void xen_halt(void) } static const struct pv_irq_ops xen_irq_ops __initconst = { - .save_fl = PV_CALLEE_SAVE(xen_save_fl), - .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), - .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), - .irq_enable = PV_CALLEE_SAVE(xen_irq_enable), + .save_fl = PV_CALLEE_SAVE(save_fl, xen_save_fl), + .restore_fl = PV_CALLEE_SAVE(restore_fl, xen_restore_fl), + .irq_disable = PV_CALLEE_SAVE(irq_disable, xen_irq_disable), + .irq_enable = PV_CALLEE_SAVE(irq_enable, xen_irq_enable), .safe_halt = xen_safe_halt, .halt = xen_halt, diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 7d5afdb41..71fb27ca8 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -64,6 +64,7 @@ #include #include #include +#include #include #include @@ -1940,7 +1941,14 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) * L3_k[511] -> level2_fixmap_pgt */ convert_pfn_mfn(level3_kernel_pgt); + convert_pfn_mfn(level3_vmalloc_start_pgt[0]); + convert_pfn_mfn(level3_vmalloc_start_pgt[1]); + convert_pfn_mfn(level3_vmalloc_start_pgt[2]); + convert_pfn_mfn(level3_vmalloc_start_pgt[3]); + convert_pfn_mfn(level3_vmalloc_end_pgt); + convert_pfn_mfn(level3_vmemmap_pgt); /* L3_k[511][506] -> level1_fixmap_pgt */ + /* L3_k[511][507] -> level1_vsyscall_pgt */ convert_pfn_mfn(level2_fixmap_pgt); } /* We get [511][511] and have Xen's version of level2_kernel_pgt */ @@ -1970,11 +1978,25 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_vmalloc_start_pgt[0], PAGE_KERNEL_RO); + set_page_prot(level3_vmalloc_start_pgt[1], PAGE_KERNEL_RO); + set_page_prot(level3_vmalloc_start_pgt[2], PAGE_KERNEL_RO); + set_page_prot(level3_vmalloc_start_pgt[3], PAGE_KERNEL_RO); + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO); set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); - set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); + set_page_prot(level1_modules_pgt[0], PAGE_KERNEL_RO); + set_page_prot(level1_modules_pgt[1], PAGE_KERNEL_RO); + set_page_prot(level1_modules_pgt[2], PAGE_KERNEL_RO); + set_page_prot(level1_modules_pgt[3], PAGE_KERNEL_RO); + set_page_prot(level1_fixmap_pgt[0], PAGE_KERNEL_RO); + set_page_prot(level1_fixmap_pgt[1], PAGE_KERNEL_RO); + set_page_prot(level1_fixmap_pgt[2], PAGE_KERNEL_RO); + set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO); /* Pin down new L4 */ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, @@ -2385,6 +2407,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.set_pud = xen_set_pud; #if CONFIG_PGTABLE_LEVELS == 4 pv_mmu_ops.set_pgd = xen_set_pgd; + pv_mmu_ops.set_pgd_batched = xen_set_pgd; #endif /* This will work as long as patching hasn't happened yet @@ -2397,7 +2420,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.alloc_pud = xen_alloc_pud; pv_mmu_ops.release_pud = xen_release_pud; #endif - pv_mmu_ops.make_pte = 
PV_CALLEE_SAVE(xen_make_pte); + pv_mmu_ops.make_pte = PV_CALLEE_SAVE(make_pte, xen_make_pte); #ifdef CONFIG_X86_64 pv_mmu_ops.write_cr3 = &xen_write_cr3; @@ -2414,6 +2437,10 @@ static void xen_leave_lazy_mmu(void) preempt_enable(); } +static void xen_pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ +} + static const struct pv_mmu_ops xen_mmu_ops __initconst = { .read_cr2 = xen_read_cr2, .write_cr2 = xen_write_cr2, @@ -2426,7 +2453,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .flush_tlb_single = xen_flush_tlb_single, .flush_tlb_others = xen_flush_tlb_others, - .pte_update = paravirt_nop, + .pte_update = xen_pte_update, .pgd_alloc = xen_pgd_alloc, .pgd_free = xen_pgd_free, @@ -2443,11 +2470,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .ptep_modify_prot_start = __ptep_modify_prot_start, .ptep_modify_prot_commit = __ptep_modify_prot_commit, - .pte_val = PV_CALLEE_SAVE(xen_pte_val), - .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), + .pte_val = PV_CALLEE_SAVE(pte_val, xen_pte_val), + .pgd_val = PV_CALLEE_SAVE(pgd_val, xen_pgd_val), - .make_pte = PV_CALLEE_SAVE(xen_make_pte_init), - .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), + .make_pte = PV_CALLEE_SAVE(make_pte, xen_make_pte_init), + .make_pgd = PV_CALLEE_SAVE(make_pgd, xen_make_pgd), #ifdef CONFIG_X86_PAE .set_pte_atomic = xen_set_pte_atomic, @@ -2456,13 +2483,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { #endif /* CONFIG_X86_PAE */ .set_pud = xen_set_pud_hyper, - .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), - .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), + .make_pmd = PV_CALLEE_SAVE(make_pmd, xen_make_pmd), + .pmd_val = PV_CALLEE_SAVE(pmd_val, xen_pmd_val), #if CONFIG_PGTABLE_LEVELS == 4 - .pud_val = PV_CALLEE_SAVE(xen_pud_val), - .make_pud = PV_CALLEE_SAVE(xen_make_pud), + .pud_val = PV_CALLEE_SAVE(pud_val, xen_pud_val), + .make_pud = PV_CALLEE_SAVE(make_pud, xen_make_pud), .set_pgd = xen_set_pgd_hyper, + .set_pgd_batched = xen_set_pgd_hyper, .alloc_pud = xen_alloc_pmd_init, .release_pud = xen_release_pmd_init, diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index b9fc52556..1368a4d6c 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -444,6 +444,7 @@ static unsigned long xen_get_guest_ip(void) return 0; } + // TODO: adjust with the segment base return xenpmu_data->pmu.r.regs.ip; } diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 311acad7d..89be7c829 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -306,17 +306,13 @@ static void __init xen_smp_prepare_boot_cpu(void) if (xen_pv_domain()) { if (!xen_feature(XENFEAT_writable_page_tables)) - /* We've switched to the "real" per-cpu gdt, so make - * sure the old memory can be recycled. */ - make_lowmem_page_readwrite(xen_initial_gdt); - #ifdef CONFIG_X86_32 /* * Xen starts us with XEN_FLAT_RING1_DS, but linux code * expects __USER_DS */ - loadsegment(ds, __USER_DS); - loadsegment(es, __USER_DS); + loadsegment(ds, __KERNEL_DS); + loadsegment(es, __KERNEL_DS); #endif xen_filter_cpu_maps(); @@ -406,7 +402,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) #ifdef CONFIG_X86_32 /* Note: PVH is not yet supported on x86_32. 
*/ ctxt->user_regs.fs = __KERNEL_PERCPU; - ctxt->user_regs.gs = __KERNEL_STACK_CANARY; + savesegment(gs, ctxt->user_regs.gs); #endif memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); @@ -414,8 +410,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; ctxt->flags = VGCF_IN_KERNEL; ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ - ctxt->user_regs.ds = __USER_DS; - ctxt->user_regs.es = __USER_DS; + ctxt->user_regs.ds = __KERNEL_DS; + ctxt->user_regs.es = __KERNEL_DS; ctxt->user_regs.ss = __KERNEL_DS; xen_copy_trap_info(ctxt->trap_ctxt); @@ -751,7 +747,7 @@ static const struct smp_ops xen_smp_ops __initconst = { void __init xen_smp_init(void) { - smp_ops = xen_smp_ops; + memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops); xen_fill_possible_map(); } diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 3d6e0064c..021e1bde1 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -134,7 +134,7 @@ void __init xen_init_spinlocks(void) __pv_init_lock_hash(); pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; - pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock); + pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(queued_spin_unlock, __pv_queued_spin_unlock); pv_lock_ops.wait = xen_qlock_wait; pv_lock_ops.kick = xen_qlock_kick; } diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index eff224df8..ab792d24c 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -15,6 +15,7 @@ #include #include #include +#include #include "xen-asm.h" @@ -23,7 +24,7 @@ * event status with one and operation. If there are pending events, * then enter the hypervisor to get them handled. */ -ENTRY(xen_irq_enable_direct) +RAP_ENTRY(xen_irq_enable_direct) FRAME_BEGIN /* Unmask events */ movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask @@ -42,7 +43,7 @@ ENTRY(xen_irq_enable_direct) 1: ENDPATCH(xen_irq_enable_direct) FRAME_END - ret + pax_ret xen_irq_enable_direct ENDPROC(xen_irq_enable_direct) RELOC(xen_irq_enable_direct, 2b+1) @@ -51,10 +52,10 @@ ENDPATCH(xen_irq_enable_direct) * Disabling events is simply a matter of making the event mask * non-zero. */ -ENTRY(xen_irq_disable_direct) +RAP_ENTRY(xen_irq_disable_direct) movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask ENDPATCH(xen_irq_disable_direct) - ret + pax_ret xen_irq_disable_direct ENDPROC(xen_irq_disable_direct) RELOC(xen_irq_disable_direct, 0) @@ -67,12 +68,12 @@ ENDPATCH(xen_irq_disable_direct) * undefined. We need to toggle the state of the bit, because Xen and * x86 use opposite senses (mask vs enable). */ -ENTRY(xen_save_fl_direct) +RAP_ENTRY(xen_save_fl_direct) testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask setz %ah addb %ah, %ah ENDPATCH(xen_save_fl_direct) - ret + pax_ret xen_save_fl_direct ENDPROC(xen_save_fl_direct) RELOC(xen_save_fl_direct, 0) @@ -84,7 +85,7 @@ ENDPATCH(xen_save_fl_direct) * interrupt mask state, it checks for unmasked pending events and * enters the hypervisor to get them delivered if so. 
*/ -ENTRY(xen_restore_fl_direct) +RAP_ENTRY(xen_restore_fl_direct) FRAME_BEGIN #ifdef CONFIG_X86_64 testw $X86_EFLAGS_IF, %di @@ -105,7 +106,7 @@ ENTRY(xen_restore_fl_direct) 1: ENDPATCH(xen_restore_fl_direct) FRAME_END - ret + pax_ret xen_restore_fl_direct ENDPROC(xen_restore_fl_direct) RELOC(xen_restore_fl_direct, 2b+1) @@ -120,7 +121,7 @@ ENTRY(check_events) push %eax push %ecx push %edx - call xen_force_evtchn_callback + pax_direct_call xen_force_evtchn_callback pop %edx pop %ecx pop %eax @@ -134,7 +135,7 @@ ENTRY(check_events) push %r9 push %r10 push %r11 - call xen_force_evtchn_callback + pax_direct_call xen_force_evtchn_callback pop %r11 pop %r10 pop %r9 @@ -146,5 +147,5 @@ ENTRY(check_events) pop %rax #endif FRAME_END - ret + pax_ret check_events ENDPROC(check_events) diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index feb6d40a0..c8fd8e7b4 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -28,7 +28,7 @@ check_events: push %eax push %ecx push %edx - call xen_force_evtchn_callback + pax_direct_call xen_force_evtchn_callback pop %edx pop %ecx pop %eax @@ -85,7 +85,7 @@ ENTRY(xen_iret) pushw %fs movl $(__KERNEL_PERCPU), %eax movl %eax, %fs - movl %fs:xen_vcpu, %eax + mov PER_CPU_VAR(xen_vcpu), %eax POP_FS #else movl %ss:xen_vcpu, %eax diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 7f8d8abf4..3032b77c1 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -50,6 +51,18 @@ ENTRY(startup_xen) mov %_ASM_SI, xen_start_info mov $init_thread_union+THREAD_SIZE, %_ASM_SP +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) + movl $cpu_gdt_table,%edi + movl $__per_cpu_load,%eax + movw %ax,__KERNEL_PERCPU + 2(%edi) + rorl $16,%eax + movb %al,__KERNEL_PERCPU + 4(%edi) + movb %ah,__KERNEL_PERCPU + 7(%edi) + movl $__per_cpu_end - 1,%eax + subl $__per_cpu_start,%eax + movw %ax,__KERNEL_PERCPU + 0(%edi) +#endif + jmp xen_start_kernel __FINIT @@ -85,7 +98,7 @@ ENTRY(xen_pvh_early_cpu_init) cmp $0, %r11b jne cpu_bringup_and_idle #endif - ret + pax_ret xen_pvh_early_cpu_init #endif /* CONFIG_XEN_PVH */ diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 3cbce3b08..f1221bcab 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -16,8 +16,6 @@ void xen_syscall_target(void); void xen_syscall32_target(void); #endif -extern void *xen_initial_gdt; - struct trap_info; void xen_copy_trap_info(struct trap_info *traps); diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h index 525bd3d90..ef888b1c4 100644 --- a/arch/xtensa/variants/dc232b/include/variant/core.h +++ b/arch/xtensa/variants/dc232b/include/variant/core.h @@ -119,9 +119,9 @@ ----------------------------------------------------------------------*/ #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */ -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */ #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */ #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */ +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */ #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */ #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */ diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h index 2f337605c..835e50a88 100644 --- 
a/arch/xtensa/variants/fsf/include/variant/core.h +++ b/arch/xtensa/variants/fsf/include/variant/core.h @@ -11,6 +11,7 @@ #ifndef _XTENSA_CORE_H #define _XTENSA_CORE_H +#include /**************************************************************************** Parameters Useful for Any Code, USER or PRIVILEGED @@ -112,9 +113,9 @@ ----------------------------------------------------------------------*/ #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */ -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */ #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */ #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */ +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */ #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */ #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */ diff --git a/block/bio.c b/block/bio.c index db85c5753..104443217 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1145,7 +1145,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, /* * Overflow, abort */ - if (end < start) + if (end < start || end - start > INT_MAX - nr_pages) return ERR_PTR(-EINVAL); nr_pages += end - start; @@ -1270,7 +1270,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, /* * Overflow, abort */ - if (end < start) + if (end < start || end - start > INT_MAX - nr_pages) return ERR_PTR(-EINVAL); nr_pages += end - start; @@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(bio_endio); * to @bio's bi_io_vec; it is the caller's responsibility to ensure that * @bio is not freed before the split. */ -struct bio *bio_split(struct bio *bio, int sectors, +struct bio *bio_split(struct bio *bio, unsigned int sectors, gfp_t gfp, struct bio_set *bs) { struct bio *split = NULL; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index b08ccbb93..87fe4924f 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -561,10 +561,10 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, for (i = 0; i < BLKG_RWSTAT_NR; i++) seq_printf(sf, "%s %s %llu\n", dname, rwstr[i], - (unsigned long long)atomic64_read(&rwstat->aux_cnt[i])); + (unsigned long long)atomic64_read_unchecked(&rwstat->aux_cnt[i])); - v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) + - atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]); + v = atomic64_read_unchecked(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) + + atomic64_read_unchecked(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]); seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v); return v; } @@ -716,7 +716,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, else stat = (void *)blkg + off; - sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt); + sum += blkg_stat_read(stat) + atomic64_read_unchecked(&stat->aux_cnt); } rcu_read_unlock(); @@ -760,7 +760,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, rwstat = (void *)pos_blkg + off; for (i = 0; i < BLKG_RWSTAT_NR; i++) - atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) + + atomic64_add_unchecked(atomic64_read_unchecked(&rwstat->aux_cnt[i]) + percpu_counter_sum_positive(&rwstat->cpu_cnt[i]), &sum.aux_cnt[i]); } @@ -886,13 +886,13 @@ static int blkcg_print_stat(struct seq_file *sf, void *v) rwstat = blkg_rwstat_recursive_sum(blkg, NULL, offsetof(struct blkcg_gq, stat_bytes)); - rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]); - wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]); + rbytes = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_READ]); + wbytes = 
atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]); rwstat = blkg_rwstat_recursive_sum(blkg, NULL, offsetof(struct blkcg_gq, stat_ios)); - rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]); - wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]); + rios = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_READ]); + wios = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]); spin_unlock_irq(blkg->q->queue_lock); diff --git a/block/blk-core.c b/block/blk-core.c index 14d7c0740..fd7b83206 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -3539,8 +3539,11 @@ int __init blk_dev_init(void) if (!kblockd_workqueue) panic("Failed to create kblockd\n"); - request_cachep = kmem_cache_create("blkdev_requests", - sizeof(struct request), 0, SLAB_PANIC, NULL); + request_cachep = kmem_cache_create_usercopy("blkdev_requests", + sizeof(struct request), 0, SLAB_PANIC, + offsetof(struct request, __cmd), + sizeof(((struct request *)0)->__cmd), + NULL); blk_requestq_cachep = kmem_cache_create("request_queue", sizeof(struct request_queue), 0, SLAB_PANIC, NULL); diff --git a/block/blk-map.c b/block/blk-map.c index 27fd8d928..c03179e21 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -223,7 +223,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (!len || !kbuf) return -EINVAL; - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf); if (do_copy) bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); else diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 06cf9807f..3eb814a6d 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); * Softirq action handler - move entries to local list and loop over them * while passing them to the queue registered handler. 
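[Note on the bio_copy_user_iov()/bio_map_user_iov() hunks in block/bio.c above: the wrap check is extended so the running page count cannot overflow a signed int. A userspace sketch of the check-before-add pattern:]

#include <limits.h>
#include <stdio.h>

static int add_pages(int nr_pages, unsigned long start, unsigned long end)
{
    if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
        return -1;                     /* would wrap: bail out like -EINVAL */
    return nr_pages + (int)(end - start);
}

int main(void)
{
    printf("%d\n", add_pages(10, 0, 5));                        /* 15 */
    printf("%d\n", add_pages(10, 0, (unsigned long)INT_MAX));   /* -1, rejected */
    return 0;
}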
*/ -static __latent_entropy void blk_done_softirq(struct softirq_action *h) +static __latent_entropy void blk_done_softirq(void) { struct list_head *cpu_list, local_list; diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 3ab680777..e3e68f0a4 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1965,8 +1965,8 @@ static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf, { struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL, offsetof(struct blkcg_gq, stat_bytes)); - u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + - atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); + u64 sum = atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + + atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); return __blkg_prfill_u64(sf, pd, sum >> 9); } diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 556826ac7..4e7c5fd7f 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode, cgc = compat_alloc_user_space(sizeof(*cgc)); cgc32 = compat_ptr(arg); - if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) || + if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) || get_user(data, &cgc32->buffer) || put_user(compat_ptr(data), &cgc->buffer) || copy_in_user(&cgc->buflen, &cgc32->buflen, @@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode, err |= __get_user(f->spec1, &uf->spec1); err |= __get_user(f->fmt_gap, &uf->fmt_gap); err |= __get_user(name, &uf->name); - f->name = compat_ptr(name); + f->name = (void __force_kernel *)compat_ptr(name); if (err) { err = -EFAULT; goto out; diff --git a/block/genhd.c b/block/genhd.c index fcd6d4fae..96e433b40 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -471,21 +471,24 @@ static char *bdevt_str(dev_t devt, char *buf) /* * Register device numbers dev..(dev+range-1) - * range must be nonzero + * Noop if @range is zero. * The hash chain is sorted on range, so that subranges can override. 
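[Note on the compat_cdrom_generic_command() hunk above: when cmd is an array member, &cgc->cmd has type pointer-to-array while cgc->cmd decays to a pointer to the first element; both name the same bytes, but only the decayed form matches a "pointer to buffer" prototype and its __user annotation. A hedged userspace illustration with a made-up struct:]

#include <stdio.h>
#include <string.h>

struct generic_command {
    unsigned char cmd[12];
};

int main(void)
{
    struct generic_command a = { .cmd = "request" }, b;

    /* a.cmd decays to unsigned char *, &a.cmd is unsigned char (*)[12];
     * same address, different type, so prefer the decayed form for
     * copy helpers that expect a plain buffer pointer. */
    memcpy(b.cmd, a.cmd, sizeof(b.cmd));
    printf("%s (array size %zu)\n", (char *)b.cmd, sizeof(*&a.cmd));
    return 0;
}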
*/ void blk_register_region(dev_t devt, unsigned long range, struct module *module, struct kobject *(*probe)(dev_t, int *, void *), int (*lock)(dev_t, void *), void *data) { - kobj_map(bdev_map, devt, range, module, probe, lock, data); + if (range) + kobj_map(bdev_map, devt, range, module, probe, lock, data); } EXPORT_SYMBOL(blk_register_region); +/* undo blk_register_region(), noop if @range is zero */ void blk_unregister_region(dev_t devt, unsigned long range) { - kobj_unmap(bdev_map, devt, range); + if (range) + kobj_unmap(bdev_map, devt, range); } EXPORT_SYMBOL(blk_unregister_region); diff --git a/block/partitions/efi.c b/block/partitions/efi.c index bcd86e5cd..fe457ef43 100644 --- a/block/partitions/efi.c +++ b/block/partitions/efi.c @@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state, if (!gpt) return NULL; - count = le32_to_cpu(gpt->num_partition_entries) * - le32_to_cpu(gpt->sizeof_partition_entry); - if (!count) + if (!le32_to_cpu(gpt->num_partition_entries)) return NULL; - pte = kmalloc(count, GFP_KERNEL); + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL); if (!pte) return NULL; + count = le32_to_cpu(gpt->num_partition_entries) * + le32_to_cpu(gpt->sizeof_partition_entry); if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba), (u8 *) pte, count) < count) { kfree(pte); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 077479994..a0012eab9 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p) return put_user(0, p); } -static int sg_get_timeout(struct request_queue *q) +static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q) { return jiffies_to_clock_t(q->sg_timeout); } diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index 058c8d755..55229ddb0 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -181,8 +181,9 @@ static inline void QBAR(u32 *block, u8 *Kr, u32 *Km) block[2] ^= F1(block[3], Kr[0], Km[0]); } -void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) +void __cast6_encrypt(void *_c, u8 *outbuf, const u8 *inbuf) { + struct cast6_ctx *c = _c; const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; @@ -219,8 +220,9 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } -void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) +void __cast6_decrypt(void *_c, u8 *outbuf, const u8 *inbuf) { + struct cast6_ctx *c = _c; const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0c654e59f..cf01e3e8f 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -65,7 +65,7 @@ struct cryptd_blkcipher_ctx { struct cryptd_blkcipher_request_ctx { crypto_completion_t complete; -}; +} __no_const; struct cryptd_hash_ctx { atomic_t refcnt; @@ -84,7 +84,7 @@ struct cryptd_aead_ctx { struct cryptd_aead_request_ctx { crypto_completion_t complete; -}; +} __no_const; static void cryptd_queue_worker(struct work_struct *work); diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 1c5705481..e0299350c 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -490,7 +490,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) dump_alloc += CRYPTO_REPORT_MAXSIZE; { - struct netlink_dump_control c = { 
+ netlink_dump_control_no_const c = { .dump = link->dump, .done = link->done, .min_dump_alloc = dump_alloc, diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index ee9cfb99f..30b36ed13 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -392,7 +392,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) int ret; pinst->kobj.kset = pcrypt_kset; - ret = kobject_add(&pinst->kobj, NULL, name); + ret = kobject_add(&pinst->kobj, NULL, "%s", name); if (!ret) kobject_uevent(&pinst->kobj, KOBJ_ADD); diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index f550b5d94..8488bebe8 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -104,7 +104,7 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16]) static const char sigma[16] = "expand 32-byte k"; static const char tau[16] = "expand 16-byte k"; -static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes) +static void __salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes) { const char *constants; @@ -128,7 +128,7 @@ static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes) ctx->input[15] = U8TO32_LITTLE(constants + 12); } -static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv) +static void __salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv) { ctx->input[6] = U8TO32_LITTLE(iv + 0); ctx->input[7] = U8TO32_LITTLE(iv + 4); @@ -136,7 +136,7 @@ static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv) ctx->input[9] = 0; } -static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, +static void __salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, const u8 *src, unsigned int bytes) { u8 buf[64]; @@ -170,7 +170,7 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keysize) { struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); - salsa20_keysetup(ctx, key, keysize); + __salsa20_keysetup(ctx, key, keysize); return 0; } @@ -186,24 +186,24 @@ static int encrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt_block(desc, &walk, 64); - salsa20_ivsetup(ctx, walk.iv); + __salsa20_ivsetup(ctx, walk.iv); if (likely(walk.nbytes == nbytes)) { - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, + __salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes); return blkcipher_walk_done(desc, &walk, 0); } while (walk.nbytes >= 64) { - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, + __salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes - (walk.nbytes % 64)); err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); } if (walk.nbytes) { - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, + __salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes); err = blkcipher_walk_done(desc, &walk, 0); } diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index c16c94f88..8ab5bf8ba 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -62,14 +62,20 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, { struct scatter_walk walk; struct scatterlist tmp[2]; + void *realbuf = buf; if (!nbytes) return; sg = scatterwalk_ffwd(tmp, sg, start); +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW + if (object_starts_on_stack(buf)) + realbuf = buf - current->stack + current->lowmem_stack; +#endif + scatterwalk_start(&walk, sg); - scatterwalk_copychunks(buf, &walk, nbytes, out); + scatterwalk_copychunks(realbuf, &walk, nbytes, out); scatterwalk_done(&walk, out, 0); } 
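[Note on the pcrypt_sysfs_add() hunk above: the caller-supplied name is now passed through a "%s" format instead of being used as the format string itself, closing the usual format-string hole. Userspace sketch of the same rule with printf:]

#include <stdio.h>

static void add_object(const char *name)
{
    /* wrong: printf(name) would interpret any '%' sequences in name */
    printf("%s\n", name);    /* right: name is treated as plain data */
}

int main(void)
{
    add_object("pcrypt");
    add_object("100%_untrusted%n");    /* printed literally, no %n write */
    return 0;
}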
EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 94970a794..f0c8d2670 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -442,8 +442,9 @@ int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) } EXPORT_SYMBOL_GPL(serpent_setkey); -void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) +void __serpent_encrypt(void *_ctx, u8 *dst, const u8 *src) { + struct serpent_ctx *ctx = _ctx; const u32 *k = ctx->expkey; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; @@ -507,8 +508,9 @@ static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) __serpent_encrypt(ctx, dst, src); } -void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) +void __serpent_decrypt(void *_ctx, u8 *dst, const u8 *src) { + struct serpent_ctx *ctx = _ctx; const u32 *k = ctx->expkey; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index f71b756b0..b96847cdf 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -70,7 +70,7 @@ static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); #ifdef CONFIG_ACPI_PROCFS_POWER extern struct proc_dir_entry *acpi_lock_ac_dir(void); -extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); +extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); static int acpi_ac_open_fs(struct inode *inode, struct file *file); #endif diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index c5557d070..8ece62499 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -406,7 +406,7 @@ static int video_set_report_key_events(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id video_dmi_table[] = { +static const struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 */ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 0a1b53c9e..2349b23e8 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -289,7 +289,7 @@ void acpi_ut_init_stack_ptr_trace(void); void acpi_ut_track_stack_ptr(void); -void +__nocapture(2) void acpi_ut_trace(u32 line_number, const char *function_name, const char *module_name, u32 component_id); diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c index 46bd65d38..ec9da4830 100644 --- a/drivers/acpi/acpica/dbhistry.c +++ b/drivers/acpi/acpica/dbhistry.c @@ -155,7 +155,7 @@ void acpi_db_display_history(void) for (i = 0; i < acpi_gbl_num_history; i++) { if (acpi_gbl_history_buffer[history_index].command) { - acpi_os_printf("%3ld %s\n", + acpi_os_printf("%3u %s\n", acpi_gbl_history_buffer[history_index]. cmd_num, acpi_gbl_history_buffer[history_index]. 
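[Note on the serpent/cast6 hunks above: __serpent_encrypt()/__cast6_encrypt() take void * and recover the typed context inside, so the same function can be stored behind a generic cipher callback without casting between incompatible function-pointer types (which CFI/RAP-style checks reject). A userspace sketch of the pattern, with toy names:]

#include <stdio.h>

struct cipher_ctx { unsigned int rounds; };

typedef void (*crypt_fn)(void *ctx, unsigned char *dst, const unsigned char *src);

static void toy_encrypt(void *_ctx, unsigned char *dst, const unsigned char *src)
{
    struct cipher_ctx *ctx = _ctx;    /* recover the real type here */
    dst[0] = src[0] ^ (unsigned char)ctx->rounds;
}

int main(void)
{
    struct cipher_ctx ctx = { .rounds = 32 };
    unsigned char in = 0x41, out;
    crypt_fn fn = toy_encrypt;        /* no function-pointer cast needed */

    fn(&ctx, &out, &in);
    printf("0x%02x\n", out);
    return 0;
}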
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index 068214f9c..43be06bdb 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -608,7 +608,7 @@ static u32 acpi_db_get_line(char *input_buffer) (acpi_gbl_db_parsed_buf, sizeof(acpi_gbl_db_parsed_buf), input_buffer)) { acpi_os_printf - ("Buffer overflow while parsing input line (max %u characters)\n", + ("Buffer overflow while parsing input line (max %lu characters)\n", sizeof(acpi_gbl_db_parsed_buf)); return (0); } @@ -864,24 +864,24 @@ acpi_db_command_dispatch(char *input_buffer, if (param_count == 0) { acpi_os_printf - ("Current debug level for file output is: %8.8lX\n", + ("Current debug level for file output is: %8.8X\n", acpi_gbl_db_debug_level); acpi_os_printf - ("Current debug level for console output is: %8.8lX\n", + ("Current debug level for console output is: %8.8X\n", acpi_gbl_db_console_debug_level); } else if (param_count == 2) { temp = acpi_gbl_db_console_debug_level; acpi_gbl_db_console_debug_level = strtoul(acpi_gbl_db_args[1], NULL, 16); acpi_os_printf - ("Debug Level for console output was %8.8lX, now %8.8lX\n", + ("Debug Level for console output was %8.8X, now %8.8X\n", temp, acpi_gbl_db_console_debug_level); } else { temp = acpi_gbl_db_debug_level; acpi_gbl_db_debug_level = strtoul(acpi_gbl_db_args[1], NULL, 16); acpi_os_printf - ("Debug Level for file output was %8.8lX, now %8.8lX\n", + ("Debug Level for file output was %8.8X, now %8.8X\n", temp, acpi_gbl_db_debug_level); } break; diff --git a/drivers/acpi/acpica/dbstats.c b/drivers/acpi/acpica/dbstats.c index a414e1fa6..de7023024 100644 --- a/drivers/acpi/acpica/dbstats.c +++ b/drivers/acpi/acpica/dbstats.c @@ -377,17 +377,17 @@ acpi_status acpi_db_display_statistics(char *type_arg) "ACPI_TYPE", "NODES", "OBJECTS"); for (i = 0; i < ACPI_TYPE_NS_NODE_MAX; i++) { - acpi_os_printf("%16.16s % 10ld% 10ld\n", + acpi_os_printf("%16.16s % 10d% 10d\n", acpi_ut_get_type_name(i), acpi_gbl_node_type_count[i], acpi_gbl_obj_type_count[i]); } - acpi_os_printf("%16.16s % 10ld% 10ld\n", "Misc/Unknown", + acpi_os_printf("%16.16s % 10d% 10d\n", "Misc/Unknown", acpi_gbl_node_type_count_misc, acpi_gbl_obj_type_count_misc); - acpi_os_printf("%16.16s % 10ld% 10ld\n", "TOTALS:", + acpi_os_printf("%16.16s % 10d% 10d\n", "TOTALS:", acpi_gbl_num_nodes, acpi_gbl_num_objects); break; @@ -415,16 +415,16 @@ acpi_status acpi_db_display_statistics(char *type_arg) case CMD_STAT_MISC: acpi_os_printf("\nMiscellaneous Statistics:\n\n"); - acpi_os_printf("Calls to AcpiPsFind:.. ........% 7ld\n", + acpi_os_printf("Calls to AcpiPsFind:.. 
........% 7u\n", acpi_gbl_ps_find_count); - acpi_os_printf("Calls to AcpiNsLookup:..........% 7ld\n", + acpi_os_printf("Calls to AcpiNsLookup:..........% 7u\n", acpi_gbl_ns_lookup_count); acpi_os_printf("\n"); acpi_os_printf("Mutex usage:\n\n"); for (i = 0; i < ACPI_NUM_MUTEX; i++) { - acpi_os_printf("%-28s: % 7ld\n", + acpi_os_printf("%-28s: % 7u\n", acpi_ut_get_mutex_name(i), acpi_gbl_mutex_info[i].use_count); } @@ -434,87 +434,87 @@ acpi_status acpi_db_display_statistics(char *type_arg) acpi_os_printf("\nInternal object sizes:\n\n"); - acpi_os_printf("Common %3d\n", + acpi_os_printf("Common %3lu\n", sizeof(struct acpi_object_common)); - acpi_os_printf("Number %3d\n", + acpi_os_printf("Number %3lu\n", sizeof(struct acpi_object_integer)); - acpi_os_printf("String %3d\n", + acpi_os_printf("String %3lu\n", sizeof(struct acpi_object_string)); - acpi_os_printf("Buffer %3d\n", + acpi_os_printf("Buffer %3lu\n", sizeof(struct acpi_object_buffer)); - acpi_os_printf("Package %3d\n", + acpi_os_printf("Package %3lu\n", sizeof(struct acpi_object_package)); - acpi_os_printf("BufferField %3d\n", + acpi_os_printf("BufferField %3lu\n", sizeof(struct acpi_object_buffer_field)); - acpi_os_printf("Device %3d\n", + acpi_os_printf("Device %3lu\n", sizeof(struct acpi_object_device)); - acpi_os_printf("Event %3d\n", + acpi_os_printf("Event %3lu\n", sizeof(struct acpi_object_event)); - acpi_os_printf("Method %3d\n", + acpi_os_printf("Method %3lu\n", sizeof(struct acpi_object_method)); - acpi_os_printf("Mutex %3d\n", + acpi_os_printf("Mutex %3lu\n", sizeof(struct acpi_object_mutex)); - acpi_os_printf("Region %3d\n", + acpi_os_printf("Region %3lu\n", sizeof(struct acpi_object_region)); - acpi_os_printf("PowerResource %3d\n", + acpi_os_printf("PowerResource %3lu\n", sizeof(struct acpi_object_power_resource)); - acpi_os_printf("Processor %3d\n", + acpi_os_printf("Processor %3lu\n", sizeof(struct acpi_object_processor)); - acpi_os_printf("ThermalZone %3d\n", + acpi_os_printf("ThermalZone %3lu\n", sizeof(struct acpi_object_thermal_zone)); - acpi_os_printf("RegionField %3d\n", + acpi_os_printf("RegionField %3lu\n", sizeof(struct acpi_object_region_field)); - acpi_os_printf("BankField %3d\n", + acpi_os_printf("BankField %3lu\n", sizeof(struct acpi_object_bank_field)); - acpi_os_printf("IndexField %3d\n", + acpi_os_printf("IndexField %3lu\n", sizeof(struct acpi_object_index_field)); - acpi_os_printf("Reference %3d\n", + acpi_os_printf("Reference %3lu\n", sizeof(struct acpi_object_reference)); - acpi_os_printf("Notify %3d\n", + acpi_os_printf("Notify %3lu\n", sizeof(struct acpi_object_notify_handler)); - acpi_os_printf("AddressSpace %3d\n", + acpi_os_printf("AddressSpace %3lu\n", sizeof(struct acpi_object_addr_handler)); - acpi_os_printf("Extra %3d\n", + acpi_os_printf("Extra %3lu\n", sizeof(struct acpi_object_extra)); - acpi_os_printf("Data %3d\n", + acpi_os_printf("Data %3lu\n", sizeof(struct acpi_object_data)); acpi_os_printf("\n"); - acpi_os_printf("ParseObject %3d\n", + acpi_os_printf("ParseObject %3lu\n", sizeof(struct acpi_parse_obj_common)); - acpi_os_printf("ParseObjectNamed %3d\n", + acpi_os_printf("ParseObjectNamed %3lu\n", sizeof(struct acpi_parse_obj_named)); - acpi_os_printf("ParseObjectAsl %3d\n", + acpi_os_printf("ParseObjectAsl %3lu\n", sizeof(struct acpi_parse_obj_asl)); - acpi_os_printf("OperandObject %3d\n", + acpi_os_printf("OperandObject %3lu\n", sizeof(union acpi_operand_object)); - acpi_os_printf("NamespaceNode %3d\n", + acpi_os_printf("NamespaceNode %3lu\n", sizeof(struct acpi_namespace_node)); - 
acpi_os_printf("AcpiObject %3d\n", + acpi_os_printf("AcpiObject %3lu\n", sizeof(union acpi_object)); acpi_os_printf("\n"); - acpi_os_printf("Generic State %3d\n", + acpi_os_printf("Generic State %3lu\n", sizeof(union acpi_generic_state)); - acpi_os_printf("Common State %3d\n", + acpi_os_printf("Common State %3lu\n", sizeof(struct acpi_common_state)); - acpi_os_printf("Control State %3d\n", + acpi_os_printf("Control State %3lu\n", sizeof(struct acpi_control_state)); - acpi_os_printf("Update State %3d\n", + acpi_os_printf("Update State %3lu\n", sizeof(struct acpi_update_state)); - acpi_os_printf("Scope State %3d\n", + acpi_os_printf("Scope State %3lu\n", sizeof(struct acpi_scope_state)); - acpi_os_printf("Parse Scope %3d\n", + acpi_os_printf("Parse Scope %3lu\n", sizeof(struct acpi_pscope_state)); - acpi_os_printf("Package State %3d\n", + acpi_os_printf("Package State %3lu\n", sizeof(struct acpi_pkg_state)); - acpi_os_printf("Thread State %3d\n", + acpi_os_printf("Thread State %3lu\n", sizeof(struct acpi_thread_state)); - acpi_os_printf("Result Values %3d\n", + acpi_os_printf("Result Values %3lu\n", sizeof(struct acpi_result_values)); - acpi_os_printf("Notify Info %3d\n", + acpi_os_printf("Notify Info %3lu\n", sizeof(struct acpi_notify_info)); break; diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index f76e0eab3..4b83315dd 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -70,11 +70,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */ static struct acpi_sleep_functions acpi_sleep_dispatch[] = { - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep), - acpi_hw_extended_sleep}, - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep), - acpi_hw_extended_wake_prep}, - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake} + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep), + .extended_function = acpi_hw_extended_sleep}, + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep), + .extended_function = acpi_hw_extended_wake_prep}, + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), + .extended_function = acpi_hw_extended_wake} }; /* diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index 044df9b03..b4cdb9c14 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -189,7 +189,7 @@ acpi_debug_print(u32 requested_debug_level, * Display the module name, current line number, thread ID (if requested), * current procedure nesting level, and the current procedure name */ - acpi_os_printf("%9s-%04ld ", module_name, line_number); + acpi_os_printf("%9s-%04u ", module_name, line_number); #ifdef ACPI_APPLICATION /* diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 6e9f14c0a..7f9a99db3 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx, struct apei_exec_ins_type { u32 flags; apei_exec_ins_func_t run; -}; +} __do_const; struct apei_exec_context { u32 ip; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index e53bef6cf..46534c453 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -483,7 +483,7 @@ static void __ghes_print_estatus(const char *pfx, const struct acpi_hest_generic *generic, const struct acpi_hest_generic_status *estatus) { - static 
atomic_t seqno; + static atomic_unchecked_t seqno; unsigned int curr_seqno; char pfx_seq[64]; @@ -494,7 +494,7 @@ static void __ghes_print_estatus(const char *pfx, else pfx = KERN_ERR; } - curr_seqno = atomic_inc_return(&seqno); + curr_seqno = atomic_inc_return_unchecked(&seqno); snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno); printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n", pfx_seq, generic->header.source_id); @@ -544,7 +544,7 @@ static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus) cache_estatus = GHES_ESTATUS_FROM_CACHE(cache); if (memcmp(estatus, cache_estatus, len)) continue; - atomic_inc(&cache->count); + atomic_inc_unchecked(&cache->count); now = sched_clock(); if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC) cached = 1; @@ -578,7 +578,7 @@ static struct ghes_estatus_cache *ghes_estatus_cache_alloc( cache_estatus = GHES_ESTATUS_FROM_CACHE(cache); memcpy(cache_estatus, estatus, len); cache->estatus_len = len; - atomic_set(&cache->count, 0); + atomic_set_unchecked(&cache->count, 0); cache->generic = generic; cache->time_in = sched_clock(); return cache; @@ -628,7 +628,7 @@ static void ghes_estatus_cache_add( slot_cache = cache; break; } - count = atomic_read(&cache->count); + count = atomic_read_unchecked(&cache->count); period = duration; do_div(period, (count + 1)); if (period > max_period) { diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 93ecae55f..a845fce81 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -75,7 +75,7 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); #ifdef CONFIG_ACPI_PROCFS_POWER extern struct proc_dir_entry *acpi_lock_battery_dir(void); -extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); +extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); enum acpi_battery_files { info_tag = 0, diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c index 75f128e76..0fbae68c8 100644 --- a/drivers/acpi/bgrt.c +++ b/drivers/acpi/bgrt.c @@ -17,40 +17,40 @@ static struct kobject *bgrt_kobj; -static ssize_t show_version(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_version(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version); } -static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); +static KOBJECT_ATTR(version, S_IRUGO, show_version, NULL); -static ssize_t show_status(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_status(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status); } -static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); +static KOBJECT_ATTR(status, S_IRUGO, show_status, NULL); -static ssize_t show_type(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_type(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type); } -static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); +static KOBJECT_ATTR(type, S_IRUGO, show_type, NULL); -static ssize_t show_xoffset(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_xoffset(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x); } -static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL); +static KOBJECT_ATTR(xoffset, S_IRUGO, 
show_xoffset, NULL); -static ssize_t show_yoffset(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_yoffset(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y); } -static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL); +static KOBJECT_ATTR(yoffset, S_IRUGO, show_yoffset, NULL); static ssize_t image_read(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) @@ -87,8 +87,10 @@ static int __init bgrt_init(void) if (!bgrt_image) return -ENODEV; - bin_attr_image.private = bgrt_image; - bin_attr_image.size = bgrt_image_size; + pax_open_kernel(); + const_cast(bin_attr_image.private) = bgrt_image; + const_cast(bin_attr_image.size) = bgrt_image_size; + pax_close_kernel(); bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj); if (!bgrt_kobj) diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index bdc67bad6..a82756b5e 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -47,13 +47,13 @@ struct acpi_blacklist_item { u32 is_critical_error; }; -static struct dmi_system_id acpi_rev_dmi_table[] __initdata; +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst; /* * POLICY: If *anything* doesn't work, put it on the blacklist. * If they are critical errors, mark it critical, and abort driver load. */ -static struct acpi_blacklist_item acpi_blacklist[] __initdata = { +static const struct acpi_blacklist_item acpi_blacklist[] __initconst = { /* Compaq Presario 1700 */ {"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal, "Multiple problems", 1}, @@ -144,7 +144,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d) } #endif -static struct dmi_system_id acpi_rev_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = { #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE /* * DELL XPS 13 (2015) switches sound between HDA and I2S diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 56190d00f..406f04cb4 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id) } #endif -static struct dmi_system_id dsdt_dmi_table[] __initdata = { +static const struct dmi_system_id dsdt_dmi_table[] __initconst = { /* * Invoke DSDT corruption work-around on all Toshiba Satellite. 
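[Note on the DMI quirk tables above: the struct dmi_system_id arrays become const and move from __initdata to __initconst, so the sentinel-terminated tables end up in read-only init memory. A plain C sketch of the same table shape, with illustrative fields and entries only:]

#include <stdio.h>

struct quirk {
    const char *vendor;
    const char *product;
    int (*apply)(void);
};

static int disable_feature(void) { return puts("quirk applied"), 0; }

static const struct quirk quirk_table[] = {
    { "Example Vendor", "Example Board", disable_feature },
    { }    /* sentinel: all-zero entry terminates the table */
};

int main(void)
{
    const struct quirk *q;

    for (q = quirk_table; q->vendor; q++)
        q->apply();
    return 0;
}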
* https://bugzilla.kernel.org/show_bug.cgi?id=14679 @@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = { {} }; #else -static struct dmi_system_id dsdt_dmi_table[] __initdata = { +static const struct dmi_system_id dsdt_dmi_table[] __initconst = { {} }; #endif diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index e19f530f1..cc2d29659 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -556,7 +556,7 @@ static int acpi_button_remove(struct acpi_device *device) return 0; } -static int param_set_lid_init_state(const char *val, struct kernel_param *kp) +static int param_set_lid_init_state(const char *val, const struct kernel_param *kp) { int result = 0; @@ -574,7 +574,7 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp) return result; } -static int param_get_lid_init_state(char *buffer, struct kernel_param *kp) +static int param_get_lid_init_state(char *buffer, const struct kernel_param *kp) { switch (lid_init_state) { case ACPI_BUTTON_LID_INIT_OPEN: diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index c68e72414..e86300825 100644 --- a/drivers/acpi/custom_method.c +++ b/drivers/acpi/custom_method.c @@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, struct acpi_table_header table; acpi_status status; +#ifdef CONFIG_GRKERNSEC_KMEM + return -EPERM; +#endif + if (!(*ppos)) { /* parse the table header to get the table length */ if (count <= sizeof(struct acpi_table_header)) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 993fd3139..cc15d1487 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1026,6 +1026,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze); #endif /* CONFIG_PM_SLEEP */ +static void acpi_dev_pm_detach(struct device *dev, bool power_off); + static struct dev_pm_domain acpi_general_pm_domain = { .ops = { .runtime_suspend = acpi_subsys_runtime_suspend, @@ -1042,6 +1044,7 @@ static struct dev_pm_domain acpi_general_pm_domain = { .restore_early = acpi_subsys_resume_early, #endif }, + .detach = acpi_dev_pm_detach }; /** @@ -1119,7 +1122,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) acpi_device_wakeup(adev, ACPI_STATE_S0, false); } - dev->pm_domain->detach = acpi_dev_pm_detach; return 0; } EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 48e19d013..b08e80298 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1777,7 +1777,7 @@ static int ec_correct_ecdt(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id ec_dmi_table[] __initdata = { +static const struct dmi_system_id ec_dmi_table[] __initconst = { { ec_correct_ecdt, "MSI MS-171F", { DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), @@ -1910,7 +1910,7 @@ static const struct dev_pm_ops acpi_ec_pm = { SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) }; -static int param_set_event_clearing(const char *val, struct kernel_param *kp) +static int param_set_event_clearing(const char *val, const struct kernel_param *kp) { int result = 0; @@ -1928,7 +1928,7 @@ static int param_set_event_clearing(const char *val, struct kernel_param *kp) return result; } -static int param_get_event_clearing(char *buffer, struct kernel_param *kp) +static int param_get_event_clearing(char *buffer, const struct kernel_param *kp) { switch (ec_event_clearing) { case ACPI_EC_EVT_TIMING_STATUS: diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c index 849f9d224..c97dd8167 100644 --- a/drivers/acpi/osi.c +++ 
b/drivers/acpi/osi.c @@ -318,7 +318,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d) * Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden * by acpi_osi=!Linux/acpi_osi=!Darwin command line options. */ -static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = { { .callback = dmi_disable_osi_vista, .ident = "Fujitsu Siemens", diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c index f62c68e24..e90b61f7d 100644 --- a/drivers/acpi/pci_slot.c +++ b/drivers/acpi/pci_slot.c @@ -174,7 +174,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = { /* * Fujitsu Primequest machines will return 1023 to indicate an * error if the _SUN method is evaluated on SxFy objects that diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 2237d3f24..af0be90b4 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -842,7 +842,7 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) { int i, count = CPUIDLE_DRIVER_STATE_START; struct acpi_processor_cx *cx; - struct cpuidle_state *state; + cpuidle_state_no_const *state; struct cpuidle_driver *drv = &acpi_idle_driver; if (max_cstate == 0) @@ -1251,7 +1251,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) { int i; struct acpi_lpi_state *lpi; - struct cpuidle_state *state; + cpuidle_state_no_const *state; struct cpuidle_driver *drv = &acpi_idle_driver; if (!pr->flags.has_lpi) diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index 7cfbda4d7..74f738cb6 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -173,7 +173,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id processor_idle_dmi_table[] __initdata = { +static const struct dmi_system_id processor_idle_dmi_table[] __initconst = { { set_no_mwait, "Extensa 5220", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 54abb26b7..6f7015f1c 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -154,7 +154,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id acpisleep_dmi_table[] __initdata = { +static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { { .callback = init_old_suspend_ordering, .ident = "Abit KN9 (nForce4 variant)", diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 703c26e70..47be99056 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -227,7 +227,7 @@ module_param_cb(trace_method_name, ¶m_ops_trace_method, &trace_method_name, module_param_cb(trace_debug_layer, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644); module_param_cb(trace_debug_level, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644); -static int param_set_trace_state(const char *val, struct kernel_param *kp) +static int param_set_trace_state(const char *val, const struct kernel_param *kp) { acpi_status status; const char *method = trace_method_name; @@ -263,7 +263,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) return 0; } -static int param_get_trace_state(char *buffer, struct kernel_param *kp) +static int param_get_trace_state(char *buffer, 
const struct kernel_param *kp) { if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) return sprintf(buffer, "disable"); @@ -292,7 +292,7 @@ MODULE_PARM_DESC(aml_debug_output, "To enable/disable the ACPI Debug Object output."); /* /sys/module/acpi/parameters/acpica_version */ -static int param_get_acpica_version(char *buffer, struct kernel_param *kp) +static int param_get_acpica_version(char *buffer, const struct kernel_param *kp) { int result; @@ -490,11 +490,11 @@ static u32 num_counters; static struct attribute **all_attrs; static u32 acpi_gpe_count; -static struct attribute_group interrupt_stats_attr_group = { +static attribute_group_no_const interrupt_stats_attr_group = { .name = "interrupts", }; -static struct kobj_attribute *counter_attrs; +static kobj_attribute_no_const *counter_attrs; static void delete_gpe_attr_array(void) { @@ -797,13 +797,13 @@ static void __exit interrupt_stats_exit(void) } static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, +acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); } -static const struct device_attribute pm_profile_attr = +static const struct kobj_attribute pm_profile_attr = __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); static ssize_t hotplug_enabled_show(struct kobject *kobj, diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 35e8fbca1..5f0c4120c 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) { return 0; } -static struct dmi_system_id thermal_dmi_table[] __initdata = { +static const struct dmi_system_id thermal_dmi_table[] __initconst = { /* * Award BIOS on this AOpen makes thermal control almost worthless. * http://bugzilla.kernel.org/show_bug.cgi?id=8842 diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 02ded25c8..ff23e2d89 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -41,7 +41,6 @@ ACPI_MODULE_NAME("video"); void acpi_video_unregister_backlight(void); static bool backlight_notifier_registered; -static struct notifier_block backlight_nb; static struct work_struct backlight_notify_work; static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef; @@ -339,6 +338,10 @@ static int acpi_video_backlight_notify(struct notifier_block *nb, return NOTIFY_OK; } +static struct notifier_block backlight_nb = { + .notifier_call = acpi_video_backlight_notify, +}; + /* * Determine which type of backlight interface to use on this system, * First check cmdline, then dmi quirks, then do autodetect. 
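[Note on the module-parameter handlers above: the get/set callbacks now take const struct kernel_param *, since the descriptor is only read, and the stricter signature can then be stored in the ops tables. Userspace sketch of const-qualifying a read-only callback argument, with made-up types:]

#include <stdio.h>

struct param_desc {
    const char *name;
    int *value;
};

static int param_get(char *buffer, const struct param_desc *pd)
{
    /* pd is never written through, so the pointer can be const */
    return sprintf(buffer, "%s=%d", pd->name, *pd->value);
}

int main(void)
{
    int debug = 3;
    struct param_desc pd = { .name = "debug_level", .value = &debug };
    char buf[32];

    param_get(buf, &pd);
    puts(buf);
    return 0;
}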
@@ -369,8 +372,6 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void) &video_caps, NULL); INIT_WORK(&backlight_notify_work, acpi_video_backlight_notify_work); - backlight_nb.notifier_call = acpi_video_backlight_notify; - backlight_nb.priority = 0; if (backlight_register_notifier(&backlight_nb) == 0) backlight_notifier_registered = true; init_done = true; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 3c71b982b..03bc39b96 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -119,7 +119,7 @@ static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); static int binder_stop_on_user_error; static int binder_set_stop_on_user_error(const char *val, - struct kernel_param *kp) + const struct kernel_param *kp) { int ret; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 33e363dcc..1694417d4 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -105,7 +105,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev); static void ata_dev_xfermask(struct ata_device *dev); static unsigned long ata_dev_blacklisted(const struct ata_device *dev); -atomic_t ata_print_id = ATOMIC_INIT(0); +atomic_unchecked_t ata_print_id = ATOMIC_INIT(0); struct ata_force_param { const char *name; @@ -4990,7 +4990,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) struct ata_port *ap; unsigned int tag; - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ ap = qc->ap; qc->flags = 0; @@ -5007,7 +5007,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) struct ata_port *ap; struct ata_link *link; - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); ap = qc->ap; link = qc->dev->link; @@ -6119,6 +6119,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) return; spin_lock(&lock); + pax_open_kernel(); for (cur = ops->inherits; cur; cur = cur->inherits) { void **inherit = (void **)cur; @@ -6132,8 +6133,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) if (IS_ERR(*pp)) *pp = NULL; - ops->inherits = NULL; + const_cast(ops->inherits) = NULL; + pax_close_kernel(); spin_unlock(&lock); } @@ -6329,7 +6331,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) /* give ports names and add SCSI hosts */ for (i = 0; i < host->n_ports; i++) { - host->ports[i]->print_id = atomic_inc_return(&ata_print_id); + host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id); host->ports[i]->local_port_no = i + 1; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 8e575fbdf..104fcff55 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -4908,7 +4908,7 @@ int ata_sas_port_init(struct ata_port *ap) if (rc) return rc; - ap->print_id = atomic_inc_return(&ata_print_id); + ap->print_id = atomic_inc_return_unchecked(&ata_print_id); return 0; } EXPORT_SYMBOL_GPL(ata_sas_port_init); diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 3b301a480..ff1567665 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -53,7 +53,7 @@ enum { ATA_DNXFER_QUIET = (1 << 31), }; -extern atomic_t ata_print_id; +extern atomic_unchecked_t ata_print_id; extern int atapi_passthru16; extern int libata_fua; extern int libata_noacpi; diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index b4d54771c..9ec8e0bb7 100644 
--- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev) /* Handle platform specific quirks */ if (quirk) { if (quirk & CF_BROKEN_PIO) { - ap->ops->set_piomode = NULL; + pax_open_kernel(); + const_cast(ap->ops->set_piomode) = NULL; + pax_close_kernel(); ap->pio_mask = 0; } if (quirk & CF_BROKEN_MWDMA) diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c index f9b983ae6..887b9d89f 100644 --- a/drivers/atm/adummy.c +++ b/drivers/atm/adummy.c @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index cd3f62eb1..ff94eed43 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) { PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); // free the descriptor kfree (tx_descr); @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { dump_skb ("<<<", vc, skb); // VC layer stats - atomic_inc(&atm_vcc->stats->rx); + atomic_inc_unchecked(&atm_vcc->stats->rx); __net_timestamp(skb); // end of our responsibility atm_vcc->push (atm_vcc, skb); @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { } else { PRINTK (KERN_INFO, "dropped over-size frame"); // should we count this? - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); } } else { @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { } if (check_area (skb->data, skb->len)) { - atomic_inc(&atm_vcc->stats->tx_err); + atomic_inc_unchecked(&atm_vcc->stats->tx_err); return -ENOMEM; // ? 
} diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index 480fa6ffb..947067c9d 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); if (dev_data) return 0; - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOLINK; } size = skb->len+sizeof(struct atmtcp_hdr); @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) if (!new_skb) { if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOBUFS; } hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr)); @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); out_vcc->push(out_vcc,new_skb); - atomic_inc(&vcc->stats->tx); - atomic_inc(&out_vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->tx); + atomic_inc_unchecked(&out_vcc->stats->rx); return 0; } @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) read_unlock(&vcc_sklist_lock); if (!out_vcc) { result = -EUNATCH; - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); goto done; } skb_pull(skb,sizeof(struct atmtcp_hdr)); @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) __net_timestamp(new_skb); skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); out_vcc->push(out_vcc,new_skb); - atomic_inc(&vcc->stats->tx); - atomic_inc(&out_vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->tx); + atomic_inc_unchecked(&out_vcc->stats->rx); done: if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 40c2d5614..58e2b4e6d 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc) DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", vcc->dev->number); length = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } else { length = ATM_CELL_SIZE-1; /* no HEC */ @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc) size); } eff = length = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } else { size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc) "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", vcc->dev->number,vcc->vci,length,size << 2,descr); length = eff = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } } skb = eff ? 
atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; @@ -770,7 +770,7 @@ rx_dequeued++; vcc->push(vcc,skb); pushed++; } - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } wake_up(&eni_dev->rx_wait); } @@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev) DMA_TO_DEVICE); if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb_irq(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); wake_up(&eni_dev->tx_wait); dma_complete++; } diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 85aaf2222..8730d15f1 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -753,7 +753,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q) } } - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); fs_dprintk (FS_DEBUG_TXMEM, "i"); fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); @@ -820,7 +820,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) #endif skb_put (skb, qe->p1 & 0xffff); ATM_SKB(skb)->vcc = atm_vcc; - atomic_inc(&atm_vcc->stats->rx); + atomic_inc_unchecked(&atm_vcc->stats->rx); __net_timestamp(skb); fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); atm_vcc->push (atm_vcc, skb); @@ -841,12 +841,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) kfree (pe); } if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); break; case 0x1f: /* Reassembly abort: no buffers. */ /* Silently increment error counter. */ if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); break; default: /* Hmm. Haven't written the code to handle the others yet... -- REW */ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 058aae9dd..470592d64 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e) #endif /* check error condition */ if (*entry->status & STATUS_ERROR) - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); else - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); } } @@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp if (skb == NULL) { DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return -ENOMEM; } @@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return -ENOMEM; } ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); @@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e) DPRINTK(2, "damaged PDU on %d.%d.%d\n", fore200e->atm_dev->number, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } } @@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) goto retry_here; } - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); fore200e->tx_sat++; DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat 
is %08x\n", diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 31b513a23..940ef04a5 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -1691,7 +1691,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { hprintk("HBUF_ERR! (cid 0x%x)\n", cid); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto return_host_buffers; } @@ -1718,7 +1718,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) RBRQ_LEN_ERR(he_dev->rbrq_head) ? "LEN_ERR" : "", vcc->vpi, vcc->vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto return_host_buffers; } @@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) vcc->push(vcc, skb); spin_lock(&he_dev->global_lock); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); return_host_buffers: ++pdus_assembled; @@ -2096,7 +2096,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) tpd->vcc->pop(tpd->vcc, tpd->skb); else dev_kfree_skb_any(tpd->skb); - atomic_inc(&tpd->vcc->stats->tx_err); + atomic_inc_unchecked(&tpd->vcc->stats->tx_err); } dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); return; @@ -2508,7 +2508,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -EINVAL; } @@ -2519,7 +2519,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -EINVAL; } #endif @@ -2531,7 +2531,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } @@ -2573,7 +2573,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } @@ -2604,7 +2604,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) __enqueue_tpd(he_dev, tpd, cid); spin_unlock_irqrestore(&he_dev->global_lock, flags); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 5fc81e240..42907aed2 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) { { struct atm_vcc * vcc = ATM_SKB(skb)->vcc; // VC layer stats - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); __net_timestamp(skb); // end of our responsibility vcc->push (vcc, skb); @@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) { dev->tx_iovec = NULL; // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); // free the skb hrz_kfree_skb (skb); diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 074616b39..d6b3d5f74 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc) else dev_kfree_skb(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); } atomic_dec(&scq->used); @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct 
rsq_entry *rsqe) if ((sb = dev_alloc_skb(64)) == NULL) { printk("%s: Can't allocate buffers for aal0.\n", card->name); - atomic_add(i, &vcc->stats->rx_drop); + atomic_add_unchecked(i, &vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK("%s: atm_charge() dropped aal0 packets.\n", card->name); - atomic_add(i - 1, &vcc->stats->rx_drop); + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); dev_kfree_skb(sb); break; } @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) "(CDC: %08x)\n", card->name, len, rpp->len, readl(SAR_REG_CDC)); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (stat & SAR_RSQE_CRC) { RXPRINTK("%s: AAL5 CRC error.\n", card->name); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (skb_queue_len(&rpp->queue) > 1) { @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) RXPRINTK("%s: Can't alloc RX skb.\n", card->name); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (!atm_charge(vcc, skb->truesize)) { @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); return; } @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); if (skb->truesize > SAR_FB_SIZE_3) add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); @@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card) if (vcc->qos.aal != ATM_AAL0) { RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n", card->name, vpi, vci); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto drop; } if ((sb = dev_alloc_skb(64)) == NULL) { printk("%s: Can't allocate buffers for AAL0.\n", card->name); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto drop; } @@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card) ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); drop: skb_pull(queue, 64); @@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) if (vc == NULL) { printk("%s: NULL connection in send().\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } if (!test_bit(VCF_TX, &vc->flags)) { printk("%s: Trying to transmit on a non-tx VC.\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } @@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) break; default: printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("%s: No scatter-gather yet.\n", card->name); - 
atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } @@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) err = queue_skb(card, vc, skb, oam); if (err) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return err; } @@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags) skb = dev_alloc_skb(64); if (!skb) { printk("%s: Out of memory in send_oam().\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOMEM; } atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index b27567659..948649d23 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev) status = (u_short) (buf_desc_ptr->desc_mode); if (status & (RX_CER | RX_PTE | RX_OFL)) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); IF_ERR(printk("IA: bad packet, dropping it");) if (status & RX_CER) { IF_ERR(printk(" cause: packet CRC error\n");) @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev) len = dma_addr - buf_addr; if (len > iadev->rx_buf_sz) { printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out_free_desc; } @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev) ia_vcc = INPH_IA_VCC(vcc); if (ia_vcc == NULL) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); atm_return(vcc, skb->truesize); dev_kfree_skb_any(skb); goto INCR_DLE; @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev) if ((length > iadev->rx_buf_sz) || (length > (skb->len - sizeof(struct cpcs_trailer)))) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", length, skb->len);) atm_return(vcc, skb->truesize); @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev) IF_RX(printk("rx_dle_intr: skb push");) vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); iadev->rx_pkt_cnt++; } INCR_DLE: @@ -2834,15 +2834,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) { struct k_sonet_stats *stats; stats = &PRIV(_ia_dev[board])->sonet_stats; - printk("section_bip: %d\n", atomic_read(&stats->section_bip)); - printk("line_bip : %d\n", atomic_read(&stats->line_bip)); - printk("path_bip : %d\n", atomic_read(&stats->path_bip)); - printk("line_febe : %d\n", atomic_read(&stats->line_febe)); - printk("path_febe : %d\n", atomic_read(&stats->path_febe)); - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip)); + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip)); + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip)); + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe)); + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe)); + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs)); + printk("uncorr_hcs : %d\n", 
atomic_read_unchecked(&stats->uncorr_hcs)); + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells)); + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells)); } ia_cmds.status = 0; break; @@ -2947,7 +2947,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { if ((desc == 0) || (desc > iadev->num_tx_desc)) { IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); if (vcc->pop) vcc->pop(vcc, skb); else @@ -3052,14 +3052,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { ATM_DESC(skb) = vcc->vci; skb_queue_tail(&iadev->tx_dma_q, skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); iadev->tx_pkt_cnt++; /* Increment transaction counter */ writel(2, iadev->dma+IPHASE5575_TX_COUNTER); #if 0 /* add flow control logic */ - if (atomic_read(&vcc->stats->tx) % 20 == 0) { + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) { if (iavcc->vc_desc_cnt > 10) { vcc->tx_quota = vcc->tx_quota * 3 / 4; printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 445505d9e..10a1bd79f 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai, vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0); lanai_endtx(lanai, lvcc); lanai_free_skb(lvcc->tx.atmvcc, skb); - atomic_inc(&lvcc->tx.atmvcc->stats->tx); + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx); } /* Try to fill the buffer - don't call unless there is backlog */ @@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr) ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; __net_timestamp(skb); lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); - atomic_inc(&lvcc->rx.atmvcc->stats->rx); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx); out: lvcc->rx.buf.ptr = end; cardvcc_write(lvcc, endptr, vcc_rxreadptr); @@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 " "vcc %d\n", lanai->number, (unsigned int) s, vci); lanai->stats.service_rxnotaal5++; - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); return 0; } if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) { @@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) int bytes; read_unlock(&vcc_sklist_lock); DPRINTK("got trashed rx pdu on vci %d\n", vci); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_trash++; bytes = (SERVICE_GET_END(s) * 16) - (((unsigned long) lvcc->rx.buf.ptr) - @@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) } if (s & SERVICE_STREAM) { read_unlock(&vcc_sklist_lock); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_stream++; printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream " "PDU on VCI %d!\n", lanai->number, vci); @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) return 0; } DPRINTK("got rx crc error on vci %d\n", vci); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_rxcrc++; lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr); diff --git 
a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index c7296b583..7db29bfd4 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -1635,7 +1635,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if ((vc = (vc_map *) vcc->dev_data) == NULL) { printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1643,7 +1643,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if (!vc->tx) { printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1651,14 +1651,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("nicstar%d: No scatter-gather yet.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1706,11 +1706,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) } if (push_scqe(card, vc, scq, &scqe, skb) != 0) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EIO; } - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } @@ -2028,14 +2028,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) printk ("nicstar%d: Can't allocate buffers for aal0.\n", card->index); - atomic_add(i, &vcc->stats->rx_drop); + atomic_add_unchecked(i, &vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK ("nicstar%d: atm_charge() dropped aal0 packets.\n", card->index); - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ dev_kfree_skb_any(sb); break; } @@ -2050,7 +2050,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } @@ -2067,7 +2067,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) if (iovb == NULL) { printk("nicstar%d: Out of iovec buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); recycle_rx_buf(card, skb); return; } @@ -2091,7 +2091,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) small or large buffer itself. 
*/ } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { printk("nicstar%d: received too big AAL5 SDU.\n", card->index); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_MAX_IOVECS); NS_PRV_IOVCNT(iovb) = 0; @@ -2111,7 +2111,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) ("nicstar%d: Expected a small buffer, and this is not one.\n", card->index); which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_rx_buf(card, skb); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); @@ -2124,7 +2124,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) ("nicstar%d: Expected a large buffer, and this is not one.\n", card->index); which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; @@ -2147,7 +2147,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) printk(" - PDU size mismatch.\n"); else printk(".\n"); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; @@ -2161,14 +2161,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) /* skb points to a small buffer */ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { skb_put(skb, len); dequeue_sm_buf(card, skb); ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ struct sk_buff *sb; @@ -2179,14 +2179,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) if (len <= NS_SMBUFSIZE) { if (!atm_charge(vcc, sb->truesize)) { push_rxbufs(card, sb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { skb_put(sb, len); dequeue_sm_buf(card, sb); ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } push_rxbufs(card, skb); @@ -2195,7 +2195,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { dequeue_lg_buf(card, skb); skb_push(skb, NS_SMBUFSIZE); @@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } push_rxbufs(card, sb); @@ -2226,7 +2226,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) printk ("nicstar%d: Out of huge buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, @@ -2277,7 +2277,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) card->hbpool.count++; } else dev_kfree_skb_any(hb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { /* Copy the small buffer to the huge buffer */ sb = (struct sk_buff *)iov->iov_base; @@ -2311,7 +2311,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) ATM_SKB(hb)->vcc = vcc; __net_timestamp(hb); vcc->push(vcc, hb); - 
atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } } diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 37dd4a159..1563ebad0 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -847,7 +847,7 @@ static void solos_bh(unsigned long card_arg) } atm_charge(vcc, skb->truesize); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); break; case PKT_STATUS: @@ -1128,7 +1128,7 @@ static uint32_t fpga_tx(struct solos_card *card) vcc = SKB_CB(oldskb)->vcc; if (vcc) { - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); solos_pop(vcc, oldskb); } else { dev_kfree_skb_irq(oldskb); diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c index 021593455..ce9f5b153 100644 --- a/drivers/atm/suni.c +++ b/drivers/atm/suni.c @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock); #define ADD_LIMITED(s,v) \ - atomic_add((v),&stats->s); \ - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX); + atomic_add_unchecked((v),&stats->s); \ + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX); static void suni_hz(unsigned long from_timer) diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c index 5120a96b3..e2572bdba 100644 --- a/drivers/atm/uPD98402.c +++ b/drivers/atm/uPD98402.c @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze struct sonet_stats tmp; int error = 0; - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); if (zero && !error) { @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) #define ADD_LIMITED(s,v) \ - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \ + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \ + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); } static void stat_event(struct atm_dev *dev) @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev) if (reason & uPD98402_INT_PFM) stat_event(dev); if (reason & uPD98402_INT_PCO) { (void) GET(PCOCR); /* clear interrupt cause */ - atomic_add(GET(HECCT), + atomic_add_unchecked(GET(HECCT), &PRIV(dev)->sonet_stats.uncorr_hcs); } if ((reason & uPD98402_INT_RFO) && @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev) PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | uPD98402_INT_LOS),PIMR); /* enable them */ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1); return 0; } diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index d3dc95484..d71820abf 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); } if (!size) { dev_kfree_skb_irq(skb); - if (vcc) atomic_inc(&vcc->stats->rx_err); + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err); continue; } if (!atm_charge(vcc,skb->truesize)) { @@ -469,7 
+469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); skb->len = size; ATM_SKB(skb)->vcc = vcc; vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } zout(pos & 0xffff,MTA(mbx)); #if 0 /* probably a stupid idea */ @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | skb_queue_head(&zatm_vcc->backlog,skb); break; } - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); wake_up(&zatm_vcc->tx_wait); } diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 6470eb808..3a7d92b18 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -1136,7 +1136,7 @@ int subsys_interface_register(struct subsys_interface *sif) return -EINVAL; mutex_lock(&subsys->p->mutex); - list_add_tail(&sif->node, &subsys->p->interfaces); + pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces); if (sif->add_dev) { subsys_dev_iter_init(&iter, subsys, NULL, NULL); while ((dev = subsys_dev_iter_next(&iter))) @@ -1161,7 +1161,7 @@ void subsys_interface_unregister(struct subsys_interface *sif) subsys = sif->subsys; mutex_lock(&subsys->p->mutex); - list_del_init(&sif->node); + pax_list_del_init((struct list_head *)&sif->node); if (sif->remove_dev) { subsys_dev_iter_init(&iter, subsys, NULL, NULL); while ((dev = subsys_dev_iter_next(&iter))) diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 8fc654f08..36e28e9e3 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -476,7 +476,9 @@ static int remove_nodes(struct device *dev, static int release_nodes(struct device *dev, struct list_head *first, struct list_head *end, unsigned long flags) - __releases(&dev->devres_lock) + __releases(&dev->devres_lock); +static int release_nodes(struct device *dev, struct list_head *first, + struct list_head *end, unsigned long flags) { LIST_HEAD(todo); int cnt; diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 44a74cf13..a5dd826f8 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir) if (!thread) return 0; - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL); + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL); if (err) printk(KERN_INFO "devtmpfs: error mounting %i\n", err); else @@ -380,11 +380,11 @@ static int devtmpfsd(void *p) *err = sys_unshare(CLONE_NEWNS); if (*err) goto out; - *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options); + *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options); if (*err) goto out; - sys_chdir("/.."); /* will traverse into overmounted root */ - sys_chroot("."); + sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */ + sys_chroot((char __force_user *)"."); complete(&setup_done); while (1) { spin_lock(&req_lock); diff --git a/drivers/base/node.c b/drivers/base/node.c index 5548f9686..3cbdfc183 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -638,7 +638,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf) struct node_attr { struct device_attribute attr; enum node_states state; -}; +} __do_const; static ssize_t show_node_state(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index be6a599bc..d9985c268 100644 --- 
a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -24,6 +24,8 @@ #include #include +#include + #define DEV_ID_SHIFT 21 #define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT)) @@ -81,10 +83,12 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info) BUG_ON(!ops); + pax_open_kernel(); if (ops->msi_init == NULL) - ops->msi_init = platform_msi_init; + const_cast(ops->msi_init) = platform_msi_init; if (ops->set_desc == NULL) - ops->set_desc = platform_msi_set_desc; + const_cast(ops->set_desc) = platform_msi_set_desc; + pax_close_kernel(); } static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) @@ -102,16 +106,18 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info) struct irq_chip *chip = info->chip; BUG_ON(!chip); + pax_open_kernel(); if (!chip->irq_mask) - chip->irq_mask = irq_chip_mask_parent; + const_cast(chip->irq_mask) = irq_chip_mask_parent; if (!chip->irq_unmask) - chip->irq_unmask = irq_chip_unmask_parent; + const_cast(chip->irq_unmask) = irq_chip_unmask_parent; if (!chip->irq_eoi) - chip->irq_eoi = irq_chip_eoi_parent; + const_cast(chip->irq_eoi) = irq_chip_eoi_parent; if (!chip->irq_set_affinity) - chip->irq_set_affinity = msi_domain_set_affinity; + const_cast(chip->irq_set_affinity) = msi_domain_set_affinity; if (!chip->irq_write_msi_msg) - chip->irq_write_msi_msg = platform_msi_write_msg; + const_cast(chip->irq_write_msi_msg) = platform_msi_write_msg; + pax_close_kernel(); } static void platform_msi_free_descs(struct device *dev, int base, int nvec) diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e023066e4..a28458ac0 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1895,8 +1895,10 @@ int genpd_dev_pm_attach(struct device *dev) goto out; } - dev->pm_domain->detach = genpd_dev_pm_detach; - dev->pm_domain->sync = genpd_dev_pm_sync; + pax_open_kernel(); + const_cast(dev->pm_domain->detach) = genpd_dev_pm_detach; + const_cast(dev->pm_domain->sync) = genpd_dev_pm_sync; + pax_close_kernel(); mutex_lock(&pd->lock); ret = genpd_poweron(pd, 0); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 23f3b95a1..875d17f52 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -16,35 +16,32 @@ typedef int (*pm_callback_t)(struct device *); -static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset) -{ - pm_callback_t cb; - const struct dev_pm_ops *ops; - - if (dev->pm_domain) - ops = &dev->pm_domain->ops; - else if (dev->type && dev->type->pm) - ops = dev->type->pm; - else if (dev->class && dev->class->pm) - ops = dev->class->pm; - else if (dev->bus && dev->bus->pm) - ops = dev->bus->pm; - else - ops = NULL; - - if (ops) - cb = *(pm_callback_t *)((void *)ops + cb_offset); - else - cb = NULL; - - if (!cb && dev->driver && dev->driver->pm) - cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); - - return cb; -} - -#define RPM_GET_CALLBACK(dev, callback) \ - __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback)) +#define RPM_GET_CALLBACK(dev, callback) \ +({ \ + pm_callback_t cb; \ + const struct dev_pm_ops *ops; \ + \ + if (dev->pm_domain) \ + ops = &dev->pm_domain->ops; \ + else if (dev->type && dev->type->pm) \ + ops = dev->type->pm; \ + else if (dev->class && dev->class->pm) \ + ops = dev->class->pm; \ + else if (dev->bus && dev->bus->pm) \ + ops = dev->bus->pm; \ + else \ + ops = NULL; \ + \ + if (ops) \ + cb = ops->callback; \ + else \ + cb = NULL; \ + \ + if (!cb && dev->driver && 
dev->driver->pm) \ + cb = dev->driver->pm->callback; \ + \ + cb; \ +}) static int rpm_resume(struct device *dev, int rpmflags); static int rpm_suspend(struct device *dev, int rpmflags); @@ -263,8 +260,8 @@ static int rpm_check_suspend_allowed(struct device *dev) * @cb: Runtime PM callback to run. * @dev: Device to run the callback for. */ +static int __rpm_callback(int (*cb)(struct device *), struct device *dev) __must_hold(&dev->power.lock); static int __rpm_callback(int (*cb)(struct device *), struct device *dev) - __releases(&dev->power.lock) __acquires(&dev->power.lock) { int retval; @@ -412,8 +409,8 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) * * This function must be called under dev->power.lock with interrupts disabled. */ +static int rpm_suspend(struct device *dev, int rpmflags) __must_hold(&dev->power.lock); static int rpm_suspend(struct device *dev, int rpmflags) - __releases(&dev->power.lock) __acquires(&dev->power.lock) { int (*callback)(struct device *); struct device *parent = NULL; @@ -594,8 +591,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) * * This function must be called under dev->power.lock with interrupts disabled. */ +static int rpm_resume(struct device *dev, int rpmflags) __must_hold(&dev->power.lock); static int rpm_resume(struct device *dev, int rpmflags) - __releases(&dev->power.lock) __acquires(&dev->power.lock) { int (*callback)(struct device *); struct device *parent = NULL; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a7b46798c..d30249044 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev, return -EIO; } } - return sprintf(buf, p); + return sprintf(buf, "%s", p); } static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 62e4de2aa..38961cd63 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -36,14 +36,14 @@ static bool pm_abort_suspend __read_mostly; * They need to be modified together atomically, so it's better to use one * atomic variable to hold them both. */ -static atomic_t combined_event_count = ATOMIC_INIT(0); +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0); #define IN_PROGRESS_BITS (sizeof(int) * 4) #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) static void split_counters(unsigned int *cnt, unsigned int *inpr) { - unsigned int comb = atomic_read(&combined_event_count); + unsigned int comb = atomic_read_unchecked(&combined_event_count); *cnt = (comb >> IN_PROGRESS_BITS); *inpr = comb & MAX_IN_PROGRESS; @@ -538,7 +538,7 @@ static void wakeup_source_activate(struct wakeup_source *ws) ws->start_prevent_time = ws->last_time; /* Increment the counter of events in progress. */ - cec = atomic_inc_return(&combined_event_count); + cec = atomic_inc_return_unchecked(&combined_event_count); trace_wakeup_source_activate(ws->name, cec); } @@ -664,7 +664,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) * Increment the counter of registered wakeup events and decrement the * couter of wakeup events in progress simultaneously. 
*/ - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count); trace_wakeup_source_deactivate(ws->name, cec); split_counters(&cnt, &inpr); diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 36ce3511c..bfb18b968 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -410,7 +410,7 @@ static const struct file_operations regmap_reg_ranges_fops = { static int regmap_access_show(struct seq_file *s, void *ignored) { struct regmap *map = s->private; - int i, reg_len; + unsigned int i, reg_len; reg_len = regmap_calc_reg_len(map->max_register); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index ae63bb087..64815791c 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -425,8 +425,8 @@ static void regmap_unlock_mutex(void *__map) mutex_unlock(&map->mutex); } +static void regmap_lock_spinlock(void *__map) __acquires(&map->spinlock); static void regmap_lock_spinlock(void *__map) -__acquires(&map->spinlock) { struct regmap *map = __map; unsigned long flags; @@ -435,8 +435,8 @@ __acquires(&map->spinlock) map->spinlock_flags = flags; } +static void regmap_unlock_spinlock(void *__map) __releases(&map->spinlock); static void regmap_unlock_spinlock(void *__map) -__releases(&map->spinlock) { struct regmap *map = __map; spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c index 8d98a329f..61d316524 100644 --- a/drivers/base/syscore.c +++ b/drivers/base/syscore.c @@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock); void register_syscore_ops(struct syscore_ops *ops) { mutex_lock(&syscore_ops_lock); - list_add_tail(&ops->node, &syscore_ops_list); + pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list); mutex_unlock(&syscore_ops_lock); } EXPORT_SYMBOL_GPL(register_syscore_ops); @@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops); void unregister_syscore_ops(struct syscore_ops *ops) { mutex_lock(&syscore_ops_lock); - list_del(&ops->node); + pax_list_del((struct list_head *)&ops->node); mutex_unlock(&syscore_ops_lock); } EXPORT_SYMBOL_GPL(unregister_syscore_ops); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index db9d6bb63..9c5dc788d 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -3017,7 +3017,7 @@ static void start_io(ctlr_info_t *h) while (!list_empty(&h->reqQ)) { c = list_entry(h->reqQ.next, CommandList_struct, list); /* can't do anything if fifo is full */ - if ((h->access.fifo_full(h))) { + if ((h->access->fifo_full(h))) { dev_warn(&h->pdev->dev, "fifo full\n"); break; } @@ -3027,7 +3027,7 @@ static void start_io(ctlr_info_t *h) h->Qdepth--; /* Tell the controller execute command */ - h->access.submit_command(h, c); + h->access->submit_command(h, c); /* Put job onto the completed Q */ addQ(&h->cmpQ, c); @@ -3453,17 +3453,17 @@ static void do_cciss_request(struct request_queue *q) static inline unsigned long get_next_completion(ctlr_info_t *h) { - return h->access.command_completed(h); + return h->access->command_completed(h); } static inline int interrupt_pending(ctlr_info_t *h) { - return h->access.intr_pending(h); + return h->access->intr_pending(h); } static inline long interrupt_not_for_us(ctlr_info_t *h) { - return ((h->access.intr_pending(h) == 0) || + return ((h->access->intr_pending(h) == 0) || (h->interrupts_enabled == 0)); } @@ -3496,7 +3496,7 @@ static 
inline u32 next_command(ctlr_info_t *h) u32 a; if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) - return h->access.command_completed(h); + return h->access->command_completed(h); if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { a = *(h->reply_pool_head); /* Next cmd in ring buffer */ @@ -4053,7 +4053,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h) trans_support & CFGTBL_Trans_use_short_tags); /* Change the access methods to the performant access methods */ - h->access = SA5_performant_access; + h->access = &SA5_performant_access; h->transMethod = CFGTBL_Trans_Performant; return; @@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h) if (prod_index < 0) return -ENODEV; h->product_name = products[prod_index].product_name; - h->access = *(products[prod_index].access); + h->access = products[prod_index].access; if (cciss_board_disabled(h)) { dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); @@ -5058,7 +5058,7 @@ static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* make sure the board interrupts are off */ - h->access.set_intr_mask(h, CCISS_INTR_OFF); + h->access->set_intr_mask(h, CCISS_INTR_OFF); rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); if (rc) goto clean2; @@ -5108,7 +5108,7 @@ static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * fake ones to scoop up any residual completions. */ spin_lock_irqsave(&h->lock, flags); - h->access.set_intr_mask(h, CCISS_INTR_OFF); + h->access->set_intr_mask(h, CCISS_INTR_OFF); spin_unlock_irqrestore(&h->lock, flags); free_irq(h->intr[h->intr_mode], h); rc = cciss_request_irq(h, cciss_msix_discard_completions, @@ -5128,9 +5128,9 @@ static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, "Waiting for stale completions to drain.\n"); - h->access.set_intr_mask(h, CCISS_INTR_ON); + h->access->set_intr_mask(h, CCISS_INTR_ON); msleep(10000); - h->access.set_intr_mask(h, CCISS_INTR_OFF); + h->access->set_intr_mask(h, CCISS_INTR_OFF); rc = controller_reset_failed(h->cfgtable); if (rc) @@ -5153,7 +5153,7 @@ static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cciss_scsi_setup(h); /* Turn the interrupts on so we can service requests */ - h->access.set_intr_mask(h, CCISS_INTR_ON); + h->access->set_intr_mask(h, CCISS_INTR_ON); /* Get the firmware version */ inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); @@ -5225,7 +5225,7 @@ static void cciss_shutdown(struct pci_dev *pdev) kfree(flush_buf); if (return_code != IO_OK) dev_warn(&h->pdev->dev, "Error flushing cache\n"); - h->access.set_intr_mask(h, CCISS_INTR_OFF); + h->access->set_intr_mask(h, CCISS_INTR_OFF); free_irq(h->intr[h->intr_mode], h); } diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 7fda30e4a..2f279464f 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -101,7 +101,7 @@ struct ctlr_info /* information about each logical volume */ drive_info_struct *drv[CISS_MAX_LUN]; - struct access_method access; + struct access_method *access; /* queue and queue Info */ struct list_head reqQ; @@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h) } static struct access_method SA5_access = { - SA5_submit_command, - SA5_intr_mask, - SA5_fifo_full, - SA5_intr_pending, - SA5_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_intr_mask, + .fifo_full = SA5_fifo_full, + 
.intr_pending = SA5_intr_pending, + .command_completed = SA5_completed, }; static struct access_method SA5B_access = { - SA5_submit_command, - SA5B_intr_mask, - SA5_fifo_full, - SA5B_intr_pending, - SA5_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5B_intr_mask, + .fifo_full = SA5_fifo_full, + .intr_pending = SA5B_intr_pending, + .command_completed = SA5_completed, }; static struct access_method SA5_performant_access = { - SA5_submit_command, - SA5_performant_intr_mask, - SA5_fifo_full, - SA5_performant_intr_pending, - SA5_performant_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .fifo_full = SA5_fifo_full, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; struct board_type { diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index ab62b81c2..8f3845086 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1034,7 +1034,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho submit_bio(bio); /* this should not count as user activity and cause the * resync to throttle -- see drbd_rs_should_slow_down(). */ - atomic_add(len >> 9, &device->rs_sect_ev); + atomic_add_unchecked(len >> 9, &device->rs_sect_ev); } } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 4cb8f21ff..d056229d7 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -383,7 +383,7 @@ struct drbd_epoch { struct drbd_connection *connection; struct list_head list; unsigned int barrier_nr; - atomic_t epoch_size; /* increased on every request added. */ + atomic_unchecked_t epoch_size; /* increased on every request added. */ atomic_t active; /* increased on every req. added, and dec on every finished. */ unsigned long flags; }; @@ -595,8 +595,8 @@ struct drbd_md { u32 flags; u32 md_size_sect; - s32 al_offset; /* signed relative sector offset to activity log */ - s32 bm_offset; /* signed relative sector offset to bitmap */ + s32 al_offset __intentional_overflow(0); /* signed relative sector offset to activity log */ + s32 bm_offset __intentional_overflow(0); /* signed relative sector offset to bitmap */ /* cached value of bdev->disk_conf->meta_dev_idx (see below) */ s32 meta_dev_idx; @@ -960,7 +960,7 @@ struct drbd_device { unsigned int al_tr_number; int al_tr_cycle; wait_queue_head_t seq_wait; - atomic_t packet_seq; + atomic_unchecked_t packet_seq; unsigned int peer_seq; spinlock_t peer_seq_lock; unsigned long comm_bm_set; /* communicated number of set bits. */ @@ -969,8 +969,8 @@ struct drbd_device { struct mutex own_state_mutex; struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */ char congestion_reason; /* Why we where congested... */ - atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */ - atomic_t rs_sect_ev; /* for submitted resync data rate, both */ + atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */ + atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */ int rs_last_sect_ev; /* counter to compare with */ int rs_last_events; /* counter of read or write "events" (unit sectors) * on the lower level device when we last looked. 
*/ @@ -1129,7 +1129,7 @@ extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector, enum drbd_packet cmd); extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size); -extern int drbd_send_bitmap(struct drbd_device *device); +extern int drbd_send_bitmap(struct drbd_device *device) __intentional_overflow(-1); extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode); extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode); extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 83482721b..f2ddf22ff 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1363,7 +1363,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet p->sector = sector; p->block_id = block_id; p->blksize = blksize; - p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq)); + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq)); return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0); } @@ -1695,7 +1695,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * return -EIO; p->sector = cpu_to_be64(req->i.sector); p->block_id = (unsigned long)req; - p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq)); + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq)); dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio); if (device->state.conn >= C_SYNC_SOURCE && device->state.conn <= C_PAUSED_SYNC_T) @@ -1984,8 +1984,8 @@ void drbd_init_set_defaults(struct drbd_device *device) atomic_set(&device->unacked_cnt, 0); atomic_set(&device->local_cnt, 0); atomic_set(&device->pp_in_use_by_net, 0); - atomic_set(&device->rs_sect_in, 0); - atomic_set(&device->rs_sect_ev, 0); + atomic_set_unchecked(&device->rs_sect_in, 0); + atomic_set_unchecked(&device->rs_sect_ev, 0); atomic_set(&device->ap_in_flight, 0); atomic_set(&device->md_io.in_use, 0); @@ -2752,8 +2752,8 @@ void drbd_destroy_connection(struct kref *kref) struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref); struct drbd_resource *resource = connection->resource; - if (atomic_read(&connection->current_epoch->epoch_size) != 0) - drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size)); + if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0) + drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size)); kfree(connection->current_epoch); idr_destroy(&connection->peer_devices); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index f35db29ca..ac6c47248 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -89,8 +89,8 @@ int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) #include "drbd_nla.h" #include -static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ -static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */ +static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ +static atomic_unchecked_t notify_genl_seq = ATOMIC_INIT(2); /* two. 
*/ DEFINE_MUTEX(notification_mutex); @@ -4549,7 +4549,7 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib) unsigned seq; int err = -ENOMEM; - seq = atomic_inc_return(&drbd_genl_seq); + seq = atomic_inc_return_unchecked(&drbd_genl_seq); msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); if (!msg) goto failed; @@ -4601,7 +4601,7 @@ void notify_resource_state(struct sk_buff *skb, int err; if (!skb) { - seq = atomic_inc_return(&notify_genl_seq); + seq = atomic_inc_return_unchecked(&notify_genl_seq); skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); err = -ENOMEM; if (!skb) @@ -4652,7 +4652,7 @@ void notify_device_state(struct sk_buff *skb, int err; if (!skb) { - seq = atomic_inc_return(&notify_genl_seq); + seq = atomic_inc_return_unchecked(&notify_genl_seq); skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); err = -ENOMEM; if (!skb) @@ -4701,7 +4701,7 @@ void notify_connection_state(struct sk_buff *skb, int err; if (!skb) { - seq = atomic_inc_return(&notify_genl_seq); + seq = atomic_inc_return_unchecked(&notify_genl_seq); skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); err = -ENOMEM; if (!skb) @@ -4751,7 +4751,7 @@ void notify_peer_device_state(struct sk_buff *skb, int err; if (!skb) { - seq = atomic_inc_return(&notify_genl_seq); + seq = atomic_inc_return_unchecked(&notify_genl_seq); skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); err = -ENOMEM; if (!skb) @@ -4794,7 +4794,7 @@ void notify_helper(enum drbd_notification_type type, { struct drbd_resource *resource = device ? device->resource : connection->resource; struct drbd_helper_info helper_info; - unsigned int seq = atomic_inc_return(&notify_genl_seq); + unsigned int seq = atomic_inc_return_unchecked(&notify_genl_seq); struct sk_buff *skb = NULL; struct drbd_genlmsghdr *dh; int err; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 942384f34..2a20af47e 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -898,7 +898,7 @@ int drbd_connected(struct drbd_peer_device *peer_device) struct drbd_device *device = peer_device->device; int err; - atomic_set(&device->packet_seq, 0); + atomic_set_unchecked(&device->packet_seq, 0); device->peer_seq = 0; device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
@@ -1333,7 +1333,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio do { next_epoch = NULL; - epoch_size = atomic_read(&epoch->epoch_size); + epoch_size = atomic_read_unchecked(&epoch->epoch_size); switch (ev & ~EV_CLEANUP) { case EV_PUT: @@ -1373,7 +1373,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio rv = FE_DESTROYED; } else { epoch->flags = 0; - atomic_set(&epoch->epoch_size, 0); + atomic_set_unchecked(&epoch->epoch_size, 0); /* atomic_set(&epoch->active, 0); is already zero */ if (rv == FE_STILL_LIVE) rv = FE_RECYCLED; @@ -1759,7 +1759,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf conn_wait_active_ee_empty(connection); drbd_flush(connection); - if (atomic_read(&connection->current_epoch->epoch_size)) { + if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) { epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); if (epoch) break; @@ -1773,11 +1773,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf } epoch->flags = 0; - atomic_set(&epoch->epoch_size, 0); + atomic_set_unchecked(&epoch->epoch_size, 0); atomic_set(&epoch->active, 0); spin_lock(&connection->epoch_lock); - if (atomic_read(&connection->current_epoch->epoch_size)) { + if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) { list_add(&epoch->list, &connection->current_epoch->list); connection->current_epoch = epoch; connection->epochs++; @@ -2030,7 +2030,9 @@ static int e_end_resync_block(struct drbd_work *w, int unused) } static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, - struct packet_info *pi) __releases(local) + struct packet_info *pi) __releases(local); +static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, + struct packet_info *pi) { struct drbd_device *device = peer_device->device; struct drbd_peer_request *peer_req; @@ -2052,7 +2054,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto list_add_tail(&peer_req->w.list, &device->sync_ee); spin_unlock_irq(&device->resource->req_lock); - atomic_add(pi->size >> 9, &device->rs_sect_ev); + atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev); if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, DRBD_FAULT_RS_WR) == 0) return 0; @@ -2151,7 +2153,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size); } - atomic_add(pi->size >> 9, &device->rs_sect_in); + atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in); return err; } @@ -2548,7 +2550,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * err = wait_for_and_update_peer_seq(peer_device, peer_seq); drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size); - atomic_inc(&connection->current_epoch->epoch_size); + atomic_inc_unchecked(&connection->current_epoch->epoch_size); err2 = drbd_drain_block(peer_device, pi->size); if (!err) err = err2; @@ -2589,7 +2591,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * spin_lock(&connection->epoch_lock); peer_req->epoch = connection->current_epoch; - atomic_inc(&peer_req->epoch->epoch_size); + atomic_inc_unchecked(&peer_req->epoch->epoch_size); atomic_inc(&peer_req->epoch->active); spin_unlock(&connection->epoch_lock); @@ -2735,7 +2737,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 
(int)part_stat_read(&disk->part0, sectors[1]) - - atomic_read(&device->rs_sect_ev); + atomic_read_unchecked(&device->rs_sect_ev); if (atomic_read(&device->ap_actlog_cnt) || curr_events - device->rs_last_events > 64) { @@ -2881,7 +2883,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet device->use_csums = true; } else if (pi->cmd == P_OV_REPLY) { /* track progress, we may need to throttle */ - atomic_add(size >> 9, &device->rs_sect_in); + atomic_add_unchecked(size >> 9, &device->rs_sect_in); peer_req->w.cb = w_e_end_ov_reply; dec_rs_pending(device); /* drbd_rs_begin_io done when we sent this request, @@ -2954,7 +2956,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet goto out_free_e; submit_for_resync: - atomic_add(size >> 9, &device->rs_sect_ev); + atomic_add_unchecked(size >> 9, &device->rs_sect_ev); submit: update_receiver_timing_details(connection, drbd_submit_peer_request); @@ -4907,7 +4909,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac list_add_tail(&peer_req->w.list, &device->sync_ee); spin_unlock_irq(&device->resource->req_lock); - atomic_add(pi->size >> 9, &device->rs_sect_ev); + atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev); err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR); if (err) { @@ -4931,7 +4933,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER); } - atomic_add(size >> 9, &device->rs_sect_in); + atomic_add_unchecked(size >> 9, &device->rs_sect_in); return err; } @@ -4940,7 +4942,7 @@ struct data_cmd { int expect_payload; unsigned int pkt_size; int (*fn)(struct drbd_connection *, struct packet_info *); -}; +} __do_const; static struct data_cmd drbd_cmd_handler[] = { [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, @@ -5068,7 +5070,7 @@ static void conn_disconnect(struct drbd_connection *connection) if (!list_empty(&connection->current_epoch->list)) drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n"); /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ - atomic_set(&connection->current_epoch->epoch_size, 0); + atomic_set_unchecked(&connection->current_epoch->epoch_size, 0); connection->send.seen_any_write_yet = false; drbd_info(connection, "Connection closed\n"); @@ -5574,7 +5576,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info * put_ldev(device); } dec_rs_pending(device); - atomic_add(blksize >> 9, &device->rs_sect_in); + atomic_add_unchecked(blksize >> 9, &device->rs_sect_in); return 0; } @@ -5825,7 +5827,7 @@ static int got_skip(struct drbd_connection *connection, struct packet_info *pi) struct meta_sock_cmd { size_t pkt_size; int (*fn)(struct drbd_connection *connection, struct packet_info *); -}; +} __do_const; static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout) { diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index eea0c4aec..4eba9a882 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -1507,9 +1507,10 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device, void notify_resource_state_change(struct sk_buff *skb, unsigned int seq, - struct drbd_resource_state_change *resource_state_change, + void *_resource_state_change, enum drbd_notification_type type) { + struct drbd_resource_state_change *resource_state_change = _resource_state_change; 
struct drbd_resource *resource = resource_state_change->resource; struct resource_info resource_info = { .res_role = resource_state_change->role[NEW], @@ -1523,9 +1524,10 @@ void notify_resource_state_change(struct sk_buff *skb, void notify_connection_state_change(struct sk_buff *skb, unsigned int seq, - struct drbd_connection_state_change *connection_state_change, + void *_connection_state_change, enum drbd_notification_type type) { + struct drbd_connection_state_change *connection_state_change = _connection_state_change; struct drbd_connection *connection = connection_state_change->connection; struct connection_info connection_info = { .conn_connection_state = connection_state_change->cstate[NEW], @@ -1537,9 +1539,10 @@ void notify_connection_state_change(struct sk_buff *skb, void notify_device_state_change(struct sk_buff *skb, unsigned int seq, - struct drbd_device_state_change *device_state_change, + void *_device_state_change, enum drbd_notification_type type) { + struct drbd_device_state_change *device_state_change = _device_state_change; struct drbd_device *device = device_state_change->device; struct device_info device_info = { .dev_disk_state = device_state_change->disk_state[NEW], @@ -1550,9 +1553,10 @@ void notify_device_state_change(struct sk_buff *skb, void notify_peer_device_state_change(struct sk_buff *skb, unsigned int seq, - struct drbd_peer_device_state_change *p, + void *_p, enum drbd_notification_type type) { + struct drbd_peer_device_state_change *p = _p; struct drbd_peer_device *peer_device = p->peer_device; struct peer_device_info peer_device_info = { .peer_repl_state = p->repl_state[NEW], diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h index 6c9d5d4a8..110f64d9e 100644 --- a/drivers/block/drbd/drbd_state.h +++ b/drivers/block/drbd/drbd_state.h @@ -126,7 +126,7 @@ extern enum drbd_state_rv _drbd_set_state(struct drbd_device *, union drbd_state enum chg_state_flags, struct completion *done); extern void print_st_err(struct drbd_device *, union drbd_state, - union drbd_state, int); + union drbd_state, enum drbd_state_rv); enum drbd_state_rv _conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val, diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h index 9e503a1a0..ac6026230 100644 --- a/drivers/block/drbd/drbd_state_change.h +++ b/drivers/block/drbd/drbd_state_change.h @@ -45,19 +45,19 @@ extern void forget_state_change(struct drbd_state_change *); extern void notify_resource_state_change(struct sk_buff *, unsigned int, - struct drbd_resource_state_change *, + void *, enum drbd_notification_type type); extern void notify_connection_state_change(struct sk_buff *, unsigned int, - struct drbd_connection_state_change *, + void *, enum drbd_notification_type type); extern void notify_device_state_change(struct sk_buff *, unsigned int, - struct drbd_device_state_change *, + void *, enum drbd_notification_type type); extern void notify_peer_device_state_change(struct sk_buff *, unsigned int, - struct drbd_peer_device_state_change *, + void *, enum drbd_notification_type type); #endif /* DRBD_STATE_CHANGE_H */ diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index c6755c9a0..258629304 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -87,7 +87,8 @@ void drbd_md_endio(struct bio *bio) /* reads on behalf of the partner, * "submitted" by the receiver */ -static void 
drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local) +static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local); +static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) { unsigned long flags = 0; struct drbd_peer_device *peer_device = peer_req->peer_device; @@ -108,7 +109,8 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele /* writes on behalf of the partner, or resync writes, * "submitted" by the receiver, final stage. */ -void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local) +void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local); +void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) { unsigned long flags = 0; struct drbd_peer_device *peer_device = peer_req->peer_device; @@ -408,7 +410,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, list_add_tail(&peer_req->w.list, &device->read_ee); spin_unlock_irq(&device->resource->req_lock); - atomic_add(size >> 9, &device->rs_sect_ev); + atomic_add_unchecked(size >> 9, &device->rs_sect_ev); if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, DRBD_FAULT_RS_RD) == 0) return 0; @@ -554,7 +556,7 @@ static int drbd_rs_number_requests(struct drbd_device *device) unsigned int sect_in; /* Number of sectors that came in since the last turn */ int number, mxb; - sect_in = atomic_xchg(&device->rs_sect_in, 0); + sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0); device->rs_in_flight -= sect_in; rcu_read_lock(); @@ -1662,8 +1664,8 @@ void drbd_rs_controller_reset(struct drbd_device *device) struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; struct fifo_buffer *plan; - atomic_set(&device->rs_sect_in, 0); - atomic_set(&device->rs_sect_ev, 0); + atomic_set_unchecked(&device->rs_sect_in, 0); + atomic_set_unchecked(&device->rs_sect_ev, 0); device->rs_in_flight = 0; device->rs_last_events = (int)part_stat_read(&disk->part0, sectors[0]) + diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index e3d8e4ced..4198ed887 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -961,6 +961,10 @@ static void empty(void) { } +static void empty2(int i) +{ +} + static void (*floppy_work_fn)(void); static void floppy_work_workfn(struct work_struct *work) @@ -1953,14 +1957,14 @@ static const struct cont_t wakeup_cont = { .interrupt = empty, .redo = do_wakeup, .error = empty, - .done = (done_f)empty + .done = empty2 }; static const struct cont_t intr_cont = { .interrupt = empty, .redo = process_fd_request, .error = empty, - .done = (done_f)empty + .done = empty2 }; static int wait_til_done(void (*handler)(void), bool interruptible) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 90fa4ac14..8328db67e 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -109,7 +109,7 @@ static int pkt_seq_show(struct seq_file *m, void *p); static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) { - return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); + return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL); } /* @@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) return -EROFS; } pd->settings.fp = ti.fp; - pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); + pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL); if (ti.nwa_v) { pd->nwa = 
be32_to_cpu(ti.next_writable); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 7b274ff46..b7e6d2408 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -65,7 +65,7 @@ * If the counter is already at its maximum value returns * -EINVAL without updating it. */ -static int atomic_inc_return_safe(atomic_t *v) +static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v) { unsigned int counter; diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h index e5565fbae..71be10b44 100644 --- a/drivers/block/smart1,2.h +++ b/drivers/block/smart1,2.h @@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h) } static struct access_method smart4_access = { - smart4_submit_command, - smart4_intr_mask, - smart4_fifo_full, - smart4_intr_pending, - smart4_completed, + .submit_command = smart4_submit_command, + .set_intr_mask = smart4_intr_mask, + .fifo_full = smart4_fifo_full, + .intr_pending = smart4_intr_pending, + .command_completed = smart4_completed, }; /* @@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h) } static struct access_method smart2_access = { - smart2_submit_command, - smart2_intr_mask, - smart2_fifo_full, - smart2_intr_pending, - smart2_completed, + .submit_command = smart2_submit_command, + .set_intr_mask = smart2_intr_mask, + .fifo_full = smart2_fifo_full, + .intr_pending = smart2_intr_pending, + .command_completed = smart2_completed, }; /* @@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h) } static struct access_method smart2e_access = { - smart2e_submit_command, - smart2e_intr_mask, - smart2e_fifo_full, - smart2e_intr_pending, - smart2e_completed, + .submit_command = smart2e_submit_command, + .set_intr_mask = smart2e_intr_mask, + .fifo_full = smart2e_fifo_full, + .intr_pending = smart2e_intr_pending, + .command_completed = smart2e_completed, }; /* @@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h) } static struct access_method smart1_access = { - smart1_submit_command, - smart1_intr_mask, - smart1_fifo_full, - smart1_intr_pending, - smart1_completed, + .submit_command = smart1_submit_command, + .set_intr_mask = smart1_intr_mask, + .fifo_full = smart1_fifo_full, + .intr_pending = smart1_intr_pending, + .command_completed = smart1_completed, }; diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index b6bb58c41..7d471514e 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -277,7 +277,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb) static int bt_ti_probe(struct platform_device *pdev) { - static struct ti_st *hst; + struct ti_st *hst; struct hci_dev *hdev; int err; diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 890082315..f694867e1 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -1472,8 +1472,10 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) char *name = model->name; u32 num_cntrs; - pmu_event_attr_group.attrs = model->event_attrs; - pmu_format_attr_group.attrs = model->format_attrs; + pax_open_kernel(); + const_cast(pmu_event_attr_group.attrs) = model->event_attrs; + const_cast(pmu_format_attr_group.attrs) = model->format_attrs; + pax_close_kernel(); cci_pmu->pmu = (struct pmu) { .name = cci_pmu->model->name, diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 5d475b3a0..e9076c029 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -610,7 +610,6 @@ int 
register_cdrom(struct cdrom_device_info *cdi) ENSURE(reset, CDC_RESET); ENSURE(generic_packet, CDC_GENERIC_PACKET); cdi->mc_flags = 0; - cdo->n_minors = 0; cdi->options = CDO_USE_FFLAGS; if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY)) @@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi) else cdi->cdda_method = CDDA_OLD; - if (!cdo->generic_packet) - cdo->generic_packet = cdrom_dummy_generic_packet; + if (!cdo->generic_packet) { + pax_open_kernel(); + const_cast(cdo->generic_packet) = cdrom_dummy_generic_packet; + pax_close_kernel(); + } cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); mutex_lock(&cdrom_mutex); @@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi) if (cdi->exit) cdi->exit(cdi); - cdi->ops->n_minors--; cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); } @@ -2137,7 +2138,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, */ nr = nframes; do { - cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL); + cgc.buffer = kcalloc(nr, CD_FRAMESIZE_RAW, GFP_KERNEL); if (cgc.buffer) break; @@ -3441,7 +3442,7 @@ static int cdrom_print_info(const char *header, int val, char *info, struct cdrom_device_info *cdi; int ret; - ret = scnprintf(info + *pos, max_size - *pos, header); + ret = scnprintf(info + *pos, max_size - *pos, "%s", header); if (!ret) return 1; diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 584bc3126..e64a12cb4 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = { .audio_ioctl = gdrom_audio_ioctl, .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, - .n_minors = 1, }; static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index dcc09739a..8d34c8839 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -17,7 +17,8 @@ config DEVMEM config DEVKMEM bool "/dev/kmem virtual device support" - default y + default n + depends on !GRKERNSEC_KMEM help Say Y here if you want to support the /dev/kmem device. 
The /dev/kmem device is rarely used, but can be used for certain @@ -573,6 +574,7 @@ config TELCLOCK config DEVPORT bool depends on ISA || PCI + depends on !GRKERNSEC_KMEM default y source "drivers/s390/char/Kconfig" diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c index a48e05b31..6bac83173 100644 --- a/drivers/char/agp/compat_ioctl.c +++ b/drivers/char/agp/compat_ioctl.c @@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user return -ENOMEM; } - if (copy_from_user(usegment, (void __user *) ureserve.seg_list, + if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list, sizeof(*usegment) * ureserve.seg_count)) { kfree(usegment); kfree(ksegment); diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c index 0f64d149c..4cf4d6b94 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c @@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) return -EFAULT; - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv)) return -EFAULT; client = agp_find_client_by_pid(reserve.pid); @@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) if (segment == NULL) return -ENOMEM; - if (copy_from_user(segment, (void __user *) reserve.seg_list, + if (copy_from_user(segment, (void __force_user *) reserve.seg_list, sizeof(struct agp_segment) * reserve.seg_count)) { kfree(segment); return -EFAULT; diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 0f7d28a98..d8576c688 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1420,8 +1420,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, } EXPORT_SYMBOL(intel_gmch_probe); -void intel_gtt_get(u64 *gtt_total, size_t *stolen_size, - phys_addr_t *mappable_base, u64 *mappable_end) +void intel_gtt_get(u64 *gtt_total, u64 *stolen_size, + u64 *mappable_base, u64 *mappable_end) { *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; *stolen_size = intel_private.stolen_size; diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index be54e5331..50272fe81 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -574,7 +574,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, } static int -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, struct hpet_info *info) { struct hpet_timer __iomem *timer; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index fcdd88681..c7f0762e0 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -436,7 +436,7 @@ struct ipmi_smi { struct proc_dir_entry *proc_dir; char proc_dir_name[10]; - atomic_t stats[IPMI_NUM_STATS]; + atomic_unchecked_t stats[IPMI_NUM_STATS]; /* * run_to_completion duplicate of smb_info, smi_info @@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers); static DEFINE_MUTEX(smi_watchers_mutex); #define ipmi_inc_stat(intf, stat) \ - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]) #define ipmi_get_stat(intf, stat) \ - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## 
stat])) static const char * const addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI", @@ -2835,7 +2835,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, INIT_LIST_HEAD(&intf->cmd_rcvrs); init_waitqueue_head(&intf->waitq); for (i = 0; i < IPMI_NUM_STATS; i++) - atomic_set(&intf->stats[i], 0); + atomic_set_unchecked(&intf->stats[i], 0); intf->proc_dir = NULL; diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index 9f2e3be2c..676c910e9 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -66,7 +66,7 @@ static void (*specific_poweroff_func)(ipmi_user_t user); /* Holds the old poweroff function so we can restore it on removal. */ static void (*old_poweroff_func)(void); -static int set_param_ifnum(const char *val, struct kernel_param *kp) +static int set_param_ifnum(const char *val, const struct kernel_param *kp) { int rv = param_set_int(val, kp); if (rv) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index a112c0146..5bd9d252e 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -302,7 +302,7 @@ struct smi_info { unsigned char slave_addr; /* Counters and things for the proc filesystem. */ - atomic_t stats[SI_NUM_STATS]; + atomic_unchecked_t stats[SI_NUM_STATS]; struct task_struct *thread; @@ -311,9 +311,9 @@ struct smi_info { }; #define smi_inc_stat(smi, stat) \ - atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat]) #define smi_get_stat(smi, stat) \ - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat])) #define SI_MAX_PARMS 4 @@ -1344,7 +1344,7 @@ static unsigned int num_slave_addrs; #define IPMI_MEM_ADDR_SPACE 1 static const char * const addr_space_to_str[] = { "i/o", "mem" }; -static int hotmod_handler(const char *val, struct kernel_param *kp); +static int hotmod_handler(const char *val, const struct kernel_param *kp); module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200); MODULE_PARM_DESC(hotmod, "Add and remove interfaces. 
See" @@ -1814,7 +1814,7 @@ static struct smi_info *smi_info_alloc(void) return info; } -static int hotmod_handler(const char *val, struct kernel_param *kp) +static int hotmod_handler(const char *val, const struct kernel_param *kp) { char *str = kstrdup(val, GFP_KERNEL); int rv; @@ -3578,7 +3578,7 @@ static int try_smi_init(struct smi_info *new_smi) atomic_set(&new_smi->req_events, 0); new_smi->run_to_completion = false; for (i = 0; i < SI_NUM_STATS; i++) - atomic_set(&new_smi->stats[i], 0); + atomic_set_unchecked(&new_smi->stats[i], 0); new_smi->interrupt_disabled = true; atomic_set(&new_smi->need_watch, 0); diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 5673ffff0..3ab29080b 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -284,17 +284,17 @@ struct ssif_info { unsigned int multi_len; unsigned int multi_pos; - atomic_t stats[SSIF_NUM_STATS]; + atomic_unchecked_t stats[SSIF_NUM_STATS]; }; #define ssif_inc_stat(ssif, stat) \ - atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat]) + atomic_inc_unchecked(&(ssif)->stats[SSIF_STAT_ ## stat]) #define ssif_get_stat(ssif, stat) \ - ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(ssif)->stats[SSIF_STAT_ ## stat])) static bool initialized; -static atomic_t next_intf = ATOMIC_INIT(0); +static atomic_unchecked_t next_intf = ATOMIC_INIT(0); static void return_hosed_msg(struct ssif_info *ssif_info, struct ipmi_smi_msg *msg); @@ -1608,7 +1608,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) } found: - ssif_info->intf_num = atomic_inc_return(&next_intf); + ssif_info->intf_num = atomic_inc_return_unchecked(&next_intf); if (ssif_dbg_probe) { pr_info("ssif_probe: i2c_probe found device at i2c address %x\n", @@ -1622,7 +1622,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) ssif_info->retry_timer.function = retry_timeout; for (i = 0; i < SSIF_NUM_STATS; i++) - atomic_set(&ssif_info->stats[i], 0); + atomic_set_unchecked(&ssif_info->stats[i], 0); if (ssif_info->supports_pec) ssif_info->client->flags |= I2C_CLIENT_PEC; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 6d9cc2d39..8bf97d666 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -37,6 +38,10 @@ #define DEVPORT_MINOR 4 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) +extern const struct file_operations grsec_fops; +#endif + static inline unsigned long size_inside_page(unsigned long start, unsigned long size) { @@ -67,13 +72,22 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) u64 cursor = from; while (cursor < to) { - if (!devmem_is_allowed(pfn)) + if (!devmem_is_allowed(pfn)) { +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_mem_readwrite(from, to); +#endif return 0; + } cursor += PAGE_SIZE; pfn++; } return 1; } +#elif defined(CONFIG_GRKERNSEC_KMEM) +static inline int range_is_allowed(unsigned long pfn, unsigned long size) +{ + return 0; +} #else static inline int range_is_allowed(unsigned long pfn, unsigned long size) { @@ -98,6 +112,7 @@ static ssize_t read_mem(struct file *file, char __user *buf, phys_addr_t p = *ppos; ssize_t read, sz; void *ptr; + char *temp; if (p != *ppos) return 0; @@ -120,13 +135,19 @@ static ssize_t read_mem(struct file *file, char __user *buf, } #endif + temp = kmalloc(PAGE_SIZE, GFP_KERNEL|GFP_USERCOPY); + if (!temp) + return -ENOMEM; + while 
(count > 0) { unsigned long remaining; sz = size_inside_page(p, count); - if (!range_is_allowed(p >> PAGE_SHIFT, count)) + if (!range_is_allowed(p >> PAGE_SHIFT, count)) { + kfree(temp); return -EPERM; + } /* * On ia64 if a page has been mapped somewhere as uncached, then @@ -134,13 +155,17 @@ static ssize_t read_mem(struct file *file, char __user *buf, * corruption may occur. */ ptr = xlate_dev_mem_ptr(p); - if (!ptr) + if (!ptr || probe_kernel_read(temp, ptr, sz)) { + kfree(temp); return -EFAULT; + } - remaining = copy_to_user(buf, ptr, sz); + remaining = copy_to_user(buf, temp, sz); unxlate_dev_mem_ptr(p, ptr); - if (remaining) + if (remaining) { + kfree(temp); return -EFAULT; + } buf += sz; p += sz; @@ -148,6 +173,8 @@ static ssize_t read_mem(struct file *file, char __user *buf, read += sz; } + kfree(temp); + *ppos += read; return read; } @@ -383,6 +410,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, read = 0; if (p < (unsigned long) high_memory) { + char *temp; + low_count = count; if (count > (unsigned long)high_memory - p) low_count = (unsigned long)high_memory - p; @@ -400,6 +429,11 @@ static ssize_t read_kmem(struct file *file, char __user *buf, count -= sz; } #endif + + temp = kmalloc(PAGE_SIZE, GFP_KERNEL|GFP_USERCOPY); + if (!temp) + return -ENOMEM; + while (low_count > 0) { sz = size_inside_page(p, low_count); @@ -412,14 +446,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf, if (!virt_addr_valid(kbuf)) return -ENXIO; - if (copy_to_user(buf, kbuf, sz)) + if (probe_kernel_read(temp, kbuf, sz) || copy_to_user(buf, temp, sz)) { + kfree(temp); return -EFAULT; + } buf += sz; p += sz; read += sz; low_count -= sz; count -= sz; } + + kfree(temp); } if (count > 0) { @@ -826,6 +864,9 @@ static const struct memdev { #ifdef CONFIG_PRINTK [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 }, +#endif }; static int memory_open(struct inode *inode, struct file *filp) @@ -887,7 +928,7 @@ static int __init chr_dev_init(void) continue; device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), - NULL, devlist[minor].name); + NULL, "%s", devlist[minor].name); } return tty_init(); diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 678fa97e4..5598cef80 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -235,7 +235,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf, spin_unlock_irq(&rtc_lock); - if (copy_to_user(buf, contents, tmp - contents)) + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents)) return -EFAULT; *ppos = i; diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index d28922df0..3c343d6ec 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -2333,7 +2333,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp) if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", - __FILE__, __LINE__, info->device_name, port->count); + __FILE__, __LINE__, info->device_name, atomic_read(&port->count)); if (tty_port_close_start(port, tty, filp) == 0) goto cleanup; @@ -2351,7 +2351,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp) cleanup: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__, - tty->driver->name, port->count); + tty->driver->name, atomic_read(&port->count)); } /* Wait until 
the transmitter is empty. @@ -2493,7 +2493,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp) if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", - __FILE__, __LINE__, tty->driver->name, port->count); + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count)); port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0; @@ -2504,11 +2504,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp) goto cleanup; } spin_lock(&port->lock); - port->count++; + atomic_inc(&port->count); spin_unlock(&port->lock); spin_unlock_irqrestore(&info->netlock, flags); - if (port->count == 1) { + if (atomic_read(&port->count) == 1) { /* 1st open on this device, init hardware */ retval = startup(info, tty); if (retval < 0) @@ -3897,7 +3897,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, unsigned short new_crctype; /* return error if TTY interface open */ - if (info->port.count) + if (atomic_read(&info->port.count)) return -EBUSY; switch (encoding) @@ -4001,7 +4001,7 @@ static int hdlcdev_open(struct net_device *dev) /* arbitrate between network and tty opens */ spin_lock_irqsave(&info->netlock, flags); - if (info->port.count != 0 || info->netcount != 0) { + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) { printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); spin_unlock_irqrestore(&info->netlock, flags); return -EBUSY; @@ -4091,7 +4091,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name); /* return error if TTY interface open */ - if (info->port.count) + if (atomic_read(&info->port.count)) return -EBUSY; if (cmd != SIOCWANDEV) diff --git a/drivers/char/random.c b/drivers/char/random.c index d6876d506..a326099d1 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -292,9 +292,6 @@ /* * To allow fractional bits to be tracked, the entropy_count field is * denominated in units of 1/8th bits. - * - * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in - * credit_entropy_bits() needs to be 64 bits wide. 
*/ #define ENTROPY_SHIFT 3 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT) @@ -680,7 +677,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) /* The +2 corresponds to the /4 in the denominator */ do { - unsigned int anfrac = min(pnfrac, pool_size/2); + u64 anfrac = min(pnfrac, pool_size/2); unsigned int add = ((pool_size - entropy_count)*anfrac*3) >> s; @@ -1476,7 +1473,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, extract_buf(r, tmp); i = min_t(int, nbytes, EXTRACT_SIZE); - if (copy_to_user(buf, tmp, i)) { + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) { ret = -EFAULT; break; } @@ -1926,7 +1923,7 @@ static char sysctl_bootid[16]; static int proc_do_uuid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - struct ctl_table fake_table; + ctl_table_no_const fake_table; unsigned char buf[64], tmp_uuid[16], *uuid; uuid = table->data; @@ -1956,7 +1953,7 @@ static int proc_do_uuid(struct ctl_table *table, int write, static int proc_do_entropy(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - struct ctl_table fake_table; + ctl_table_no_const fake_table; int entropy_count; entropy_count = *(int *)table->data >> ENTROPY_SHIFT; diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index 719c5b4ee..06ef8523a 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -54,6 +54,7 @@ #include #include +#include #include @@ -490,7 +491,7 @@ static struct sonypi_device { spinlock_t fifo_lock; wait_queue_head_t fifo_proc_list; struct fasync_struct *fifo_async; - int open_count; + local_t open_count; int model; struct input_dev *input_jog_dev; struct input_dev *input_key_dev; @@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on) static int sonypi_misc_release(struct inode *inode, struct file *file) { mutex_lock(&sonypi_device.lock); - sonypi_device.open_count--; + local_dec(&sonypi_device.open_count); mutex_unlock(&sonypi_device.lock); return 0; } @@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file) { mutex_lock(&sonypi_device.lock); /* Flush input queue on first open */ - if (!sonypi_device.open_count) + if (!local_read(&sonypi_device.open_count)) kfifo_reset(&sonypi_device.fifo); - sonypi_device.open_count++; + local_inc(&sonypi_device.open_count); mutex_unlock(&sonypi_device.lock); return 0; @@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = { static struct platform_device *sonypi_platform_device; -static struct dmi_system_id __initdata sonypi_dmi_table[] = { +static const struct dmi_system_id __initconst sonypi_dmi_table[] = { { .ident = "Sony Vaio", .matches = { diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index e5950131b..9653af258 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -196,6 +196,11 @@ struct tpm_chip *tpm_chip_alloc(struct device *dev, } EXPORT_SYMBOL_GPL(tpm_chip_alloc); +static void tpm_put_device(void *dev) +{ + put_device(dev); +} + /** * tpmm_chip_alloc() - allocate a new struct tpm_chip instance * @pdev: parent device to which the chip is associated @@ -213,9 +218,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *pdev, if (IS_ERR(chip)) return chip; - rc = devm_add_action_or_reset(pdev, - (void (*)(void *)) put_device, - &chip->dev); + rc = devm_add_action_or_reset(pdev, tpm_put_device, &chip->dev); if (rc) return ERR_PTR(rc); diff --git a/drivers/char/tpm/tpm_acpi.c 
b/drivers/char/tpm/tpm_acpi.c index 565a9478c..dcdc06e5c 100644 --- a/drivers/char/tpm/tpm_acpi.c +++ b/drivers/char/tpm/tpm_acpi.c @@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log) virt = acpi_os_map_iomem(start, len); if (!virt) { kfree(log->bios_event_log); + log->bios_event_log = NULL; printk("%s: ERROR - Unable to map memory\n", __func__); return -EIO; } - memcpy_fromio(log->bios_event_log, virt, len); + memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len); acpi_os_unmap_iomem(virt, len); return 0; diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c index e72288632..78a48b992 100644 --- a/drivers/char/tpm/tpm_eventlog.c +++ b/drivers/char/tpm/tpm_eventlog.c @@ -108,8 +108,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos) converted_event_type = do_endian_conversion(event->event_type); if (((converted_event_type == 0) && (converted_event_size == 0)) - || ((addr + sizeof(struct tcpa_event) + converted_event_size) - >= limit)) + || (converted_event_size >= limit - addr - sizeof(struct tcpa_event))) return NULL; return addr; @@ -138,7 +137,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v, converted_event_type = do_endian_conversion(event->event_type); if (((converted_event_type == 0) && (converted_event_size == 0)) || - ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit)) + (converted_event_size >= limit - v - sizeof(struct tcpa_event))) return NULL; (*pos)++; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 5649234b7..34b55b79f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -691,11 +691,11 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf, if (to_user) { ssize_t ret; - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count); if (ret) return -EFAULT; } else { - memcpy((__force char *)out_buf, buf->buf + buf->offset, + memcpy((__force_kernel char *)out_buf, buf->buf + buf->offset, out_count); } @@ -1170,7 +1170,7 @@ static int get_chars(u32 vtermno, char *buf, int count) /* If we don't have an input queue yet, we can't get input. 
*/ BUG_ON(!port->in_vq); - return fill_readbuf(port, (__force char __user *)buf, count, false); + return fill_readbuf(port, (char __force_user *)buf, count, false); } static void resize_console(struct port *port) diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 3bbd2a58d..69b87bbb9 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -1147,8 +1147,9 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = { }; static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, - const struct bcm2835_pll_data *data) + const void *_data) { + const struct bcm2835_pll_data *data = _data; struct bcm2835_pll *pll; struct clk_init_data init; int ret; @@ -1178,8 +1179,9 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, static struct clk_hw * bcm2835_register_pll_divider(struct bcm2835_cprman *cprman, - const struct bcm2835_pll_divider_data *data) + const void *_data) { + const struct bcm2835_pll_divider_data *data = _data; struct bcm2835_pll_divider *divider; struct clk_init_data init; const char *divider_name; @@ -1237,8 +1239,9 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman, } static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, - const struct bcm2835_clock_data *data) + const void *_data) { + const struct bcm2835_clock_data *data = _data; struct bcm2835_clock *clock; struct clk_init_data init; const char *parents[1 << CM_SRC_BITS]; @@ -1289,13 +1292,17 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, return &clock->hw; } -static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman, - const struct bcm2835_gate_data *data) +static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, + const void *_data) { - return clk_register_gate(cprman->dev, data->name, data->parent, + const struct bcm2835_gate_data *data = _data; + struct clk *clk; + + clk = clk_register_gate(cprman->dev, data->name, data->parent, CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, cprman->regs + data->ctl_reg, CM_GATE_BIT, 0, &cprman->regs_lock); + return __clk_get_hw(clk); } typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, @@ -1306,8 +1313,7 @@ struct bcm2835_clk_desc { }; /* assignment helper macros for different clock types */ -#define _REGISTER(f, ...) { .clk_register = (bcm2835_clk_register)f, \ - .data = __VA_ARGS__ } +#define _REGISTER(f, ...) { .clk_register = f, .data = __VA_ARGS__ } #define REGISTER_PLL(...) 
_REGISTER(&bcm2835_register_pll, \ &(struct bcm2835_pll_data) \ {__VA_ARGS__}) diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index 00269de2f..3e17e606a 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -221,7 +221,7 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name, struct clk_hw *hw; struct clk_init_data init; struct clk_composite *composite; - struct clk_ops *clk_composite_ops; + clk_ops_no_const *clk_composite_ops; int ret; composite = kzalloc(sizeof(*composite), GFP_KERNEL); diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c index c2d572748..1a305dbab 100644 --- a/drivers/clk/socfpga/clk-gate-a10.c +++ b/drivers/clk/socfpga/clk-gate-a10.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "clk.h" @@ -97,7 +98,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk) return 0; } -static struct clk_ops gateclk_ops = { +static clk_ops_no_const gateclk_ops __read_only = { .prepare = socfpga_clk_prepare, .recalc_rate = socfpga_gate_clk_recalc_rate, }; @@ -128,8 +129,10 @@ static void __init __socfpga_gate_init(struct device_node *node, socfpga_clk->hw.reg = clk_mgr_a10_base_addr + clk_gate[0]; socfpga_clk->hw.bit_idx = clk_gate[1]; - gateclk_ops.enable = clk_gate_ops.enable; - gateclk_ops.disable = clk_gate_ops.disable; + pax_open_kernel(); + const_cast(gateclk_ops.enable) = clk_gate_ops.enable; + const_cast(gateclk_ops.disable) = clk_gate_ops.disable; + pax_close_kernel(); } rc = of_property_read_u32(node, "fixed-divider", &fixed_div); diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index aa7a6e6a1..1e9b426ed 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "clk.h" @@ -169,7 +170,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk) return 0; } -static struct clk_ops gateclk_ops = { +static clk_ops_no_const gateclk_ops __read_only = { .prepare = socfpga_clk_prepare, .recalc_rate = socfpga_clk_recalc_rate, .get_parent = socfpga_clk_get_parent, @@ -202,8 +203,10 @@ static void __init __socfpga_gate_init(struct device_node *node, socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0]; socfpga_clk->hw.bit_idx = clk_gate[1]; - gateclk_ops.enable = clk_gate_ops.enable; - gateclk_ops.disable = clk_gate_ops.disable; + pax_open_kernel(); + const_cast(gateclk_ops.enable) = clk_gate_ops.enable; + const_cast(gateclk_ops.disable) = clk_gate_ops.disable; + pax_close_kernel(); } rc = of_property_read_u32(node, "fixed-divider", &fixed_div); diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c index 35fabe1a3..d847c5363 100644 --- a/drivers/clk/socfpga/clk-pll-a10.c +++ b/drivers/clk/socfpga/clk-pll-a10.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "clk.h" @@ -69,7 +70,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk) CLK_MGR_PLL_CLK_SRC_MASK; } -static struct clk_ops clk_pll_ops = { +static clk_ops_no_const clk_pll_ops __read_only = { .recalc_rate = clk_pll_recalc_rate, .get_parent = clk_pll_get_parent, }; @@ -112,8 +113,10 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node, pll_clk->hw.hw.init = &init; pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; - clk_pll_ops.enable = clk_gate_ops.enable; - clk_pll_ops.disable = clk_gate_ops.disable; + pax_open_kernel(); + const_cast(clk_pll_ops.enable) = clk_gate_ops.enable; + const_cast(clk_pll_ops.disable) = clk_gate_ops.disable; + 
pax_close_kernel(); clk = clk_register(NULL, &pll_clk->hw.hw); if (WARN_ON(IS_ERR(clk))) { diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index c7f463172..8d1b7d0ba 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "clk.h" @@ -75,7 +76,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk) CLK_MGR_PLL_CLK_SRC_MASK; } -static struct clk_ops clk_pll_ops = { +static clk_ops_no_const clk_pll_ops __read_only = { .recalc_rate = clk_pll_recalc_rate, .get_parent = clk_pll_get_parent, }; @@ -114,8 +115,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, pll_clk->hw.hw.init = &init; pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; - clk_pll_ops.enable = clk_gate_ops.enable; - clk_pll_ops.disable = clk_gate_ops.disable; + pax_open_kernel(); + const_cast(clk_pll_ops.enable) = clk_gate_ops.enable; + const_cast(clk_pll_ops.disable) = clk_gate_ops.disable; + pax_close_kernel(); clk = clk_register(NULL, &pll_clk->hw.hw); if (WARN_ON(IS_ERR(clk))) { diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index 255cafb18..7b41c3b42 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c @@ -589,7 +589,7 @@ static int ti_adpll_init_clkout(struct ti_adpll_data *d, { struct ti_adpll_clkout_data *co; struct clk_init_data init; - struct clk_ops *ops; + clk_ops_no_const *ops; const char *parent_names[2]; const char *child_name; struct clk *clock; diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 5fcf24775..446780a7e 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -25,6 +25,8 @@ #include #include +#include + #include "clock.h" #undef pr_fmt @@ -84,8 +86,10 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) } ti_clk_ll_ops = ops; - ops->clk_readl = clk_memmap_readl; - ops->clk_writel = clk_memmap_writel; + pax_open_kernel(); + const_cast(ops->clk_readl) = clk_memmap_readl; + const_cast(ops->clk_writel) = clk_memmap_writel; + pax_close_kernel(); return 0; } diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 297e9128f..d5661fbba 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -694,8 +694,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->acpi_perf_cpu = cpu; policy->driver_data = data; - if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) - acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { + pax_open_kernel(); + const_cast(acpi_cpufreq_driver.flags) |= CPUFREQ_CONST_LOOPS; + pax_close_kernel(); + } result = acpi_processor_register_performance(perf, cpu); if (result) @@ -833,7 +836,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); break; case ACPI_ADR_SPACE_FIXED_HARDWARE: - acpi_cpufreq_driver.get = get_cur_freq_on_cpu; + pax_open_kernel(); + const_cast(acpi_cpufreq_driver.get) = get_cur_freq_on_cpu; + pax_close_kernel(); break; default: break; @@ -930,8 +935,10 @@ static void __init acpi_cpufreq_boost_init(void) if (!msrs) return; - acpi_cpufreq_driver.set_boost = set_boost; - acpi_cpufreq_driver.boost_enabled = boost_state(0); + pax_open_kernel(); + const_cast(acpi_cpufreq_driver.set_boost) = set_boost; + const_cast(acpi_cpufreq_driver.boost_enabled) = boost_state(0); + pax_close_kernel(); cpu_notifier_register_begin(); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 4d3ec92cb..cf501fc10 100644 --- 
a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -373,8 +373,11 @@ static int dt_cpufreq_probe(struct platform_device *pdev) if (ret) return ret; - if (data && data->have_governor_per_policy) - dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY; + if (data && data->have_governor_per_policy) { + pax_open_kernel(); + const_cast(dt_cpufreq_driver.flags) |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY; + pax_close_kernel(); + } ret = cpufreq_register_driver(&dt_cpufreq_driver); if (ret) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6e6c1fb60..ccc5cd203 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -528,12 +528,12 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); * SYSFS INTERFACE * *********************************************************************/ static ssize_t show_boost(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); } -static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, +static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret, enable; @@ -2114,7 +2114,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) read_unlock_irqrestore(&cpufreq_driver_lock, flags); mutex_lock(&cpufreq_governor_mutex); - list_del(&governor->governor_list); + pax_list_del(&governor->governor_list); mutex_unlock(&cpufreq_governor_mutex); return; } @@ -2334,13 +2334,17 @@ int cpufreq_boost_trigger_state(int state) return 0; write_lock_irqsave(&cpufreq_driver_lock, flags); - cpufreq_driver->boost_enabled = state; + pax_open_kernel(); + const_cast(cpufreq_driver->boost_enabled) = state; + pax_close_kernel(); write_unlock_irqrestore(&cpufreq_driver_lock, flags); ret = cpufreq_driver->set_boost(state); if (ret) { write_lock_irqsave(&cpufreq_driver_lock, flags); - cpufreq_driver->boost_enabled = !state; + pax_open_kernel(); + const_cast(cpufreq_driver->boost_enabled) = !state; + pax_close_kernel(); write_unlock_irqrestore(&cpufreq_driver_lock, flags); pr_err("%s: Cannot %s BOOST\n", @@ -2381,7 +2385,9 @@ int cpufreq_enable_boost_support(void) if (cpufreq_boost_supported()) return 0; - cpufreq_driver->set_boost = cpufreq_boost_set_sw; + pax_open_kernel(); + const_cast(cpufreq_driver->set_boost) = cpufreq_boost_set_sw; + pax_close_kernel(); /* This will get removed on driver unregister */ return create_boost_sysfs_file(); @@ -2439,8 +2445,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) cpufreq_driver = driver_data; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (driver_data->setpolicy) - driver_data->flags |= CPUFREQ_CONST_LOOPS; + if (driver_data->setpolicy) { + pax_open_kernel(); + const_cast(driver_data->flags) |= CPUFREQ_CONST_LOOPS; + pax_close_kernel(); + } if (cpufreq_boost_supported()) { ret = create_boost_sysfs_file(); diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index ef1037e9c..c832d36ba 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -171,7 +171,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy); struct od_ops { unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, unsigned int freq_next, unsigned int relation); -}; +} __no_const; unsigned int dbs_update(struct cpufreq_policy *policy); void od_register_powersave_bias_handler(unsigned int (*f) diff --git 
a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 3a1f49f5f..42a478e4c 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -408,7 +408,7 @@ static void od_start(struct cpufreq_policy *policy) ondemand_powersave_bias_init(policy); } -static struct od_ops od_ops = { +static struct od_ops od_ops __read_only = { .powersave_bias_target = generic_powersave_bias_target, }; @@ -464,14 +464,18 @@ void od_register_powersave_bias_handler(unsigned int (*f) (struct cpufreq_policy *, unsigned int, unsigned int), unsigned int powersave_bias) { - od_ops.powersave_bias_target = f; + pax_open_kernel(); + const_cast(od_ops.powersave_bias_target) = f; + pax_close_kernel(); od_set_powersave_bias(powersave_bias); } EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler); void od_unregister_powersave_bias_handler(void) { - od_ops.powersave_bias_target = generic_powersave_bias_target; + pax_open_kernel(); + const_cast(od_ops.powersave_bias_target) = generic_powersave_bias_target; + pax_close_kernel(); od_set_powersave_bias(0); } EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 80fa656da..fe0559327 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -283,13 +283,13 @@ struct pstate_funcs { struct cpu_defaults { struct pstate_adjust_policy pid_policy; struct pstate_funcs funcs; -}; +} __do_const; static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu); static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu); static struct pstate_adjust_policy pid_params __read_mostly; -static struct pstate_funcs pstate_funcs __read_mostly; +static struct pstate_funcs *pstate_funcs __read_mostly; static int hwp_active __read_mostly; #ifdef CONFIG_ACPI @@ -658,13 +658,13 @@ static void __init intel_pstate_debug_expose_params(void) /************************** sysfs begin ************************/ #define show_one(file_name, object) \ static ssize_t show_##file_name \ - (struct kobject *kobj, struct attribute *attr, char *buf) \ + (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%u\n", limits->object); \ } static ssize_t show_turbo_pct(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpudata *cpu; int total, no_turbo, turbo_pct; @@ -680,7 +680,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj, } static ssize_t show_num_pstates(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpudata *cpu; int total; @@ -691,7 +691,7 @@ static ssize_t show_num_pstates(struct kobject *kobj, } static ssize_t show_no_turbo(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { ssize_t ret; @@ -704,7 +704,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, return ret; } -static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, +static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -728,7 +728,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, return count; } -static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, +static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -752,7 +752,7 @@ static ssize_t 
store_max_perf_pct(struct kobject *a, struct attribute *b, return count; } -static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, +static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -1173,7 +1173,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) * right CPU. */ wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, - pstate_funcs.get_val(cpu, pstate)); + pstate_funcs->get_val(cpu, pstate)); } static void intel_pstate_set_min_pstate(struct cpudata *cpu) @@ -1192,14 +1192,14 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { - cpu->pstate.min_pstate = pstate_funcs.get_min(); - cpu->pstate.max_pstate = pstate_funcs.get_max(); - cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); - cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); - cpu->pstate.scaling = pstate_funcs.get_scaling(); + cpu->pstate.min_pstate = pstate_funcs->get_min(); + cpu->pstate.max_pstate = pstate_funcs->get_max(); + cpu->pstate.max_pstate_physical = pstate_funcs->get_max_physical(); + cpu->pstate.turbo_pstate = pstate_funcs->get_turbo(); + cpu->pstate.scaling = pstate_funcs->get_scaling(); - if (pstate_funcs.get_vid) - pstate_funcs.get_vid(cpu); + if (pstate_funcs->get_vid) + pstate_funcs->get_vid(cpu); intel_pstate_set_min_pstate(cpu); } @@ -1348,7 +1348,7 @@ static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) return; cpu->pstate.current_pstate = pstate; - wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); + wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs->get_val(cpu, pstate)); } static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) @@ -1359,7 +1359,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) from = cpu->pstate.current_pstate; target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ? 
- cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu); + cpu->pstate.turbo_pstate : pstate_funcs->get_target_pstate(cpu); intel_pstate_update_pstate(cpu, target_pstate); @@ -1683,15 +1683,15 @@ static unsigned int force_load __initdata; static int __init intel_pstate_msrs_not_valid(void) { - if (!pstate_funcs.get_max() || - !pstate_funcs.get_min() || - !pstate_funcs.get_turbo()) + if (!pstate_funcs->get_max() || + !pstate_funcs->get_min() || + !pstate_funcs->get_turbo()) return -ENODEV; return 0; } -static void __init copy_pid_params(struct pstate_adjust_policy *policy) +static void __init copy_pid_params(const struct pstate_adjust_policy *policy) { pid_params.sample_rate_ms = policy->sample_rate_ms; pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; @@ -1704,15 +1704,7 @@ static void __init copy_pid_params(struct pstate_adjust_policy *policy) static void __init copy_cpu_funcs(struct pstate_funcs *funcs) { - pstate_funcs.get_max = funcs->get_max; - pstate_funcs.get_max_physical = funcs->get_max_physical; - pstate_funcs.get_min = funcs->get_min; - pstate_funcs.get_turbo = funcs->get_turbo; - pstate_funcs.get_scaling = funcs->get_scaling; - pstate_funcs.get_val = funcs->get_val; - pstate_funcs.get_vid = funcs->get_vid; - pstate_funcs.get_target_pstate = funcs->get_target_pstate; - + pstate_funcs = funcs; } #ifdef CONFIG_ACPI diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index fd7781231..97e3efece 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -130,10 +130,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) case 0x0F: /* Core Duo */ case 0x16: /* Celeron Core */ case 0x1C: /* Atom */ - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; + pax_open_kernel(); + const_cast(p4clockmod_driver.flags) |= CPUFREQ_CONST_LOOPS; + pax_close_kernel(); return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE); case 0x0D: /* Pentium M (Dothan) */ - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; + pax_open_kernel(); + const_cast(p4clockmod_driver.flags) |= CPUFREQ_CONST_LOOPS; + pax_close_kernel(); /* fall through */ case 0x09: /* Pentium M (Banias) */ return speedstep_get_frequency(SPEEDSTEP_CPU_PM); @@ -145,7 +149,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) /* on P-4s, the TSC runs with constant frequency independent whether * throttling is active or not. */ - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; + pax_open_kernel(); + const_cast(p4clockmod_driver.flags) |= CPUFREQ_CONST_LOOPS; + pax_close_kernel(); if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) { pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n"); diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index 9bb42ba50..b01b4a2dd 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -18,14 +18,12 @@ #include #include -static struct cpufreq_driver *cpufreq_us3_driver; - struct us3_freq_percpu_info { struct cpufreq_frequency_table table[4]; }; /* Indexed by cpu number. */ -static struct us3_freq_percpu_info *us3_freq_table; +static struct us3_freq_percpu_info us3_freq_table[NR_CPUS]; /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled * in the Safari config register. 
@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) static int us3_freq_cpu_exit(struct cpufreq_policy *policy) { - if (cpufreq_us3_driver) - us3_freq_target(policy, 0); + us3_freq_target(policy, 0); return 0; } +static int __init us3_freq_init(void); +static void __exit us3_freq_exit(void); + +static struct cpufreq_driver cpufreq_us3_driver = { + .init = us3_freq_cpu_init, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = us3_freq_target, + .get = us3_freq_get, + .exit = us3_freq_cpu_exit, + .name = "UltraSPARC-III", + +}; + static int __init us3_freq_init(void) { unsigned long manuf, impl, ver; - int ret; if (tlb_type != cheetah && tlb_type != cheetah_plus) return -ENODEV; @@ -178,55 +187,15 @@ static int __init us3_freq_init(void) (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL || impl == JAGUAR_IMPL || - impl == PANTHER_IMPL)) { - struct cpufreq_driver *driver; - - ret = -ENOMEM; - driver = kzalloc(sizeof(*driver), GFP_KERNEL); - if (!driver) - goto err_out; - - us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)), - GFP_KERNEL); - if (!us3_freq_table) - goto err_out; - - driver->init = us3_freq_cpu_init; - driver->verify = cpufreq_generic_frequency_table_verify; - driver->target_index = us3_freq_target; - driver->get = us3_freq_get; - driver->exit = us3_freq_cpu_exit; - strcpy(driver->name, "UltraSPARC-III"); - - cpufreq_us3_driver = driver; - ret = cpufreq_register_driver(driver); - if (ret) - goto err_out; - - return 0; - -err_out: - if (driver) { - kfree(driver); - cpufreq_us3_driver = NULL; - } - kfree(us3_freq_table); - us3_freq_table = NULL; - return ret; - } + impl == PANTHER_IMPL)) + return cpufreq_register_driver(&cpufreq_us3_driver); return -ENODEV; } static void __exit us3_freq_exit(void) { - if (cpufreq_us3_driver) { - cpufreq_unregister_driver(cpufreq_us3_driver); - kfree(cpufreq_us3_driver); - cpufreq_us3_driver = NULL; - kfree(us3_freq_table); - us3_freq_table = NULL; - } + cpufreq_unregister_driver(&cpufreq_us3_driver); } MODULE_AUTHOR("David S. 
Miller "); diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 41bc5397f..e46a74d1e 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -352,8 +352,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) !cpu_has(cpu, X86_FEATURE_EST)) return -ENODEV; - if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) - centrino_driver.flags |= CPUFREQ_CONST_LOOPS; + if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) { + pax_open_kernel(); + const_cast(centrino_driver.flags) |= CPUFREQ_CONST_LOOPS; + pax_close_kernel(); + } if (policy->cpu != 0) return -ENODEV; diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index ab264d393..6a13e87bb 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -194,7 +194,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, static void poll_idle_init(struct cpuidle_driver *drv) { - struct cpuidle_state *state = &drv->states[0]; + cpuidle_state_no_const *state = &drv->states[0]; snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index a5c111b67..11130028f 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -21,7 +21,7 @@ #include "dt_idle_states.h" -static int init_state_node(struct cpuidle_state *idle_state, +static int init_state_node(cpuidle_state_no_const *idle_state, const struct of_device_id *matches, struct device_node *state_node) { diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index fb9f511cc..213e6cc1b 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) mutex_lock(&cpuidle_lock); if (__cpuidle_find_governor(gov->name) == NULL) { ret = 0; - list_add_tail(&gov->governor_list, &cpuidle_governors); + pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors); if (!cpuidle_curr_governor || cpuidle_curr_governor->rating < gov->rating) cpuidle_switch_governor(gov); diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 63bd5a403..eea2dff9a 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -173,6 +173,15 @@ static void ladder_reflect(struct cpuidle_device *dev, int index) static struct cpuidle_governor ladder_governor = { .name = "ladder", + .rating = 25, + .enable = ladder_enable_device, + .select = ladder_select_state, + .reflect = ladder_reflect, + .owner = THIS_MODULE, +}; + +static struct cpuidle_governor ladder_governor_nohz = { + .name = "ladder", .rating = 10, .enable = ladder_enable_device, .select = ladder_select_state, @@ -190,10 +199,8 @@ static int __init init_ladder(void) * governor is better so give it a higher rating than the menu * governor. */ - if (!tick_nohz_enabled) - ladder_governor.rating = 25; - return cpuidle_register_governor(&ladder_governor); + return cpuidle_register_governor(tick_nohz_enabled ? 
&ladder_governor_nohz : &ladder_governor); } postcore_initcall(init_ladder); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 832a2c3f0..1794080df 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = { NULL }; -static struct attribute_group cpuidle_attr_group = { +static attribute_group_no_const cpuidle_attr_group = { .attrs = cpuidle_default_attrs, .name = "cpuidle", }; diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index e09d4055b..f86cbacf0 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -37,7 +37,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); MODULE_PARM_DESC(hifn_pll_ref, "PLL reference clock (pci[freq] or ext[freq], default ext)"); -static atomic_t hifn_dev_number; +static atomic_unchecked_t hifn_dev_number; #define ACRYPTO_OP_DECRYPT 0 #define ACRYPTO_OP_ENCRYPT 1 @@ -2475,7 +2475,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_out_disable_pci_device; snprintf(name, sizeof(name), "hifn%d", - atomic_inc_return(&hifn_dev_number) - 1); + atomic_inc_return_unchecked(&hifn_dev_number) - 1); err = pci_request_regions(pdev, name); if (err) diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 2839fccdd..b40595a43 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -56,7 +56,7 @@ static struct workqueue_struct *device_reset_wq; static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index 9320ae1d0..4bf8e7e56 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -93,7 +93,7 @@ static void adf_iov_send_resp(struct work_struct *work) kfree(pf2vf_resp); } -static void adf_vf2pf_bh_handler(void *data) +static void adf_vf2pf_bh_handler(unsigned long data) { struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data; struct adf_pf2vf_resp *pf2vf_resp; @@ -126,7 +126,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) vf_info->vf_nr = i; tasklet_init(&vf_info->vf2pf_bh_tasklet, - (void *)adf_vf2pf_bh_handler, + adf_vf2pf_bh_handler, (unsigned long)vf_info); mutex_init(&vf_info->pf2vf_lock); ratelimit_state_init(&vf_info->vf2pf_ratelimit, diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index bf99e11a3..a44361cb3 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -112,9 +112,9 @@ static void adf_dev_stop_async(struct work_struct *work) kfree(stop_data); } -static void adf_pf2vf_bh_handler(void *data) +static void adf_pf2vf_bh_handler(unsigned long data) { - struct adf_accel_dev *accel_dev = data; + struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)data; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; @@ -183,7 +183,7 @@ static void adf_pf2vf_bh_handler(void *data) static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev) { tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet, - (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev); + adf_pf2vf_bh_handler, 
(unsigned long)accel_dev); mutex_init(&accel_dev->vf.vf2pf_lock); return 0; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 712592cef..d7a18b2fa 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -813,7 +813,7 @@ int devfreq_add_governor(struct devfreq_governor *governor) goto err_out; } - list_add(&governor->node, &devfreq_governor_list); + pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list); list_for_each_entry(devfreq, &devfreq_list, node) { int ret = 0; @@ -901,7 +901,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor) } } - list_del(&governor->node); + pax_list_del((struct list_head *)&governor->node); err_out: mutex_unlock(&devfreq_list_lock); diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c index 9ef46e259..775fc7568 100644 --- a/drivers/devfreq/governor_passive.c +++ b/drivers/devfreq/governor_passive.c @@ -151,7 +151,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq, struct devfreq_passive_data *p_data = (struct devfreq_passive_data *)devfreq->data; struct devfreq *parent = (struct devfreq *)p_data->parent; - struct notifier_block *nb = &p_data->nb; + notifier_block_no_const *nb = &p_data->nb; int ret = 0; if (!parent) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index e244e10a9..b7799ec38 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -570,7 +570,7 @@ static ssize_t hidma_show_values(struct device *dev, static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode) { - struct device_attribute *attrs; + device_attribute_no_const *attrs; char *name_copy; attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c index d61f1068a..a23baa35e 100644 --- a/drivers/dma/qcom/hidma_mgmt_sys.c +++ b/drivers/dma/qcom/hidma_mgmt_sys.c @@ -194,7 +194,7 @@ static ssize_t set_values_channel(struct kobject *kobj, static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode) { - struct device_attribute *attrs; + device_attribute_no_const *attrs; char *name_copy; attrs = devm_kmalloc(&dev->pdev->dev, diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 12fa48e38..19d6a7c80 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -227,8 +227,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan) schan->slave_id = -EINVAL; } - schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, - sdev->desc_size, GFP_KERNEL); + schan->desc = kcalloc(sdev->desc_size, + NR_DESCS_PER_CHANNEL, GFP_KERNEL); if (!schan->desc) { ret = -ENOMEM; goto edescalloc; diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index c94ffab0d..82c11f058 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self, return ret; } -static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { +static struct notifier_block sh_dmae_nmi_notifier = { .notifier_call = sh_dmae_nmi_handler, /* Run before NMI debug handler and KGDB */ diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index a97900333..773b7f0f4 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -468,9 +468,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, */ int edac_device_alloc_index(void) { - static atomic_t device_indexes = ATOMIC_INIT(0); + static 
atomic_unchecked_t device_indexes = ATOMIC_INIT(0); - return atomic_inc_return(&device_indexes) - 1; + return atomic_inc_return_unchecked(&device_indexes) - 1; } EXPORT_SYMBOL_GPL(edac_device_alloc_index); diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index 93da1a45c..5e2c149e3 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -749,7 +749,7 @@ static int edac_device_add_main_sysfs_attributes( */ while (sysfs_attrib->attr.name != NULL) { err = sysfs_create_file(&edac_dev->kobj, - (struct attribute*) sysfs_attrib); + &sysfs_attrib->attr); if (err) goto err_out; diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 4e0f8e720..0eb949927 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -50,7 +50,7 @@ int edac_mc_get_poll_msec(void) return edac_mc_poll_msec; } -static int edac_set_poll_msec(const char *val, struct kernel_param *kp) +static int edac_set_poll_msec(const char *val, const struct kernel_param *kp) { unsigned long l; int ret; @@ -154,7 +154,7 @@ static const char * const edac_caps[] = { struct dev_ch_attribute { struct device_attribute attr; int channel; -}; +} __do_const; #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ static struct dev_ch_attribute dev_attr_legacy_##_name = \ diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c index 5f8543be9..46aa017a3 100644 --- a/drivers/edac/edac_module.c +++ b/drivers/edac/edac_module.c @@ -19,7 +19,7 @@ #ifdef CONFIG_EDAC_DEBUG -static int edac_set_debug_level(const char *buf, struct kernel_param *kp) +static int edac_set_debug_level(const char *buf, const struct kernel_param *kp) { unsigned long val; int ret; diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index 8f2f2899a..cbb0d7cb2 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c @@ -29,7 +29,7 @@ static DEFINE_MUTEX(edac_pci_ctls_mutex); static LIST_HEAD(edac_pci_list); -static atomic_t pci_indexes = ATOMIC_INIT(0); +static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0); /* * edac_pci_alloc_ctl_info @@ -224,7 +224,7 @@ static void edac_pci_workq_function(struct work_struct *work_req) */ int edac_pci_alloc_index(void) { - return atomic_inc_return(&pci_indexes) - 1; + return atomic_inc_return_unchecked(&pci_indexes) - 1; } EXPORT_SYMBOL_GPL(edac_pci_alloc_index); diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 6e3428ba4..9bdb20757 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */ static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */ static int edac_pci_poll_msec = 1000; /* one second workq period */ -static atomic_t pci_parity_count = ATOMIC_INIT(0); -static atomic_t pci_nonparity_count = ATOMIC_INIT(0); +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0); +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0); static struct kobject *edac_pci_top_main_kobj; static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0); @@ -232,7 +232,7 @@ struct edac_pci_dev_attribute { void *value; ssize_t(*show) (void *, char *); ssize_t(*store) (void *, const char *, size_t); -}; +} __do_const; /* Set of show/store abstract level functions for PCI Parity object */ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, @@ -564,7 +564,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) edac_printk(KERN_CRIT, EDAC_PCI, 
"Signaled System Error on %s\n", pci_name(dev)); - atomic_inc(&pci_nonparity_count); + atomic_inc_unchecked(&pci_nonparity_count); } if (status & (PCI_STATUS_PARITY)) { @@ -572,7 +572,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) "Master Data Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } if (status & (PCI_STATUS_DETECTED_PARITY)) { @@ -580,7 +580,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) "Detected Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } } @@ -603,7 +603,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " "Signaled System Error on %s\n", pci_name(dev)); - atomic_inc(&pci_nonparity_count); + atomic_inc_unchecked(&pci_nonparity_count); } if (status & (PCI_STATUS_PARITY)) { @@ -611,7 +611,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) "Master Data Parity Error on " "%s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } if (status & (PCI_STATUS_DETECTED_PARITY)) { @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) "Detected Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } } } @@ -657,7 +657,7 @@ void edac_pci_do_parity_check(void) if (!check_pci_errors) return; - before_count = atomic_read(&pci_parity_count); + before_count = atomic_read_unchecked(&pci_parity_count); /* scan all PCI devices looking for a Parity Error on devices and * bridges. @@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void) /* Only if operator has selected panic on PCI Error */ if (edac_pci_get_panic_on_pe()) { /* If the count is different 'after' from 'before' */ - if (before_count != atomic_read(&pci_parity_count)) + if (before_count != atomic_read_unchecked(&pci_parity_count)) panic("EDAC: PCI Parity Error"); } } diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h index c2359a1ea..8bd119dd2 100644 --- a/drivers/edac/mce_amd.h +++ b/drivers/edac/mce_amd.h @@ -74,7 +74,7 @@ struct amd_decoder_ops { bool (*mc0_mce)(u16, u8); bool (*mc1_mce)(u16, u8); bool (*mc2_mce)(u16, u8); -}; +} __no_const; void amd_report_gart_errors(bool); void amd_register_ecc_decoder(void (*f)(int, struct mce *)); diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 57ea7f464..af06b7634 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, struct device *device) { - static atomic_t index = ATOMIC_INIT(-1); + static atomic_unchecked_t index = ATOMIC_INIT(-1); - card->index = atomic_inc_return(&index); + card->index = atomic_inc_return_unchecked(&index); card->driver = driver; card->device = device; card->current_tlabel = 0; @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release); void fw_core_remove_card(struct fw_card *card) { - struct fw_card_driver dummy_driver = dummy_driver_template; + fw_card_driver_no_const dummy_driver = dummy_driver_template; card->driver->update_phy_reg(card, 4, PHY_LINK_ACTIVE | PHY_CONTENDER, 0); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index aee149bdf..2a189600e 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -970,7 +970,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) { 
struct fw_cdev_create_iso_context *a = &arg->create_iso_context; struct fw_iso_context *context; - fw_iso_callback_t cb; + void *cb; int ret; BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT || @@ -995,7 +995,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) break; case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: - cb = (fw_iso_callback_t)iso_mc_callback; + cb = iso_mc_callback; break; default: diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index f9e3aee6a..269dbdb05 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma); struct config_rom_attribute { struct device_attribute attr; u32 key; -}; +} __do_const; static ssize_t show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 38c0aa60b..95466e41e 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -162,7 +162,7 @@ size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type, int channel, int speed, size_t header_size, - fw_iso_callback_t callback, void *callback_data) + void *callback, void *callback_data) { struct fw_iso_context *ctx; diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index d6a09b9cd..18e90ddc1 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -38,6 +38,7 @@ #include #include #include +#include #include diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index e1480ff68..1a429bdcd 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -111,6 +111,7 @@ struct fw_card_driver { int (*stop_iso)(struct fw_iso_context *ctx); }; +typedef struct fw_card_driver __no_const fw_card_driver_no_const; void fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, struct device *device); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8bf89267d..55a4930eb 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work) be32_to_cpu(ohci->next_header)); } +#ifndef CONFIG_GRKERNSEC if (param_remote_dma) { reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); } +#endif spin_unlock_irq(&ohci->lock); @@ -2585,8 +2587,10 @@ static int ohci_enable_phys_dma(struct fw_card *card, unsigned long flags; int n, ret = 0; +#ifndef CONFIG_GRKERNSEC if (param_remote_dma) return 0; +#endif /* * FIXME: Make sure this bitmask is cleared when we clear the busReset diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 44c01390d..5252697dc 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c @@ -16,7 +16,7 @@ struct dmi_device_attribute{ struct device_attribute dev_attr; int field; -}; +} __do_const; #define to_dmi_dev_attr(_dev_attr) \ container_of(_dev_attr, struct dmi_device_attribute, dev_attr) @@ -159,9 +159,14 @@ static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } +static void dmi_dev_release(struct device *dev) +{ + kfree(dev); +} + static struct class dmi_class = { .name = "dmi", - .dev_release = (void(*)(struct device *)) kfree, + .dev_release = dmi_dev_release, .dev_uevent = dmi_dev_uevent, }; diff --git a/drivers/firmware/dmi_scan.c 
b/drivers/firmware/dmi_scan.c index 88bebe196..e599fad80 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -712,14 +712,18 @@ static int __init dmi_init(void) if (!dmi_table) goto err_tables; - bin_attr_smbios_entry_point.size = smbios_entry_point_size; - bin_attr_smbios_entry_point.private = smbios_entry_point; + pax_open_kernel(); + const_cast(bin_attr_smbios_entry_point.size) = smbios_entry_point_size; + const_cast(bin_attr_smbios_entry_point.private) = smbios_entry_point; + pax_close_kernel(); ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point); if (ret) goto err_unmap; - bin_attr_DMI.size = dmi_len; - bin_attr_DMI.private = dmi_table; + pax_open_kernel(); + const_cast(bin_attr_DMI.size) = dmi_len; + const_cast(bin_attr_DMI.private) = dmi_table; + pax_close_kernel(); ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI); if (!ret) return 0; diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index d42537425..1da1716b9 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN]; */ u64 cper_next_record_id(void) { - static atomic64_t seq; + static atomic64_unchecked_t seq; - if (!atomic64_read(&seq)) - atomic64_set(&seq, ((u64)get_seconds()) << 32); + if (!atomic64_read_unchecked(&seq)) + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32); - return atomic64_inc_return(&seq); + return atomic64_inc_return_unchecked(&seq); } EXPORT_SYMBOL_GPL(cper_next_record_id); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index a4944e22f..a5e9cad38 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -181,15 +181,17 @@ static struct attribute_group efi_subsys_attr_group = { }; static struct efivars generic_efivars; -static struct efivar_operations generic_ops; +static efivar_operations_no_const generic_ops __read_only; static int generic_ops_register(void) { - generic_ops.get_variable = efi.get_variable; - generic_ops.set_variable = efi.set_variable; - generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking; - generic_ops.get_next_variable = efi.get_next_variable; - generic_ops.query_variable_store = efi_query_variable_store; + pax_open_kernel(); + const_cast(generic_ops.get_variable) = efi.get_variable; + const_cast(generic_ops.set_variable) = efi.set_variable; + const_cast(generic_ops.set_variable_nonblocking) = efi.set_variable_nonblocking; + const_cast(generic_ops.get_next_variable) = efi.get_next_variable; + const_cast(generic_ops.query_variable_store) = efi_query_variable_store; + pax_close_kernel(); return efivars_register(&generic_efivars, &generic_ops, efi_kobj); } diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 3e626fd9b..1ccae431d 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -587,7 +587,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) static int create_efivars_bin_attributes(void) { - struct bin_attribute *attr; + bin_attribute_no_const *attr; int error; /* new_var */ diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 5e23e2d30..419407a17 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,6 +28,8 @@ OBJECT_FILES_NON_STANDARD := y # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
KCOV_INSTRUMENT := n +GCC_PLUGINS := n + lib-y := efi-stub-helper.o gop.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 8e64b77ae..bc1695a28 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -93,7 +93,7 @@ static void map_release(struct kobject *kobj) kfree(entry); } -static struct kobj_type __refdata map_ktype = { +static const struct kobj_type __refconst map_ktype = { .sysfs_ops = &map_attr_ops, .default_attrs = def_attrs, .release = map_release, diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index c46387160..c8f169bf2 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8]) return local_hash_64(input, 32); } -static struct dmi_system_id gsmi_dmi_table[] __initdata = { +static const struct dmi_system_id gsmi_dmi_table[] __initconst = { { .ident = "Google Board", .matches = { diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c index 2f569aaed..3af549743 100644 --- a/drivers/firmware/google/memconsole.c +++ b/drivers/firmware/google/memconsole.c @@ -136,7 +136,7 @@ static bool __init found_memconsole(void) return false; } -static struct dmi_system_id memconsole_dmi_table[] __initdata = { +static const struct dmi_system_id memconsole_dmi_table[] __initconst = { { .ident = "Google Board", .matches = { @@ -155,7 +155,10 @@ static int __init memconsole_init(void) if (!found_memconsole()) return -ENODEV; - memconsole_bin_attr.size = memconsole_length; + pax_open_kernel(); + const_cast(memconsole_bin_attr.size) = memconsole_length; + pax_close_kernel(); + return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr); } diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 5de3ed292..d839c56af 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj) kfree(entry); } -static struct kobj_type __refdata memmap_ktype = { +static const struct kobj_type __refconst memmap_ktype = { .release = release_firmware_map_entry, .sysfs_ops = &memmap_attr_ops, .default_attrs = def_attrs, diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index 8263429e2..d0ef61f7f 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -59,7 +59,7 @@ bool psci_tos_resident_on(int cpu) return cpu == resident_cpu; } -struct psci_operations psci_ops; +struct psci_operations psci_ops __read_only; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index dd262f002..2834a849f 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -440,9 +440,9 @@ static struct irq_chip *davinci_gpio_get_irq_chip(unsigned int irq) return &gpio_unbanked.chip; }; -static struct irq_chip *keystone_gpio_get_irq_chip(unsigned int irq) +static irq_chip_no_const *keystone_gpio_get_irq_chip(unsigned int irq) { - static struct irq_chip gpio_unbanked; + static irq_chip_no_const gpio_unbanked; gpio_unbanked = *irq_get_chip(irq); return &gpio_unbanked; @@ -472,7 +472,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) struct davinci_gpio_regs __iomem *g; struct irq_domain *irq_domain = NULL; const struct of_device_id *match; - struct irq_chip 
*irq_chip; + irq_chip_no_const *irq_chip; gpio_get_irq_chip_cb_t gpio_get_irq_chip; /* diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index 8d32ccc98..2d2ca6147 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -274,7 +274,7 @@ static int em_gio_probe(struct platform_device *pdev) struct em_gio_priv *p; struct resource *io[2], *irq[2]; struct gpio_chip *gpio_chip; - struct irq_chip *irq_chip; + irq_chip_no_const *irq_chip; const char *name = dev_name(&pdev->dev); unsigned int ngpios; int ret; diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index 4f6d64351..eb4655ccb 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c @@ -95,7 +95,7 @@ struct ichx_desc { * this option allows driver caching written output values */ bool use_outlvl_cache; -}; +} __do_const; static struct { spinlock_t lock; diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 793518a30..59c5a05bc 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -21,6 +21,7 @@ #include #include #include +#include #define MPC8XXX_GPIO_PINS 32 @@ -226,7 +227,7 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type) return 0; } -static struct irq_chip mpc8xxx_irq_chip = { +static irq_chip_no_const mpc8xxx_irq_chip __read_only = { .name = "mpc8xxx-gpio", .irq_unmask = mpc8xxx_irq_unmask, .irq_mask = mpc8xxx_irq_mask, @@ -337,7 +338,9 @@ static int mpc8xxx_probe(struct platform_device *pdev) * It's assumed that only a single type of gpio controller is available * on the current machine, so overwriting global data is fine. */ - mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type; + pax_open_kernel(); + const_cast(mpc8xxx_irq_chip.irq_set_type) = devtype->irq_set_type; + pax_close_kernel(); if (devtype->gpio_dir_out) gc->direction_output = devtype->gpio_dir_out; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index b98ede78c..c83e86072 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -1029,7 +1029,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank) writel_relaxed(0, base + bank->regs->ctrl); } -static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) +static int omap_gpio_chip_init(struct gpio_bank *bank, irq_chip_no_const *irqc) { static int gpio; int irq_base = 0; @@ -1119,7 +1119,7 @@ static int omap_gpio_probe(struct platform_device *pdev) const struct omap_gpio_platform_data *pdata; struct resource *res; struct gpio_bank *bank; - struct irq_chip *irqc; + irq_chip_no_const *irqc; int ret; match = of_match_device(of_match_ptr(omap_gpio_match), dev); diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 2be48f5eb..594e8fbba 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -395,7 +395,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) struct gpio_rcar_priv *p; struct resource *io, *irq; struct gpio_chip *gpio_chip; - struct irq_chip *irq_chip; + irq_chip_no_const *irq_chip; struct device *dev = &pdev->dev; const char *name = dev_name(dev); unsigned int npins; diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c index ac8deb01f..f3caa10c7 100644 --- a/drivers/gpio/gpio-vr41xx.c +++ b/drivers/gpio/gpio-vr41xx.c @@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq) printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", maskl, pendl, maskh, pendh); - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); return -EINVAL; } diff --git 
a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 921593133..8e92a9e1c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1675,8 +1675,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) } if (gpiochip->irqchip) { - gpiochip->irqchip->irq_request_resources = NULL; - gpiochip->irqchip->irq_release_resources = NULL; + pax_open_kernel(); + const_cast(gpiochip->irqchip->irq_request_resources) = NULL; + const_cast(gpiochip->irqchip->irq_release_resources) = NULL; + pax_close_kernel(); gpiochip->irqchip = NULL; } @@ -1770,8 +1772,10 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, */ if (!irqchip->irq_request_resources && !irqchip->irq_release_resources) { - irqchip->irq_request_resources = gpiochip_irq_reqres; - irqchip->irq_release_resources = gpiochip_irq_relres; + pax_open_kernel(); + const_cast(irqchip->irq_request_resources) = gpiochip_irq_reqres; + const_cast(irqchip->irq_release_resources) = gpiochip_irq_relres; + pax_close_kernel(); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 05c2850c0..1e71fbc7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1790,7 +1790,7 @@ int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); * amdgpu smumgr functions */ struct amdgpu_smumgr_funcs { - int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype); + int (*check_fw_load_finish)(struct amdgpu_device *adev, enum AMDGPU_UCODE_ID fwtype); int (*request_smu_load_fw)(struct amdgpu_device *adev); int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 6c343a933..540aba5fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -533,7 +533,7 @@ static int amdgpu_atpx_init(void) * look up whether we are the integrated or discrete GPU (all asics). * Returns the client id. 
*/ -static int amdgpu_atpx_get_client_id(struct pci_dev *pdev) +static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev) { if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) return VGA_SWITCHEROO_IGD; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a725bdb70..6e22b1162 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -1175,50 +1175,50 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, } static const struct cgs_ops amdgpu_cgs_ops = { - amdgpu_cgs_gpu_mem_info, - amdgpu_cgs_gmap_kmem, - amdgpu_cgs_gunmap_kmem, - amdgpu_cgs_alloc_gpu_mem, - amdgpu_cgs_free_gpu_mem, - amdgpu_cgs_gmap_gpu_mem, - amdgpu_cgs_gunmap_gpu_mem, - amdgpu_cgs_kmap_gpu_mem, - amdgpu_cgs_kunmap_gpu_mem, - amdgpu_cgs_read_register, - amdgpu_cgs_write_register, - amdgpu_cgs_read_ind_register, - amdgpu_cgs_write_ind_register, - amdgpu_cgs_read_pci_config_byte, - amdgpu_cgs_read_pci_config_word, - amdgpu_cgs_read_pci_config_dword, - amdgpu_cgs_write_pci_config_byte, - amdgpu_cgs_write_pci_config_word, - amdgpu_cgs_write_pci_config_dword, - amdgpu_cgs_get_pci_resource, - amdgpu_cgs_atom_get_data_table, - amdgpu_cgs_atom_get_cmd_table_revs, - amdgpu_cgs_atom_exec_cmd_table, - amdgpu_cgs_create_pm_request, - amdgpu_cgs_destroy_pm_request, - amdgpu_cgs_set_pm_request, - amdgpu_cgs_pm_request_clock, - amdgpu_cgs_pm_request_engine, - amdgpu_cgs_pm_query_clock_limits, - amdgpu_cgs_set_camera_voltages, - amdgpu_cgs_get_firmware_info, - amdgpu_cgs_rel_firmware, - amdgpu_cgs_set_powergating_state, - amdgpu_cgs_set_clockgating_state, - amdgpu_cgs_get_active_displays_info, - amdgpu_cgs_notify_dpm_enabled, - amdgpu_cgs_call_acpi_method, - amdgpu_cgs_query_system_info, + .gpu_mem_info = amdgpu_cgs_gpu_mem_info, + .gmap_kmem = amdgpu_cgs_gmap_kmem, + .gunmap_kmem = amdgpu_cgs_gunmap_kmem, + .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem, + .free_gpu_mem = amdgpu_cgs_free_gpu_mem, + .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem, + .gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem, + .kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem, + .kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem, + .read_register = amdgpu_cgs_read_register, + .write_register = amdgpu_cgs_write_register, + .read_ind_register = amdgpu_cgs_read_ind_register, + .write_ind_register = amdgpu_cgs_write_ind_register, + .read_pci_config_byte = amdgpu_cgs_read_pci_config_byte, + .read_pci_config_word = amdgpu_cgs_read_pci_config_word, + .read_pci_config_dword = amdgpu_cgs_read_pci_config_dword, + .write_pci_config_byte = amdgpu_cgs_write_pci_config_byte, + .write_pci_config_word = amdgpu_cgs_write_pci_config_word, + .write_pci_config_dword = amdgpu_cgs_write_pci_config_dword, + .get_pci_resource = amdgpu_cgs_get_pci_resource, + .atom_get_data_table = amdgpu_cgs_atom_get_data_table, + .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs, + .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table, + .create_pm_request = amdgpu_cgs_create_pm_request, + .destroy_pm_request = amdgpu_cgs_destroy_pm_request, + .set_pm_request = amdgpu_cgs_set_pm_request, + .pm_request_clock = amdgpu_cgs_pm_request_clock, + .pm_request_engine = amdgpu_cgs_pm_request_engine, + .pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits, + .set_camera_voltages = amdgpu_cgs_set_camera_voltages, + .get_firmware_info = amdgpu_cgs_get_firmware_info, + .rel_firmware = amdgpu_cgs_rel_firmware, + .set_powergating_state = amdgpu_cgs_set_powergating_state, + .set_clockgating_state = 
amdgpu_cgs_set_clockgating_state, + .get_active_displays_info = amdgpu_cgs_get_active_displays_info, + .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled, + .call_acpi_method = amdgpu_cgs_call_acpi_method, + .query_system_info = amdgpu_cgs_query_system_info }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { - amdgpu_cgs_add_irq_source, - amdgpu_cgs_irq_get, - amdgpu_cgs_irq_put + .add_irq_source = amdgpu_cgs_add_irq_source, + .irq_get = amdgpu_cgs_irq_get, + .irq_put = amdgpu_cgs_irq_put }; struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 086aa5c9c..b347e0298 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -701,7 +701,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector) return ret; } -static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); @@ -847,7 +847,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) return ret; } -static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1168,7 +1168,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector) amdgpu_connector->use_digital = true; } -static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1438,7 +1438,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) return ret; } -static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); @@ -1552,7 +1552,7 @@ static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector) return 0; } -static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_virtual_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { return MODE_OK; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 57f3e58dc..820e0defe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1096,7 +1096,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
*/ - return dev->open_count == 0; + return local_read(&dev->open_count) == 0; } static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e0890decc..bfa29326c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -722,9 +722,6 @@ static struct drm_driver kms_driver = { .patchlevel = KMS_DRIVER_PATCHLEVEL, }; -static struct drm_driver *driver; -static struct pci_driver *pdriver; - static struct pci_driver amdgpu_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, @@ -757,12 +754,14 @@ static int __init amdgpu_init(void) return -EINVAL; } DRM_INFO("amdgpu kernel modesetting enabled.\n"); - driver = &kms_driver; - pdriver = &amdgpu_kms_pci_driver; - driver->num_ioctls = amdgpu_max_kms_ioctl; + + pax_open_kernel(); + const_cast(kms_driver.num_ioctls) = amdgpu_max_kms_ioctl; + pax_close_kernel(); + amdgpu_register_atpx_handler(); /* let modprobe override vga console setting */ - return drm_pci_init(driver, pdriver); + return drm_pci_init(&kms_driver, &amdgpu_kms_pci_driver); error_sched: amdgpu_fence_slab_fini(); @@ -777,7 +776,7 @@ static int __init amdgpu_init(void) static void __exit amdgpu_exit(void) { amdgpu_amdkfd_fini(); - drm_pci_exit(driver, pdriver); + drm_pci_exit(&kms_driver, &amdgpu_kms_pci_driver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); amd_sched_fence_slab_fini(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 51321e154..3c80c0b23 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -27,6 +27,6 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); -unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); +void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f86c84427..7fe4fde9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -231,9 +231,9 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { - amdgpu_gtt_mgr_init, - amdgpu_gtt_mgr_fini, - amdgpu_gtt_mgr_new, - amdgpu_gtt_mgr_del, - amdgpu_gtt_mgr_debug + .init = amdgpu_gtt_mgr_init, + .takedown = amdgpu_gtt_mgr_fini, + .get_node = amdgpu_gtt_mgr_new, + .put_node = amdgpu_gtt_mgr_del, + .debug = amdgpu_gtt_mgr_debug }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 968c4260d..aceb28c85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -205,7 +205,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, unsigned i; int r = 0; - fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids, + fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *), GFP_KERNEL); if (!fences) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index ee3e04e10..65f743671 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -418,7 +418,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep, (args->alternate_policy == 
KFD_IOC_CACHE_POLICY_COHERENT) ? cache_policy_coherent : cache_policy_noncoherent; - if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm, + if (!dev->dqm->ops->set_cache_memory_policy(dev->dqm, &pdd->qpd, default_policy, alternate_policy, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 3f95f7cb4..0a62dad34 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -298,7 +298,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto device_queue_manager_error; } - if (kfd->dqm->ops.start(kfd->dqm) != 0) { + if (kfd->dqm->ops->start(kfd->dqm) != 0) { dev_err(kfd_device, "Error starting queuen manager for device (%x:%x)\n", kfd->pdev->vendor, kfd->pdev->device); @@ -354,7 +354,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd) BUG_ON(kfd == NULL); if (kfd->init_complete) { - kfd->dqm->ops.stop(kfd->dqm); + kfd->dqm->ops->stop(kfd->dqm); amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL); amd_iommu_free_device(kfd->pdev); @@ -377,7 +377,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd) amd_iommu_set_invalidate_ctx_cb(kfd->pdev, iommu_pasid_shutdown_callback); amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb); - kfd->dqm->ops.start(kfd->dqm); + kfd->dqm->ops->start(kfd->dqm); } return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f49c55119..ad74c7ef3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -242,7 +242,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, BUG_ON(!dqm || !q || !qpd); - mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); + mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); if (mqd == NULL) return -ENOMEM; @@ -288,14 +288,14 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, mutex_lock(&dqm->lock); if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) { - mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); + mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); if (mqd == NULL) { retval = -ENOMEM; goto out; } deallocate_hqd(dqm, q); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA); + mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA); if (mqd == NULL) { retval = -ENOMEM; goto out; @@ -347,7 +347,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) BUG_ON(!dqm || !q || !q->mqd); mutex_lock(&dqm->lock); - mqd = dqm->ops.get_mqd_manager(dqm, + mqd = dqm->ops->get_mqd_manager(dqm, get_mqd_type_from_queue_type(q->properties.type)); if (mqd == NULL) { mutex_unlock(&dqm->lock); @@ -414,7 +414,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm, mutex_lock(&dqm->lock); list_add(&n->list, &dqm->queues); - retval = dqm->ops_asic_specific.register_process(dqm, qpd); + retval = dqm->ops_asic_specific->register_process(dqm, qpd); dqm->processes_count++; @@ -502,7 +502,7 @@ int init_pipelines(struct device_queue_manager *dqm, memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num); - mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); + mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); if (mqd == NULL) { kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem); return -ENOMEM; @@ -635,7 +635,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, struct mqd_manager *mqd; int 
retval; - mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA); + mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA); if (!mqd) return -ENOMEM; @@ -650,7 +650,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); - dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd); + dqm->ops_asic_specific->init_sdma_vm(dqm, q, qpd); retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval != 0) { @@ -712,7 +712,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm) dqm->queue_count = dqm->processes_count = 0; dqm->sdma_queue_count = 0; dqm->active_runlist = false; - retval = dqm->ops_asic_specific.initialize(dqm); + retval = dqm->ops_asic_specific->initialize(dqm); if (retval != 0) goto fail_init_pipelines; @@ -879,7 +879,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (q->properties.type == KFD_QUEUE_TYPE_SDMA) select_sdma_engine_id(q); - mqd = dqm->ops.get_mqd_manager(dqm, + mqd = dqm->ops->get_mqd_manager(dqm, get_mqd_type_from_queue_type(q->properties.type)); if (mqd == NULL) { @@ -887,7 +887,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, return -ENOMEM; } - dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd); + dqm->ops_asic_specific->init_sdma_vm(dqm, q, qpd); retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval != 0) @@ -1060,7 +1060,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, } - mqd = dqm->ops.get_mqd_manager(dqm, + mqd = dqm->ops->get_mqd_manager(dqm, get_mqd_type_from_queue_type(q->properties.type)); if (!mqd) { retval = -ENOMEM; @@ -1149,7 +1149,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm, qpd->sh_mem_ape1_limit = limit >> 16; } - retval = dqm->ops_asic_specific.set_cache_memory_policy( + retval = dqm->ops_asic_specific->set_cache_memory_policy( dqm, qpd, default_policy, @@ -1172,6 +1172,36 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm, return false; } +static const struct device_queue_manager_ops cp_dqm_ops = { + .create_queue = create_queue_cpsch, + .initialize = initialize_cpsch, + .start = start_cpsch, + .stop = stop_cpsch, + .destroy_queue = destroy_queue_cpsch, + .update_queue = update_queue, + .get_mqd_manager = get_mqd_manager_nocpsch, + .register_process = register_process_nocpsch, + .unregister_process = unregister_process_nocpsch, + .uninitialize = uninitialize_nocpsch, + .create_kernel_queue = create_kernel_queue_cpsch, + .destroy_kernel_queue = destroy_kernel_queue_cpsch, + .set_cache_memory_policy = set_cache_memory_policy, +}; + +static const struct device_queue_manager_ops no_cp_dqm_ops = { + .start = start_nocpsch, + .stop = stop_nocpsch, + .create_queue = create_queue_nocpsch, + .destroy_queue = destroy_queue_nocpsch, + .update_queue = update_queue, + .get_mqd_manager = get_mqd_manager_nocpsch, + .register_process = register_process_nocpsch, + .unregister_process = unregister_process_nocpsch, + .initialize = initialize_nocpsch, + .uninitialize = uninitialize_nocpsch, + .set_cache_memory_policy = set_cache_memory_policy, +}; + struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) { struct device_queue_manager *dqm; @@ -1189,33 +1219,11 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) case KFD_SCHED_POLICY_HWS: case 
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: /* initialize dqm for cp scheduling */ - dqm->ops.create_queue = create_queue_cpsch; - dqm->ops.initialize = initialize_cpsch; - dqm->ops.start = start_cpsch; - dqm->ops.stop = stop_cpsch; - dqm->ops.destroy_queue = destroy_queue_cpsch; - dqm->ops.update_queue = update_queue; - dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch; - dqm->ops.register_process = register_process_nocpsch; - dqm->ops.unregister_process = unregister_process_nocpsch; - dqm->ops.uninitialize = uninitialize_nocpsch; - dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; - dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; - dqm->ops.set_cache_memory_policy = set_cache_memory_policy; + dqm->ops = &cp_dqm_ops; break; case KFD_SCHED_POLICY_NO_HWS: /* initialize dqm for no cp scheduling */ - dqm->ops.start = start_nocpsch; - dqm->ops.stop = stop_nocpsch; - dqm->ops.create_queue = create_queue_nocpsch; - dqm->ops.destroy_queue = destroy_queue_nocpsch; - dqm->ops.update_queue = update_queue; - dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch; - dqm->ops.register_process = register_process_nocpsch; - dqm->ops.unregister_process = unregister_process_nocpsch; - dqm->ops.initialize = initialize_nocpsch; - dqm->ops.uninitialize = uninitialize_nocpsch; - dqm->ops.set_cache_memory_policy = set_cache_memory_policy; + dqm->ops = &no_cp_dqm_ops; break; default: BUG(); @@ -1224,15 +1232,15 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) switch (dev->device_info->asic_family) { case CHIP_CARRIZO: - device_queue_manager_init_vi(&dqm->ops_asic_specific); + device_queue_manager_init_vi(dqm); break; case CHIP_KAVERI: - device_queue_manager_init_cik(&dqm->ops_asic_specific); + device_queue_manager_init_cik(dqm); break; } - if (dqm->ops.initialize(dqm) != 0) { + if (dqm->ops->initialize(dqm) != 0) { kfree(dqm); return NULL; } @@ -1244,6 +1252,6 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm) { BUG_ON(!dqm); - dqm->ops.uninitialize(dqm); + dqm->ops->uninitialize(dqm); kfree(dqm); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index a625b9137..411e7d18b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -154,8 +154,8 @@ struct device_queue_manager_asic_ops { */ struct device_queue_manager { - struct device_queue_manager_ops ops; - struct device_queue_manager_asic_ops ops_asic_specific; + const struct device_queue_manager_ops *ops; + const struct device_queue_manager_asic_ops *ops_asic_specific; struct mqd_manager *mqds[KFD_MQD_TYPE_MAX]; struct packet_manager packets; @@ -178,8 +178,8 @@ struct device_queue_manager { bool active_runlist; }; -void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops); -void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops); +void device_queue_manager_init_cik(struct device_queue_manager *dqm); +void device_queue_manager_init_vi(struct device_queue_manager *dqm); void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd); int init_pipelines(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c index c6f435aa8..34fb24774 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c @@ -37,12 
+37,16 @@ static int initialize_cpsch_cik(struct device_queue_manager *dqm); static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); -void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops) +static const struct device_queue_manager_asic_ops cik_dqm_asic_ops = { + .set_cache_memory_policy = set_cache_memory_policy_cik, + .register_process = register_process_cik, + .initialize = initialize_cpsch_cik, + .init_sdma_vm = init_sdma_vm, +}; + +void device_queue_manager_init_cik(struct device_queue_manager *dqm) { - ops->set_cache_memory_policy = set_cache_memory_policy_cik; - ops->register_process = register_process_cik; - ops->initialize = initialize_cpsch_cik; - ops->init_sdma_vm = init_sdma_vm; + dqm->ops_asic_specific = &cik_dqm_asic_ops; } static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c index 7e9cae9d3..fbe7ba5f7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c @@ -39,12 +39,16 @@ static int initialize_cpsch_vi(struct device_queue_manager *dqm); static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); -void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops) +static const struct device_queue_manager_asic_ops vi_dqm_asic_ops = { + .set_cache_memory_policy = set_cache_memory_policy_vi, + .register_process = register_process_vi, + .initialize = initialize_cpsch_vi, + .init_sdma_vm = init_sdma_vm, +}; + +void device_queue_manager_init_vi(struct device_queue_manager *dqm) { - ops->set_cache_memory_policy = set_cache_memory_policy_vi; - ops->register_process = register_process_vi; - ops->initialize = initialize_cpsch_vi; - ops->init_sdma_vm = init_sdma_vm; + dqm->ops_asic_specific = &vi_dqm_asic_ops; } static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index 7f134aa9b..cd34d4a0d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -50,8 +50,8 @@ static void interrupt_wq(struct work_struct *); int kfd_interrupt_init(struct kfd_dev *kfd) { - void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE, - kfd->device_info->ih_ring_entry_size, + void *interrupt_ring = kmalloc_array(kfd->device_info->ih_ring_entry_size, + KFD_INTERRUPT_RING_SIZE, GFP_KERNEL); if (!interrupt_ring) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index d135cd002..1a75f8465 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, switch (type) { case KFD_QUEUE_TYPE_DIQ: case KFD_QUEUE_TYPE_HIQ: - kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm, + kq->mqd = dev->dqm->ops->get_mqd_manager(dev->dqm, KFD_MQD_TYPE_HIQ); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 594053136..a75b0e591 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -62,7 +62,7 @@ struct kernel_queue_ops { void (*submit_packet)(struct 
kernel_queue *kq); void (*rollback_packet)(struct kernel_queue *kq); -}; +} __no_const; struct kernel_queue { struct kernel_queue_ops ops; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index e1fb40b84..44ce88335 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -194,7 +194,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, if (list_empty(&pqm->queues)) { pdd->qpd.pqm = pqm; - dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); + dev->dqm->ops->register_process(dev->dqm, &pdd->qpd); } pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL); @@ -220,7 +220,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; pqn->q = q; pqn->kq = NULL; - retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, + retval = dev->dqm->ops->create_queue(dev->dqm, q, &pdd->qpd, &q->properties.vmid); pr_debug("DQM returned %d for create_queue\n", retval); print_queue(q); @@ -234,7 +234,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, kq->queue->properties.queue_id = *qid; pqn->kq = kq; pqn->q = NULL; - retval = dev->dqm->ops.create_kernel_queue(dev->dqm, + retval = dev->dqm->ops->create_kernel_queue(dev->dqm, kq, &pdd->qpd); break; default: @@ -265,7 +265,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, /* check if queues list is empty unregister process from device */ clear_bit(*qid, pqm->queue_slot_bitmap); if (list_empty(&pqm->queues)) - dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd); + dev->dqm->ops->unregister_process(dev->dqm, &pdd->qpd); return retval; } @@ -306,13 +306,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) if (pqn->kq) { /* destroy kernel queue (DIQ) */ dqm = pqn->kq->dev->dqm; - dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd); + dqm->ops->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd); kernel_queue_uninit(pqn->kq); } if (pqn->q) { dqm = pqn->q->device->dqm; - retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); + retval = dqm->ops->destroy_queue(dqm, &pdd->qpd, pqn->q); if (retval != 0) return retval; @@ -324,7 +324,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) clear_bit(qid, pqm->queue_slot_bitmap); if (list_empty(&pqm->queues)) - dqm->ops.unregister_process(dqm, &pdd->qpd); + dqm->ops->unregister_process(dqm, &pdd->qpd); return retval; } @@ -349,7 +349,7 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, pqn->q->properties.queue_percent = p->queue_percent; pqn->q->properties.priority = p->priority; - retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, + retval = pqn->q->device->dqm->ops->update_queue(pqn->q->device->dqm, pqn->q); if (retval != 0) return retval; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 2028980f1..484984bfd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -240,10 +240,16 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = { /*we don't need an exit table here, because there is only D3 cold on Kv*/ - { phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize }, - { phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize }, + { + 
.isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating, + .tableFunction = cz_tf_uvd_power_gating_initialize + }, + { + .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating, + .tableFunction = cz_tf_vce_power_gating_initialize + }, /* to do { NULL, cz_tf_xdma_power_gating_enable }, */ - { NULL, NULL } + { } }; const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 960424913..751e84f74 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -888,13 +888,13 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, } static const struct phm_master_table_item cz_set_power_state_list[] = { - {NULL, cz_tf_update_sclk_limit}, - {NULL, cz_tf_set_deep_sleep_sclk_threshold}, - {NULL, cz_tf_set_watermark_threshold}, - {NULL, cz_tf_set_enabled_levels}, - {NULL, cz_tf_enable_nb_dpm}, - {NULL, cz_tf_update_low_mem_pstate}, - {NULL, NULL} + { .tableFunction = cz_tf_update_sclk_limit }, + { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold }, + { .tableFunction = cz_tf_set_watermark_threshold }, + { .tableFunction = cz_tf_set_enabled_levels }, + { .tableFunction = cz_tf_enable_nb_dpm }, + { .tableFunction = cz_tf_update_low_mem_pstate }, + { } }; static const struct phm_master_table_header cz_set_power_state_master = { @@ -904,15 +904,15 @@ static const struct phm_master_table_header cz_set_power_state_master = { }; static const struct phm_master_table_item cz_setup_asic_list[] = { - {NULL, cz_tf_reset_active_process_mask}, - {NULL, cz_tf_upload_pptable_to_smu}, - {NULL, cz_tf_init_sclk_limit}, - {NULL, cz_tf_init_uvd_limit}, - {NULL, cz_tf_init_vce_limit}, - {NULL, cz_tf_init_acp_limit}, - {NULL, cz_tf_init_power_gate_state}, - {NULL, cz_tf_init_sclk_threshold}, - {NULL, NULL} + { .tableFunction = cz_tf_reset_active_process_mask }, + { .tableFunction = cz_tf_upload_pptable_to_smu }, + { .tableFunction = cz_tf_init_sclk_limit }, + { .tableFunction = cz_tf_init_uvd_limit }, + { .tableFunction = cz_tf_init_vce_limit }, + { .tableFunction = cz_tf_init_acp_limit }, + { .tableFunction = cz_tf_init_power_gate_state }, + { .tableFunction = cz_tf_init_sclk_threshold }, + { } }; static const struct phm_master_table_header cz_setup_asic_master = { @@ -957,10 +957,10 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, } static const struct phm_master_table_item cz_power_down_asic_list[] = { - {NULL, cz_tf_power_up_display_clock_sys_pll}, - {NULL, cz_tf_clear_nb_dpm_flag}, - {NULL, cz_tf_reset_cc6_data}, - {NULL, NULL} + { .tableFunction = cz_tf_power_up_display_clock_sys_pll }, + { .tableFunction = cz_tf_clear_nb_dpm_flag }, + { .tableFunction = cz_tf_reset_cc6_data }, + { } }; static const struct phm_master_table_header cz_power_down_asic_master = { @@ -1068,8 +1068,8 @@ static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr, } static const struct phm_master_table_item cz_disable_dpm_list[] = { - { NULL, cz_tf_check_for_dpm_enabled}, - {NULL, NULL}, + { .tableFunction = cz_tf_check_for_dpm_enabled }, + { }, }; @@ -1080,13 +1080,13 @@ static const struct phm_master_table_header cz_disable_dpm_master = { }; static const struct phm_master_table_item cz_enable_dpm_list[] = { - { NULL, cz_tf_check_for_dpm_disabled }, - { NULL, cz_tf_program_voting_clients }, - { NULL, cz_tf_start_dpm}, - { NULL, cz_tf_program_bootup_state}, - { NULL, cz_tf_enable_didt }, - { NULL, 
cz_tf_reset_acp_boot_level }, - {NULL, NULL}, + { .tableFunction = cz_tf_check_for_dpm_disabled }, + { .tableFunction = cz_tf_program_voting_clients }, + { .tableFunction = cz_tf_start_dpm }, + { .tableFunction = cz_tf_program_bootup_state }, + { .tableFunction = cz_tf_enable_didt }, + { .tableFunction = cz_tf_reset_acp_boot_level }, + { }, }; static const struct phm_master_table_header cz_enable_dpm_master = { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index 29d0319b2..ac1e41d23 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -506,18 +506,18 @@ static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr, static const struct phm_master_table_item phm_thermal_start_thermal_controller_master_list[] = { - {NULL, tf_smu7_thermal_initialize}, - {NULL, tf_smu7_thermal_set_temperature_range}, - {NULL, tf_smu7_thermal_enable_alert}, - {NULL, smum_thermal_avfs_enable}, + { .tableFunction = tf_smu7_thermal_initialize}, + { .tableFunction = tf_smu7_thermal_set_temperature_range}, + { .tableFunction = tf_smu7_thermal_enable_alert}, + { .tableFunction = smum_thermal_avfs_enable}, /* We should restrict performance levels to low before we halt the SMC. * On the other hand we are still in boot state when we do this * so it would be pointless. * If this assumption changes we have to revisit this table. */ - {NULL, smum_thermal_setup_fan_table}, - {NULL, tf_smu7_thermal_start_smc_fan_control}, - {NULL, NULL} + { .tableFunction = smum_thermal_setup_fan_table}, + { .tableFunction = tf_smu7_thermal_start_smc_fan_control}, + { } }; static const struct phm_master_table_header @@ -529,10 +529,10 @@ phm_thermal_start_thermal_controller_master = { static const struct phm_master_table_item phm_thermal_set_temperature_range_master_list[] = { - {NULL, tf_smu7_thermal_disable_alert}, - {NULL, tf_smu7_thermal_set_temperature_range}, - {NULL, tf_smu7_thermal_enable_alert}, - {NULL, NULL} + { .tableFunction = tf_smu7_thermal_disable_alert}, + { .tableFunction = tf_smu7_thermal_set_temperature_range}, + { .tableFunction = tf_smu7_thermal_enable_alert}, + { } }; static const struct phm_master_table_header diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ffe1f85ce..7017bfce4 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -137,7 +137,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, if (r) return r; - atomic_set(&entity->fence_seq, 0); + atomic_set_unchecked(&entity->fence_seq, 0); entity->fence_context = fence_context_alloc(2); return 0; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 51068e6c3..35b4c71fc 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -44,7 +44,7 @@ struct amd_sched_entity { spinlock_t queue_lock; struct kfifo job_queue; - atomic_t fence_seq; + atomic_unchecked_t fence_seq; uint64_t fence_context; struct fence *dependency; diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 88fc2d662..22aa584da 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -60,7 +60,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, fence->sched = entity->sched; 
spin_lock_init(&fence->lock); - seq = atomic_inc_return(&entity->fence_seq); + seq = atomic_inc_return_unchecked(&entity->fence_seq); fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, &fence->lock, entity->fence_context, seq); fence_init(&fence->finished, &amd_sched_fence_ops_finished, diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 1e0e68f60..04bb82a30 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -213,6 +213,7 @@ static struct drm_driver armada_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, .ioctls = armada_ioctls, + .num_ioctls = ARRAY_SIZE(armada_ioctls), .fops = &armada_drm_fops, }; @@ -333,8 +334,6 @@ static int __init armada_drm_init(void) { int ret; - armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls); - ret = platform_driver_register(&armada_lcd_platform_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 5957c3e65..970039e94 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -775,7 +775,7 @@ static int ast_get_modes(struct drm_connector *connector) return 0; } -static int ast_mode_valid(struct drm_connector *connector, +static enum drm_mode_status ast_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct ast_private *ast = connector->dev->dev_private; diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 0b4e5d117..759442372 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -187,7 +187,7 @@ static int bochs_connector_get_modes(struct drm_connector *connector) return count; } -static int bochs_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct bochs_device *bochs = diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 44d476ea6..3dd649ff1 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1102,7 +1102,7 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, return true; } -static int tc_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { /* Accept any mode */ diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 6efdba499..ce6514acf 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -385,7 +385,7 @@ void drm_unplug_dev(struct drm_device *dev) drm_device_set_unplugged(dev); - if (dev->open_count == 0) { + if (local_read(&dev->open_count) == 0) { drm_put_dev(dev); } mutex_unlock(&drm_global_mutex); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 1fd6eac14..e4206c9d6 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -335,7 +335,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi, struct drm_gem_cma_object *cma_obj) { struct fb_deferred_io *fbdefio; - struct fb_ops *fbops; + fb_ops_no_const *fbops; /* * Per device structures are needed because: @@ -362,7 +362,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi, fbdefio->deferred_io = drm_fb_helper_deferred_io; fbi->fbdefio = fbdefio; fb_deferred_io_init(fbi); - fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap; + 
fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap; return 0; } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index e84faecf5..03aaa9fbe 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -132,7 +132,7 @@ int drm_open(struct inode *inode, struct file *filp) return PTR_ERR(minor); dev = minor->dev; - if (!dev->open_count++) + if (local_inc_return(&dev->open_count) == 1) need_setup = 1; /* share address_space across all char-devs of a single device */ @@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp) return 0; err_undo: - dev->open_count--; + local_dec(&dev->open_count); drm_minor_release(minor); return retcode; } @@ -370,7 +370,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&drm_global_mutex); - DRM_DEBUG("open_count = %d\n", dev->open_count); + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count)); mutex_lock(&dev->filelist_mutex); list_del(&file_priv->lhead); @@ -383,10 +383,10 @@ int drm_release(struct inode *inode, struct file *filp) * Begin inline drm_release */ - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n", task_pid_nr(current), (long)old_encode_dev(file_priv->minor->kdev->devt), - dev->open_count); + local_read(&dev->open_count)); if (drm_core_check_feature(dev, DRIVER_LEGACY)) drm_legacy_lock_release(dev, filp); @@ -424,7 +424,7 @@ int drm_release(struct inode *inode, struct file *filp) * End inline drm_release */ - if (!--dev->open_count) { + if (local_dec_and_test(&dev->open_count)) { drm_lastclose(dev); if (drm_device_is_unplugged(dev)) drm_put_dev(dev); @@ -563,6 +563,11 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) } EXPORT_SYMBOL(drm_poll); +static void drm_pending_event_destroy(struct drm_pending_event *event) +{ + kfree(event); +} + /** * drm_event_reserve_init_locked - init a DRM event and reserve space for it * @dev: DRM device diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c index b404287ab..9acd63c49 100644 --- a/drivers/gpu/drm/drm_global.c +++ b/drivers/gpu/drm/drm_global.c @@ -36,7 +36,7 @@ struct drm_global_item { struct mutex mutex; void *object; - int refcount; + atomic_t refcount; }; static struct drm_global_item glob[DRM_GLOBAL_NUM]; @@ -49,7 +49,7 @@ void drm_global_init(void) struct drm_global_item *item = &glob[i]; mutex_init(&item->mutex); item->object = NULL; - item->refcount = 0; + atomic_set(&item->refcount, 0); } } @@ -59,7 +59,7 @@ void drm_global_release(void) for (i = 0; i < DRM_GLOBAL_NUM; ++i) { struct drm_global_item *item = &glob[i]; BUG_ON(item->object != NULL); - BUG_ON(item->refcount != 0); + BUG_ON(atomic_read(&item->refcount) != 0); } } @@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref) struct drm_global_item *item = &glob[ref->global_type]; mutex_lock(&item->mutex); - if (item->refcount == 0) { + if (atomic_read(&item->refcount) == 0) { ref->object = kzalloc(ref->size, GFP_KERNEL); if (unlikely(ref->object == NULL)) { ret = -ENOMEM; @@ -84,7 +84,7 @@ int drm_global_item_ref(struct drm_global_reference *ref) ref->object = item->object; } - ++item->refcount; + atomic_inc(&item->refcount); mutex_unlock(&item->mutex); return 0; @@ -102,9 +102,9 @@ void drm_global_item_unref(struct drm_global_reference *ref) struct drm_global_item *item = &glob[ref->global_type]; mutex_lock(&item->mutex); - BUG_ON(item->refcount == 0); + BUG_ON(atomic_read(&item->refcount) == 0); BUG_ON(ref->object != item->object); 
- if (--item->refcount == 0) { + if (atomic_dec_and_test(&item->refcount)) { ref->release(ref); item->object = NULL; } diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 867ab8c15..54cf5c26e 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -458,7 +458,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, request = compat_alloc_user_space(nbytes); if (!request) return -EFAULT; - list = (struct drm_buf_desc *) (request + 1); + list = (struct drm_buf_desc __user *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -519,7 +519,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, request = compat_alloc_user_space(nbytes); if (!request) return -EFAULT; - list = (struct drm_buf_pub *) (request + 1); + list = (struct drm_buf_pub __user *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -1074,7 +1074,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, } #endif -static drm_ioctl_compat_t *drm_compat_ioctls[] = { +static drm_ioctl_compat_t drm_compat_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap, @@ -1123,7 +1123,6 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = { long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); - drm_ioctl_compat_t *fn; int ret; /* Assume that ioctls without an explicit compat routine will just @@ -1133,10 +1132,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (nr >= ARRAY_SIZE(drm_compat_ioctls)) return drm_ioctl(filp, cmd, arg); - fn = drm_compat_ioctls[nr]; - - if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + if (drm_compat_ioctls[nr] != NULL) + ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg); else ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 71c347347..12e69dd8f 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -651,7 +651,7 @@ long drm_ioctl(struct file *filp, struct drm_file *file_priv = filp->private_data; struct drm_device *dev; const struct drm_ioctl_desc *ioctl = NULL; - drm_ioctl_t *func; + drm_ioctl_no_const_t func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; char stack_kdata[128]; diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 3ceea9cb9..9fe18a542 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -264,7 +264,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, /* No locking needed since shadow-attach is single-threaded since it may * only be called from the per-driver module init hook. */ if (drm_core_check_feature(dev, DRIVER_LEGACY)) - list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list); + pax_list_add_tail(&dev->legacy_dev_list, (struct list_head *)&driver->legacy_dev_list); return 0; @@ -303,7 +303,10 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) return pci_register_driver(pdriver); /* If not using KMS, fall back to stealth mode manual scanning. 
*/ - INIT_LIST_HEAD(&driver->legacy_dev_list); + pax_open_kernel(); + INIT_LIST_HEAD((struct list_head *)&driver->legacy_dev_list); + pax_close_kernel(); + for (i = 0; pdriver->id_table[i].vendor != 0; i++) { pid = &pdriver->id_table[i]; @@ -426,7 +429,7 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) } else { list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, legacy_dev_list) { - list_del(&dev->legacy_dev_list); + pax_list_del(&dev->legacy_dev_list); drm_put_dev(dev); } } diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index a4d81cf4f..3384497a0 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -493,7 +493,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, goto done; } - if (copy_to_user(&enum_ptr[copied].name, + if (copy_to_user(enum_ptr[copied].name, &prop_enum->name, DRM_PROP_NAME_LEN)) { ret = -EFAULT; goto done; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index f86e7c846..97444a3d3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -526,6 +526,11 @@ static int compare_dev(struct device *dev, void *data) return dev == (struct device *)data; } +static int platform_bus_type_match(struct device *dev, void *data) +{ + return platform_bus_type.match(dev, data); +} + static struct component_match *exynos_drm_match_add(struct device *dev) { struct component_match *match = NULL; @@ -540,7 +545,7 @@ static struct component_match *exynos_drm_match_add(struct device *dev) while ((d = bus_find_device(&platform_bus_type, p, &info->driver->driver, - (void *)platform_bus_type.match))) { + platform_bus_type_match))) { put_device(p); component_match_add(dev, &match, compare_dev, d); p = d; @@ -571,7 +576,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev) struct component_match *match; pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls); match = exynos_drm_match_add(&pdev->dev); if (IS_ERR(match)) @@ -609,7 +613,7 @@ static struct device *exynos_drm_get_dma_device(void) while ((dev = bus_find_device(&platform_bus_type, NULL, &info->driver->driver, - (void *)platform_bus_type.match))) { + platform_bus_type_match))) { put_device(dev); return dev; } @@ -630,7 +634,7 @@ static void exynos_drm_unregister_devices(void) while ((dev = bus_find_device(&platform_bus_type, NULL, &info->driver->driver, - (void *)platform_bus_type.match))) { + platform_bus_type_match))) { put_device(dev); platform_device_unregister(to_platform_device(dev)); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index fbd13fabd..03834dcc6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1163,6 +1163,11 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, return 0; } +static void exynos_g2d_dmabuf_destroy(struct drm_pending_event *event) +{ + kfree(event); +} + int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 38eaa63af..3e42c7c97 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -919,7 +919,7 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) return -EINVAL; } -static int hdmi_mode_valid(struct drm_connector *connector, 
+static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct hdmi_context *hdata = connector_to_hdmi(connector); diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index b837e7a92..cb5a14b7e 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -64,7 +64,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode) REG_WRITE(reg, temp); } -static int cdv_intel_crt_mode_valid(struct drm_connector *connector, +static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { if (mode->flags & DRM_MODE_FLAG_DBLSCAN) diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index c52f9adf5..486d20365 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -505,7 +505,7 @@ static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder) msleep(intel_dp->backlight_off_delay); } -static int +static enum drm_mode_status cdv_intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 563f193fc..f08789985 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -223,7 +223,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector) return ret; } -static int cdv_hdmi_mode_valid(struct drm_connector *connector, +static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { if (mode->clock > 165000) diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index ea733ab5b..57f604552 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -244,7 +244,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector) { } -static int cdv_intel_lvds_mode_valid(struct drm_connector *connector, +static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c index a05c02060..01bfdad77 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c @@ -120,9 +120,14 @@ static void dsi_set_pipe_plane_enable_state(struct drm_device *dev, u32 pipeconf_reg = PIPEACONF; u32 dspcntr_reg = DSPACNTR; - u32 dspcntr = dev_priv->dspcntr[pipe]; + u32 dspcntr; u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; + if (pipe == -1) + return; + + dspcntr = dev_priv->dspcntr[pipe]; + if (pipe) { pipeconf_reg = PIPECCONF; dspcntr_reg = DSPCCNTR; @@ -645,6 +650,9 @@ static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on) if (!gma_power_begin(dev, true)) return; + if (pipe == -1) + return; + if (on) { if (mdfld_get_panel_type(dev, pipe) == TMD_VID) mdfld_dsi_dpi_turn_on(dpi_output, pipe); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index acb3848ef..fe020926e 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -346,7 +346,7 @@ static int mdfld_dsi_connector_get_modes(struct drm_connector *connector) return 0; } -static int mdfld_dsi_connector_mode_valid(struct 
drm_connector *connector, +static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct mdfld_dsi_connector *dsi_connector = diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 8b2eb32ee..78566a80a 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -509,7 +509,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) HDMI_WRITE(HDMI_VIDEO_REG, temp); } -static int oaktrail_hdmi_mode_valid(struct drm_connector *connector, +static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { if (mode->clock > 165000) diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 8f3ca526b..b341aa8a4 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -373,7 +373,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) drm_irq_install(dev, dev->pdev->irq); dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - dev->driver->get_vblank_counter = psb_get_vblank_counter; psb_modeset_init(dev); psb_fbdev_init(dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 2a3b7c684..fbd3fa340 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev, extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, +extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); extern int psb_intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index fd7c91254..817080666 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector) } } -int psb_intel_lvds_mode_valid(struct drm_connector *connector, +enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_psb_private *dev_priv = connector->dev->dev_private; diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index e787d376b..91622fd35 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1158,7 +1158,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode) return; } -static int psb_intel_sdvo_mode_valid(struct drm_connector *connector, +static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector); diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 9798d400d..2d491357b 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -873,7 +873,7 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) priv->dpms = mode; } -static int tda998x_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status 
tda998x_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { /* TDA19988 dotclock can go up to 165MHz */ diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index d91856779..6cfd904c4 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -1250,7 +1250,7 @@ const struct drm_ioctl_desc i810_ioctls[] = { DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), }; -int i810_max_ioctl = ARRAY_SIZE(i810_ioctls); +const int i810_max_ioctl = ARRAY_SIZE(i810_ioctls); /** * Determine if the device really is AGP or not. diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 0be55dc1e..3f3482c2b 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -85,7 +85,11 @@ static int __init i810_init(void) pr_err("drm/i810 does not support SMP\n"); return -EINVAL; } - driver.num_ioctls = i810_max_ioctl; + + pax_open_kernel(); + const_cast(driver.num_ioctls) = i810_max_ioctl; + pax_close_kernel(); + return drm_pci_init(&driver, &i810_pci_driver); } diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h index 93ec5dc4e..204ec9264 100644 --- a/drivers/gpu/drm/i810/i810_drv.h +++ b/drivers/gpu/drm/i810/i810_drv.h @@ -110,8 +110,8 @@ typedef struct drm_i810_private { int page_flipping; wait_queue_head_t irq_queue; - atomic_t irq_received; - atomic_t irq_emitted; + atomic_unchecked_t irq_received; + atomic_unchecked_t irq_emitted; int front_offset; } drm_i810_private_t; @@ -128,7 +128,7 @@ extern int i810_driver_device_is_agp(struct drm_device *dev); extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); extern const struct drm_ioctl_desc i810_ioctls[]; -extern int i810_max_ioctl; +extern const int i810_max_ioctl; #define I810_BASE(reg) ((unsigned long) \ dev_priv->mmio_map->handle) diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 5e6a3013d..b6e143ebc 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -74,7 +74,7 @@ struct intel_dvo_dev_ops { * * \return MODE_OK if the mode is valid, or another MODE_* otherwise. */ - int (*mode_valid)(struct intel_dvo_device *dvo, + enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo, struct drm_display_mode *mode); /* diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 670beebc3..642dceffb 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -50,7 +50,7 @@ #include "i915_vgpu.h" #include "intel_drv.h" -static struct drm_driver driver; +static drm_driver_no_const driver; static unsigned int i915_load_fail_count; @@ -521,7 +521,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
*/ - return dev->open_count == 0; + return local_read(&dev->open_count) == 0; } static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { @@ -1181,8 +1181,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_i915_private *dev_priv; int ret; - if (i915.nuclear_pageflip) + if (i915.nuclear_pageflip) { + pax_open_kernel(); driver.driver_features |= DRIVER_ATOMIC; + pax_close_kernel(); + } ret = -ENOMEM; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); @@ -2563,7 +2566,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), }; -static struct drm_driver driver = { +static drm_driver_no_const driver __read_only = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. */ diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 0c400f852..4f2bba64a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1187,12 +1187,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) static int validate_exec_list(struct drm_device *dev, struct drm_i915_gem_exec_object2 *exec, - int count) + unsigned int count) { unsigned relocs_total = 0; unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry); unsigned invalid_flags; - int i; + unsigned int i; /* INTERNAL flags must not overlap with external ones */ BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0bb4232f6..d1dfde01e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3166,8 +3166,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) /* GMADR is the PCI mmio aperture into the global GTT. 
*/ DRM_INFO("Memory usable by graphics device = %lluM\n", ggtt->base.total >> 20); - DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20); - DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20); + DRM_DEBUG_DRIVER("GMADR size = %lluM\n", ggtt->mappable_end >> 20); + DRM_DEBUG_DRIVER("GTT stolen size = %lluM\n", ggtt->stolen_size >> 20); #ifdef CONFIG_INTEL_IOMMU if (intel_iommu_gfx_mapped) DRM_INFO("VT-d active for gfx access\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index ec78be2f8..fc20a5628 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -437,12 +437,12 @@ struct i915_ggtt { struct i915_address_space base; struct io_mapping mappable; /* Mapping to our CPU mappable region */ - size_t stolen_size; /* Total size of stolen memory */ + u64 stolen_size; /* Total size of stolen memory */ size_t stolen_usable_size; /* Total size minus BIOS reserved */ size_t stolen_reserved_base; size_t stolen_reserved_size; u64 mappable_end; /* End offset that we can CPU map */ - phys_addr_t mappable_base; /* PA of our GMADR */ + u64 mappable_base; /* PA of our GMADR */ /** "Graphics Stolen Memory" holds the global PTEs */ void __iomem *gsm; diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 97f3a5640..32c712efa 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -65,7 +65,7 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd, (unsigned long)request); } -static drm_ioctl_compat_t *i915_compat_ioctls[] = { +static drm_ioctl_compat_t i915_compat_ioctls[] = { [DRM_I915_GETPARAM] = compat_i915_getparam, }; @@ -81,17 +81,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = { long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); - drm_ioctl_compat_t *fn = NULL; int ret; if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END) return drm_compat_ioctl(filp, cmd, arg); - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) - fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; - - if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE]) + ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg); else ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3fc286cd1..4c19f2557 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -4511,15 +4511,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv) INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, i915_hangcheck_elapsed); + pax_open_kernel(); if (IS_GEN2(dev_priv)) { /* Gen2 doesn't have a hardware frame counter */ dev->max_vblank_count = 0; - dev->driver->get_vblank_counter = drm_vblank_no_hw_counter; + const_cast(dev->driver->get_vblank_counter) = drm_vblank_no_hw_counter; } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ - dev->driver->get_vblank_counter = g4x_get_vblank_counter; + const_cast(dev->driver->get_vblank_counter) = g4x_get_vblank_counter; } else { - dev->driver->get_vblank_counter = i915_get_vblank_counter; + const_cast(dev->driver->get_vblank_counter) = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ } @@ -4531,32 +4532,32 @@ void 
intel_irq_init(struct drm_i915_private *dev_priv) if (!IS_GEN2(dev_priv)) dev->vblank_disable_immediate = true; - dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; - dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; + const_cast(dev->driver->get_vblank_timestamp) = i915_get_vblank_timestamp; + const_cast(dev->driver->get_scanout_position) = i915_get_crtc_scanoutpos; if (IS_CHERRYVIEW(dev_priv)) { - dev->driver->irq_handler = cherryview_irq_handler; - dev->driver->irq_preinstall = cherryview_irq_preinstall; - dev->driver->irq_postinstall = cherryview_irq_postinstall; - dev->driver->irq_uninstall = cherryview_irq_uninstall; - dev->driver->enable_vblank = valleyview_enable_vblank; - dev->driver->disable_vblank = valleyview_disable_vblank; + const_cast(dev->driver->irq_handler) = cherryview_irq_handler; + const_cast(dev->driver->irq_preinstall) = cherryview_irq_preinstall; + const_cast(dev->driver->irq_postinstall) = cherryview_irq_postinstall; + const_cast(dev->driver->irq_uninstall) = cherryview_irq_uninstall; + const_cast(dev->driver->enable_vblank) = valleyview_enable_vblank; + const_cast(dev->driver->disable_vblank) = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else if (IS_VALLEYVIEW(dev_priv)) { - dev->driver->irq_handler = valleyview_irq_handler; - dev->driver->irq_preinstall = valleyview_irq_preinstall; - dev->driver->irq_postinstall = valleyview_irq_postinstall; - dev->driver->irq_uninstall = valleyview_irq_uninstall; - dev->driver->enable_vblank = valleyview_enable_vblank; - dev->driver->disable_vblank = valleyview_disable_vblank; + const_cast(dev->driver->irq_handler) = valleyview_irq_handler; + const_cast(dev->driver->irq_preinstall) = valleyview_irq_preinstall; + const_cast(dev->driver->irq_postinstall) = valleyview_irq_postinstall; + const_cast(dev->driver->irq_uninstall) = valleyview_irq_uninstall; + const_cast(dev->driver->enable_vblank) = valleyview_enable_vblank; + const_cast(dev->driver->disable_vblank) = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else if (INTEL_INFO(dev_priv)->gen >= 8) { - dev->driver->irq_handler = gen8_irq_handler; - dev->driver->irq_preinstall = gen8_irq_reset; - dev->driver->irq_postinstall = gen8_irq_postinstall; - dev->driver->irq_uninstall = gen8_irq_uninstall; - dev->driver->enable_vblank = gen8_enable_vblank; - dev->driver->disable_vblank = gen8_disable_vblank; + const_cast(dev->driver->irq_handler) = gen8_irq_handler; + const_cast(dev->driver->irq_preinstall) = gen8_irq_reset; + const_cast(dev->driver->irq_postinstall) = gen8_irq_postinstall; + const_cast(dev->driver->irq_uninstall) = gen8_irq_uninstall; + const_cast(dev->driver->enable_vblank) = gen8_enable_vblank; + const_cast(dev->driver->disable_vblank) = gen8_disable_vblank; if (IS_BROXTON(dev)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev)) @@ -4564,35 +4565,36 @@ void intel_irq_init(struct drm_i915_private *dev_priv) else dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else if (HAS_PCH_SPLIT(dev)) { - dev->driver->irq_handler = ironlake_irq_handler; - dev->driver->irq_preinstall = ironlake_irq_reset; - dev->driver->irq_postinstall = ironlake_irq_postinstall; - dev->driver->irq_uninstall = ironlake_irq_uninstall; - dev->driver->enable_vblank = ironlake_enable_vblank; - dev->driver->disable_vblank = ironlake_disable_vblank; + const_cast(dev->driver->irq_handler) = ironlake_irq_handler; + const_cast(dev->driver->irq_preinstall) = 
ironlake_irq_reset; + const_cast(dev->driver->irq_postinstall) = ironlake_irq_postinstall; + const_cast(dev->driver->irq_uninstall) = ironlake_irq_uninstall; + const_cast(dev->driver->enable_vblank) = ironlake_enable_vblank; + const_cast(dev->driver->disable_vblank) = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { if (IS_GEN2(dev_priv)) { - dev->driver->irq_preinstall = i8xx_irq_preinstall; - dev->driver->irq_postinstall = i8xx_irq_postinstall; - dev->driver->irq_handler = i8xx_irq_handler; - dev->driver->irq_uninstall = i8xx_irq_uninstall; + const_cast(dev->driver->irq_preinstall) = i8xx_irq_preinstall; + const_cast(dev->driver->irq_postinstall) = i8xx_irq_postinstall; + const_cast(dev->driver->irq_handler) = i8xx_irq_handler; + const_cast(dev->driver->irq_uninstall) = i8xx_irq_uninstall; } else if (IS_GEN3(dev_priv)) { - dev->driver->irq_preinstall = i915_irq_preinstall; - dev->driver->irq_postinstall = i915_irq_postinstall; - dev->driver->irq_uninstall = i915_irq_uninstall; - dev->driver->irq_handler = i915_irq_handler; + const_cast(dev->driver->irq_preinstall) = i915_irq_preinstall; + const_cast(dev->driver->irq_postinstall) = i915_irq_postinstall; + const_cast(dev->driver->irq_uninstall) = i915_irq_uninstall; + const_cast(dev->driver->irq_handler) = i915_irq_handler; } else { - dev->driver->irq_preinstall = i965_irq_preinstall; - dev->driver->irq_postinstall = i965_irq_postinstall; - dev->driver->irq_uninstall = i965_irq_uninstall; - dev->driver->irq_handler = i965_irq_handler; + const_cast(dev->driver->irq_preinstall) = i965_irq_preinstall; + const_cast(dev->driver->irq_postinstall) = i965_irq_postinstall; + const_cast(dev->driver->irq_uninstall) = i965_irq_uninstall; + const_cast(dev->driver->irq_handler) = i965_irq_handler; } if (I915_HAS_HOTPLUG(dev_priv)) dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - dev->driver->enable_vblank = i915_enable_vblank; - dev->driver->disable_vblank = i915_disable_vblank; + const_cast(dev->driver->enable_vblank) = i915_enable_vblank; + const_cast(dev->driver->disable_vblank) = i915_disable_vblank; } + pax_close_kernel(); } /** diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b9be8a614..05d5f12c5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -16133,13 +16133,13 @@ struct intel_quirk { int subsystem_vendor; int subsystem_device; void (*hook)(struct drm_device *dev); -}; +} __do_const; /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ struct intel_dmi_quirk { void (*hook)(struct drm_device *dev); - const struct dmi_system_id (*dmi_id_list)[]; -}; + const struct dmi_system_id *dmi_id_list; +} __do_const; static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) { @@ -16147,18 +16147,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) return 1; } -static const struct intel_dmi_quirk intel_dmi_quirks[] = { +static const struct dmi_system_id intel_dmi_quirks_table[] = { { - .dmi_id_list = &(const struct dmi_system_id[]) { - { - .callback = intel_dmi_reverse_brightness, - .ident = "NCR Corporation", - .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, ""), - }, - }, - { } /* terminating entry */ + .callback = intel_dmi_reverse_brightness, + .ident = "NCR Corporation", + .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, ""), }, + }, + { } /* terminating entry */ +}; + +static 
const struct intel_dmi_quirk intel_dmi_quirks[] = { + { + .dmi_id_list = intel_dmi_quirks_table, .hook = quirk_invert_brightness, }, }; @@ -16241,7 +16243,7 @@ static void intel_init_quirks(struct drm_device *dev) q->hook(dev); } for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { - if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) + if (dmi_check_system(intel_dmi_quirks[i].dmi_id_list) != 0) intel_dmi_quirks[i].hook(dev); } } diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 9672b579f..d25a507c0 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -226,7 +226,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, if (imxdrm->pipes >= MAX_CRTC) return -EINVAL; - if (imxdrm->drm->open_count) + if (local_read(&imxdrm->drm->open_count)) return -EBUSY; imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL); diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 8fc088843..6dad243cd 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -252,7 +252,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector) return ret; } -static int imx_tve_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status imx_tve_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct imx_tve *tve = con_to_tve(connector); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 0e8c4d9af..f9d98b70a 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1226,7 +1226,7 @@ static int mtk_hdmi_conn_get_modes(struct drm_connector *conn) return ret; } -static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn, +static enum drm_mode_status mtk_hdmi_conn_mode_valid(struct drm_connector *conn, struct drm_display_mode *mode) { struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn); diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 25b2a1a42..58ae09ba9 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -92,7 +92,10 @@ static struct pci_driver mga_pci_driver = { static int __init mga_init(void) { - driver.num_ioctls = mga_max_ioctl; + pax_open_kernel(); + const_cast(driver.num_ioctls) = mga_max_ioctl; + pax_close_kernel(); + return drm_pci_init(&driver, &mga_pci_driver); } diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h index bb312339e..75b39f0fb 100644 --- a/drivers/gpu/drm/mga/mga_drv.h +++ b/drivers/gpu/drm/mga/mga_drv.h @@ -122,9 +122,9 @@ typedef struct drm_mga_private { u32 clear_cmd; u32 maccess; - atomic_t vbl_received; /**< Number of vblanks received. */ + atomic_unchecked_t vbl_received; /**< Number of vblanks received. 
*/ wait_queue_head_t fence_queue; - atomic_t last_fence_retired; + atomic_unchecked_t last_fence_retired; u32 next_fence_to_post; unsigned int fb_cpp; @@ -152,7 +152,7 @@ typedef struct drm_mga_private { } drm_mga_private_t; extern const struct drm_ioctl_desc mga_ioctls[]; -extern int mga_max_ioctl; +extern const int mga_max_ioctl; /* mga_dma.c */ extern int mga_dma_bootstrap(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c index 729bfd56b..14bae782b 100644 --- a/drivers/gpu/drm/mga/mga_ioc32.c +++ b/drivers/gpu/drm/mga/mga_ioc32.c @@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd, return 0; } -drm_ioctl_compat_t *mga_compat_ioctls[] = { +drm_ioctl_compat_t mga_compat_ioctls[] = { [DRM_MGA_INIT] = compat_mga_init, [DRM_MGA_GETPARAM] = compat_mga_getparam, [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap, @@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = { long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); - drm_ioctl_compat_t *fn = NULL; int ret; if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) - fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; - - if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE]) + ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg); else ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c index 693ba708c..465bcfc50 100644 --- a/drivers/gpu/drm/mga/mga_irq.c +++ b/drivers/gpu/drm/mga/mga_irq.c @@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (pipe != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } @@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg) /* VBLANK interrupt */ if (status & MGA_VLINEPEN) { MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); - atomic_inc(&dev_priv->vbl_received); + atomic_inc_unchecked(&dev_priv->vbl_received); drm_handle_vblank(dev, 0); handled = 1; } @@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg) if ((prim_start & ~0x03) != (prim_end & ~0x03)) MGA_WRITE(MGA_PRIMEND, prim_end); - atomic_inc(&dev_priv->last_fence_retired); + atomic_inc_unchecked(&dev_priv->last_fence_retired); wake_up(&dev_priv->fence_queue); handled = 1; } @@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence) * using fences. 
*/ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ, - (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired)) - *sequence) <= (1 << 23))); *sequence = cur_fence; diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index 792f92449..aeb1334d3 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -1099,4 +1099,4 @@ const struct drm_ioctl_desc mga_ioctls[] = { DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), }; -int mga_max_ioctl = ARRAY_SIZE(mga_ioctls); +const int mga_max_ioctl = ARRAY_SIZE(mga_ioctls); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 6b21cb27e..90c28760e 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1574,7 +1574,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode, #define MODE_BANDWIDTH MODE_BAD -static int mga_vga_mode_valid(struct drm_connector *connector, +static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index c8d1f19c9..10d49d43c 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -306,7 +306,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) return num; } -static int dsi_mgr_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { int id = dsi_mgr_connector_get_id(connector); diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c index 5960628ce..fe2e4de80 100644 --- a/drivers/gpu/drm/msm/edp/edp_connector.c +++ b/drivers/gpu/drm/msm/edp/edp_connector.c @@ -63,7 +63,7 @@ static int edp_connector_get_modes(struct drm_connector *connector) return ret; } -static int edp_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status edp_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct edp_connector *edp_connector = to_edp_connector(connector); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index a2515b466..cec0906da 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -377,7 +377,7 @@ static int msm_hdmi_connector_get_modes(struct drm_connector *connector) return ret; } -static int msm_hdmi_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status msm_hdmi_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index ed7143d35..527b26ab1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -647,9 +647,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) dev->mode_config.max_width = config->hw->lm.max_width; dev->mode_config.max_height = config->hw->lm.max_height; - dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp; - dev->driver->get_scanout_position = mdp5_get_scanoutpos; - 
dev->driver->get_vblank_counter = mdp5_get_vblank_counter; + pax_open_kernel(); + const_cast(dev->driver->get_vblank_timestamp) = mdp5_get_vblank_timestamp; + const_cast(dev->driver->get_scanout_position) = mdp5_get_scanoutpos; + const_cast(dev->driver->get_vblank_counter) = mdp5_get_vblank_counter; + pax_close_kernel(); + dev->max_vblank_count = 0xffffffff; dev->vblank_disable_immediate = true; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 193573d19..3f62e5398 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -194,7 +194,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); } -static int nouveau_dsm_get_client_id(struct pci_dev *pdev) +static enum vga_switcheroo_client_id nouveau_dsm_get_client_id(struct pci_dev *pdev) { /* easy option one - intel vendor ID means Integrated */ if (pdev->vendor == PCI_VENDOR_ID_INTEL) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 23ffe8571..f87a82de0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -967,7 +967,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_table { const char id; int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); -}; +} __no_const; #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index c1084088f..575750a96 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -862,7 +862,7 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi) return 112000; } -static int +static enum drm_mode_status nouveau_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 3100fd88a..6d573f7cb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -80,9 +80,8 @@ MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1 int nouveau_runtime_pm = -1; module_param_named(runpm, nouveau_runtime_pm, int, 0400); -static struct drm_driver driver_stub; static struct drm_driver driver_pci; -static struct drm_driver driver_platform; +static drm_driver_no_const driver_platform __read_only; static u64 nouveau_pci_name(struct pci_dev *pdev) @@ -942,7 +941,7 @@ nouveau_driver_fops = { }; static struct drm_driver -driver_stub = { +driver_pci = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_KMS_LEGACY_CONTEXT, @@ -954,6 +953,8 @@ driver_stub = { .postclose = nouveau_drm_postclose, .lastclose = nouveau_vga_lastclose, + .set_busid = drm_pci_set_busid, + #if defined(CONFIG_DEBUG_FS) .debugfs_init = nouveau_drm_debugfs_init, .debugfs_cleanup = nouveau_drm_debugfs_cleanup, @@ -1086,9 +1087,10 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func, static int __init nouveau_drm_init(void) { - driver_pci = driver_stub; - driver_pci.set_busid = drm_pci_set_busid; - driver_platform = driver_stub; + pax_open_kernel(); + driver_platform = driver_pci; + driver_platform.set_busid = NULL; + pax_close_kernel(); nouveau_display_options(); diff --git 
a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 822a0212c..a131e6668 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -124,7 +124,6 @@ struct nouveau_drm { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; - atomic_t validate_sequence; int (*move)(struct nouveau_channel *, struct ttm_buffer_object *, struct ttm_mem_reg *, struct ttm_mem_reg *); diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c index 462679a8f..88e32a794 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c @@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); - drm_ioctl_compat_t *fn = NULL; + drm_ioctl_compat_t fn = NULL; int ret; if (nr < DRM_COMMAND_BASE) diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index a6dbe8258..ec4668a41 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -107,10 +107,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func nouveau_vram_manager = { - nouveau_vram_manager_init, - nouveau_vram_manager_fini, - nouveau_vram_manager_new, - nouveau_vram_manager_del, + .init = nouveau_vram_manager_init, + .takedown = nouveau_vram_manager_fini, + .get_node = nouveau_vram_manager_new, + .put_node = nouveau_vram_manager_del, }; static int @@ -184,11 +184,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) } const struct ttm_mem_type_manager_func nouveau_gart_manager = { - nouveau_gart_manager_init, - nouveau_gart_manager_fini, - nouveau_gart_manager_new, - nouveau_gart_manager_del, - nouveau_gart_manager_debug + .init = nouveau_gart_manager_init, + .takedown = nouveau_gart_manager_fini, + .get_node = nouveau_gart_manager_new, + .put_node = nouveau_gart_manager_del, + .debug = nouveau_gart_manager_debug }; /*XXX*/ @@ -257,11 +257,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) } const struct ttm_mem_type_manager_func nv04_gart_manager = { - nv04_gart_manager_init, - nv04_gart_manager_fini, - nv04_gart_manager_new, - nv04_gart_manager_del, - nv04_gart_manager_debug + .init = nv04_gart_manager_init, + .takedown = nv04_gart_manager_fini, + .get_node = nv04_gart_manager_new, + .put_node = nv04_gart_manager_del, + .debug = nv04_gart_manager_debug }; int diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index c6a180a0c..c5c7855d5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -73,7 +73,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev) * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
*/ - return dev->open_count == 0; + return local_read(&dev->open_count) == 0; } static const struct vga_switcheroo_client_ops diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 7deb81b6d..bb22bc27a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -154,11 +154,16 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name) return (void *)fw; } +static void shadow_fw_fini(void *fw) +{ + release_firmware(fw); +} + static const struct nvbios_source shadow_fw = { .name = "firmware", .init = shadow_fw_init, - .fini = (void(*)(void *))release_firmware, + .fini = shadow_fw_fini, .read = shadow_fw_read, .rw = false, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c index 9b91da09d..b3fa90d37 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c @@ -111,11 +111,16 @@ platform_init(struct nvkm_bios *bios, const char *name) return ERR_PTR(ret); } +static void platform_fini(void *data) +{ + kfree(data); +} + const struct nvbios_source nvbios_platform = { .name = "PLATFORM", .init = platform_init, - .fini = (void(*)(void *))kfree, + .fini = platform_fini, .read = pcirom_read, .rw = true, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h index a9a8a0e10..2ad6d622e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h @@ -226,8 +226,8 @@ struct gm200_secboot_func { int gm200_secboot_init(struct nvkm_secboot *); void *gm200_secboot_dtor(struct nvkm_secboot *); -int gm200_secboot_reset(struct nvkm_secboot *, u32); -int gm200_secboot_start(struct nvkm_secboot *, u32); +int gm200_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon); +int gm200_secboot_start(struct nvkm_secboot *, enum nvkm_secboot_falcon); int gm20x_secboot_prepare_blobs(struct gm200_secboot *); diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 8dcdd7cf9..0e37527d1 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -112,12 +112,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev) if (dssdev->name == NULL) dssdev->name = dssdev->alias; + pax_open_kernel(); if (drv && drv->get_resolution == NULL) - drv->get_resolution = omapdss_default_get_resolution; + const_cast(drv->get_resolution) = omapdss_default_get_resolution; if (drv && drv->get_recommended_bpp == NULL) - drv->get_recommended_bpp = omapdss_default_get_recommended_bpp; + const_cast(drv->get_recommended_bpp) = omapdss_default_get_recommended_bpp; if (drv && drv->get_timings == NULL) - drv->get_timings = omapdss_default_get_timings; + const_cast(drv->get_timings) = omapdss_default_get_timings; + pax_close_kernel(); mutex_lock(&panel_list_mutex); list_add_tail(&dssdev->panel_list, &panel_list); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 137fe690a..bbfc18cf2 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -201,7 +201,7 @@ static int omap_connector_get_modes(struct drm_connector *connector) return n; } -static int omap_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status omap_connector_mode_valid(struct drm_connector 
*connector, struct drm_display_mode *mode) { struct omap_connector *omap_connector = to_omap_connector(connector); diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 04270f5d1..7688e90a6 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, int ret; mutex_lock(&qdev->async_io_mutex); - irq_num = atomic_read(&qdev->irq_received_io_cmd); + irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd); if (qdev->last_sent_io_cmd > irq_num) { if (intr) ret = wait_event_interruptible_timeout(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); else ret = wait_event_timeout(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); /* 0 is timeout, just bail the "hw" has gone away */ if (ret <= 0) goto out; - irq_num = atomic_read(&qdev->irq_received_io_cmd); + irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd); } outb(val, addr); qdev->last_sent_io_cmd = irq_num + 1; if (intr) ret = wait_event_interruptible_timeout(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); else ret = wait_event_timeout(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); out: if (ret > 0) ret = 0; diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 6911b8c44..89d686707 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct qxl_device *qdev = node->minor->dev->dev_private; - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received)); - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display)); - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor)); - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd)); + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received)); + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display)); + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor)); + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd)); seq_printf(m, "%d\n", qdev->irq_received_error); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index a61c0d460..fab7be1cc 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -880,7 +880,7 @@ static int qxl_conn_get_modes(struct drm_connector *connector) return ret; } -static int qxl_conn_mode_valid(struct drm_connector *connector, +static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *ddev = connector->dev; diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 460bbceae..abeb896bc 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -37,7 +37,7 @@ #include "qxl_drv.h" #include "qxl_object.h" -extern int qxl_max_ioctls; +extern const int qxl_max_ioctls; static const struct pci_device_id pciidlist[] = { { 0x1b36, 0x100, PCI_ANY_ID, 
PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0 }, @@ -277,7 +277,11 @@ static int __init qxl_init(void) if (qxl_modeset == 0) return -EINVAL; - qxl_driver.num_ioctls = qxl_max_ioctls; + + pax_open_kernel(); + const_cast(qxl_driver.num_ioctls) = qxl_max_ioctls; + pax_close_kernel(); + return drm_pci_init(&qxl_driver, &qxl_pci_driver); } diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 5f3e5ad99..a9e5887fc 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -293,10 +293,10 @@ struct qxl_device { unsigned int last_sent_io_cmd; /* interrupt handling */ - atomic_t irq_received; - atomic_t irq_received_display; - atomic_t irq_received_cursor; - atomic_t irq_received_io_cmd; + atomic_unchecked_t irq_received; + atomic_unchecked_t irq_received_display; + atomic_unchecked_t irq_received_cursor; + atomic_unchecked_t irq_received_io_cmd; unsigned irq_received_error; wait_queue_head_t display_event; wait_queue_head_t cursor_event; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 5a4c8c492..faf4c73b3 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -183,7 +183,7 @@ static int qxl_process_single_command(struct qxl_device *qdev, /* TODO copy slow path code from i915 */ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); - unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); + unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size); { struct qxl_drawable *draw = fb_cmd; @@ -203,7 +203,7 @@ static int qxl_process_single_command(struct qxl_device *qdev, struct drm_qxl_reloc reloc; if (copy_from_user(&reloc, - &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], + &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i], sizeof(reloc))) { ret = -EFAULT; goto out_free_bos; @@ -282,10 +282,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { - struct drm_qxl_command *commands = - (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; + struct drm_qxl_command __user *commands = + (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands; - if (copy_from_user(&user_cmd, &commands[cmd_num], + if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num], sizeof(user_cmd))) return -EFAULT; @@ -439,4 +439,4 @@ const struct drm_ioctl_desc qxl_ioctls[] = { DRM_AUTH), }; -int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls); +const int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls); diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index 0bf1e20c6..42a73107a 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg) if (!pending) return IRQ_NONE; - atomic_inc(&qdev->irq_received); + atomic_inc_unchecked(&qdev->irq_received); if (pending & QXL_INTERRUPT_DISPLAY) { - atomic_inc(&qdev->irq_received_display); + atomic_inc_unchecked(&qdev->irq_received_display); wake_up_all(&qdev->display_event); qxl_queue_garbage_collect(qdev, false); } if (pending & QXL_INTERRUPT_CURSOR) { - atomic_inc(&qdev->irq_received_cursor); + 
atomic_inc_unchecked(&qdev->irq_received_cursor); wake_up_all(&qdev->cursor_event); } if (pending & QXL_INTERRUPT_IO_CMD) { - atomic_inc(&qdev->irq_received_io_cmd); + atomic_inc_unchecked(&qdev->irq_received_io_cmd); wake_up_all(&qdev->io_cmd_event); } if (pending & QXL_INTERRUPT_ERROR) { @@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev) init_waitqueue_head(&qdev->io_cmd_event); INIT_WORK(&qdev->client_monitors_config_work, qxl_client_monitors_config_work_func); - atomic_set(&qdev->irq_received, 0); - atomic_set(&qdev->irq_received_display, 0); - atomic_set(&qdev->irq_received_cursor, 0); - atomic_set(&qdev->irq_received_io_cmd, 0); + atomic_set_unchecked(&qdev->irq_received, 0); + atomic_set_unchecked(&qdev->irq_received_display, 0); + atomic_set_unchecked(&qdev->irq_received_cursor, 0); + atomic_set_unchecked(&qdev->irq_received_io_cmd, 0); qdev->irq_received_error = 0; ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq); qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index e26c82db9..7151aced9 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev) } } -static struct vm_operations_struct qxl_ttm_vm_ops; +static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only; static const struct vm_operations_struct *ttm_vm_ops; static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma) return r; if (unlikely(ttm_vm_ops == NULL)) { ttm_vm_ops = vma->vm_ops; + pax_open_kernel(); qxl_ttm_vm_ops = *ttm_vm_ops; qxl_ttm_vm_ops.fault = &qxl_ttm_fault; + pax_close_kernel(); } vma->vm_ops = &qxl_ttm_vm_ops; return 0; @@ -475,25 +477,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data) static int qxl_ttm_debugfs_init(struct qxl_device *qdev) { #if defined(CONFIG_DEBUG_FS) - static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES]; - static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32]; - unsigned i; - - for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) { - if (i == 0) - sprintf(qxl_mem_types_names[i], "qxl_mem_mm"); - else - sprintf(qxl_mem_types_names[i], "qxl_surf_mm"); - qxl_mem_types_list[i].name = qxl_mem_types_names[i]; - qxl_mem_types_list[i].show = &qxl_mm_dump_table; - qxl_mem_types_list[i].driver_features = 0; - if (i == 0) - qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv; - else - qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv; + static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = { + { + .name = "qxl_mem_mm", + .show = &qxl_mm_dump_table, + }, + { + .name = "qxl_surf_mm", + .show = &qxl_mm_dump_table, + } + }; - } - return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); + pax_open_kernel(); + const_cast(qxl_mem_types_list[0].data) = qdev->mman.bdev.man[TTM_PL_VRAM].priv; + const_cast(qxl_mem_types_list[1].data) = qdev->mman.bdev.man[TTM_PL_PRIV].priv; + pax_close_kernel(); + + return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES); #else return 0; #endif diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index 9e8f6163c..401567440 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) /* GH: Simple idle check. 
*/ - atomic_set(&dev_priv->idle_count, 0); + atomic_set_unchecked(&dev_priv->idle_count, 0); /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index a982be57d..a6f2cf8c8 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -94,7 +94,9 @@ static struct pci_driver r128_pci_driver = { static int __init r128_init(void) { - driver.num_ioctls = r128_max_ioctl; + pax_open_kernel(); + const_cast(driver.num_ioctls) = r128_max_ioctl; + pax_close_kernel(); return drm_pci_init(&driver, &r128_pci_driver); } diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h index 09143b840..86c8394cd 100644 --- a/drivers/gpu/drm/r128/r128_drv.h +++ b/drivers/gpu/drm/r128/r128_drv.h @@ -93,14 +93,14 @@ typedef struct drm_r128_private { int is_pci; unsigned long cce_buffers_offset; - atomic_t idle_count; + atomic_unchecked_t idle_count; int page_flipping; int current_page; u32 crtc_offset; u32 crtc_offset_cntl; - atomic_t vbl_received; + atomic_unchecked_t vbl_received; u32 color_fmt; unsigned int front_offset; @@ -135,7 +135,7 @@ typedef struct drm_r128_buf_priv { } drm_r128_buf_priv_t; extern const struct drm_ioctl_desc r128_ioctls[]; -extern int r128_max_ioctl; +extern const int r128_max_ioctl; /* r128_cce.c */ extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c index 663f38c63..ec159a123 100644 --- a/drivers/gpu/drm/r128/r128_ioc32.c +++ b/drivers/gpu/drm/r128/r128_ioc32.c @@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd, return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); } -drm_ioctl_compat_t *r128_compat_ioctls[] = { +drm_ioctl_compat_t r128_compat_ioctls[] = { [DRM_R128_INIT] = compat_r128_init, [DRM_R128_DEPTH] = compat_r128_depth, [DRM_R128_STIPPLE] = compat_r128_stipple, @@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = { long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); - drm_ioctl_compat_t *fn = NULL; int ret; if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) - fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; - - if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE]) + ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg); else ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c index 9730f4918..920e9bfe6 100644 --- a/drivers/gpu/drm/r128/r128_irq.c +++ b/drivers/gpu/drm/r128/r128_irq.c @@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (pipe != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } irqreturn_t r128_driver_irq_handler(int irq, void *arg) @@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg) /* VBLANK interrupt */ if (status & R128_CRTC_VBLANK_INT) { R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); - atomic_inc(&dev_priv->vbl_received); + atomic_inc_unchecked(&dev_priv->vbl_received); drm_handle_vblank(dev, 0); return IRQ_HANDLED; } diff 
--git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index 8fd2d9f58..4e991662b 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c @@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv, static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv) { - if (atomic_read(&dev_priv->idle_count) == 0) + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); else - atomic_set(&dev_priv->idle_count, 0); + atomic_set_unchecked(&dev_priv->idle_count, 0); } #endif @@ -1641,4 +1641,4 @@ const struct drm_ioctl_desc r128_ioctls[] = { DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH), }; -int r128_max_ioctl = ARRAY_SIZE(r128_ioctls); +const int r128_max_ioctl = ARRAY_SIZE(r128_ioctls); diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index b928c17bd..e5d940016 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c @@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename) regex_t mask_rex; regmatch_t match[4]; char buf[1024]; - size_t end; + long end; int len; int done = 0; int r; unsigned o; struct offset *offset; char last_reg_s[10]; - int last_reg; + unsigned long last_reg; if (regcomp (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) { diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 0ae13cd2a..7b4245ab0 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -526,7 +526,7 @@ static int radeon_atpx_init(void) * look up whether we are the integrated or discrete GPU (all asics). * Returns the client id. 
*/ -static int radeon_atpx_get_client_id(struct pci_dev *pdev) +static enum vga_switcheroo_client_id radeon_atpx_get_client_id(struct pci_dev *pdev) { if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) return VGA_SWITCHEROO_IGD; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 27affbde0..3f59f8397 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -857,7 +857,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) return ret; } -static int radeon_lvds_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = radeon_best_single_encoder(connector); @@ -1011,7 +1011,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector) return ret; } -static int radeon_vga_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1151,7 +1151,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector) return 1; } -static int radeon_tv_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) @@ -1483,7 +1483,7 @@ static void radeon_dvi_force(struct drm_connector *connector) radeon_connector->use_digital = true; } -static int radeon_dvi_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1781,7 +1781,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force) return ret; } -static int radeon_dp_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 621af069a..1508b3fcc 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1289,7 +1289,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
*/ - return dev->open_count == 0; + return local_read(&dev->open_count) == 0; } static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e0c143b86..3a7bc79ab 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -137,7 +137,7 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc, const struct drm_display_mode *mode); extern bool radeon_is_px(struct drm_device *dev); extern const struct drm_ioctl_desc radeon_ioctls_kms[]; -extern int radeon_max_kms_ioctl; +extern const int radeon_max_kms_ioctl; int radeon_mmap(struct file *filp, struct vm_area_struct *vma); int radeon_mode_dumb_mmap(struct drm_file *filp, struct drm_device *dev, @@ -534,7 +534,7 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | - DRIVER_PRIME | DRIVER_RENDER, + DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET, .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, .preclose = radeon_driver_preclose_kms, @@ -606,8 +606,11 @@ static int __init radeon_init(void) DRM_INFO("radeon kernel modesetting enabled.\n"); driver = &kms_driver; pdriver = &radeon_kms_pci_driver; - driver->driver_features |= DRIVER_MODESET; - driver->num_ioctls = radeon_max_kms_ioctl; + + pax_open_kernel(); + const_cast(driver->num_ioctls) = radeon_max_kms_ioctl; + pax_close_kernel(); + radeon_register_atpx_handler(); } else { diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c index 0b98ea134..a3c770fd8 100644 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c @@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, request = compat_alloc_user_space(sizeof(*request)); if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) || __put_user(req32.param, &request->param) - || __put_user((void __user *)(unsigned long)req32.value, + || __put_user((unsigned long)req32.value, &request->value)) return -EFAULT; @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, #define compat_radeon_cp_setparam NULL #endif /* X86_64 || IA64 */ -static drm_ioctl_compat_t *radeon_compat_ioctls[] = { +static drm_ioctl_compat_t radeon_compat_ioctls[] = { [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, @@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = { long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); - drm_ioctl_compat_t *fn = NULL; int ret; if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) - fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; - - if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) + ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg); else ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4388ddeec..07a718c70 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -830,7 +830,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) * 
Enable the interrupt on the requested crtc (all asics). * Returns 0 on success, -EINVAL on failure. */ -int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) +int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int crtc) { struct radeon_device *rdev = dev->dev_private; unsigned long irqflags; @@ -856,7 +856,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) * * Disable the interrupt on the requested crtc (all asics). */ -void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) +void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int crtc) { struct radeon_device *rdev = dev->dev_private; unsigned long irqflags; @@ -885,7 +885,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) * scanout position. (all asics). * Returns postive status flags on success, negative error on failure. */ -int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, +int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int crtc, int *max_error, struct timeval *vblank_time, unsigned flags) @@ -954,4 +954,4 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; -int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); +const int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 3de5e6e21..307217064 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -975,7 +975,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) man->size = size >> PAGE_SHIFT; } -static struct vm_operations_struct radeon_ttm_vm_ops; +static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only; static const struct vm_operations_struct *ttm_vm_ops = NULL; static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -1016,8 +1016,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) } if (unlikely(ttm_vm_ops == NULL)) { ttm_vm_ops = vma->vm_ops; + pax_open_kernel(); radeon_ttm_vm_ops = *ttm_vm_ops; radeon_ttm_vm_ops.fault = &radeon_ttm_fault; + pax_close_kernel(); } vma->vm_ops = &radeon_ttm_vm_ops; return 0; diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index d47dff95f..0752202f8 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -1080,4 +1080,4 @@ const struct drm_ioctl_desc savage_ioctls[] = { DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), }; -int savage_max_ioctl = ARRAY_SIZE(savage_ioctls); +const int savage_max_ioctl = ARRAY_SIZE(savage_ioctls); diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 3b807135a..472b2cded 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -76,7 +76,10 @@ static struct pci_driver savage_pci_driver = { static int __init savage_init(void) { - driver.num_ioctls = savage_max_ioctl; + pax_open_kernel(); + const_cast(driver.num_ioctls) = savage_max_ioctl; + pax_close_kernel(); + return drm_pci_init(&driver, &savage_pci_driver); } diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h index 37b699571..9b31aafc5 100644 --- a/drivers/gpu/drm/savage/savage_drv.h +++ b/drivers/gpu/drm/savage/savage_drv.h @@ -107,7 +107,7 @@ enum 
savage_family { }; extern const struct drm_ioctl_desc savage_ioctls[]; -extern int savage_max_ioctl; +extern const int savage_max_ioctl; #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index ae9839886..78e57e84a 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -128,7 +128,10 @@ static struct pci_driver sis_pci_driver = { static int __init sis_init(void) { - driver.num_ioctls = sis_max_ioctl; + pax_open_kernel(); + const_cast(driver.num_ioctls) = sis_max_ioctl; + pax_close_kernel(); + return drm_pci_init(&driver, &sis_pci_driver); } diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h index 328f8a750..0cfcf5556 100644 --- a/drivers/gpu/drm/sis/sis_drv.h +++ b/drivers/gpu/drm/sis/sis_drv.h @@ -77,6 +77,6 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev, extern void sis_lastclose(struct drm_device *dev); extern const struct drm_ioctl_desc sis_ioctls[]; -extern int sis_max_ioctl; +extern const int sis_max_ioctl; #endif diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 03defda77..6f56b6851 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -359,4 +359,4 @@ const struct drm_ioctl_desc sis_ioctls[] = { DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; -int sis_max_ioctl = ARRAY_SIZE(sis_ioctls); +const int sis_max_ioctl = ARRAY_SIZE(sis_ioctls); diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index cca75bddb..a1a64def6 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -126,7 +126,7 @@ static int cursor_dbg_show(struct seq_file *s, void *data) return 0; } -static struct drm_info_list cursor_debugfs_files[] = { +static drm_info_list_no_const cursor_debugfs_files[] __read_only = { { "cursor", cursor_dbg_show, 0, NULL }, }; @@ -135,8 +135,10 @@ static int cursor_debugfs_init(struct sti_cursor *cursor, { unsigned int i; + pax_open_kernel(); for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++) cursor_debugfs_files[i].data = cursor; + pax_close_kernel(); return drm_debugfs_create_files(cursor_debugfs_files, ARRAY_SIZE(cursor_debugfs_files), diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index e8c1ed08a..b034392a1 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -191,7 +191,7 @@ static int dvo_dbg_show(struct seq_file *s, void *data) return 0; } -static struct drm_info_list dvo_debugfs_files[] = { +static drm_info_list_no_const dvo_debugfs_files[] __read_only = { { "dvo", dvo_dbg_show, 0, NULL }, }; @@ -206,8 +206,10 @@ static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor) { unsigned int i; + pax_open_kernel(); for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++) dvo_debugfs_files[i].data = dvo; + pax_close_kernel(); return drm_debugfs_create_files(dvo_debugfs_files, ARRAY_SIZE(dvo_debugfs_files), diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 81df3097b..ab864fd40 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -284,22 +284,22 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg) return 0; } -static struct drm_info_list gdp0_debugfs_files[] = { +static drm_info_list_no_const gdp0_debugfs_files[] __read_only = { { "gdp0", gdp_dbg_show, 0, NULL }, { "gdp0_node", gdp_node_dbg_show, 0, NULL }, }; -static struct 
drm_info_list gdp1_debugfs_files[] = { +static drm_info_list_no_const gdp1_debugfs_files[] __read_only = { { "gdp1", gdp_dbg_show, 0, NULL }, { "gdp1_node", gdp_node_dbg_show, 0, NULL }, }; -static struct drm_info_list gdp2_debugfs_files[] = { +static drm_info_list_no_const gdp2_debugfs_files[] __read_only = { { "gdp2", gdp_dbg_show, 0, NULL }, { "gdp2_node", gdp_node_dbg_show, 0, NULL }, }; -static struct drm_info_list gdp3_debugfs_files[] = { +static drm_info_list_no_const gdp3_debugfs_files[] __read_only = { { "gdp3", gdp_dbg_show, 0, NULL }, { "gdp3_node", gdp_node_dbg_show, 0, NULL }, }; @@ -307,7 +307,7 @@ static struct drm_info_list gdp3_debugfs_files[] = { static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor) { unsigned int i; - struct drm_info_list *gdp_debugfs_files; + drm_info_list_no_const *gdp_debugfs_files; int nb_files; switch (gdp->plane.desc) { @@ -331,8 +331,10 @@ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor) return -EINVAL; } + pax_open_kernel(); for (i = 0; i < nb_files; i++) gdp_debugfs_files[i].data = gdp; + pax_close_kernel(); return drm_debugfs_create_files(gdp_debugfs_files, nb_files, diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index e7c243f70..7cc961335 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -361,7 +361,7 @@ static int hda_dbg_show(struct seq_file *s, void *data) return 0; } -static struct drm_info_list hda_debugfs_files[] = { +static drm_info_list_no_const hda_debugfs_files[] __read_only = { { "hda", hda_dbg_show, 0, NULL }, }; @@ -376,8 +376,10 @@ static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor) { unsigned int i; + pax_open_kernel(); for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++) hda_debugfs_files[i].data = hda; + pax_close_kernel(); return drm_debugfs_create_files(hda_debugfs_files, ARRAY_SIZE(hda_debugfs_files), diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 376b0763c..9a2cca502 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -727,7 +727,7 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) return 0; } -static struct drm_info_list hdmi_debugfs_files[] = { +static drm_info_list_no_const hdmi_debugfs_files[] __read_only = { { "hdmi", hdmi_dbg_show, 0, NULL }, }; @@ -742,8 +742,10 @@ static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor) { unsigned int i; + pax_open_kernel(); for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++) hdmi_debugfs_files[i].data = hdmi; + pax_close_kernel(); return drm_debugfs_create_files(hdmi_debugfs_files, ARRAY_SIZE(hdmi_debugfs_files), diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index ac82e0250..738e44fae 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -628,7 +628,7 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data) return 0; } -static struct drm_info_list hqvdp_debugfs_files[] = { +static drm_info_list_no_const hqvdp_debugfs_files[] __read_only = { { "hqvdp", hqvdp_dbg_show, 0, NULL }, }; @@ -636,8 +636,10 @@ static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor) { unsigned int i; + pax_open_kernel(); for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++) hqvdp_debugfs_files[i].data = hqvdp; + pax_close_kernel(); return drm_debugfs_create_files(hqvdp_debugfs_files, ARRAY_SIZE(hqvdp_debugfs_files), diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c 
index 4ddc58f7f..5c93f1a81 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -167,18 +167,18 @@ static int mixer_dbg_show(struct seq_file *s, void *arg) return 0; } -static struct drm_info_list mixer0_debugfs_files[] = { +static drm_info_list_no_const mixer0_debugfs_files[] __read_only = { { "mixer_main", mixer_dbg_show, 0, NULL }, }; -static struct drm_info_list mixer1_debugfs_files[] = { +static drm_info_list_no_const mixer1_debugfs_files[] __read_only = { { "mixer_aux", mixer_dbg_show, 0, NULL }, }; int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) { unsigned int i; - struct drm_info_list *mixer_debugfs_files; + drm_info_list_no_const *mixer_debugfs_files; int nb_files; switch (mixer->id) { @@ -194,8 +194,10 @@ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) return -EINVAL; } + pax_open_kernel(); for (i = 0; i < nb_files; i++) mixer_debugfs_files[i].data = mixer; + pax_close_kernel(); return drm_debugfs_create_files(mixer_debugfs_files, nb_files, diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index ad46d3558..488be646e 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -563,7 +563,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data) return 0; } -static struct drm_info_list tvout_debugfs_files[] = { +static drm_info_list_no_const tvout_debugfs_files[] __read_only = { { "tvout", tvout_dbg_show, 0, NULL }, }; @@ -578,8 +578,10 @@ static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor) { unsigned int i; + pax_open_kernel(); for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++) tvout_debugfs_files[i].data = tvout; + pax_close_kernel(); return drm_debugfs_create_files(tvout_debugfs_files, ARRAY_SIZE(tvout_debugfs_files), diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c index 2ad59892b..d0707b3bb 100644 --- a/drivers/gpu/drm/sti/sti_vid.c +++ b/drivers/gpu/drm/sti/sti_vid.c @@ -119,7 +119,7 @@ static int vid_dbg_show(struct seq_file *s, void *arg) return 0; } -static struct drm_info_list vid_debugfs_files[] = { +static drm_info_list_no_const vid_debugfs_files[] __read_only = { { "vid", vid_dbg_show, 0, NULL }, }; @@ -127,8 +127,10 @@ int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) { unsigned int i; + pax_open_kernel(); for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++) vid_debugfs_files[i].data = vid; + pax_close_kernel(); return drm_debugfs_create_files(vid_debugfs_files, ARRAY_SIZE(vid_debugfs_files), diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 4010d69cb..d1f7e6e12 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1675,7 +1675,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor) } for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) - dc->debugfs_files[i].data = dc; + const_cast(dc->debugfs_files[i].data) = dc; err = drm_debugfs_create_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files), diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 3dea1216b..c2b888ebc 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -63,7 +63,7 @@ struct tegra_dsi { struct clk *clk_lp; struct clk *clk; - struct drm_info_list *debugfs_files; + drm_info_list_no_const *debugfs_files; struct drm_minor *minor; struct dentry *debugfs; diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index cda0491ed..869916ed4 100644 --- 
a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -74,7 +74,7 @@ struct tegra_hdmi { bool stereo; bool dvi; - struct drm_info_list *debugfs_files; + drm_info_list_no_const *debugfs_files; struct drm_minor *minor; struct dentry *debugfs; }; diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 74d0540b8..f5277db74 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -1263,8 +1263,11 @@ static int tegra_sor_debugfs_init(struct tegra_sor *sor, goto remove; } - for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) - sor->debugfs_files[i].data = sor; + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) { + pax_open_kernel(); + const_cast(sor->debugfs_files[i].data) = sor; + pax_close_kernel(); + } err = drm_debugfs_create_files(sor->debugfs_files, ARRAY_SIZE(debugfs_files), diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile index 6f675175a..3f0907347 100644 --- a/drivers/gpu/drm/tilcdc/Makefile +++ b/drivers/gpu/drm/tilcdc/Makefile @@ -1,7 +1,7 @@ ccflags-y := -Iinclude/drm -ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) - ccflags-y += -Werror -endif +#ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) +# ccflags-y += -Werror +#endif obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \ tilcdc_slave_compat.dtb.o diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 68e895021..7149c5a03 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -27,7 +27,7 @@ static const struct tilcdc_panel_info panel_info_tda998x = { .raster_order = 0, }; -static int tilcdc_external_mode_valid(struct drm_connector *connector, +static enum drm_mode_status tilcdc_external_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct tilcdc_drm_private *priv = connector->dev->dev_private; @@ -56,7 +56,7 @@ static int tilcdc_add_external_encoder(struct drm_device *dev, struct drm_connector *connector) { struct tilcdc_drm_private *priv = dev->dev_private; - struct drm_connector_helper_funcs *connector_funcs; + drm_connector_helper_funcs_no_const *connector_funcs; priv->connectors[priv->num_connectors] = connector; priv->encoders[priv->num_encoders++] = connector->encoder; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 2134bb20f..2cd7c97d9 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -179,7 +179,7 @@ static int panel_connector_get_modes(struct drm_connector *connector) return i; } -static int panel_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status panel_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct tilcdc_drm_private *priv = connector->dev->dev_private; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index 458043a53..e3bace6b2 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -185,7 +185,7 @@ static int tfp410_connector_get_modes(struct drm_connector *connector) return ret; } -static int tfp410_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status tfp410_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct tilcdc_drm_private *priv = connector->dev->dev_private; diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index aa0bd054d..aea6a0150 
100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func ttm_bo_manager_func = { - ttm_bo_man_init, - ttm_bo_man_takedown, - ttm_bo_man_get_node, - ttm_bo_man_put_node, - ttm_bo_man_debug + .init = ttm_bo_man_init, + .takedown = ttm_bo_man_takedown, + .get_node = ttm_bo_man_get_node, + .put_node = ttm_bo_man_put_node, + .debug = ttm_bo_man_debug }; EXPORT_SYMBOL(ttm_bo_manager_func); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 29855be96..b039e241b 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, zone->glob = glob; glob->zone_kernel = zone; ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name); if (unlikely(ret != 0)) { kobject_put(&zone->kobj); return ret; @@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, zone->glob = glob; glob->zone_dma32 = zone; ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name); if (unlikely(ret != 0)) { kobject_put(&zone->kobj); return ret; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index a37de5db5..4a0db00c4 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -54,7 +54,7 @@ #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) #define SMALL_ALLOCATION 16 -#define FREE_ALL_PAGES (~0U) +#define FREE_ALL_PAGES (~0UL) /* times are in msecs */ #define PAGE_FREE_INTERVAL 1000 @@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, * @free_all: If set to true will free all pages in pool * @use_static: Safe to use static buffer **/ -static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, +static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free, bool use_static) { static struct page *static_buf[NUM_PAGES_TO_ALLOC]; unsigned long irq_flags; struct page *p; struct page **pages_to_free; - unsigned freed_pages = 0, - npages_to_free = nr_free; + unsigned long freed_pages = 0, npages_to_free = nr_free; if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; @@ -371,7 +370,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, __list_del(&p->lru, &pool->list); ttm_pool_update_free_locked(pool, freed_pages); - nr_free -= freed_pages; + if (likely(nr_free != FREE_ALL_PAGES)) + nr_free -= freed_pages; } spin_unlock_irqrestore(&pool->lock, irq_flags); @@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) unsigned i; unsigned pool_offset; struct ttm_page_pool *pool; - int shrink_pages = sc->nr_to_scan; + unsigned long shrink_pages = sc->nr_to_scan; unsigned long freed = 0; if (!mutex_trylock(&lock)) @@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) pool_offset = ++start_pool % NUM_POOLS; /* select start pool in round robin fashion */ for (i = 0; i < NUM_POOLS; ++i) { - unsigned nr_free = shrink_pages; + unsigned long nr_free = shrink_pages; if (shrink_pages == 0) break; pool = &_manager->pools[(i + 
pool_offset)%NUM_POOLS]; @@ -673,7 +673,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, } /* Put all pages in pages list to correct pool to wait for reuse */ -static void ttm_put_pages(struct page **pages, unsigned npages, int flags, +static void ttm_put_pages(struct page **pages, unsigned long npages, int flags, enum ttm_caching_state cstate) { unsigned long irq_flags; @@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, struct list_head plist; struct page *p = NULL; gfp_t gfp_flags = GFP_USER; - unsigned count; + unsigned long count; int r; /* set zero flag for page allocation if required */ diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index cec4b4baa..71726dd41 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -56,7 +56,7 @@ #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) #define SMALL_ALLOCATION 4 -#define FREE_ALL_PAGES (~0U) +#define FREE_ALL_PAGES (~0UL) /* times are in msecs */ #define IS_UNDEFINED (0) #define IS_WC (1<<1) @@ -416,7 +416,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) * @nr_free: If set to true will free all pages in pool * @use_static: Safe to use static buffer **/ -static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, +static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free, bool use_static) { static struct page *static_buf[NUM_PAGES_TO_ALLOC]; @@ -424,8 +424,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, struct dma_page *dma_p, *tmp; struct page **pages_to_free; struct list_head d_pages; - unsigned freed_pages = 0, - npages_to_free = nr_free; + unsigned long freed_pages = 0, npages_to_free = nr_free; if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; @@ -502,7 +501,8 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, /* remove range of pages from the pool */ if (freed_pages) { ttm_pool_update_free_locked(pool, freed_pages); - nr_free -= freed_pages; + if (likely(nr_free != FREE_ALL_PAGES)) + nr_free -= freed_pages; } spin_unlock_irqrestore(&pool->lock, irq_flags); @@ -938,7 +938,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) struct dma_page *d_page, *next; enum pool_type type; bool is_cached = false; - unsigned count = 0, i, npages = 0; + unsigned long count = 0, i, npages = 0; unsigned long irq_flags; type = ttm_to_type(ttm->page_flags, ttm->caching_state); @@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) static unsigned start_pool; unsigned idx = 0; unsigned pool_offset; - unsigned shrink_pages = sc->nr_to_scan; + unsigned long shrink_pages = sc->nr_to_scan; struct device_pools *p; unsigned long freed = 0; @@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) goto out; pool_offset = ++start_pool % _manager->npools; list_for_each_entry(p, &_manager->pools, pools) { - unsigned nr_free; + unsigned long nr_free; if (!p->dev) continue; @@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true); freed += nr_free - shrink_pages; - pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", + pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n", p->pool->dev_name, 
p->pool->name, current->pid, nr_free, shrink_pages); } diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index d2f57c52f..06d2af764 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -80,7 +80,7 @@ static int udl_get_modes(struct drm_connector *connector) return ret; } -static int udl_mode_valid(struct drm_connector *connector, +static enum drm_mode_status udl_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct udl_device *udl = connector->dev->dev_private; diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 611b6b9bb..e0faec11d 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -242,7 +242,6 @@ static int udl_fb_release(struct fb_info *info, int user) fb_deferred_io_cleanup(info); kfree(info->fbdefio); info->fbdefio = NULL; - info->fbops->fb_mmap = udl_fb_mmap; } #endif diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 8703f56b7..7e8f99c10 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -180,6 +180,11 @@ static int compare_dev(struct device *dev, void *data) return dev == data; } +static int vc4_match(struct device *dev, void *drv) +{ + return platform_bus_type.match(dev, drv); +} + static void vc4_match_add_drivers(struct device *dev, struct component_match **match, struct platform_driver *const *drivers, @@ -191,8 +196,7 @@ static void vc4_match_add_drivers(struct device *dev, struct device_driver *drv = &drivers[i]->driver; struct device *p = NULL, *d; - while ((d = bus_find_device(&platform_bus_type, p, drv, - (void *)platform_bus_type.match))) { + while ((d = bus_find_device(&platform_bus_type, p, drv, vc4_match))) { put_device(p); component_match_add(dev, match, compare_dev, d); p = d; diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c index d17d8f245..67e8e48b3 100644 --- a/drivers/gpu/drm/via/via_dma.c +++ b/drivers/gpu/drm/via/via_dma.c @@ -737,4 +737,4 @@ const struct drm_ioctl_desc via_ioctls[] = { DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) }; -int via_max_ioctl = ARRAY_SIZE(via_ioctls); +const int via_max_ioctl = ARRAY_SIZE(via_ioctls); diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index e5582bab7..67c921bd4 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -107,7 +107,10 @@ static struct pci_driver via_pci_driver = { static int __init via_init(void) { - driver.num_ioctls = via_max_ioctl; + pax_open_kernel(); + const_cast(driver.num_ioctls) = via_max_ioctl; + pax_close_kernel(); + via_init_command_verifier(); return drm_pci_init(&driver, &via_pci_driver); } diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h index 286a785fa..c01826167 100644 --- a/drivers/gpu/drm/via/via_drv.h +++ b/drivers/gpu/drm/via/via_drv.h @@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer { typedef uint32_t maskarray_t[5]; typedef struct drm_via_irq { - atomic_t irq_received; + atomic_unchecked_t irq_received; uint32_t pending_mask; uint32_t enable_mask; wait_queue_head_t irq_queue; @@ -77,7 +77,7 @@ typedef struct drm_via_private { struct timeval last_vblank; int last_vblank_valid; unsigned usec_per_vblank; - atomic_t vbl_received; + atomic_unchecked_t vbl_received; drm_via_state_t hc_state; char pci_buf[VIA_PCI_BUF_SIZE]; const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; @@ -121,7 +121,7 @@ enum via_family { #define VIA_WRITE8(reg, val) 
DRM_WRITE8(VIA_BASE, reg, val) extern const struct drm_ioctl_desc via_ioctls[]; -extern int via_max_ioctl; +extern const int via_max_ioctl; extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index ea8172c74..6ceff63be 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (pipe != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } irqreturn_t via_driver_irq_handler(int irq, void *arg) @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg) status = VIA_READ(VIA_REG_INTERRUPT); if (status & VIA_IRQ_VBLANK_PENDING) { - atomic_inc(&dev_priv->vbl_received); - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { + atomic_inc_unchecked(&dev_priv->vbl_received); + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) { do_gettimeofday(&cur_vblank); if (dev_priv->last_vblank_valid) { dev_priv->usec_per_vblank = @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg) dev_priv->last_vblank = cur_vblank; dev_priv->last_vblank_valid = 1; } - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) { DRM_DEBUG("US per vblank is: %u\n", dev_priv->usec_per_vblank); } @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg) for (i = 0; i < dev_priv->num_irqs; ++i) { if (status & cur_irq->pending_mask) { - atomic_inc(&cur_irq->irq_received); + atomic_inc_unchecked(&cur_irq->irq_received); wake_up(&cur_irq->irq_queue); handled = 1; if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ, ((VIA_READ(masks[irq][2]) & masks[irq][3]) == masks[irq][4])); - cur_irq_sequence = atomic_read(&cur_irq->irq_received); + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received); } else { DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ, (((cur_irq_sequence = - atomic_read(&cur_irq->irq_received)) - + atomic_read_unchecked(&cur_irq->irq_received)) - *sequence) <= (1 << 23))); } *sequence = cur_irq_sequence; @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev) } for (i = 0; i < dev_priv->num_irqs; ++i) { - atomic_set(&cur_irq->irq_received, 0); + atomic_set_unchecked(&cur_irq->irq_received, 0); cur_irq->enable_mask = dev_priv->irq_masks[i][0]; cur_irq->pending_mask = dev_priv->irq_masks[i][1]; init_waitqueue_head(&cur_irq->irq_queue); @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { case VIA_IRQ_RELATIVE: irqwait->request.sequence += - atomic_read(&cur_irq->irq_received); + atomic_read_unchecked(&cur_irq->irq_received); irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; case VIA_IRQ_ABSOLUTE: break; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 58048709c..67f126b5c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -192,7 +192,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector) return count; } -static int 
virtio_gpu_conn_mode_valid(struct drm_connector *connector, +static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct virtio_gpu_output *output = diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index 80482ac5f..bf693e517 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c @@ -198,11 +198,11 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, } static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = { - ttm_bo_man_init, - ttm_bo_man_takedown, - ttm_bo_man_get_node, - ttm_bo_man_put_node, - ttm_bo_man_debug + .init = &ttm_bo_man_init, + .takedown = &ttm_bo_man_takedown, + .get_node = &ttm_bo_man_get_node, + .put_node = &ttm_bo_man_put_node, + .debug = &ttm_bo_man_debug }; static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 1e59a486b..d5f230b05 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -439,7 +439,7 @@ struct vmw_private { * Fencing and IRQs. */ - atomic_t marker_seq; + atomic_unchecked_t marker_seq; wait_queue_head_t fence_queue; wait_queue_head_t fifo_queue; spinlock_t waiter_lock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index b6a0806b0..9fb54795b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -156,7 +156,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) (unsigned int) min, (unsigned int) fifo->capabilities); - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno); vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); vmw_marker_queue_init(&fifo->marker_queue); @@ -355,7 +355,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, if (reserveable) vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED); - return (void __force *) (fifo_mem + + return (void __force_kernel *) (fifo_mem + (next_cmd >> 2)); } else { need_bounce = true; @@ -544,7 +544,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) fm = vmw_fifo_reserve(dev_priv, bytes); if (unlikely(fm == NULL)) { - *seqno = atomic_read(&dev_priv->marker_seq); + *seqno = atomic_read_unchecked(&dev_priv->marker_seq); ret = -ENOMEM; (void)vmw_fallback_wait(dev_priv, false, true, *seqno, false, 3*HZ); @@ -552,7 +552,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) } do { - *seqno = atomic_add_return(1, &dev_priv->marker_seq); + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq); } while (*seqno == 0); if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 170b61be1..fec7348ce 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { - vmw_gmrid_man_init, - vmw_gmrid_man_takedown, - vmw_gmrid_man_get_node, - vmw_gmrid_man_put_node, - vmw_gmrid_man_debug + .init = vmw_gmrid_man_init, + .takedown = vmw_gmrid_man_takedown, + .get_node = vmw_gmrid_man_get_node, + .put_node = 
vmw_gmrid_man_put_node, + .debug = vmw_gmrid_man_debug }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c7e17232..ead94fc91 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -103,7 +103,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv, * emitted. Then the fence is stale and signaled. */ - ret = ((atomic_read(&dev_priv->marker_seq) - seqno) + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno) > VMW_FENCE_WRAP); return ret; @@ -142,7 +142,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, } } - signal_seq = atomic_read(&dev_priv->marker_seq); + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq); ret = 0; for (;;) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c index efd1ffd68..0ae13ca2d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c @@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv, while (!vmw_lag_lt(queue, us)) { spin_lock(&queue->lock); if (list_empty(&queue->head)) - seqno = atomic_read(&dev_priv->marker_seq); + seqno = atomic_read_unchecked(&dev_priv->marker_seq); else { marker = list_first_entry(&queue->head, struct vmw_marker, head); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 5f962bfcb..b095fc5d1 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -1054,7 +1054,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev) * where the power switch is separate to the device being powered down. */ int vga_switcheroo_init_domain_pm_ops(struct device *dev, - struct dev_pm_domain *domain) + dev_pm_domain_no_const *domain) { /* copy over all the bus versions */ if (dev->bus && dev->bus->pm) { @@ -1125,7 +1125,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) */ int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, - struct dev_pm_domain *domain) + dev_pm_domain_no_const *domain) { /* copy over all the bus versions */ if (dev->bus && dev->bus->pm) { diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 2b89c7010..cc990252c 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2645,7 +2645,7 @@ EXPORT_SYMBOL_GPL(hid_ignore); int hid_add_device(struct hid_device *hdev) { - static atomic_t id = ATOMIC_INIT(0); + static atomic_unchecked_t id = ATOMIC_INIT(0); int ret; if (WARN_ON(hdev->status & HID_STAT_ADDED)) @@ -2689,7 +2689,7 @@ int hid_add_device(struct hid_device *hdev) /* XXX hack, any other cleaner solution after the driver core * is converted to allow more than 20 bytes as the device name? 
*/ dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, - hdev->vendor, hdev->product, atomic_inc_return(&id)); + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id)); hid_debug_register(hdev, dev_name(&hdev->dev)); ret = device_add(&hdev->dev); diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 20b40ad26..7eb563202 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -34,7 +34,7 @@ module_param(emulate_scroll_wheel, bool, 0644); MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel"); static unsigned int scroll_speed = 32; -static int param_set_scroll_speed(const char *val, struct kernel_param *kp) { +static int param_set_scroll_speed(const char *val, const struct kernel_param *kp) { unsigned long speed; if (!val || kstrtoul(val, 0, &speed) || speed > 63) return -EINVAL; diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index 3a84aaf14..915780c07 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -590,7 +590,7 @@ static int hid_sensor_custom_add_attributes(struct hid_sensor_custom j = 0; while (j < HID_CUSTOM_TOTAL_ATTRS && hid_custom_attrs[j].name) { - struct device_attribute *device_attr; + device_attribute_no_const *device_attr; device_attr = &sensor_inst->fields[i].sd_attrs[j]; diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c index c13fb5bd7..55a380241 100644 --- a/drivers/hid/hid-wiimote-debug.c +++ b/drivers/hid/hid-wiimote-debug.c @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s, else if (size == 0) return -EIO; - if (copy_to_user(u, buf, size)) + if (size > sizeof(buf) || copy_to_user(u, buf, size)) return -EFAULT; *off += size; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 5fb4c6d92..29316a6a8 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -398,7 +398,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, int ret = 0; next_gpadl_handle = - (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1); + (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1); ret = create_gpadl_header(kbuffer, size, &msginfo); if (ret) @@ -715,9 +715,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, * Adjust the size down since vmbus_channel_packet_page_buffer is the * largest size we support */ - descsize = sizeof(struct vmbus_channel_packet_page_buffer) - - ((MAX_PAGE_BUFFER_COUNT - pagecount) * - sizeof(struct hv_page_buffer)); + descsize = offsetof(struct vmbus_channel_packet_page_buffer, range[pagecount]); packetlen = descsize + bufferlen; packetlen_aligned = ALIGN(packetlen, sizeof(u64)); diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 60dbd6cb4..80ce7a178 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -183,6 +183,7 @@ static struct clocksource hyperv_cs_tsc = { }; #endif +static char hv_hypercall_page[PAGE_SIZE] __aligned(PAGE_SIZE) __used __section(".text"); /* * hv_init - Main initialization routine. 
@@ -193,7 +194,6 @@ int hv_init(void) { int max_leaf; union hv_x64_msr_hypercall_contents hypercall_msr; - void *virtaddr = NULL; memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); memset(hv_context.synic_message_page, 0, @@ -220,14 +220,9 @@ int hv_init(void) /* See if the hypercall page is already set */ rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); - virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC); - - if (!virtaddr) - goto cleanup; - hypercall_msr.enable = 1; - hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr); + hypercall_msr.guest_physical_address = __phys_to_pfn(slow_virt_to_phys((void *)(ktla_ktva((unsigned long)hv_hypercall_page)))); wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); /* Confirm that hypercall page did get setup. */ @@ -237,7 +232,7 @@ int hv_init(void) if (!hypercall_msr.enable) goto cleanup; - hv_context.hypercall_page = virtaddr; + hv_context.hypercall_page = hv_hypercall_page; #ifdef CONFIG_X86_64 if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { @@ -261,13 +256,9 @@ int hv_init(void) return 0; cleanup: - if (virtaddr) { - if (hypercall_msr.enable) { - hypercall_msr.as_uint64 = 0; - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); - } - - vfree(virtaddr); + if (hypercall_msr.enable) { + hypercall_msr.as_uint64 = 0; + wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); } return -ENOTSUPP; @@ -288,8 +279,6 @@ void hv_cleanup(bool crash) if (hv_context.hypercall_page) { hypercall_msr.as_uint64 = 0; wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); - if (!crash) - vfree(hv_context.hypercall_page); hv_context.hypercall_page = NULL; } diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index fdf8da929..d3fefc532 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -482,7 +482,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR)); MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure"); -static atomic_t trans_id = ATOMIC_INIT(0); +static atomic_unchecked_t trans_id = ATOMIC_INIT(0); static int dm_ring_size = (5 * PAGE_SIZE); @@ -1010,7 +1010,7 @@ static void hot_add_req(struct work_struct *dummy) pr_info("Memory hot add failed\n"); dm->state = DM_INITIALIZED; - resp.hdr.trans_id = atomic_inc_return(&trans_id); + resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); vmbus_sendpacket(dm->dev->channel, &resp, sizeof(struct dm_hot_add_response), (unsigned long)NULL, @@ -1089,7 +1089,7 @@ static void post_status(struct hv_dynmem_device *dm) memset(&status, 0, sizeof(struct dm_status)); status.hdr.type = DM_STATUS_REPORT; status.hdr.size = sizeof(struct dm_status); - status.hdr.trans_id = atomic_inc_return(&trans_id); + status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); /* * The host expects the guest to report free and committed memory. @@ -1113,7 +1113,7 @@ static void post_status(struct hv_dynmem_device *dm) * send the status. This can happen if we were interrupted * after we picked our transaction ID. 
*/ - if (status.hdr.trans_id != atomic_read(&trans_id)) + if (status.hdr.trans_id != atomic_read_unchecked(&trans_id)) return; /* @@ -1257,7 +1257,7 @@ static void balloon_up(struct work_struct *dummy) */ do { - bl_resp->hdr.trans_id = atomic_inc_return(&trans_id); + bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id); ret = vmbus_sendpacket(dm_device.dev->channel, bl_resp, bl_resp->hdr.size, @@ -1303,7 +1303,7 @@ static void balloon_down(struct hv_dynmem_device *dm, memset(&resp, 0, sizeof(struct dm_unballoon_response)); resp.hdr.type = DM_UNBALLOON_RESPONSE; - resp.hdr.trans_id = atomic_inc_return(&trans_id); + resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); resp.hdr.size = sizeof(struct dm_unballoon_response); vmbus_sendpacket(dm_device.dev->channel, &resp, @@ -1363,7 +1363,7 @@ static void version_resp(struct hv_dynmem_device *dm, memset(&version_req, 0, sizeof(struct dm_version_request)); version_req.hdr.type = DM_VERSION_REQUEST; version_req.hdr.size = sizeof(struct dm_version_request); - version_req.hdr.trans_id = atomic_inc_return(&trans_id); + version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); version_req.version.version = dm->next_version; /* @@ -1550,7 +1550,7 @@ static int balloon_probe(struct hv_device *dev, memset(&version_req, 0, sizeof(struct dm_version_request)); version_req.hdr.type = DM_VERSION_REQUEST; version_req.hdr.size = sizeof(struct dm_version_request); - version_req.hdr.trans_id = atomic_inc_return(&trans_id); + version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10; version_req.is_last_attempt = 0; @@ -1581,7 +1581,7 @@ static int balloon_probe(struct hv_device *dev, memset(&cap_msg, 0, sizeof(struct dm_capabilities)); cap_msg.hdr.type = DM_CAPABILITIES_REPORT; cap_msg.hdr.size = sizeof(struct dm_capabilities); - cap_msg.hdr.trans_id = atomic_inc_return(&trans_id); + cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); cap_msg.caps.cap_bits.balloon = 1; cap_msg.caps.cap_bits.hot_add = 1; diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 2b13f2a0a..e15046665 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -567,7 +567,7 @@ enum vmbus_connect_state { struct vmbus_connection { enum vmbus_connect_state conn_state; - atomic_t next_gpadl_handle; + atomic_unchecked_t next_gpadl_handle; struct completion unload_event; /* diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 579bdf93b..0dac21d52 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -116,7 +116,7 @@ struct sensor_template { struct device_attribute *devattr, const char *buf, size_t count); int index; -}; +} __do_const; /* Averaging interval */ static int update_avg_interval(struct acpi_power_meter_resource *resource) @@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource, struct sensor_template *attrs) { struct device *dev = &resource->acpi_dev->dev; - struct sensor_device_attribute *sensors = + sensor_device_attribute_no_const *sensors = &resource->sensors[resource->num_sensors]; int res = 0; @@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pm_dmi_table[] = { +static const struct dmi_system_id __initconst pm_dmi_table[] = { { enable_cap_knobs, "IBM Active Energy Manager", { diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 
0af7fd311..1fc50d474 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) { struct applesmc_node_group *grp; struct applesmc_dev_attr *node; - struct attribute *attr; + attribute_no_const *attr; int ret, i; for (grp = groups; grp->format; grp++) { @@ -1242,7 +1242,7 @@ static int applesmc_dmi_match(const struct dmi_system_id *id) * Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". * So we need to put "Apple MacBook Pro" before "Apple MacBook". */ -static __initdata struct dmi_system_id applesmc_whitelist[] = { +static const __initconst struct dmi_system_id applesmc_whitelist[] = { { applesmc_dmi_match, "Apple MacBook Air", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index cccef8796..06ce8ec4d 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids); struct atk_sensor_data { struct list_head list; struct atk_data *data; - struct device_attribute label_attr; - struct device_attribute input_attr; - struct device_attribute limit1_attr; - struct device_attribute limit2_attr; + device_attribute_no_const label_attr; + device_attribute_no_const input_attr; + device_attribute_no_const limit1_attr; + device_attribute_no_const limit2_attr; char label_attr_name[ATTR_NAME_SIZE]; char input_attr_name[ATTR_NAME_SIZE]; char limit1_attr_name[ATTR_NAME_SIZE]; @@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev, static struct device_attribute atk_name_attr = __ATTR(name, 0444, atk_name_show, NULL); -static void atk_init_attribute(struct device_attribute *attr, char *name, +static void atk_init_attribute(device_attribute_no_const *attr, char *name, sysfs_show_func show) { sysfs_attr_init(&attr->attr); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 6a27eb2fe..349ed23ed 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block coretemp_cpu_notifier __refdata = { +static struct notifier_block coretemp_cpu_notifier = { .notifier_call = coretemp_cpu_callback, }; diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 34704b045..eddfc67cd 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -890,7 +890,7 @@ static const struct i8k_config_data i8k_config_data[] = { }, }; -static struct dmi_system_id i8k_dmi_table[] __initdata = { +static const struct dmi_system_id i8k_dmi_table[] __initconst = { { .ident = "Dell Inspiron", .matches = { @@ -1006,7 +1006,7 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call. 
* See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121 */ -static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = { +static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst = { { .ident = "Dell Studio XPS 8000", .matches = { diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index a74c075a3..a6eb87f54 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -214,8 +214,8 @@ static struct attribute *hwmon_genattr(struct device *dev, const struct hwmon_ops *ops) { struct hwmon_device_attribute *hattr; - struct device_attribute *dattr; - struct attribute *a; + device_attribute_no_const *dattr; + attribute_no_const *a; umode_t mode; char *name; diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index 1f643782c..2b6e6150d 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data, const struct aem_rw_sensor_template *rw) { struct device *dev = &data->pdev->dev; - struct sensor_device_attribute *sensors = data->sensors; + sensor_device_attribute_no_const *sensors = data->sensors; int err; /* Set up read-only sensors */ diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c index f6a76679c..281a9deca 100644 --- a/drivers/hwmon/iio_hwmon.c +++ b/drivers/hwmon/iio_hwmon.c @@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct iio_hwmon_state *st; - struct sensor_device_attribute *a; + sensor_device_attribute_no_const *a; int ret, i; int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1; enum iio_chan_type type; diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c index 559c596b2..3de1a9630 100644 --- a/drivers/hwmon/nct6683.c +++ b/drivers/hwmon/nct6683.c @@ -404,11 +404,11 @@ nct6683_create_attr_group(struct device *dev, const struct sensor_template_group *tg, int repeat) { - struct sensor_device_attribute_2 *a2; - struct sensor_device_attribute *a; + sensor_device_attribute_2_no_const *a2; + sensor_device_attribute_no_const *a; struct sensor_device_template **t; struct sensor_device_attr_u *su; - struct attribute_group *group; + attribute_group_no_const *group; struct attribute **attrs; int i, j, count; diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index ce75dd4db..0e68b2a5d 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -1051,10 +1051,10 @@ nct6775_create_attr_group(struct device *dev, const struct sensor_template_group *tg, int repeat) { - struct attribute_group *group; + attribute_group_no_const *group; struct sensor_device_attr_u *su; - struct sensor_device_attribute *a; - struct sensor_device_attribute_2 *a2; + sensor_device_attribute_no_const *a; + sensor_device_attribute_2_no_const *a2; struct attribute **attrs; struct sensor_device_template **t; int i, count; diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index ba59eaef2..dbf694c63 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -824,7 +824,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr) return 0; } -static void pmbus_dev_attr_init(struct device_attribute *dev_attr, +static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr, const char *name, umode_t mode, ssize_t (*show)(struct device *dev, @@ -841,7 +841,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr, dev_attr->store = store; } -static void pmbus_attr_init(struct 
sensor_device_attribute *a, +static void pmbus_attr_init(sensor_device_attribute_no_const *a, const char *name, umode_t mode, ssize_t (*show)(struct device *dev, @@ -863,7 +863,7 @@ static int pmbus_add_boolean(struct pmbus_data *data, u16 reg, u8 mask) { struct pmbus_boolean *boolean; - struct sensor_device_attribute *a; + sensor_device_attribute_no_const *a; boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL); if (!boolean) @@ -888,7 +888,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, bool update, bool readonly) { struct pmbus_sensor *sensor; - struct device_attribute *a; + device_attribute_no_const *a; sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL); if (!sensor) @@ -919,7 +919,7 @@ static int pmbus_add_label(struct pmbus_data *data, const char *lstring, int index) { struct pmbus_label *label; - struct device_attribute *a; + device_attribute_no_const *a; label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL); if (!label) diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index a2fdbb7d2..e749a3cf3 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -170,7 +170,7 @@ struct sht15_data { int supply_uv; bool supply_uv_valid; struct work_struct update_supply_work; - atomic_t interrupt_handled; + atomic_unchecked_t interrupt_handled; }; /** @@ -530,13 +530,13 @@ static int sht15_measurement(struct sht15_data *data, ret = gpio_direction_input(data->pdata->gpio_data); if (ret) return ret; - atomic_set(&data->interrupt_handled, 0); + atomic_set_unchecked(&data->interrupt_handled, 0); enable_irq(gpio_to_irq(data->pdata->gpio_data)); if (gpio_get_value(data->pdata->gpio_data) == 0) { disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); /* Only relevant if the interrupt hasn't occurred. */ - if (!atomic_read(&data->interrupt_handled)) + if (!atomic_read_unchecked(&data->interrupt_handled)) schedule_work(&data->read_work); } ret = wait_event_timeout(data->wait_queue, @@ -808,7 +808,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d) /* First disable the interrupt */ disable_irq_nosync(irq); - atomic_inc(&data->interrupt_handled); + atomic_inc_unchecked(&data->interrupt_handled); /* Then schedule a reading work struct */ if (data->state != SHT15_READING_NOTHING) schedule_work(&data->read_work); @@ -830,11 +830,11 @@ static void sht15_bh_read_data(struct work_struct *work_s) * If not, then start the interrupt again - care here as could * have gone low in meantime so verify it hasn't! 
*/ - atomic_set(&data->interrupt_handled, 0); + atomic_set_unchecked(&data->interrupt_handled, 0); enable_irq(gpio_to_irq(data->pdata->gpio_data)); /* If still not occurred or another handler was scheduled */ if (gpio_get_value(data->pdata->gpio_data) - || atomic_read(&data->interrupt_handled)) + || atomic_read_unchecked(&data->interrupt_handled)) return; } diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index ac91c07e3..8e6966306 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block via_cputemp_cpu_notifier __refdata = { +static struct notifier_block via_cputemp_cpu_notifier = { .notifier_call = via_cputemp_cpu_callback, }; diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c index 65e324054..e6c511d8a 100644 --- a/drivers/i2c/busses/i2c-amd756-s4882.c +++ b/drivers/i2c/busses/i2c-amd756-s4882.c @@ -39,7 +39,7 @@ extern struct i2c_adapter amd756_smbus; static struct i2c_adapter *s4882_adapter; -static struct i2c_algorithm *s4882_algo; +static i2c_algorithm_no_const *s4882_algo; /* Wrapper access functions for multiplexed SMBus */ static DEFINE_MUTEX(amd756_lock); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 96f8230cd..73d7616b3 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -57,7 +57,7 @@ struct dw_scl_sda_cfg { }; struct dw_pci_controller { - u32 bus_num; + int bus_num; u32 bus_cfg; u32 tx_fifo_depth; u32 rx_fifo_depth; diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c index 88eda09e7..cf40434e4 100644 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c @@ -37,7 +37,7 @@ extern struct i2c_adapter *nforce2_smbus; static struct i2c_adapter *s4985_adapter; -static struct i2c_algorithm *s4985_algo; +static i2c_algorithm_no_const *s4985_algo; /* Wrapper access functions for multiplexed SMBus */ static DEFINE_MUTEX(nforce2_lock); diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 6f638bbc9..c56f34960 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -274,7 +274,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, break; } - data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf; + data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf; rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len); if (IS_ERR(rdwr_pa[i].buf)) { res = PTR_ERR(rdwr_pa[i].buf); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index bf9a2ad29..a54b1c4d4 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) alignment = queue_dma_alignment(q) | q->dma_pad_mask; if ((unsigned long)buf & alignment || blk_rq_bytes(rq) & q->dma_pad_mask - || object_is_on_stack(buf)) + || object_starts_on_stack(buf)) drive->dma = 0; } } diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 83679da0c..6e67e4ff3 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -178,7 +178,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, * 1073741822 == 549756 MB or 48bit addressing fake drive */ -static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, +static ide_startstop_t __intentional_overflow(-1) ide_do_rw_disk(ide_drive_t 
*drive, struct request *rq, sector_t block) { ide_hwif_t *hwif = drive->hwif; diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index d127ace6a..6ee866fcc 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -244,7 +244,7 @@ struct chs_geom { static unsigned int ide_disks; static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES]; -static int ide_set_disk_chs(const char *str, struct kernel_param *kp) +static int ide_set_disk_chs(const char *str, const struct kernel_param *kp) { unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1; @@ -328,7 +328,7 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit) static unsigned int ide_ignore_cable; -static int ide_set_ignore_cable(const char *s, struct kernel_param *kp) +static int ide_set_ignore_cable(const char *s, const struct kernel_param *kp) { int i, j = 1; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 4466a2f96..677188763 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1208,36 +1208,46 @@ static void bxt_idle_state_table_update(void) rdmsrl(MSR_PKGC6_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { - bxt_cstates[2].exit_latency = usec; - bxt_cstates[2].target_residency = usec; + pax_open_kernel(); + const_cast(bxt_cstates[2].exit_latency) = usec; + const_cast(bxt_cstates[2].target_residency) = usec; + pax_close_kernel(); } rdmsrl(MSR_PKGC7_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { - bxt_cstates[3].exit_latency = usec; - bxt_cstates[3].target_residency = usec; + pax_open_kernel(); + const_cast(bxt_cstates[3].exit_latency) = usec; + const_cast(bxt_cstates[3].target_residency) = usec; + pax_close_kernel(); } rdmsrl(MSR_PKGC8_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { - bxt_cstates[4].exit_latency = usec; - bxt_cstates[4].target_residency = usec; + pax_open_kernel(); + const_cast(bxt_cstates[4].exit_latency) = usec; + const_cast(bxt_cstates[4].target_residency) = usec; + pax_close_kernel(); } rdmsrl(MSR_PKGC9_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { - bxt_cstates[5].exit_latency = usec; - bxt_cstates[5].target_residency = usec; + pax_open_kernel(); + const_cast(bxt_cstates[5].exit_latency) = usec; + const_cast(bxt_cstates[5].target_residency) = usec; + pax_close_kernel(); } rdmsrl(MSR_PKGC10_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { - bxt_cstates[6].exit_latency = usec; - bxt_cstates[6].target_residency = usec; + pax_open_kernel(); + const_cast(bxt_cstates[6].exit_latency) = usec; + const_cast(bxt_cstates[6].target_residency) = usec; + pax_close_kernel(); } } @@ -1280,8 +1290,10 @@ static void sklh_idle_state_table_update(void) return; } - skl_cstates[5].disabled = 1; /* C8-SKL */ - skl_cstates[6].disabled = 1; /* C9-SKL */ + pax_open_kernel(); + const_cast(skl_cstates[5].disabled) = 1; /* C8-SKL */ + const_cast(skl_cstates[6].disabled) = 1; /* C9-SKL */ + pax_close_kernel(); } /* * intel_idle_state_table_update() diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index fc340ed3d..840e5ee76 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -769,7 +769,7 @@ static ssize_t iio_write_channel_info(struct device *dev, } static -int __iio_device_attr_init(struct device_attribute *dev_attr, +int __iio_device_attr_init(device_attribute_no_const *dev_attr, const char *postfix, struct iio_chan_spec const *chan, ssize_t (*readfunc)(struct device *dev, diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 71c7c4c32..f91d896f8 100644 --- a/drivers/infiniband/core/cm.c +++ 
b/drivers/infiniband/core/cm.c @@ -117,7 +117,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS] struct cm_counter_group { struct kobject obj; - atomic_long_t counter[CM_ATTR_COUNT]; + atomic_long_unchecked_t counter[CM_ATTR_COUNT]; }; struct cm_counter_attribute { @@ -1495,7 +1495,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg, static void cm_format_rej(struct cm_rej_msg *rej_msg, struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, - void *ari, + const void *ari, u8 ari_length, const void *private_data, u8 private_data_len) @@ -1539,7 +1539,7 @@ static void cm_dup_req_handler(struct cm_work *work, struct ib_mad_send_buf *msg = NULL; int ret; - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. */ @@ -1949,7 +1949,7 @@ static void cm_dup_rep_handler(struct cm_work *work) if (!cm_id_priv) return; - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REP_COUNTER]); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) @@ -2116,7 +2116,7 @@ static int cm_rtu_handler(struct cm_work *work) if (cm_id_priv->id.state != IB_CM_REP_SENT && cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { spin_unlock_irq(&cm_id_priv->lock); - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_RTU_COUNTER]); goto out; } @@ -2299,7 +2299,7 @@ static int cm_dreq_handler(struct cm_work *work) cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, dreq_msg->local_comm_id); if (!cm_id_priv) { - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); cm_issue_drep(work->port, work->mad_recv_wc); return -EINVAL; @@ -2324,7 +2324,7 @@ static int cm_dreq_handler(struct cm_work *work) case IB_CM_MRA_REP_RCVD: break; case IB_CM_TIMEWAIT: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; @@ -2338,7 +2338,7 @@ static int cm_dreq_handler(struct cm_work *work) cm_free_msg(msg); goto deref; case IB_CM_DREQ_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); goto unlock; default: @@ -2401,12 +2401,13 @@ static int cm_drep_handler(struct cm_work *work) } int ib_send_cm_rej(struct ib_cm_id *cm_id, - enum ib_cm_rej_reason reason, - void *ari, + int _reason, + const void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { + enum ib_cm_rej_reason reason = _reason; struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; @@ -2705,7 +2706,7 @@ static int cm_mra_handler(struct cm_work *work) ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout)) { if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) - atomic_long_inc(&work->port-> + atomic_long_inc_unchecked(&work->port-> counter_group[CM_RECV_DUPLICATES]. 
counter[CM_MRA_COUNTER]); goto out; @@ -2714,7 +2715,7 @@ static int cm_mra_handler(struct cm_work *work) break; case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REP_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); /* fall through */ default: @@ -2877,7 +2878,7 @@ static int cm_lap_handler(struct cm_work *work) case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; @@ -2893,7 +2894,7 @@ static int cm_lap_handler(struct cm_work *work) cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); goto unlock; default: @@ -2926,7 +2927,7 @@ deref: cm_deref_id(cm_id_priv); static void cm_format_apr(struct cm_apr_msg *apr_msg, struct cm_id_private *cm_id_priv, enum ib_cm_apr_status status, - void *info, + const void *info, u8 info_length, const void *private_data, u8 private_data_len) @@ -2946,12 +2947,13 @@ static void cm_format_apr(struct cm_apr_msg *apr_msg, } int ib_send_cm_apr(struct ib_cm_id *cm_id, - enum ib_cm_apr_status status, - void *info, + int _status, + const void *info, u8 info_length, const void *private_data, u8 private_data_len) { + enum ib_cm_apr_status status = _status; struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; @@ -3180,7 +3182,7 @@ static int cm_sidr_req_handler(struct cm_work *work) cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (cur_cm_id_priv) { spin_unlock_irq(&cm.lock); - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. */ } @@ -3394,10 +3396,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) msg->retries = 1; - atomic_long_add(1 + msg->retries, + atomic_long_add_unchecked(1 + msg->retries, &port->counter_group[CM_XMIT].counter[attr_index]); if (msg->retries) - atomic_long_add(msg->retries, + atomic_long_add_unchecked(msg->retries, &port->counter_group[CM_XMIT_RETRIES]. counter[attr_index]); @@ -3633,7 +3635,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, } attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); - atomic_long_inc(&port->counter_group[CM_RECV]. + atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. 
counter[attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, @@ -3840,7 +3842,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, cm_attr = container_of(attr, struct cm_counter_attribute, attr); return sprintf(buf, "%ld\n", - atomic_long_read(&group->counter[cm_attr->index])); + atomic_long_read_unchecked(&group->counter[cm_attr->index])); } static const struct sysfs_ops cm_counter_ops = { diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index cdbb1f1a6..7ed4277c7 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -98,8 +98,8 @@ struct ib_fmr_pool { struct task_struct *thread; - atomic_t req_ser; - atomic_t flush_ser; + atomic_unchecked_t req_ser; + atomic_unchecked_t flush_ser; wait_queue_head_t force_wait; }; @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) struct ib_fmr_pool *pool = pool_ptr; do { - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) { ib_fmr_batch_release(pool); - atomic_inc(&pool->flush_ser); + atomic_inc_unchecked(&pool->flush_ser); wake_up_interruptible(&pool->force_wait); if (pool->flush_function) @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) } set_current_state(TASK_INTERRUPTIBLE); - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 && !kthread_should_stop()) schedule(); __set_current_state(TASK_RUNNING); @@ -262,8 +262,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, pool->dirty_watermark = params->dirty_watermark; pool->dirty_len = 0; spin_lock_init(&pool->pool_lock); - atomic_set(&pool->req_ser, 0); - atomic_set(&pool->flush_ser, 0); + atomic_set_unchecked(&pool->req_ser, 0); + atomic_set_unchecked(&pool->flush_ser, 0); init_waitqueue_head(&pool->force_wait); pool->thread = kthread_run(ib_fmr_cleanup_thread, @@ -388,11 +388,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool) } spin_unlock_irq(&pool->pool_lock); - serial = atomic_inc_return(&pool->req_ser); + serial = atomic_inc_return_unchecked(&pool->req_ser); wake_up_process(pool->thread); if (wait_event_interruptible(pool->force_wait, - atomic_read(&pool->flush_ser) - serial >= 0)) + atomic_read_unchecked(&pool->flush_ser) - serial >= 0)) return -EINTR; return 0; @@ -502,7 +502,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) } else { list_add_tail(&fmr->list, &pool->dirty_list); if (++pool->dirty_len >= pool->dirty_watermark) { - atomic_inc(&pool->req_ser); + atomic_inc_unchecked(&pool->req_ser); wake_up_process(pool->thread); } } diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index 10469b008..e8b45f36e 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -176,11 +176,10 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) } { - struct netlink_dump_control c = { + netlink_dump_control_no_const c = { .dump = client->cb_table[op].dump, - .module = client->cb_table[op].module, }; - return netlink_dump_start(nls, skb, nlh, &c); + return __netlink_dump_start(nls, skb, nlh, &c, NULL, client->cb_table[op].module); } } } diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index c1fb545e8..47f692dec 100644 --- a/drivers/infiniband/core/sysfs.c +++ 
b/drivers/infiniband/core/sysfs.c @@ -894,7 +894,7 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) static void setup_hw_stats(struct ib_device *device, struct ib_port *port, u8 port_num) { - struct attribute_group *hsag; + attribute_group_no_const *hsag; struct rdma_hw_stats *stats; int i, ret; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 7713ef089..0bb2981b9 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -920,14 +920,14 @@ static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { - return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej); + return ib_ucm_send_info(file, inbuf, in_len, ib_send_cm_rej); } static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { - return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr); + return ib_ucm_send_info(file, inbuf, in_len, ib_send_cm_apr); } static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file, diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index cb3f515a2..cd08b78bc 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -974,6 +974,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length)) + return -EFAULT; + INIT_UDATA(&udata, buf + sizeof cmd, (unsigned long) cmd.response + sizeof resp, in_len - sizeof cmd, out_len - sizeof resp); diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index b85a1a983..a0dcc5976 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -111,7 +111,7 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) if (!wq->rdev->wr_log) return; - idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) & + idx = (atomic_inc_return_unchecked(&wq->rdev->wr_log_idx) - 1) & (wq->rdev->wr_log_size - 1); le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]); getnstimeofday(&le.poll_host_ts); @@ -143,7 +143,7 @@ static int wr_log_show(struct seq_file *seq, void *v) #define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000) - idx = atomic_read(&dev->rdev.wr_log_idx) & + idx = atomic_read_unchecked(&dev->rdev.wr_log_idx) & (dev->rdev.wr_log_size - 1); end = idx - 1; if (end < 0) @@ -842,7 +842,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) sizeof(*rdev->wr_log), GFP_KERNEL); if (rdev->wr_log) { rdev->wr_log_size = 1 << c4iw_wr_log_size_order; - atomic_set(&rdev->wr_log_idx, 0); + atomic_set_unchecked(&rdev->wr_log_idx, 0); } else { pr_err(MOD "error allocating wr_log. 
Logging disabled\n"); } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7d540667d..4b8a84cae 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -182,7 +182,7 @@ struct c4iw_rdev { struct c4iw_stats stats; struct c4iw_hw_queue hw_queue; struct t4_dev_status_page *status_page; - atomic_t wr_log_idx; + atomic_unchecked_t wr_log_idx; struct wr_log_entry *wr_log; int wr_log_size; struct workqueue_struct *free_workq; diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 410408f88..9702659f2 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, int err; struct fw_ri_tpte tpt; u32 stag_idx; - static atomic_t key; + static atomic_unchecked_t key; if (c4iw_fatal_error(rdev)) return -EIO; @@ -287,7 +287,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, if (rdev->stats.stag.cur > rdev->stats.stag.max) rdev->stats.stag.max = rdev->stats.stag.cur; mutex_unlock(&rdev->stats.lock); - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff); } PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", __func__, stag_state, type, pdid, stag_idx); diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 4ac8f330c..63991d8c6 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -536,7 +536,7 @@ static void tune_pcie_caps(struct hfi1_devdata *dd) * PCI error infrastructure, registered via pci */ static pci_ers_result_t -pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct hfi1_devdata *dd = pci_get_drvdata(pdev); pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED; diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 2c4b4d072..b45e806b3 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -4604,46 +4604,46 @@ static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat) } static struct i40iw_cqp_ops iw_cqp_ops = { - i40iw_sc_cqp_init, - i40iw_sc_cqp_create, - i40iw_sc_cqp_post_sq, - i40iw_sc_cqp_get_next_send_wqe, - i40iw_sc_cqp_destroy, - i40iw_sc_poll_for_cqp_op_done + .cqp_init = i40iw_sc_cqp_init, + .cqp_create = i40iw_sc_cqp_create, + .cqp_post_sq = i40iw_sc_cqp_post_sq, + .cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe, + .cqp_destroy = i40iw_sc_cqp_destroy, + .poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done }; static struct i40iw_ccq_ops iw_ccq_ops = { - i40iw_sc_ccq_init, - i40iw_sc_ccq_create, - i40iw_sc_ccq_destroy, - i40iw_sc_ccq_create_done, - i40iw_sc_ccq_get_cqe_info, - i40iw_sc_ccq_arm + .ccq_init = i40iw_sc_ccq_init, + .ccq_create = i40iw_sc_ccq_create, + .ccq_destroy = i40iw_sc_ccq_destroy, + .ccq_create_done = i40iw_sc_ccq_create_done, + .ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info, + .ccq_arm = i40iw_sc_ccq_arm }; static struct i40iw_ceq_ops iw_ceq_ops = { - i40iw_sc_ceq_init, - i40iw_sc_ceq_create, - i40iw_sc_cceq_create_done, - i40iw_sc_cceq_destroy_done, - i40iw_sc_cceq_create, - i40iw_sc_ceq_destroy, - i40iw_sc_process_ceq + .ceq_init = i40iw_sc_ceq_init, + .ceq_create = i40iw_sc_ceq_create, + .cceq_create_done = i40iw_sc_cceq_create_done, + 
.cceq_destroy_done = i40iw_sc_cceq_destroy_done, + .cceq_create = i40iw_sc_cceq_create, + .ceq_destroy = i40iw_sc_ceq_destroy, + .process_ceq = i40iw_sc_process_ceq }; static struct i40iw_aeq_ops iw_aeq_ops = { - i40iw_sc_aeq_init, - i40iw_sc_aeq_create, - i40iw_sc_aeq_destroy, - i40iw_sc_get_next_aeqe, - i40iw_sc_repost_aeq_entries, - i40iw_sc_aeq_create_done, - i40iw_sc_aeq_destroy_done + .aeq_init = i40iw_sc_aeq_init, + .aeq_create = i40iw_sc_aeq_create, + .aeq_destroy = i40iw_sc_aeq_destroy, + .get_next_aeqe = i40iw_sc_get_next_aeqe, + .repost_aeq_entries = i40iw_sc_repost_aeq_entries, + .aeq_create_done = i40iw_sc_aeq_create_done, + .aeq_destroy_done = i40iw_sc_aeq_destroy_done }; /* iwarp pd ops */ static struct i40iw_pd_ops iw_pd_ops = { - i40iw_sc_pd_init, + .pd_init = i40iw_sc_pd_init, }; static struct i40iw_priv_qp_ops iw_priv_qp_ops = { @@ -4662,61 +4662,59 @@ static struct i40iw_priv_qp_ops iw_priv_qp_ops = { }; static struct i40iw_priv_cq_ops iw_priv_cq_ops = { - i40iw_sc_cq_init, - i40iw_sc_cq_create, - i40iw_sc_cq_destroy, - i40iw_sc_cq_modify, + .cq_init = i40iw_sc_cq_init, + .cq_create = i40iw_sc_cq_create, + .cq_destroy = i40iw_sc_cq_destroy, + .cq_modify = i40iw_sc_cq_modify, }; static struct i40iw_mr_ops iw_mr_ops = { - i40iw_sc_alloc_stag, - i40iw_sc_mr_reg_non_shared, - i40iw_sc_mr_reg_shared, - i40iw_sc_dealloc_stag, - i40iw_sc_query_stag, - i40iw_sc_mw_alloc + .alloc_stag = i40iw_sc_alloc_stag, + .mr_reg_non_shared = i40iw_sc_mr_reg_non_shared, + .mr_reg_shared = i40iw_sc_mr_reg_shared, + .dealloc_stag = i40iw_sc_dealloc_stag, + .query_stag = i40iw_sc_query_stag, + .mw_alloc = i40iw_sc_mw_alloc }; static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = { - i40iw_sc_manage_push_page, - i40iw_sc_manage_hmc_pm_func_table, - i40iw_sc_set_hmc_resource_profile, - i40iw_sc_commit_fpm_values, - i40iw_sc_query_fpm_values, - i40iw_sc_static_hmc_pages_allocated, - i40iw_sc_add_arp_cache_entry, - i40iw_sc_del_arp_cache_entry, - i40iw_sc_query_arp_cache_entry, - i40iw_sc_manage_apbvt_entry, - i40iw_sc_manage_qhash_table_entry, - i40iw_sc_alloc_local_mac_ipaddr_entry, - i40iw_sc_add_local_mac_ipaddr_entry, - i40iw_sc_del_local_mac_ipaddr_entry, - i40iw_sc_cqp_nop, - i40iw_sc_commit_fpm_values_done, - i40iw_sc_query_fpm_values_done, - i40iw_sc_manage_hmc_pm_func_table_done, - i40iw_sc_suspend_qp, - i40iw_sc_resume_qp + .manage_push_page = i40iw_sc_manage_push_page, + .manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table, + .set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile, + .commit_fpm_values = i40iw_sc_commit_fpm_values, + .query_fpm_values = i40iw_sc_query_fpm_values, + .static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated, + .add_arp_cache_entry = i40iw_sc_add_arp_cache_entry, + .del_arp_cache_entry = i40iw_sc_del_arp_cache_entry, + .query_arp_cache_entry = i40iw_sc_query_arp_cache_entry, + .manage_apbvt_entry = i40iw_sc_manage_apbvt_entry, + .manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry, + .alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry, + .add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry, + .del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry, + .cqp_nop = i40iw_sc_cqp_nop, + .commit_fpm_values_done = i40iw_sc_commit_fpm_values_done, + .query_fpm_values_done = i40iw_sc_query_fpm_values_done, + .manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done, + .update_suspend_qp = i40iw_sc_suspend_qp, + .update_resume_qp = i40iw_sc_resume_qp }; static struct 
i40iw_hmc_ops iw_hmc_ops = { - i40iw_sc_init_iw_hmc, - i40iw_sc_parse_fpm_query_buf, - i40iw_sc_configure_iw_fpm, - i40iw_sc_parse_fpm_commit_buf, - i40iw_sc_create_hmc_obj, - i40iw_sc_del_hmc_obj, - NULL, - NULL + .init_iw_hmc = i40iw_sc_init_iw_hmc, + .parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf, + .configure_iw_fpm = i40iw_sc_configure_iw_fpm, + .parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf, + .create_hmc_object = i40iw_sc_create_hmc_obj, + .del_hmc_object = i40iw_sc_del_hmc_obj }; static const struct i40iw_device_pestat_ops iw_device_pestat_ops = { - i40iw_hw_stat_init, - i40iw_hw_stat_read_32, - i40iw_hw_stat_read_64, - i40iw_hw_stat_read_all, - i40iw_hw_stat_refresh_all + .iw_hw_stat_init = i40iw_hw_stat_init, + .iw_hw_stat_read_32 = i40iw_hw_stat_read_32, + .iw_hw_stat_read_64 = i40iw_hw_stat_read_64, + .iw_hw_stat_read_all = i40iw_hw_stat_read_all, + .iw_hw_stat_refresh_all = i40iw_hw_stat_refresh_all }; /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c index 4d28c3cb0..ec6b0b77f 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_uk.c +++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c @@ -919,29 +919,29 @@ enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data } static struct i40iw_qp_uk_ops iw_qp_uk_ops = { - i40iw_qp_post_wr, - i40iw_qp_ring_push_db, - i40iw_rdma_write, - i40iw_rdma_read, - i40iw_send, - i40iw_inline_rdma_write, - i40iw_inline_send, - i40iw_stag_local_invalidate, - i40iw_mw_bind, - i40iw_post_receive, - i40iw_nop + .iw_qp_post_wr = i40iw_qp_post_wr, + .iw_qp_ring_push_db = i40iw_qp_ring_push_db, + .iw_rdma_write = i40iw_rdma_write, + .iw_rdma_read = i40iw_rdma_read, + .iw_send = i40iw_send, + .iw_inline_rdma_write = i40iw_inline_rdma_write, + .iw_inline_send = i40iw_inline_send, + .iw_stag_local_invalidate = i40iw_stag_local_invalidate, + .iw_mw_bind = i40iw_mw_bind, + .iw_post_receive = i40iw_post_receive, + .iw_post_nop = i40iw_nop }; static struct i40iw_cq_ops iw_cq_ops = { - i40iw_cq_request_notification, - i40iw_cq_poll_completion, - i40iw_cq_post_entries, - i40iw_clean_cq + .iw_cq_request_notification = i40iw_cq_request_notification, + .iw_cq_poll_completion = i40iw_cq_poll_completion, + .iw_cq_post_entries = i40iw_cq_post_entries, + .iw_cq_clean = i40iw_clean_cq }; static struct i40iw_device_uk_ops iw_device_uk_ops = { - i40iw_cq_uk_init, - i40iw_qp_uk_init, + .iwarp_cq_uk_init = i40iw_cq_uk_init, + .iwarp_qp_uk_init = i40iw_qp_uk_init, }; /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h index 276bcefff..b2e368422 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_user.h +++ b/drivers/infiniband/hw/i40iw/i40iw_user.h @@ -343,7 +343,7 @@ struct i40iw_device_uk_ops { struct i40iw_dev_uk { struct i40iw_device_uk_ops ops_uk; -}; +} __no_const; struct i40iw_sq_uk_wr_trk_info { u64 wrid; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 18d309e40..41ef80dd9 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -99,7 +99,7 @@ __be64 mlx4_ib_gen_node_guid(void) __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx) { - return cpu_to_be64(atomic_inc_return(&ctx->tid)) | + return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) | cpu_to_be64(0xff00000000000000LL); } diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index a21d37f02..4f927934c 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ 
-1043,7 +1043,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx) { char name[20]; - atomic_set(&ctx->tid, 0); + atomic_set_unchecked(&ctx->tid, 0); sprintf(name, "mlx4_ib_mcg%d", ctx->port); ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!ctx->mcg_wq) diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 35141f451..652e92eab 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -457,7 +457,7 @@ struct mlx4_ib_demux_ctx { struct list_head mcg_mgid0_list; struct workqueue_struct *mcg_wq; struct mlx4_ib_demux_pv_ctx **tun; - atomic_t tid; + atomic_unchecked_t tid; int flushing; /* flushing the work queue */ }; diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index c7f49bbb0..6a021bb80 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base) mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n"); } -int mthca_QUERY_FW(struct mthca_dev *dev) +int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev) { struct mthca_mailbox *mailbox; u32 *outbox; @@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, CMD_TIME_CLASS_B); } -int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, +int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int num_mtt) { return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT, @@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, 0, CMD_MAP_EQ, CMD_TIME_CLASS_B); } -int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, +int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num) { return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, @@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn) CMD_TIME_CLASS_B); } -int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, +int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const void *in_mad, void *response_mad) { diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index ded76c101..0cf0a08c4 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -692,7 +692,7 @@ static int mthca_init_hca(struct mthca_dev *mdev) return err; } -static int mthca_setup_hca(struct mthca_dev *dev) +static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev) { int err; diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index ed9a989e5..6aa5dc2c6 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -81,7 +81,7 @@ struct mthca_mpt_entry { * through the bitmaps) */ -static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) +static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order) { int o; int m; @@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key) return key; } -int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, +int __intentional_overflow(-1) mthca_mr_alloc(struct 
mthca_dev *dev, u32 pd, int buffer_size_shift, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) { struct mthca_mailbox *mailbox; @@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); } -int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, +int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, u64 *buffer_list, int buffer_size_shift, int list_len, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 358930a41..abd0b775f 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -773,7 +773,7 @@ static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq, return 0; } -static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) +static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibcq->device); struct mthca_cq *cq = to_mcq(ibcq); diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 35cbb17be..d336a6834 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes"); LIST_HEAD(nes_adapter_list); static LIST_HEAD(nes_dev_list); -atomic_t qps_destroyed; +atomic_unchecked_t qps_destroyed; static unsigned int ee_flsh_adapter; static unsigned int sysfs_nonidx_addr; @@ -268,7 +268,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; struct nes_adapter *nesadapter = nesdev->nesadapter; - atomic_inc(&qps_destroyed); + atomic_inc_unchecked(&qps_destroyed); /* Free the control structures */ diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index e7430c925..e8250c31b 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -180,17 +180,17 @@ extern unsigned int nes_debug_level; extern unsigned int wqm_quanta; extern struct list_head nes_adapter_list; -extern atomic_t cm_connects; -extern atomic_t cm_accepts; -extern atomic_t cm_disconnects; -extern atomic_t cm_closes; -extern atomic_t cm_connecteds; -extern atomic_t cm_connect_reqs; -extern atomic_t cm_rejects; -extern atomic_t mod_qp_timouts; -extern atomic_t qps_created; -extern atomic_t qps_destroyed; -extern atomic_t sw_qps_destroyed; +extern atomic_unchecked_t cm_connects; +extern atomic_unchecked_t cm_accepts; +extern atomic_unchecked_t cm_disconnects; +extern atomic_unchecked_t cm_closes; +extern atomic_unchecked_t cm_connecteds; +extern atomic_unchecked_t cm_connect_reqs; +extern atomic_unchecked_t cm_rejects; +extern atomic_unchecked_t mod_qp_timouts; +extern atomic_unchecked_t qps_created; +extern atomic_unchecked_t qps_destroyed; +extern atomic_unchecked_t sw_qps_destroyed; extern u32 mh_detected; extern u32 mh_pauses_sent; extern u32 cm_packets_sent; @@ -199,16 +199,16 @@ extern u32 cm_packets_created; extern u32 cm_packets_received; extern u32 cm_packets_dropped; extern u32 cm_packets_retrans; -extern atomic_t cm_listens_created; -extern atomic_t cm_listens_destroyed; +extern atomic_unchecked_t cm_listens_created; +extern atomic_unchecked_t cm_listens_destroyed; extern u32 cm_backlog_drops; -extern atomic_t cm_loopbacks; -extern 
atomic_t cm_nodes_created; -extern atomic_t cm_nodes_destroyed; -extern atomic_t cm_accel_dropped_pkts; -extern atomic_t cm_resets_recvd; -extern atomic_t pau_qps_created; -extern atomic_t pau_qps_destroyed; +extern atomic_unchecked_t cm_loopbacks; +extern atomic_unchecked_t cm_nodes_created; +extern atomic_unchecked_t cm_nodes_destroyed; +extern atomic_unchecked_t cm_accel_dropped_pkts; +extern atomic_unchecked_t cm_resets_recvd; +extern atomic_unchecked_t pau_qps_created; +extern atomic_unchecked_t pau_qps_destroyed; extern u32 int_mod_timer_init; extern u32 int_mod_cq_depth_256; diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 57db9b332..adfe4b10a 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -69,14 +69,14 @@ u32 cm_packets_dropped; u32 cm_packets_retrans; u32 cm_packets_created; u32 cm_packets_received; -atomic_t cm_listens_created; -atomic_t cm_listens_destroyed; +atomic_unchecked_t cm_listens_created; +atomic_unchecked_t cm_listens_destroyed; u32 cm_backlog_drops; -atomic_t cm_loopbacks; -atomic_t cm_nodes_created; -atomic_t cm_nodes_destroyed; -atomic_t cm_accel_dropped_pkts; -atomic_t cm_resets_recvd; +atomic_unchecked_t cm_loopbacks; +atomic_unchecked_t cm_nodes_created; +atomic_unchecked_t cm_nodes_destroyed; +atomic_unchecked_t cm_accel_dropped_pkts; +atomic_unchecked_t cm_resets_recvd; static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *); @@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16); /* instance of function pointers for client API */ /* set address of this instance to cm_core->cm_ops at cm_core alloc */ static const struct nes_cm_ops nes_cm_api = { - mini_cm_accelerated, - mini_cm_listen, - mini_cm_del_listen, - mini_cm_connect, - mini_cm_close, - mini_cm_accept, - mini_cm_reject, - mini_cm_recv_pkt, - mini_cm_dealloc_core, - mini_cm_get, - mini_cm_set + .accelerated = mini_cm_accelerated, + .listen = mini_cm_listen, + .stop_listener = mini_cm_del_listen, + .connect = mini_cm_connect, + .close = mini_cm_close, + .accept = mini_cm_accept, + .reject = mini_cm_reject, + .recv_pkt = mini_cm_recv_pkt, + .destroy_cm_core = mini_cm_dealloc_core, + .get = mini_cm_get, + .set = mini_cm_set }; static struct nes_cm_core *g_cm_core; -atomic_t cm_connects; -atomic_t cm_accepts; -atomic_t cm_disconnects; -atomic_t cm_closes; -atomic_t cm_connecteds; -atomic_t cm_connect_reqs; -atomic_t cm_rejects; +atomic_unchecked_t cm_connects; +atomic_unchecked_t cm_accepts; +atomic_unchecked_t cm_disconnects; +atomic_unchecked_t cm_closes; +atomic_unchecked_t cm_connecteds; +atomic_unchecked_t cm_connect_reqs; +atomic_unchecked_t cm_rejects; int nes_add_ref_cm_node(struct nes_cm_node *cm_node) { @@ -1333,7 +1333,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, kfree(listener); listener = NULL; ret = 0; - atomic_inc(&cm_listens_destroyed); + atomic_inc_unchecked(&cm_listens_destroyed); } else { spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); } @@ -1537,7 +1537,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, cm_node->rem_mac); add_hte_node(cm_core, cm_node); - atomic_inc(&cm_nodes_created); + atomic_inc_unchecked(&cm_nodes_created); return cm_node; } @@ -1596,7 +1596,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, } atomic_dec(&cm_core->node_cnt); - 
atomic_inc(&cm_nodes_destroyed); + atomic_inc_unchecked(&cm_nodes_destroyed); nesqp = cm_node->nesqp; if (nesqp) { nesqp->cm_node = NULL; @@ -1660,7 +1660,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, static void drop_packet(struct sk_buff *skb) { - atomic_inc(&cm_accel_dropped_pkts); + atomic_inc_unchecked(&cm_accel_dropped_pkts); dev_kfree_skb_any(skb); } @@ -1723,7 +1723,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, { int reset = 0; /* whether to send reset in case of err.. */ - atomic_inc(&cm_resets_recvd); + atomic_inc_unchecked(&cm_resets_recvd); nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." " refcnt=%d\n", cm_node, cm_node->state, atomic_read(&cm_node->ref_count)); @@ -2369,7 +2369,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, rem_ref_cm_node(cm_node->cm_core, cm_node); return NULL; } - atomic_inc(&cm_loopbacks); + atomic_inc_unchecked(&cm_loopbacks); loopbackremotenode->loopbackpartner = cm_node; loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; @@ -2644,7 +2644,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp); else { rem_ref_cm_node(cm_core, cm_node); - atomic_inc(&cm_accel_dropped_pkts); + atomic_inc_unchecked(&cm_accel_dropped_pkts); dev_kfree_skb_any(skb); } break; @@ -2965,7 +2965,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) if ((cm_id) && (cm_id->event_handler)) { if (issue_disconn) { - atomic_inc(&cm_disconnects); + atomic_inc_unchecked(&cm_disconnects); cm_event.event = IW_CM_EVENT_DISCONNECT; cm_event.status = disconn_status; cm_event.local_addr = cm_id->m_local_addr; @@ -2987,7 +2987,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) } if (issue_close) { - atomic_inc(&cm_closes); + atomic_inc_unchecked(&cm_closes); nes_disconnect(nesqp, 1); cm_id->provider_data = nesqp; @@ -3124,7 +3124,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); - atomic_inc(&cm_accepts); + atomic_inc_unchecked(&cm_accepts); nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", netdev_refcnt_read(nesvnic->netdev)); @@ -3320,7 +3320,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) struct nes_cm_core *cm_core; u8 *start_buff; - atomic_inc(&cm_rejects); + atomic_inc_unchecked(&cm_rejects); cm_node = (struct nes_cm_node *)cm_id->provider_data; loopback = cm_node->loopbackpartner; cm_core = cm_node->cm_core; @@ -3382,7 +3382,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr), ntohs(laddr->sin_port)); - atomic_inc(&cm_connects); + atomic_inc_unchecked(&cm_connects); nesqp->active_conn = 1; /* cache the cm_id in the qp */ @@ -3496,7 +3496,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); return err; } - atomic_inc(&cm_listens_created); + atomic_inc_unchecked(&cm_listens_created); } cm_id->add_ref(cm_id); @@ -3603,7 +3603,7 @@ static void cm_event_connected(struct nes_cm_event *event) if (nesqp->destroyed) return; - atomic_inc(&cm_connecteds); + atomic_inc_unchecked(&cm_connecteds); nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" " local port 0x%04X. 
jiffies = %lu.\n", nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr), @@ -3788,7 +3788,7 @@ static void cm_event_reset(struct nes_cm_event *event) cm_id->add_ref(cm_id); ret = cm_id->event_handler(cm_id, &cm_event); - atomic_inc(&cm_closes); + atomic_inc_unchecked(&cm_closes); cm_event.event = IW_CM_EVENT_CLOSE; cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; @@ -3828,7 +3828,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) return; cm_id = cm_node->cm_id; - atomic_inc(&cm_connect_reqs); + atomic_inc_unchecked(&cm_connect_reqs); nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", cm_node, cm_id, jiffies); @@ -3877,7 +3877,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) return; cm_id = cm_node->cm_id; - atomic_inc(&cm_connect_reqs); + atomic_inc_unchecked(&cm_connect_reqs); nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", cm_node, cm_id, jiffies); diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c index 416645259..fc952c351 100644 --- a/drivers/infiniband/hw/nes/nes_mgt.c +++ b/drivers/infiniband/hw/nes/nes_mgt.c @@ -40,8 +40,8 @@ #include "nes.h" #include "nes_mgt.h" -atomic_t pau_qps_created; -atomic_t pau_qps_destroyed; +atomic_unchecked_t pau_qps_created; +atomic_unchecked_t pau_qps_destroyed; static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic) { @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp) { struct sk_buff *skb; unsigned long flags; - atomic_inc(&pau_qps_destroyed); + atomic_inc_unchecked(&pau_qps_destroyed); /* Free packets that have not yet been forwarded */ /* Lock is acquired by skb_dequeue when removing the skb */ @@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq * cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]); skb_queue_head_init(&nesqp->pau_list); spin_lock_init(&nesqp->pau_lock); - atomic_inc(&pau_qps_created); + atomic_inc_unchecked(&pau_qps_created); nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp); } diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 2b27d1351..8f9d46c32 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -461,7 +461,7 @@ static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev) /** * nes_netdev_start_xmit */ -static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; @@ -1264,36 +1264,36 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, target_stat_values[++index] = mh_detected; target_stat_values[++index] = mh_pauses_sent; target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; - target_stat_values[++index] = atomic_read(&cm_connects); - target_stat_values[++index] = atomic_read(&cm_accepts); - target_stat_values[++index] = atomic_read(&cm_disconnects); - target_stat_values[++index] = atomic_read(&cm_connecteds); - target_stat_values[++index] = atomic_read(&cm_connect_reqs); - target_stat_values[++index] = atomic_read(&cm_rejects); - target_stat_values[++index] = atomic_read(&mod_qp_timouts); - target_stat_values[++index] = atomic_read(&qps_created); - target_stat_values[++index] = atomic_read(&sw_qps_destroyed); - target_stat_values[++index] = atomic_read(&qps_destroyed); - 
target_stat_values[++index] = atomic_read(&cm_closes); + target_stat_values[++index] = atomic_read_unchecked(&cm_connects); + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts); + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects); + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds); + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs); + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects); + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts); + target_stat_values[++index] = atomic_read_unchecked(&qps_created); + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed); + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed); + target_stat_values[++index] = atomic_read_unchecked(&cm_closes); target_stat_values[++index] = cm_packets_sent; target_stat_values[++index] = cm_packets_bounced; target_stat_values[++index] = cm_packets_created; target_stat_values[++index] = cm_packets_received; target_stat_values[++index] = cm_packets_dropped; target_stat_values[++index] = cm_packets_retrans; - target_stat_values[++index] = atomic_read(&cm_listens_created); - target_stat_values[++index] = atomic_read(&cm_listens_destroyed); + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created); + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed); target_stat_values[++index] = cm_backlog_drops; - target_stat_values[++index] = atomic_read(&cm_loopbacks); - target_stat_values[++index] = atomic_read(&cm_nodes_created); - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); - target_stat_values[++index] = atomic_read(&cm_resets_recvd); + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks); + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created); + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed); + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts); + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd); target_stat_values[++index] = nesadapter->free_4kpbl; target_stat_values[++index] = nesadapter->free_256pbl; target_stat_values[++index] = int_mod_timer_init; - target_stat_values[++index] = atomic_read(&pau_qps_created); - target_stat_values[++index] = atomic_read(&pau_qps_destroyed); + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created); + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed); } /** diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index bd6912573..10e85d54b 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -46,9 +46,9 @@ #include -atomic_t mod_qp_timouts; -atomic_t qps_created; -atomic_t sw_qps_destroyed; +atomic_unchecked_t mod_qp_timouts; +atomic_unchecked_t qps_created; +atomic_unchecked_t sw_qps_destroyed; static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); static int nes_dereg_mr(struct ib_mr *ib_mr); @@ -1040,7 +1040,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, if (init_attr->create_flags) return ERR_PTR(-EINVAL); - atomic_inc(&qps_created); + atomic_inc_unchecked(&qps_created); switch (init_attr->qp_type) { case IB_QPT_RC: if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { @@ -1376,7 +1376,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) struct iw_cm_event cm_event; int ret = 0; - 
atomic_inc(&sw_qps_destroyed); + atomic_inc_unchecked(&sw_qps_destroyed); nesqp->destroyed = 1; /* Blow away the connection if it exists. */ diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index ded271723..a99c0fe0e 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -150,7 +150,7 @@ static struct kparam_string kp_txselect = { .string = txselect_list, .maxlen = MAX_ATTEN_LEN }; -static int setup_txselect(const char *, struct kernel_param *); +static int setup_txselect(const char *, const struct kernel_param *); module_param_call(txselect, setup_txselect, param_get_string, &kp_txselect, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(txselect, @@ -6177,7 +6177,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) } /* handle the txselect parameter changing */ -static int setup_txselect(const char *str, struct kernel_param *kp) +static int setup_txselect(const char *str, const struct kernel_param *kp) { struct qib_devdata *dd; unsigned long val; diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 6abe1c621..f866a3159 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -622,7 +622,7 @@ static void qib_tune_pcie_caps(struct qib_devdata *dd) * PCI error infrastructure, registered via pci */ static pci_ers_result_t -qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +qib_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct qib_devdata *dd = pci_get_drvdata(pdev); pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED; diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 44b210825..88be22a0d 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -219,7 +219,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, spin_lock_init(&qp->grp_lock); spin_lock_init(&qp->state_lock); - atomic_set(&qp->ssn, 0); + atomic_set_unchecked(&qp->ssn, 0); atomic_set(&qp->skb_out, 0); } @@ -526,7 +526,7 @@ static void rxe_qp_reset(struct rxe_qp *qp) } /* cleanup attributes */ - atomic_set(&qp->ssn, 0); + atomic_set_unchecked(&qp->ssn, 0); qp->req.opcode = -1; qp->req.need_retry = 0; qp->req.noack_pkts = 0; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 19841c863..f843af750 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -757,7 +757,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, wqe->dma.cur_sge = 0; wqe->dma.sge_offset = 0; wqe->state = wqe_state_posted; - wqe->ssn = atomic_add_return(1, &qp->ssn); + wqe->ssn = atomic_add_return_unchecked(1, &qp->ssn); return 0; } diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index cac1d52a0..29bb90377 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -262,7 +262,7 @@ struct rxe_qp { struct rxe_comp_info comp; struct rxe_resp_info resp; - atomic_t ssn; + atomic_unchecked_t ssn; atomic_t skb_out; int need_req_skb; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index b58d9dca5..09d2d64b2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1029,7 +1029,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 
spin_unlock_irqrestore(&priv->lock, flags); } -static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_neigh *neigh; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index cdc7df4fd..a2fdfdbc1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev) nla_total_size(2); /* IFLA_IPOIB_UMCAST */ } -static struct rtnl_link_ops ipoib_link_ops __read_mostly = { +static struct rtnl_link_ops ipoib_link_ops = { .kind = "ipoib", .maxtype = IFLA_IPOIB_MAX, .policy = ipoib_policy, diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 0b1f69ed2..0814f50c0 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -80,7 +80,7 @@ module_param(srpt_srq_size, int, 0444); MODULE_PARM_DESC(srpt_srq_size, "Shared receive queue (SRQ) size."); -static int srpt_get_u64_x(char *buffer, struct kernel_param *kp) +static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg); } @@ -196,8 +196,9 @@ static const char *get_ch_state_name(enum rdma_ch_state s) /** * srpt_qp_event() - QP event callback function. */ -static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) +static void srpt_qp_event(struct ib_event *event, void *_ch) { + struct srpt_rdma_ch *ch = _ch; pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n", event->event, ch->cm_id, ch->sess_name, ch->state); @@ -1628,8 +1629,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) } qp_init->qp_context = (void *)ch; - qp_init->event_handler - = (void(*)(struct ib_event *, void*))srpt_qp_event; + qp_init->event_handler = srpt_qp_event; qp_init->send_cq = ch->cq; qp_init->recv_cq = ch->cq; qp_init->srq = sdev->srq; diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index e9ae3d500..96e494006 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -997,7 +997,7 @@ static int evdev_set_mask(struct evdev_client *client, if (!cnt) return 0; - mask = kcalloc(sizeof(unsigned long), BITS_TO_LONGS(cnt), GFP_KERNEL); + mask = kcalloc(BITS_TO_LONGS(cnt), sizeof(unsigned long), GFP_KERNEL); if (!mask) return -ENOMEM; diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 4a2a9e370..b9261a79e 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c @@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys); */ static void gameport_init_port(struct gameport *gameport) { - static atomic_t gameport_no = ATOMIC_INIT(-1); + static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1); __module_get(THIS_MODULE); mutex_init(&gameport->drv_mutex); device_initialize(&gameport->dev); dev_set_name(&gameport->dev, "gameport%lu", - (unsigned long)atomic_inc_return(&gameport_no)); + (unsigned long)atomic_inc_return_unchecked(&gameport_no)); gameport->dev.bus = &gameport_bus; gameport->dev.release = gameport_release_port; if (gameport->parent) diff --git a/drivers/input/input.c b/drivers/input/input.c index d95c34ee5..2a6da5f1f 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1780,7 +1780,7 @@ EXPORT_SYMBOL_GPL(input_class); */ struct input_dev *input_allocate_device(void) { - static 
atomic_t input_no = ATOMIC_INIT(-1); + static atomic_unchecked_t input_no = ATOMIC_INIT(-1); struct input_dev *dev; dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); @@ -1795,7 +1795,7 @@ struct input_dev *input_allocate_device(void) INIT_LIST_HEAD(&dev->node); dev_set_name(&dev->dev, "input%lu", - (unsigned long)atomic_inc_return(&input_no)); + (unsigned long)atomic_inc_return_unchecked(&input_no)); __module_get(THIS_MODULE); } diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c index 4a95b2241..874c182af 100644 --- a/drivers/input/joystick/sidewinder.c +++ b/drivers/input/joystick/sidewinder.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index f47851868..e80d65272 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1855,7 +1855,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id) static int ims_pcu_init_application_mode(struct ims_pcu *pcu) { - static atomic_t device_no = ATOMIC_INIT(-1); + static atomic_unchecked_t device_no = ATOMIC_INIT(-1); const struct ims_pcu_device_info *info; int error; @@ -1886,7 +1886,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu) } /* Device appears to be operable, complete initialization */ - pcu->device_no = atomic_inc_return(&device_no); + pcu->device_no = atomic_inc_return_unchecked(&device_no); /* * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index e0ca6cda3..b5a26815e 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h @@ -126,7 +126,7 @@ struct psmouse_attribute { ssize_t (*set)(struct psmouse *psmouse, void *data, const char *buf, size_t count); bool protect; -}; +} __do_const; #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr) ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr, diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index b604564de..3f14ae4f5 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, spin_unlock_irq(&client->packet_lock); - if (copy_to_user(buffer, data, count)) + if (count > sizeof(data) || copy_to_user(buffer, data, count)) return -EFAULT; return count; diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 1ca7f551e..25626076f 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -512,7 +512,7 @@ static void serio_release_port(struct device *dev) */ static void serio_init_port(struct serio *serio) { - static atomic_t serio_no = ATOMIC_INIT(-1); + static atomic_unchecked_t serio_no = ATOMIC_INIT(-1); __module_get(THIS_MODULE); @@ -523,7 +523,7 @@ static void serio_init_port(struct serio *serio) mutex_init(&serio->drv_mutex); device_initialize(&serio->dev); dev_set_name(&serio->dev, "serio%lu", - (unsigned long)atomic_inc_return(&serio_no)); + (unsigned long)atomic_inc_return_unchecked(&serio_no)); serio->dev.bus = &serio_bus; serio->dev.release = serio_release_port; serio->dev.groups = serio_device_attr_groups; diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index 71ef5d65a..93380a96a 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, 
unsigned char data, static int serio_raw_connect(struct serio *serio, struct serio_driver *drv) { - static atomic_t serio_raw_no = ATOMIC_INIT(-1); + static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1); struct serio_raw *serio_raw; int err; @@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv) } snprintf(serio_raw->name, sizeof(serio_raw->name), - "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no)); + "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no)); kref_init(&serio_raw->kref); INIT_LIST_HEAD(&serio_raw->client_list); init_waitqueue_head(&serio_raw->wait); diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c index 92e2243fb..8fd909285 100644 --- a/drivers/input/touchscreen/htcpen.c +++ b/drivers/input/touchscreen/htcpen.c @@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = { } }; -static struct dmi_system_id htcshift_dmi_table[] __initdata = { +static const struct dmi_system_id htcshift_dmi_table[] __initconst = { { .ident = "Shift", .matches = { diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 11a13b5be..f1e5481e1 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -859,11 +859,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu, static void build_completion_wait(struct iommu_cmd *cmd, u64 address) { + phys_addr_t physaddr; WARN_ON(address & 0x7ULL); memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK; - cmd->data[1] = upper_32_bits(__pa(address)); + +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW + if (object_starts_on_stack((void *)address)) { + void *adjbuf = (void *)address - current->stack + current->lowmem_stack; + physaddr = __pa((u64)adjbuf); + } else +#endif + physaddr = __pa(address); + + cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK; + cmd->data[1] = upper_32_bits(physaddr); cmd->data[2] = 1; CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); } diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index e6f9b2d74..89c505431 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -633,7 +633,7 @@ struct arm_smmu_domain { struct arm_smmu_device *smmu; struct mutex init_mutex; /* Protects smmu pointer */ - struct io_pgtable_ops *pgtbl_ops; + struct io_pgtable *pgtbl; spinlock_t pgtbl_lock; enum arm_smmu_domain_stage stage; @@ -1430,7 +1430,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) struct arm_smmu_device *smmu = smmu_domain->smmu; iommu_put_dma_cookie(domain); - free_io_pgtable_ops(smmu_domain->pgtbl_ops); + free_io_pgtable(smmu_domain->pgtbl); /* Free the CD and ASID, if we allocated them */ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { @@ -1508,7 +1508,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) unsigned long ias, oas; enum io_pgtable_fmt fmt; struct io_pgtable_cfg pgtbl_cfg; - struct io_pgtable_ops *pgtbl_ops; + struct io_pgtable *iop; int (*finalise_stage_fn)(struct arm_smmu_domain *, struct io_pgtable_cfg *); struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -1546,18 +1546,18 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) .iommu_dev = smmu->dev, }; - pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); - if (!pgtbl_ops) + iop = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain); + if (!iop) return -ENOMEM; domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; domain->geometry.aperture_end = (1UL << ias) - 1; 
domain->geometry.force_aperture = true; - smmu_domain->pgtbl_ops = pgtbl_ops; + smmu_domain->pgtbl = iop; ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); if (ret < 0) - free_io_pgtable_ops(pgtbl_ops); + free_io_pgtable(iop); return ret; } @@ -1673,13 +1673,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, int ret; unsigned long flags; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable *iop = smmu_domain->pgtbl; - if (!ops) + if (!iop) return -ENODEV; spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); + ret = iop->ops->map(iop, iova, paddr, size, prot); spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); return ret; } @@ -1690,13 +1690,13 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) size_t ret; unsigned long flags; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable *iop = smmu_domain->pgtbl; - if (!ops) + if (!iop) return 0; spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); + ret = iop->ops->unmap(iop, iova, size); spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); return ret; } @@ -1707,13 +1707,13 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) phys_addr_t ret; unsigned long flags; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable *iop = smmu_domain->pgtbl; - if (!ops) + if (!iop) return 0; spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->iova_to_phys(ops, iova); + ret = iop->ops->iova_to_phys(iop, iova); spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); return ret; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 8f7281444..02ff8940e 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -405,7 +405,7 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; - struct io_pgtable_ops *pgtbl_ops; + struct io_pgtable *pgtbl; spinlock_t pgtbl_lock; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; @@ -811,7 +811,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, { int irq, start, ret = 0; unsigned long ias, oas; - struct io_pgtable_ops *pgtbl_ops; + struct io_pgtable *pgtbl; struct io_pgtable_cfg pgtbl_cfg; enum io_pgtable_fmt fmt; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -933,8 +933,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, }; smmu_domain->smmu = smmu; - pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); - if (!pgtbl_ops) { + pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain); + if (!pgtbl) { ret = -ENOMEM; goto out_clear_smmu; } @@ -963,7 +963,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, mutex_unlock(&smmu_domain->init_mutex); /* Publish page table ops for map/unmap */ - smmu_domain->pgtbl_ops = pgtbl_ops; + smmu_domain->pgtbl = pgtbl; return 0; out_clear_smmu: @@ -996,7 +996,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) devm_free_irq(smmu->dev, irq, domain); } - free_io_pgtable_ops(smmu_domain->pgtbl_ops); + free_io_pgtable(smmu_domain->pgtbl); __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); } @@ -1267,13 +1267,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, int 
ret; unsigned long flags; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable *iop = smmu_domain->pgtbl; - if (!ops) + if (!iop) return -ENODEV; spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); + ret = iop->ops->map(iop, iova, paddr, size, prot); spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); return ret; } @@ -1284,13 +1284,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t ret; unsigned long flags; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable *iop = smmu_domain->pgtbl; - if (!ops) + if (!iop) return 0; spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); + ret = iop->ops->unmap(iop, iova, size); spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); return ret; } @@ -1301,7 +1301,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable *iop = smmu_domain->pgtbl; struct device *dev = smmu->dev; void __iomem *cb_base; u32 tmp; @@ -1322,7 +1322,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, dev_err(dev, "iova to phys timed out on %pad. Falling back to software table walk.\n", &iova); - return ops->iova_to_phys(ops, iova); + return iop->ops->iova_to_phys(iop, iova); } phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR); @@ -1341,9 +1341,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, phys_addr_t ret; unsigned long flags; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable *iop = smmu_domain->pgtbl; - if (!ops) + if (!iop) return 0; spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); @@ -1351,7 +1351,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { ret = arm_smmu_iova_to_phys_hard(domain, iova); } else { - ret = ops->iova_to_phys(ops, iova); + ret = iop->ops->iova_to_phys(iop, iova); } spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); @@ -1862,10 +1862,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) smmu->pgsize_bitmap |= SZ_64K | SZ_512M; + pax_open_kernel(); if (arm_smmu_ops.pgsize_bitmap == -1UL) - arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; + const_cast(arm_smmu_ops.pgsize_bitmap) = smmu->pgsize_bitmap; else - arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; + const_cast(arm_smmu_ops.pgsize_bitmap) |= smmu->pgsize_bitmap; + pax_close_kernel(); dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", smmu->pgsize_bitmap); diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index f50e51c1a..02c024776 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -49,9 +49,6 @@ #define io_pgtable_to_data(x) \ container_of((x), struct arm_v7s_io_pgtable, iop) -#define io_pgtable_ops_to_data(x) \ - io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) - /* * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2, * and 12 bits in a page. 
With some carefully-chosen coefficients we can @@ -426,11 +423,10 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep); } -static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, +static int arm_v7s_map(struct io_pgtable *iop, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { - struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); - struct io_pgtable *iop = &data->iop; + struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop); int ret; /* If no access, then nothing to do */ @@ -593,10 +589,10 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep); } -static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, +static int arm_v7s_unmap(struct io_pgtable *iop, unsigned long iova, size_t size) { - struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop); size_t unmapped; unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd); @@ -606,10 +602,10 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, return unmapped; } -static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, +static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable *iop, unsigned long iova) { - struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop); arm_v7s_iopte *ptep = data->pgd, pte; int lvl = 0; u32 mask; @@ -628,6 +624,12 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, return (pte & mask) | (iova & ~mask); } +static struct io_pgtable_ops arm_v7s_io_pgtable_ops = { + .map = arm_v7s_map, + .unmap = arm_v7s_unmap, + .iova_to_phys = arm_v7s_iova_to_phys, +}; + static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) { @@ -662,11 +664,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, if (!data->l2_tables) goto out_free_data; - data->iop.ops = (struct io_pgtable_ops) { - .map = arm_v7s_map, - .unmap = arm_v7s_unmap, - .iova_to_phys = arm_v7s_iova_to_phys, - }; + data->iop.ops = &arm_v7s_io_pgtable_ops; /* We have to do this early for __arm_v7s_alloc_table to work... */ data->iop.cfg = *cfg; @@ -755,7 +753,7 @@ static struct iommu_gather_ops dummy_tlb_ops = { static int __init arm_v7s_do_selftests(void) { - struct io_pgtable_ops *ops; + struct io_pgtable *pgtbl; struct io_pgtable_cfg cfg = { .tlb = &dummy_tlb_ops, .oas = 32, @@ -770,8 +768,8 @@ static int __init arm_v7s_do_selftests(void) cfg_cookie = &cfg; - ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg); - if (!ops) { + pgtbl = alloc_io_pgtable(ARM_V7S, &cfg, &cfg); + if (!pgtbl) { pr_err("selftest: failed to allocate io pgtable ops\n"); return -EINVAL; } @@ -780,13 +778,13 @@ static int __init arm_v7s_do_selftests(void) * Initial sanity checks. * Empty page tables shouldn't provide any translations. 
*/ - if (ops->iova_to_phys(ops, 42)) + if (pgtbl->ops->iova_to_phys(pgtbl, 42)) return __FAIL(ops); - if (ops->iova_to_phys(ops, SZ_1G + 42)) + if (pgtbl->ops->iova_to_phys(pgtbl, SZ_1G + 42)) return __FAIL(ops); - if (ops->iova_to_phys(ops, SZ_2G + 42)) + if (pgtbl->ops->iova_to_phys(pgtbl, SZ_2G + 42)) return __FAIL(ops); /* @@ -796,18 +794,18 @@ static int __init arm_v7s_do_selftests(void) i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG); while (i != BITS_PER_LONG) { size = 1UL << i; - if (ops->map(ops, iova, iova, size, IOMMU_READ | + if (pgtbl->ops->map(pgtbl, iova, iova, size, IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_CACHE)) return __FAIL(ops); /* Overlapping mappings */ - if (!ops->map(ops, iova, iova + size, size, + if (!pgtbl->ops->map(pgtbl, iova, iova + size, size, IOMMU_READ | IOMMU_NOEXEC)) return __FAIL(ops); - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) + if (pgtbl->ops->iova_to_phys(pgtbl, iova + 42) != (iova + 42)) return __FAIL(ops); iova += SZ_16M; @@ -821,14 +819,14 @@ static int __init arm_v7s_do_selftests(void) size = 1UL << __ffs(cfg.pgsize_bitmap); while (i < loopnr) { iova_start = i * SZ_16M; - if (ops->unmap(ops, iova_start + size, size) != size) + if (pgtbl->ops->unmap(pgtbl, iova_start + size, size) != size) return __FAIL(ops); /* Remap of partial unmap */ - if (ops->map(ops, iova_start + size, size, size, IOMMU_READ)) + if (pgtbl->ops->map(pgtbl, iova_start + size, size, size, IOMMU_READ)) return __FAIL(ops); - if (ops->iova_to_phys(ops, iova_start + size + 42) + if (pgtbl->ops->iova_to_phys(pgtbl, iova_start + size + 42) != (size + 42)) return __FAIL(ops); i++; @@ -840,17 +838,17 @@ static int __init arm_v7s_do_selftests(void) while (i != BITS_PER_LONG) { size = 1UL << i; - if (ops->unmap(ops, iova, size) != size) + if (pgtbl->ops->unmap(pgtbl, iova, size) != size) return __FAIL(ops); - if (ops->iova_to_phys(ops, iova + 42)) + if (pgtbl->ops->iova_to_phys(pgtbl, iova + 42)) return __FAIL(ops); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) + if (pgtbl->ops->map(pgtbl, iova, iova, size, IOMMU_WRITE)) return __FAIL(ops); - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) + if (pgtbl->ops->iova_to_phys(pgtbl, iova + 42) != (iova + 42)) return __FAIL(ops); iova += SZ_16M; @@ -858,7 +856,7 @@ static int __init arm_v7s_do_selftests(void) i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i); } - free_io_pgtable_ops(ops); + free_io_pgtable(pgtbl); selftest_running = false; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index f5c90e136..90a737c4a 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -39,9 +39,6 @@ #define io_pgtable_to_data(x) \ container_of((x), struct arm_lpae_io_pgtable, iop) -#define io_pgtable_ops_to_data(x) \ - io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) - /* * For consistency with the architecture, we always consider * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 @@ -381,10 +378,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, return pte; } -static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, +static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova, phys_addr_t paddr, size_t size, int iommu_prot) { - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); arm_lpae_iopte *ptep = data->pgd; int ret, lvl = ARM_LPAE_START_LVL(data); arm_lpae_iopte prot; @@ -530,11 +527,11 @@ 
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); } -static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, +static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova, size_t size) { size_t unmapped; - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); arm_lpae_iopte *ptep = data->pgd; int lvl = ARM_LPAE_START_LVL(data); @@ -545,10 +542,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, return unmapped; } -static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, +static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop, unsigned long iova) { - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); arm_lpae_iopte pte, *ptep = data->pgd; int lvl = ARM_LPAE_START_LVL(data); @@ -615,6 +612,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) } } +static struct io_pgtable_ops arm_lpae_io_pgtable_ops = { + .map = arm_lpae_map, + .unmap = arm_lpae_unmap, + .iova_to_phys = arm_lpae_iova_to_phys, +}; + static struct arm_lpae_io_pgtable * arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) { @@ -651,11 +654,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); - data->iop.ops = (struct io_pgtable_ops) { - .map = arm_lpae_map, - .unmap = arm_lpae_unmap, - .iova_to_phys = arm_lpae_iova_to_phys, - }; + data->iop.ops = &arm_lpae_io_pgtable_ops; return data; } @@ -916,15 +915,15 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static struct iommu_gather_ops dummy_tlb_ops __initdata = { +static const struct iommu_gather_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, }; -static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) +static void __init arm_lpae_dump_ops(struct io_pgtable *iop) { - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); struct io_pgtable_cfg *cfg = &data->iop.cfg; pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", @@ -934,9 +933,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) data->bits_per_level, data->pgd); } -#define __FAIL(ops, i) ({ \ +#define __FAIL(iop, i) ({ \ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ - arm_lpae_dump_ops(ops); \ + arm_lpae_dump_ops(iop); \ selftest_running = false; \ -EFAULT; \ }) @@ -951,30 +950,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) int i, j; unsigned long iova; size_t size; - struct io_pgtable_ops *ops; + struct io_pgtable *iop; + const struct io_pgtable_ops *ops; selftest_running = true; for (i = 0; i < ARRAY_SIZE(fmts); ++i) { cfg_cookie = cfg; - ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); - if (!ops) { + iop = alloc_io_pgtable(fmts[i], cfg, cfg); + if (!iop) { pr_err("selftest: failed to allocate io pgtable ops\n"); return -ENOMEM; } + ops = iop->ops; /* * Initial sanity checks. * Empty page tables shouldn't provide any translations. 
*/ - if (ops->iova_to_phys(ops, 42)) - return __FAIL(ops, i); + if (ops->iova_to_phys(iop, 42)) + return __FAIL(iop, i); - if (ops->iova_to_phys(ops, SZ_1G + 42)) - return __FAIL(ops, i); + if (ops->iova_to_phys(iop, SZ_1G + 42)) + return __FAIL(iop, i); - if (ops->iova_to_phys(ops, SZ_2G + 42)) - return __FAIL(ops, i); + if (ops->iova_to_phys(iop, SZ_2G + 42)) + return __FAIL(iop, i); /* * Distinct mappings of different granule sizes. @@ -984,19 +985,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) while (j != BITS_PER_LONG) { size = 1UL << j; - if (ops->map(ops, iova, iova, size, IOMMU_READ | + if (ops->map(iop, iova, iova, size, IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_CACHE)) - return __FAIL(ops, i); + return __FAIL(iop, i); /* Overlapping mappings */ - if (!ops->map(ops, iova, iova + size, size, + if (!ops->map(iop, iova, iova + size, size, IOMMU_READ | IOMMU_NOEXEC)) - return __FAIL(ops, i); + return __FAIL(iop, i); - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) - return __FAIL(ops, i); + if (ops->iova_to_phys(iop, iova + 42) != (iova + 42)) + return __FAIL(iop, i); iova += SZ_1G; j++; @@ -1005,15 +1006,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) /* Partial unmap */ size = 1UL << __ffs(cfg->pgsize_bitmap); - if (ops->unmap(ops, SZ_1G + size, size) != size) - return __FAIL(ops, i); + if (ops->unmap(iop, SZ_1G + size, size) != size) + return __FAIL(iop, i); /* Remap of partial unmap */ - if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) - return __FAIL(ops, i); + if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ)) + return __FAIL(iop, i); - if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) - return __FAIL(ops, i); + if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42)) + return __FAIL(iop, i); /* Full unmap */ iova = 0; @@ -1021,25 +1022,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) while (j != BITS_PER_LONG) { size = 1UL << j; - if (ops->unmap(ops, iova, size) != size) - return __FAIL(ops, i); + if (ops->unmap(iop, iova, size) != size) + return __FAIL(iop, i); - if (ops->iova_to_phys(ops, iova + 42)) - return __FAIL(ops, i); + if (ops->iova_to_phys(iop, iova + 42)) + return __FAIL(iop, i); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) - return __FAIL(ops, i); + if (ops->map(iop, iova, iova, size, IOMMU_WRITE)) + return __FAIL(iop, i); - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) - return __FAIL(ops, i); + if (ops->iova_to_phys(iop, iova + 42) != (iova + 42)) + return __FAIL(iop, i); iova += SZ_1G; j++; j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); } - free_io_pgtable_ops(ops); + free_io_pgtable(iop); } selftest_running = false; diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c index 127558d83..bc60b81df 100644 --- a/drivers/iommu/io-pgtable.c +++ b/drivers/iommu/io-pgtable.c @@ -37,7 +37,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { #endif }; -struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, +struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt, struct io_pgtable_cfg *cfg, void *cookie) { @@ -59,21 +59,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, iop->cookie = cookie; iop->cfg = *cfg; - return &iop->ops; + return iop; } /* * It is the IOMMU driver's responsibility to ensure that the page table * is no longer accessible to the walker by this point. 
*/ -void free_io_pgtable_ops(struct io_pgtable_ops *ops) +void free_io_pgtable(struct io_pgtable *iop) { - struct io_pgtable *iop; - - if (!ops) + if (!iop) return; - iop = container_of(ops, struct io_pgtable, ops); io_pgtable_tlb_flush_all(iop); io_pgtable_init_table[iop->fmt]->free(iop); } diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 969d82cc9..1ba9b6ee0 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -109,17 +109,18 @@ struct io_pgtable_cfg { * These functions map directly onto the iommu_ops member functions with * the same names. */ +struct io_pgtable; struct io_pgtable_ops { - int (*map)(struct io_pgtable_ops *ops, unsigned long iova, + int (*map)(struct io_pgtable *iop, unsigned long iova, phys_addr_t paddr, size_t size, int prot); - int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, + int (*unmap)(struct io_pgtable *iop, unsigned long iova, size_t size); - phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, + phys_addr_t (*iova_to_phys)(struct io_pgtable *iop, unsigned long iova); }; /** - * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU. + * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU. * * @fmt: The page table format. * @cfg: The page table configuration. This will be modified to represent @@ -128,9 +129,9 @@ struct io_pgtable_ops { * @cookie: An opaque token provided by the IOMMU driver and passed back to * the callback routines in cfg->tlb. */ -struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, - struct io_pgtable_cfg *cfg, - void *cookie); +struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt, + struct io_pgtable_cfg *cfg, + void *cookie); /** * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller @@ -139,7 +140,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, * * @ops: The ops returned from alloc_io_pgtable_ops. 
*/ -void free_io_pgtable_ops(struct io_pgtable_ops *ops); +void free_io_pgtable(struct io_pgtable *iop); /* @@ -161,11 +162,9 @@ struct io_pgtable { void *cookie; bool tlb_sync_pending; struct io_pgtable_cfg cfg; - struct io_pgtable_ops ops; + const struct io_pgtable_ops *ops; }; -#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) - static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { iop->cfg.tlb->tlb_flush_all(iop->cookie); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9a2f19608..870a3e21b 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -945,7 +945,7 @@ static int iommu_bus_notifier(struct notifier_block *nb, static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) { int err; - struct notifier_block *nb; + notifier_block_no_const *nb; struct iommu_callback_data cb = { .ops = ops, }; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ace331da6..c32653039 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain { struct iommu_domain io_domain; struct io_pgtable_cfg cfg; - struct io_pgtable_ops *iop; + struct io_pgtable *iop; unsigned int context_id; spinlock_t lock; /* Protects mappings */ @@ -319,8 +319,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) */ domain->cfg.iommu_dev = domain->mmu->dev; - domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, - domain); + domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain); if (!domain->iop) return -EINVAL; @@ -478,7 +477,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain) * been detached. */ ipmmu_domain_destroy_context(domain); - free_io_pgtable_ops(domain->iop); + free_io_pgtable(domain->iop); kfree(domain); } @@ -547,7 +546,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, if (!domain) return -ENODEV; - return domain->iop->map(domain->iop, iova, paddr, size, prot); + return domain->iop->ops->map(domain->iop, iova, paddr, size, prot); } static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, @@ -555,7 +554,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - return domain->iop->unmap(domain->iop, iova, size); + return domain->iop->ops->unmap(domain->iop, iova, size); } static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, @@ -565,7 +564,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, /* TODO: Is locking needed ? 
*/ - return domain->iop->iova_to_phys(domain->iop, iova); + return domain->iop->ops->iova_to_phys(domain->iop, iova); } static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 49721b4e1..62874d3f2 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c @@ -153,7 +153,7 @@ int __init irq_remap_enable_fault_handling(void) void panic_if_irq_remap(const char *msg) { if (irq_remapping_enabled) - panic(msg); + panic("%s", msg); } void ir_ack_apic_edge(struct irq_data *data) diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index b09692bb5..aa64d5972 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -53,7 +53,7 @@ struct msm_priv { struct list_head list_attached; struct iommu_domain domain; struct io_pgtable_cfg cfg; - struct io_pgtable_ops *iop; + struct io_pgtable *iop; struct device *dev; spinlock_t pgtlock; /* pagetable lock */ }; @@ -360,13 +360,15 @@ static int msm_iommu_domain_config(struct msm_priv *priv) .iommu_dev = priv->dev, }; - priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv); + priv->iop = alloc_io_pgtable(ARM_V7S, &priv->cfg, priv); if (!priv->iop) { dev_err(priv->dev, "Failed to allocate pgtable\n"); return -EINVAL; } - msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap; + pax_open_kernel(); + const_cast(msm_iommu_ops.pgsize_bitmap) = priv->cfg.pgsize_bitmap; + pax_close_kernel(); return 0; } @@ -429,7 +431,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain, struct msm_iommu_ctx_dev *master; int ret; - free_io_pgtable_ops(priv->iop); + free_io_pgtable(priv->iop); spin_lock_irqsave(&msm_iommu_lock, flags); list_for_each_entry(iommu, &priv->list_attached, dom_node) { @@ -455,7 +457,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, int ret; spin_lock_irqsave(&priv->pgtlock, flags); - ret = priv->iop->map(priv->iop, iova, pa, len, prot); + ret = priv->iop->ops->map(priv->iop, iova, pa, len, prot); spin_unlock_irqrestore(&priv->pgtlock, flags); return ret; @@ -468,7 +470,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, unsigned long flags; spin_lock_irqsave(&priv->pgtlock, flags); - len = priv->iop->unmap(priv->iop, iova, len); + len = priv->iop->ops->unmap(priv->iop, iova, len); spin_unlock_irqrestore(&priv->pgtlock, flags); return len; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index b12c12d74..27bf74559 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -97,7 +97,7 @@ struct mtk_iommu_domain { spinlock_t pgtlock; /* lock for page table */ struct io_pgtable_cfg cfg; - struct io_pgtable_ops *iop; + struct io_pgtable *iop; struct iommu_domain domain; }; @@ -235,7 +235,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) if (data->enable_4GB) dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB; - dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); + dom->iop = alloc_io_pgtable(ARM_V7S, &dom->cfg, data); if (!dom->iop) { dev_err(data->dev, "Failed to alloc io pgtable\n"); return -EINVAL; @@ -328,7 +328,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, int ret; spin_lock_irqsave(&dom->pgtlock, flags); - ret = dom->iop->map(dom->iop, iova, paddr, size, prot); + ret = dom->iop->ops->map(dom->iop, iova, paddr, size, prot); spin_unlock_irqrestore(&dom->pgtlock, flags); return ret; @@ -342,7 +342,7 @@ static size_t mtk_iommu_unmap(struct 
iommu_domain *domain, size_t unmapsz; spin_lock_irqsave(&dom->pgtlock, flags); - unmapsz = dom->iop->unmap(dom->iop, iova, size); + unmapsz = dom->iop->ops->unmap(dom->iop, iova, size); spin_unlock_irqrestore(&dom->pgtlock, flags); return unmapsz; @@ -356,7 +356,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, phys_addr_t pa; spin_lock_irqsave(&dom->pgtlock, flags); - pa = dom->iop->iova_to_phys(dom->iop, iova); + pa = dom->iop->ops->iova_to_phys(dom->iop, iova); spin_unlock_irqrestore(&dom->pgtlock, flags); return pa; @@ -615,7 +615,7 @@ static int mtk_iommu_remove(struct platform_device *pdev) if (iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, NULL); - free_io_pgtable_ops(data->m4u_dom->iop); + free_io_pgtable(data->m4u_dom->iop); clk_disable_unprepare(data->bclk); devm_free_irq(&pdev->dev, data->irq, data); component_master_del(&pdev->dev, &mtk_iommu_com_ops); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index d6c404b35..ea4d33096 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -410,7 +410,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static struct irq_chip gic_chip = { +static irq_chip_no_const gic_chip __read_only = { .irq_mask = gic_mask_irq, .irq_unmask = gic_unmask_irq, .irq_eoi = gic_eoi_irq, diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 1aec12c6d..7c7186310 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -192,7 +192,7 @@ static void mask_and_ack_8259A(struct irq_data *d) printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); /* * Theoretically we do not have to handle this IRQ, * but in Linux this does not cause problems and is diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index 033bccb41..7c93666ac 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -23,7 +23,23 @@ #define JCORE_AIC1_INTPRI_REG 8 -static struct irq_chip jcore_aic; +static void noop(struct irq_data *data) +{ +} + +static struct irq_chip jcore_aic = { + /* + * The irq chip framework requires either mask/unmask or enable/disable + * function pointers to be provided, but the hardware does not have any + * such mechanism; the only interrupt masking is at the cpu level and + * it affects all interrupts. We provide dummy mask/unmask. The hardware + * handles all interrupt control and clears pending status when the cpu + * accepts the interrupt. + */ + .irq_mask = noop, + .irq_unmask = noop, + .name = "AIC", +}; /* * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do @@ -58,10 +74,6 @@ static const struct irq_domain_ops jcore_aic_irqdomain_ops = { .xlate = irq_domain_xlate_onecell, }; -static void noop(struct irq_data *data) -{ -} - static int __init aic_irq_of_init(struct device_node *node, struct device_node *parent) { @@ -88,18 +100,6 @@ static int __init aic_irq_of_init(struct device_node *node, min_irq = JCORE_AIC1_MIN_HWIRQ; } - /* - * The irq chip framework requires either mask/unmask or enable/disable - * function pointers to be provided, but the hardware does not have any - * such mechanism; the only interrupt masking is at the cpu level and - * it affects all interrupts. We provide dummy mask/unmask. The hardware - * handles all interrupt control and clears pending status when the cpu - * accepts the interrupt. 
- */ - jcore_aic.irq_mask = noop; - jcore_aic.irq_unmask = noop; - jcore_aic.name = "AIC"; - domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops, &jcore_aic); if (!domain) diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 013fc9659..36a9a97b4 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -122,7 +122,7 @@ static void icu_unmask_irq(struct irq_data *d) } } -struct irq_chip icu_irq_chip = { +irq_chip_no_const icu_irq_chip __read_only = { .name = "icu_irq", .irq_mask = icu_mask_irq, .irq_mask_ack = icu_mask_ack_irq, diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 713177d97..3849ddd24 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -396,7 +396,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) struct intc_irqpin_iomem *i; struct resource *io[INTC_IRQPIN_REG_NR]; struct resource *irq; - struct irq_chip *irq_chip; + irq_chip_no_const *irq_chip; void (*enable_fn)(struct irq_data *d); void (*disable_fn)(struct irq_data *d); const char *name = dev_name(dev); diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c index 2325fb3c4..fca752974 100644 --- a/drivers/irqchip/irq-ts4800.c +++ b/drivers/irqchip/irq-ts4800.c @@ -93,7 +93,7 @@ static int ts4800_ic_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct ts4800_irq_data *data; - struct irq_chip *irq_chip; + irq_chip_no_const *irq_chip; struct resource *res; int parent_irq; diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 6a2df3297..dc962f1db 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -81,8 +81,8 @@ struct capiminor { struct capi20_appl *ap; u32 ncci; - atomic_t datahandle; - atomic_t msgid; + atomic_unchecked_t datahandle; + atomic_unchecked_t msgid; struct tty_port port; int ttyinstop; @@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb) capimsg_setu16(s, 2, mp->ap->applid); capimsg_setu8 (s, 4, CAPI_DATA_B3); capimsg_setu8 (s, 5, CAPI_RESP); - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid)); + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid)); capimsg_setu32(s, 8, mp->ncci); capimsg_setu16(s, 12, datahandle); } @@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp) mp->outbytes -= len; spin_unlock_bh(&mp->outlock); - datahandle = atomic_inc_return(&mp->datahandle); + datahandle = atomic_inc_return_unchecked(&mp->datahandle); skb_push(skb, CAPI_DATA_B3_REQ_LEN); memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN); capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN); capimsg_setu16(skb->data, 2, mp->ap->applid); capimsg_setu8 (skb->data, 4, CAPI_DATA_B3); capimsg_setu8 (skb->data, 5, CAPI_REQ); - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid)); + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid)); capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */ capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */ capimsg_setu16(skb->data, 16, len); /* Data length */ diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index aecec6d32..11e13c561 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf) static const struct gigaset_ops gigops = { - gigaset_write_cmd, - gigaset_write_room, - gigaset_chars_in_buffer, - 
gigaset_brkchars, - gigaset_init_bchannel, - gigaset_close_bchannel, - gigaset_initbcshw, - gigaset_freebcshw, - gigaset_reinitbcshw, - gigaset_initcshw, - gigaset_freecshw, - gigaset_set_modem_ctrl, - gigaset_baud_rate, - gigaset_set_line_ctrl, - gigaset_isoc_send_skb, - gigaset_isoc_input, + .write_cmd = gigaset_write_cmd, + .write_room = gigaset_write_room, + .chars_in_buffer = gigaset_chars_in_buffer, + .brkchars = gigaset_brkchars, + .init_bchannel = gigaset_init_bchannel, + .close_bchannel = gigaset_close_bchannel, + .initbcshw = gigaset_initbcshw, + .freebcshw = gigaset_freebcshw, + .reinitbcshw = gigaset_reinitbcshw, + .initcshw = gigaset_initcshw, + .freecshw = gigaset_freecshw, + .set_modem_ctrl = gigaset_set_modem_ctrl, + .baud_rate = gigaset_baud_rate, + .set_line_ctrl = gigaset_set_line_ctrl, + .send_skb = gigaset_isoc_send_skb, + .handle_input = gigaset_isoc_input, }; /* bas_gigaset_init diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index 600c79b03..3752bab3a 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c @@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp) } tty->driver_data = cs; - ++cs->port.count; + atomic_inc(&cs->port.count); - if (cs->port.count == 1) { + if (atomic_read(&cs->port.count) == 1) { tty_port_tty_set(&cs->port, tty); cs->port.low_latency = 1; } @@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp) if (!cs->connected) gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ - else if (!cs->port.count) + else if (!atomic_read(&cs->port.count)) dev_warn(cs->dev, "%s: device not opened\n", __func__); - else if (!--cs->port.count) + else if (!atomic_dec_return(&cs->port.count)) tty_port_tty_set(&cs->port, NULL); mutex_unlock(&cs->mutex); diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index b90776ef5..ab0b63a4d 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -445,22 +445,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) } static const struct gigaset_ops ops = { - gigaset_write_cmd, - gigaset_write_room, - gigaset_chars_in_buffer, - gigaset_brkchars, - gigaset_init_bchannel, - gigaset_close_bchannel, - gigaset_initbcshw, - gigaset_freebcshw, - gigaset_reinitbcshw, - gigaset_initcshw, - gigaset_freecshw, - gigaset_set_modem_ctrl, - gigaset_baud_rate, - gigaset_set_line_ctrl, - gigaset_m10x_send_skb, /* asyncdata.c */ - gigaset_m10x_input, /* asyncdata.c */ + .write_cmd = gigaset_write_cmd, + .write_room = gigaset_write_room, + .chars_in_buffer = gigaset_chars_in_buffer, + .brkchars = gigaset_brkchars, + .init_bchannel = gigaset_init_bchannel, + .close_bchannel = gigaset_close_bchannel, + .initbcshw = gigaset_initbcshw, + .freebcshw = gigaset_freebcshw, + .reinitbcshw = gigaset_reinitbcshw, + .initcshw = gigaset_initcshw, + .freecshw = gigaset_freecshw, + .set_modem_ctrl = gigaset_set_modem_ctrl, + .baud_rate = gigaset_baud_rate, + .set_line_ctrl = gigaset_set_line_ctrl, + .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */ + .handle_input = gigaset_m10x_input, /* asyncdata.c */ }; diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 5f306e2ee..ff14829c2 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf); 
memcpy(cs->hw.usb->bchars, buf, 6); return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41, - 0, 0, &buf, 6, 2000); + 0, 0, cs->hw.usb->bchars, 6, 2000); } static void gigaset_freebcshw(struct bc_state *bcs) @@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf) } static const struct gigaset_ops ops = { - gigaset_write_cmd, - gigaset_write_room, - gigaset_chars_in_buffer, - gigaset_brkchars, - gigaset_init_bchannel, - gigaset_close_bchannel, - gigaset_initbcshw, - gigaset_freebcshw, - gigaset_reinitbcshw, - gigaset_initcshw, - gigaset_freecshw, - gigaset_set_modem_ctrl, - gigaset_baud_rate, - gigaset_set_line_ctrl, - gigaset_m10x_send_skb, - gigaset_m10x_input, + .write_cmd = gigaset_write_cmd, + .write_room = gigaset_write_room, + .chars_in_buffer = gigaset_chars_in_buffer, + .brkchars = gigaset_brkchars, + .init_bchannel = gigaset_init_bchannel, + .close_bchannel = gigaset_close_bchannel, + .initbcshw = gigaset_initbcshw, + .freebcshw = gigaset_freebcshw, + .reinitbcshw = gigaset_reinitbcshw, + .initcshw = gigaset_initcshw, + .freecshw = gigaset_freecshw, + .set_modem_ctrl = gigaset_set_modem_ctrl, + .baud_rate = gigaset_baud_rate, + .set_line_ctrl = gigaset_set_line_ctrl, + .send_skb = gigaset_m10x_send_skb, + .handle_input = gigaset_m10x_input, }; /* diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c index 4d9b19554..455075c60 100644 --- a/drivers/isdn/hardware/avm/b1.c +++ b/drivers/isdn/hardware/avm/b1.c @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file) } if (left) { if (t4file->user) { - if (copy_from_user(buf, dp, left)) + if (left > sizeof buf || copy_from_user(buf, dp, left)) return -EFAULT; } else { memcpy(buf, dp, left); @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config) } if (left) { if (config->user) { - if (copy_from_user(buf, dp, left)) + if (left > sizeof buf || copy_from_user(buf, dp, left)) return -EFAULT; } else { memcpy(buf, dp, left); diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c index 7a0bdbdd8..0a7b7db24 100644 --- a/drivers/isdn/hardware/eicon/capifunc.c +++ b/drivers/isdn/hardware/eicon/capifunc.c @@ -57,7 +57,7 @@ static u16 diva_send_message(struct capi_ctr *, diva_os_message_buffer_s *); extern void diva_os_set_controller_struct(struct capi_ctr *); -extern void DIVA_DIDD_Read(DESCRIPTOR *, int); +extern void DIVA_DIDD_Read(void *, int); /* * debug @@ -1032,7 +1032,6 @@ static void didd_callback(void *context, DESCRIPTOR *adapter, int removal) stop_dbg(); } else { memcpy(&MAdapter, adapter, sizeof(MAdapter)); - dprintf = (DIVA_DI_PRINTF) MAdapter.request; DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT); } } else if ((adapter->type > 0) && (adapter->type < 16)) { /* IDI Adapter */ @@ -1060,7 +1059,6 @@ static int divacapi_connect_didd(void) for (x = 0; x < MAX_DESCRIPTORS; x++) { if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */ memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter)); - dprintf = (DIVA_DI_PRINTF) MAdapter.request; DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT); break; } @@ -1072,7 +1070,7 @@ static int divacapi_connect_didd(void) req.didd_notify.e.Req = 0; req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY; - req.didd_notify.info.callback = (void *)didd_callback; + req.didd_notify.info.callback = didd_callback; req.didd_notify.info.context = NULL; DAdapter.request((ENTITY *)&req); if (req.didd_notify.e.Rc != 0xff) { diff --git 
a/drivers/isdn/hardware/eicon/dadapter.c b/drivers/isdn/hardware/eicon/dadapter.c index 514209994..642b7dea4 100644 --- a/drivers/isdn/hardware/eicon/dadapter.c +++ b/drivers/isdn/hardware/eicon/dadapter.c @@ -63,10 +63,14 @@ static void no_printf(unsigned char *format, ...) ------------------------------------------------------------------------- */ #include "debuglib.c" +static void IDI_CALL_LINK_T no_request(ENTITY IDI_CALL_ENTITY_T *i) +{ +} + static DESCRIPTOR MAdapter = {IDI_DIMAINT, /* Adapter Type */ 0x00, /* Channels */ 0x0000, /* Features */ - (IDI_CALL)no_printf}; + no_request}; /* -------------------------------------------------------------------------- DAdapter. Only IDI clients with buffer, that is huge enough to get all descriptors will receive information about DAdapter @@ -100,6 +104,11 @@ void diva_didd_load_time_init(void) { void diva_didd_load_time_finit(void) { diva_os_destroy_spin_lock(&didd_spin, "didd"); } + +static void diva_didd_no_request(ENTITY *e) +{ +} + /* -------------------------------------------------------------------------- Called in order to register new adapter in adapter array return adapter handle (> 0) on success @@ -111,13 +120,12 @@ static int diva_didd_add_descriptor(DESCRIPTOR *d) { if (d->type == IDI_DIMAINT) { if (d->request) { MAdapter.request = d->request; - dprintf = (DIVA_DI_PRINTF)d->request; diva_notify_adapter_change(&MAdapter, 0); /* Inserted */ DBG_TRC(("DIMAINT registered, dprintf=%08x", d->request)) } else { DBG_TRC(("DIMAINT removed")) diva_notify_adapter_change(&MAdapter, 1); /* About to remove */ - MAdapter.request = (IDI_CALL)no_printf; + MAdapter.request = diva_didd_no_request; dprintf = no_printf; } return (NEW_MAX_DESCRIPTORS); @@ -149,7 +157,7 @@ static int diva_didd_remove_descriptor(IDI_CALL request) { DBG_TRC(("DIMAINT removed")) dprintf = no_printf; diva_notify_adapter_change(&MAdapter, 1); /* About to remove */ - MAdapter.request = (IDI_CALL)no_printf; + MAdapter.request = diva_didd_no_request; return (0); } for (i = 0; (Adapters && (i < NEW_MAX_DESCRIPTORS)); i++) { @@ -222,7 +230,7 @@ static void IDI_CALL_LINK_T diva_dadapter_request( \ case IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY: { diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info; pinfo->handle = diva_register_adapter_callback( \ - (didd_adapter_change_callback_t)pinfo->callback, + pinfo->callback, (void IDI_CALL_ENTITY_T *)pinfo->context); e->Rc = 0xff; } break; diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c index b0b23ed8b..e3d4e18b9 100644 --- a/drivers/isdn/hardware/eicon/diddfunc.c +++ b/drivers/isdn/hardware/eicon/diddfunc.c @@ -28,12 +28,12 @@ static DESCRIPTOR _DAdapter; /* * didd callback function */ -static void *didd_callback(void *context, DESCRIPTOR *adapter, +static void didd_callback(void *context, DESCRIPTOR *adapter, int removal) { if (adapter->type == IDI_DADAPTER) { DBG_ERR(("Notification about IDI_DADAPTER change ! 
Oops.")) - return (NULL); + return; } else if (adapter->type == IDI_DIMAINT) { if (removal) { DbgDeregister(); @@ -41,7 +41,6 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, DbgRegister("DIDD", DRIVERRELEASE_DIDD, DBG_DEFAULT); } } - return (NULL); } /* @@ -63,7 +62,7 @@ static int __init connect_didd(void) req.didd_notify.e.Req = 0; req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY; - req.didd_notify.info.callback = (void *)didd_callback; + req.didd_notify.info.callback = didd_callback; req.didd_notify.info.context = NULL; _DAdapter.request((ENTITY *)&req); if (req.didd_notify.e.Rc != 0xff) diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c index 4be5f8814..1dbd479a7 100644 --- a/drivers/isdn/hardware/eicon/divasfunc.c +++ b/drivers/isdn/hardware/eicon/divasfunc.c @@ -130,12 +130,12 @@ static void stop_dbg(void) /* * didd callback function */ -static void *didd_callback(void *context, DESCRIPTOR *adapter, +static void didd_callback(void *context, DESCRIPTOR *adapter, int removal) { if (adapter->type == IDI_DADAPTER) { DBG_ERR(("Notification about IDI_DADAPTER change ! Oops.")); - return (NULL); + return; } if (adapter->type == IDI_DIMAINT) { @@ -143,11 +143,9 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, stop_dbg(); } else { memcpy(&MAdapter, adapter, sizeof(MAdapter)); - dprintf = (DIVA_DI_PRINTF) MAdapter.request; start_dbg(); } } - return (NULL); } /* @@ -169,7 +167,7 @@ static int __init connect_didd(void) req.didd_notify.e.Req = 0; req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY; - req.didd_notify.info.callback = (void *)didd_callback; + req.didd_notify.info.callback = didd_callback; req.didd_notify.info.context = NULL; DAdapter.request((ENTITY *)&req); if (req.didd_notify.e.Rc != 0xff) { @@ -179,7 +177,6 @@ static int __init connect_didd(void) notify_handle = req.didd_notify.info.handle; } else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */ memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter)); - dprintf = (DIVA_DI_PRINTF) MAdapter.request; start_dbg(); } } diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h index dd6b53a2c..42661f6ab 100644 --- a/drivers/isdn/hardware/eicon/divasync.h +++ b/drivers/isdn/hardware/eicon/divasync.h @@ -138,7 +138,7 @@ typedef struct _diva_xdi_dma_descriptor_operation { #define IDI_SYNC_REQ_DIDD_GET_CFG_LIB_IFC 0x10 typedef struct _diva_didd_adapter_notify { dword handle; /* Notification handle */ - void *callback; + didd_adapter_change_callback_t callback; void *context; } diva_didd_adapter_notify_t; typedef struct _diva_didd_add_adapter { diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c index fef6586fe..22353ff87 100644 --- a/drivers/isdn/hardware/eicon/idifunc.c +++ b/drivers/isdn/hardware/eicon/idifunc.c @@ -154,18 +154,17 @@ static void __exit remove_all_idi_proc(void) /* * DIDD notify callback */ -static void *didd_callback(void *context, DESCRIPTOR *adapter, +static void didd_callback(void *context, DESCRIPTOR *adapter, int removal) { if (adapter->type == IDI_DADAPTER) { DBG_ERR(("Notification about IDI_DADAPTER change ! 
Oops.")); - return (NULL); + return; } else if (adapter->type == IDI_DIMAINT) { if (removal) { stop_dbg(); } else { memcpy(&MAdapter, adapter, sizeof(MAdapter)); - dprintf = (DIVA_DI_PRINTF) MAdapter.request; DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT); } } else if ((adapter->type > 0) && (adapter->type < 16)) { /* IDI Adapter */ @@ -175,7 +174,6 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, um_new_card(adapter); } } - return (NULL); } /* @@ -197,7 +195,7 @@ static int __init connect_didd(void) req.didd_notify.e.Req = 0; req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY; - req.didd_notify.info.callback = (void *)didd_callback; + req.didd_notify.info.callback = didd_callback; req.didd_notify.info.context = NULL; DAdapter.request((ENTITY *)&req); if (req.didd_notify.e.Rc != 0xff) { @@ -207,7 +205,6 @@ static int __init connect_didd(void) notify_handle = req.didd_notify.info.handle; } else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */ memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter)); - dprintf = (DIVA_DI_PRINTF) MAdapter.request; DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT); } else if ((DIDD_Table[x].type > 0) && (DIDD_Table[x].type < 16)) { /* IDI Adapter found */ diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c index 1cd9affb6..3775d52d9 100644 --- a/drivers/isdn/hardware/eicon/mntfunc.c +++ b/drivers/isdn/hardware/eicon/mntfunc.c @@ -26,8 +26,13 @@ extern void DIVA_DIDD_Read(void *, int); static dword notify_handle; static DESCRIPTOR DAdapter; static DESCRIPTOR MAdapter; + +static void didd_nothing(ENTITY IDI_CALL_ENTITY_T *e) +{ + diva_maint_prtComp((char *)e); +} static DESCRIPTOR MaintDescriptor = -{ IDI_DIMAINT, 0, 0, (IDI_CALL) diva_maint_prtComp }; +{ IDI_DIMAINT, 0, 0, didd_nothing }; extern int diva_os_copy_to_user(void *os_handle, void __user *dst, const void *src, int length); @@ -44,7 +49,7 @@ static void no_printf(unsigned char *x, ...) 
/* * DIDD callback function */ -static void *didd_callback(void *context, DESCRIPTOR *adapter, +static void didd_callback(void *context, DESCRIPTOR *adapter, int removal) { if (adapter->type == IDI_DADAPTER) { @@ -56,7 +61,6 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, dprintf = no_printf; } else { memcpy(&MAdapter, adapter, sizeof(MAdapter)); - dprintf = (DIVA_DI_PRINTF) MAdapter.request; DbgRegister("MAINT", DRIVERRELEASE_MNT, DBG_DEFAULT); } } else if ((adapter->type > 0) && (adapter->type < 16)) { @@ -66,7 +70,6 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, diva_mnt_add_xdi_adapter(adapter); } } - return (NULL); } /* @@ -88,7 +91,7 @@ static int __init connect_didd(void) req.didd_notify.e.Req = 0; req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY; - req.didd_notify.info.callback = (void *)didd_callback; + req.didd_notify.info.callback = didd_callback; req.didd_notify.info.context = NULL; DAdapter.request((ENTITY *)&req); if (req.didd_notify.e.Rc != 0xff) diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index e3fa1cd64..a57b04f74 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -156,7 +156,7 @@ _set_debug(struct fritzcard *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct fritzcard *card; diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 480c2d779..89f221984 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -2856,8 +2856,9 @@ hfcmulti_interrupt(int intno, void *dev_id) */ static void -hfcmulti_dbusy_timer(struct hfc_multi *hc) +hfcmulti_dbusy_timer(unsigned long _hc) { + //struct hfc_multi *hc = (struct hfc_multi *)_hc; } @@ -3878,7 +3879,7 @@ hfcmulti_initmode(struct dchannel *dch) if (hc->dnum[pt]) { mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol, -1, 0, -1, 0); - dch->timer.function = (void *) hfcmulti_dbusy_timer; + dch->timer.function = hfcmulti_dbusy_timer; dch->timer.data = (long) dch; init_timer(&dch->timer); } @@ -3986,7 +3987,7 @@ hfcmulti_initmode(struct dchannel *dch) hc->chan[i].slot_rx = -1; hc->chan[i].conf = -1; mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0); - dch->timer.function = (void *) hfcmulti_dbusy_timer; + dch->timer.function = hfcmulti_dbusy_timer; dch->timer.data = (long) dch; init_timer(&dch->timer); hc->chan[i - 2].slot_tx = -1; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index ff48da61c..497fb7b73 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -301,8 +301,9 @@ reset_hfcpci(struct hfc_pci *hc) * Timer function called when kernel timer expires */ static void -hfcpci_Timer(struct hfc_pci *hc) +hfcpci_Timer(unsigned long _hc) { + struct hfc_pci *hc = (struct hfc_pci *)_hc; hc->hw.timer.expires = jiffies + 75; /* WD RESET */ /* @@ -1241,8 +1242,9 @@ hfcpci_int(int intno, void *dev_id) * timer callback for D-chan busy resolution. 
Currently no function */ static void -hfcpci_dbusy_timer(struct hfc_pci *hc) +hfcpci_dbusy_timer(unsigned long _hc) { +// struct hfc_pci *hc = (struct hfc_pci *)_hc; } /* @@ -1717,7 +1719,7 @@ static void inithfcpci(struct hfc_pci *hc) { printk(KERN_DEBUG "inithfcpci: entered\n"); - hc->dch.timer.function = (void *) hfcpci_dbusy_timer; + hc->dch.timer.function = hfcpci_dbusy_timer; hc->dch.timer.data = (long) &hc->dch; init_timer(&hc->dch.timer); hc->chanlimit = 2; @@ -2044,7 +2046,7 @@ setup_hw(struct hfc_pci *hc) Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); /* At this point the needed PCI config is done */ /* fifos are still not enabled */ - hc->hw.timer.function = (void *) hfcpci_Timer; + hc->hw.timer.function = hfcpci_Timer; hc->hw.timer.data = (long) hc; init_timer(&hc->hw.timer); /* default PCM master */ @@ -2293,9 +2295,9 @@ _hfcpci_softirq(struct device *dev, void *arg) } static void -hfcpci_softirq(void *arg) +hfcpci_softirq(unsigned long arg) { - WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg, + WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, (void *)arg, _hfcpci_softirq) != 0); /* if next event would be in the past ... */ @@ -2330,7 +2332,7 @@ HFC_init(void) if (poll != HFCPCI_BTRANS_THRESHOLD) { printk(KERN_INFO "%s: Using alternative poll value of %d\n", __func__, poll); - hfc_tl.function = (void *)hfcpci_softirq; + hfc_tl.function = hfcpci_softirq; hfc_tl.data = 0; init_timer(&hfc_tl); hfc_tl.expires = jiffies + tics; diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index d5bdbaf93..a7cdc61ea 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -244,7 +244,7 @@ _set_debug(struct inf_hw *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct inf_hw *card; @@ -586,9 +586,10 @@ reset_inf(struct inf_hw *hw) } static int -inf_ctrl(struct inf_hw *hw, u32 cmd, u_long arg) +inf_ctrl(struct ipac_hw *_hw, u32 cmd, u_long arg) { int ret = 0; + struct inf_hw *hw = container_of(_hw, struct inf_hw, ipac); switch (cmd) { case HW_RESET_REQ: @@ -915,7 +916,7 @@ setup_instance(struct inf_hw *card) spin_lock_init(&card->lock); card->ipac.isac.hwlock = &card->lock; card->ipac.hwlock = &card->lock; - card->ipac.ctrl = (void *)&inf_ctrl; + card->ipac.ctrl = &inf_ctrl; err = setup_io(card); if (err) diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index 8d338ba36..f0cb4af2d 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -727,8 +727,9 @@ isac_release(struct isac_hw *isac) } static void -dbusy_timer_handler(struct isac_hw *isac) +dbusy_timer_handler(unsigned long _isac) { + struct isac_hw *isac = (struct isac_hw *)_isac; int rbch, star; u_long flags; @@ -796,7 +797,7 @@ isac_init(struct isac_hw *isac) } isac->mon_tx = NULL; isac->mon_rx = NULL; - isac->dch.timer.function = (void *) dbusy_timer_handler; + isac->dch.timer.function = dbusy_timer_handler; isac->dch.timer.data = (long)isac; init_timer(&isac->dch.timer); isac->mocr = 0xaa; diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index afde4edef..e9fcae456 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -111,7 +111,7 @@ _set_debug(struct tiger_hw *card) } static int -set_debug(const char *val, struct kernel_param *kp) 
+set_debug(const char *val, const struct kernel_param *kp) { int ret; struct tiger_hw *card; diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c index 5da75e05f..85c95d42d 100644 --- a/drivers/isdn/hardware/mISDN/speedfax.c +++ b/drivers/isdn/hardware/mISDN/speedfax.c @@ -94,7 +94,7 @@ _set_debug(struct sfax_hw *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct sfax_hw *card; @@ -186,9 +186,10 @@ reset_speedfax(struct sfax_hw *sf) } static int -sfax_ctrl(struct sfax_hw *sf, u32 cmd, u_long arg) +sfax_ctrl(void *_sf, u32 cmd, u_long arg) { int ret = 0; + struct sfax_hw *sf = (struct sfax_hw *)_sf; switch (cmd) { case HW_RESET_REQ: @@ -386,7 +387,7 @@ setup_instance(struct sfax_hw *card) spin_lock_init(&card->lock); card->isac.hwlock = &card->lock; card->isar.hwlock = &card->lock; - card->isar.ctrl = (void *)&sfax_ctrl; + card->isar.ctrl = &sfax_ctrl; card->isac.name = card->name; card->isar.name = card->name; card->isar.owner = THIS_MODULE; diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c index 3b067ea65..5806dd37e 100644 --- a/drivers/isdn/hardware/mISDN/w6692.c +++ b/drivers/isdn/hardware/mISDN/w6692.c @@ -101,7 +101,7 @@ _set_debug(struct w6692_hw *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct w6692_hw *card; @@ -819,8 +819,9 @@ w6692_irq(int intno, void *dev_id) } static void -dbusy_timer_handler(struct dchannel *dch) +dbusy_timer_handler(unsigned long _dch) { + struct dchannel *dch = (struct dchannel *)_dch; struct w6692_hw *card = dch->hw; int rbch, star; u_long flags; @@ -852,7 +853,7 @@ static void initW6692(struct w6692_hw *card) { u8 val; - card->dch.timer.function = (void *)dbusy_timer_handler; + card->dch.timer.function = dbusy_timer_handler; card->dch.timer.data = (u_long)&card->dch; init_timer(&card->dch.timer); w6692_mode(&card->bc[0], ISDN_P_NONE); diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c index 36817e0a0..b02bb988a 100644 --- a/drivers/isdn/hisax/amd7930_fn.c +++ b/drivers/isdn/hisax/amd7930_fn.c @@ -685,8 +685,9 @@ DC_Close_Amd7930(struct IsdnCardState *cs) { static void -dbusy_timer_handler(struct IsdnCardState *cs) +dbusy_timer_handler(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; u_long flags; struct PStack *stptr; WORD dtcr, der; @@ -789,7 +790,7 @@ void Amd7930_init(struct IsdnCardState *cs) void setup_Amd7930(struct IsdnCardState *cs) { INIT_WORK(&cs->tqueue, Amd7930_bh); - cs->dbusytimer.function = (void *) dbusy_timer_handler; + cs->dbusytimer.function = dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); } diff --git a/drivers/isdn/hisax/arcofi.c b/drivers/isdn/hisax/arcofi.c index 29ec2dfbd..9c7123c18 100644 --- a/drivers/isdn/hisax/arcofi.c +++ b/drivers/isdn/hisax/arcofi.c @@ -112,7 +112,8 @@ arcofi_fsm(struct IsdnCardState *cs, int event, void *data) { } static void -arcofi_timer(struct IsdnCardState *cs) { +arcofi_timer(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; arcofi_fsm(cs, ARCOFI_TIMEOUT, NULL); } @@ -125,7 +126,7 @@ clear_arcofi(struct IsdnCardState *cs) { void init_arcofi(struct IsdnCardState *cs) { - cs->dc.isac.arcofitimer.function = (void *) arcofi_timer; + cs->dc.isac.arcofitimer.function = arcofi_timer; cs->dc.isac.arcofitimer.data = 
(long) cs; init_timer(&cs->dc.isac.arcofitimer); init_waitqueue_head(&cs->dc.isac.arcofi_wait); diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index bf04d2a3c..a7d53c9c5 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -659,7 +659,7 @@ int jiftime(char *s, long mark) static u_char tmpbuf[HISAX_STATUS_BUFSIZE]; -void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, +void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, va_list args) { /* if head == NULL the fmt contains the full info */ @@ -729,7 +729,7 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, } } -void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...) +void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...) { va_list args; diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c index 4fc90de68..fda68cdf8 100644 --- a/drivers/isdn/hisax/diva.c +++ b/drivers/isdn/hisax/diva.c @@ -796,8 +796,9 @@ reset_diva(struct IsdnCardState *cs) #define DIVA_ASSIGN 1 static void -diva_led_handler(struct IsdnCardState *cs) +diva_led_handler(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; int blink = 0; if ((cs->subtyp == DIVA_IPAC_ISA) || @@ -898,7 +899,7 @@ Diva_card_msg(struct IsdnCardState *cs, int mt, void *arg) (cs->subtyp != DIVA_IPAC_PCI) && (cs->subtyp != DIVA_IPACX_PCI)) { spin_lock_irqsave(&cs->lock, flags); - diva_led_handler(cs); + diva_led_handler((unsigned long)cs); spin_unlock_irqrestore(&cs->lock, flags); } return (0); @@ -976,7 +977,7 @@ static int setup_diva_common(struct IsdnCardState *cs) printk(KERN_INFO "Diva: IPACX Design Id: %x\n", MemReadISAC_IPACX(cs, IPACX_ID) & 0x3F); } else { /* DIVA 2.0 */ - cs->hw.diva.tl.function = (void *) diva_led_handler; + cs->hw.diva.tl.function = diva_led_handler; cs->hw.diva.tl.data = (long) cs; init_timer(&cs->hw.diva.tl); cs->readisac = &ReadISAC; diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c index d8ef64da2..9c502677e 100644 --- a/drivers/isdn/hisax/elsa.c +++ b/drivers/isdn/hisax/elsa.c @@ -606,8 +606,9 @@ check_arcofi(struct IsdnCardState *cs) #endif /* ARCOFI_USE */ static void -elsa_led_handler(struct IsdnCardState *cs) +elsa_led_handler(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; int blink = 0; if (cs->subtyp == ELSA_PCMCIA || cs->subtyp == ELSA_PCMCIA_IPAC) @@ -715,7 +716,7 @@ Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg) init_modem(cs); } #endif - elsa_led_handler(cs); + elsa_led_handler((unsigned long)cs); return (ret); case (MDL_REMOVE | REQUEST): cs->hw.elsa.status &= 0; @@ -767,7 +768,7 @@ Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg) else cs->hw.elsa.status &= ~ELSA_BAD_PWR; } - elsa_led_handler(cs); + elsa_led_handler((unsigned long)cs); return (ret); } @@ -1147,7 +1148,7 @@ static int setup_elsa_common(struct IsdnCard *card) init_arcofi(cs); #endif setup_isac(cs); - cs->hw.elsa.tl.function = (void *) elsa_led_handler; + cs->hw.elsa.tl.function = elsa_led_handler; cs->hw.elsa.tl.data = (long) cs; init_timer(&cs->hw.elsa.tl); /* Teste Timer */ diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c index c7a94713e..5409bd35c 100644 --- a/drivers/isdn/hisax/fsm.c +++ b/drivers/isdn/hisax/fsm.c @@ -85,8 +85,9 @@ FsmChangeState(struct FsmInst *fi, int newstate) } static void -FsmExpireTimer(struct FsmTimer *ft) +FsmExpireTimer(unsigned long _ft) { + struct FsmTimer *ft = (struct FsmTimer 
*)_ft; #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft); @@ -98,7 +99,7 @@ void FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft) { ft->fi = fi; - ft->tl.function = (void *) FsmExpireTimer; + ft->tl.function = FsmExpireTimer; ft->tl.data = (long) ft; #if FSM_TIMER_DEBUG if (ft->fi->debug) diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c index e034ed847..52056d1fb 100644 --- a/drivers/isdn/hisax/hfc4s8s_l1.c +++ b/drivers/isdn/hisax/hfc4s8s_l1.c @@ -299,8 +299,9 @@ Read_hfc16_stable(hfc4s8s_hw *hw, int reg) /* D-channel call from HiSax */ /*****************************/ static void -dch_l2l1(struct hisax_d_if *iface, int pr, void *arg) +dch_l2l1(struct hisax_if *_iface, int pr, void *arg) { + struct hisax_d_if *iface = container_of(_iface, struct hisax_d_if, ifc); struct hfc4s8s_l1 *l1 = iface->ifc.priv; struct sk_buff *skb = (struct sk_buff *) arg; u_long flags; @@ -591,8 +592,9 @@ bch_l2l1(struct hisax_if *ifc, int pr, void *arg) /* layer 1 timer function */ /**************************/ static void -hfc_l1_timer(struct hfc4s8s_l1 *l1) +hfc_l1_timer(unsigned long _l1) { + struct hfc4s8s_l1 *l1 = (struct hfc4s8s_l1 *)_l1; u_long flags; if (!l1->enabled) @@ -1396,16 +1398,16 @@ setup_instance(hfc4s8s_hw *hw) l1p = hw->l1 + i; spin_lock_init(&l1p->lock); l1p->hw = hw; - l1p->l1_timer.function = (void *) hfc_l1_timer; + l1p->l1_timer.function = hfc_l1_timer; l1p->l1_timer.data = (long) (l1p); init_timer(&l1p->l1_timer); l1p->st_num = i; skb_queue_head_init(&l1p->d_tx_queue); l1p->d_if.ifc.priv = hw->l1 + i; - l1p->d_if.ifc.l2l1 = (void *) dch_l2l1; + l1p->d_if.ifc.l2l1 = dch_l2l1; spin_lock_init(&l1p->b_ch[0].lock); - l1p->b_ch[0].b_if.ifc.l2l1 = (void *) bch_l2l1; + l1p->b_ch[0].b_if.ifc.l2l1 = bch_l2l1; l1p->b_ch[0].b_if.ifc.priv = (void *) &l1p->b_ch[0]; l1p->b_ch[0].l1p = hw->l1 + i; l1p->b_ch[0].bchan = 1; @@ -1413,7 +1415,7 @@ setup_instance(hfc4s8s_hw *hw) skb_queue_head_init(&l1p->b_ch[0].tx_queue); spin_lock_init(&l1p->b_ch[1].lock); - l1p->b_ch[1].b_if.ifc.l2l1 = (void *) bch_l2l1; + l1p->b_ch[1].b_if.ifc.l2l1 = bch_l2l1; l1p->b_ch[1].b_if.ifc.priv = (void *) &l1p->b_ch[1]; l1p->b_ch[1].l1p = hw->l1 + i; l1p->b_ch[1].bchan = 2; diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c index a756e5cb6..e4789baea 100644 --- a/drivers/isdn/hisax/hfc_2bds0.c +++ b/drivers/isdn/hisax/hfc_2bds0.c @@ -1014,7 +1014,7 @@ setstack_hfcd(struct PStack *st, struct IsdnCardState *cs) } static void -hfc_dbusy_timer(struct IsdnCardState *cs) +hfc_dbusy_timer(unsigned long _cs) { } @@ -1073,7 +1073,7 @@ set_cs_func(struct IsdnCardState *cs) cs->writeisacfifo = &dummyf; cs->BC_Read_Reg = &ReadReg; cs->BC_Write_Reg = &WriteReg; - cs->dbusytimer.function = (void *) hfc_dbusy_timer; + cs->dbusytimer.function = hfc_dbusy_timer; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); INIT_WORK(&cs->tqueue, hfcd_bh); diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 90449e1e9..9a5394c09 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c @@ -165,8 +165,9 @@ reset_hfcpci(struct IsdnCardState *cs) /* Timer function called when kernel timer expires */ /***************************************************/ static void -hfcpci_Timer(struct IsdnCardState *cs) +hfcpci_Timer(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; cs->hw.hfcpci.timer.expires = jiffies + 75; /* WD RESET */ /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, 
cs->hw.hfcpci.ctmt | 0x80); @@ -1095,8 +1096,9 @@ hfcpci_interrupt(int intno, void *dev_id) /* timer callback for D-chan busy resolution. Currently no function */ /********************************************************************/ static void -hfcpci_dbusy_timer(struct IsdnCardState *cs) +hfcpci_dbusy_timer(unsigned long _cs) { + //struct IsdnCardState *cs = (struct IsdnCardState *)_cs; } /*************************************/ @@ -1582,7 +1584,7 @@ inithfcpci(struct IsdnCardState *cs) cs->bcs[1].BC_SetStack = setstack_2b; cs->bcs[0].BC_Close = close_hfcpci; cs->bcs[1].BC_Close = close_hfcpci; - cs->dbusytimer.function = (void *) hfcpci_dbusy_timer; + cs->dbusytimer.function = hfcpci_dbusy_timer; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); mode_hfcpci(cs->bcs, 0, 0); @@ -1746,7 +1748,7 @@ setup_hfcpci(struct IsdnCard *card) cs->BC_Write_Reg = NULL; cs->irq_func = &hfcpci_interrupt; cs->irq_flags |= IRQF_SHARED; - cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer; + cs->hw.hfcpci.timer.function = hfcpci_Timer; cs->hw.hfcpci.timer.data = (long) cs; init_timer(&cs->hw.hfcpci.timer); cs->cardmsg = &hfcpci_card_msg; diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c index 13b2151c1..d3e0732aa 100644 --- a/drivers/isdn/hisax/hfc_sx.c +++ b/drivers/isdn/hisax/hfc_sx.c @@ -418,8 +418,9 @@ reset_hfcsx(struct IsdnCardState *cs) /* Timer function called when kernel timer expires */ /***************************************************/ static void -hfcsx_Timer(struct IsdnCardState *cs) +hfcsx_Timer(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; cs->hw.hfcsx.timer.expires = jiffies + 75; /* WD RESET */ /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcsx.ctmt | 0x80); @@ -860,8 +861,9 @@ hfcsx_interrupt(int intno, void *dev_id) /* timer callback for D-chan busy resolution. 
Currently no function */ /********************************************************************/ static void -hfcsx_dbusy_timer(struct IsdnCardState *cs) +hfcsx_dbusy_timer(unsigned long _cs) { + //struct IsdnCardState *cs = (struct IsdnCardState *)_cs; } /*************************************/ @@ -1495,7 +1497,7 @@ int setup_hfcsx(struct IsdnCard *card) } else return (0); /* no valid card type */ - cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; + cs->dbusytimer.function = hfcsx_dbusy_timer; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); INIT_WORK(&cs->tqueue, hfcsx_bh); @@ -1507,7 +1509,7 @@ int setup_hfcsx(struct IsdnCard *card) cs->BC_Write_Reg = NULL; cs->irq_func = &hfcsx_interrupt; - cs->hw.hfcsx.timer.function = (void *) hfcsx_Timer; + cs->hw.hfcsx.timer.function = hfcsx_Timer; cs->hw.hfcsx.timer.data = (long) cs; cs->hw.hfcsx.b_fifo_size = 0; /* fifo size still unknown */ cs->hw.hfcsx.cirm = ccd_sp_irqtab[cs->irq & 0xF]; /* RAM not evaluated */ diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index 678bd5224..1c4f12ad7 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -343,8 +343,10 @@ handle_led(hfcusb_data *hfc, int event) /* ISDN l1 timer T3 expires */ static void -l1_timer_expire_t3(hfcusb_data *hfc) +l1_timer_expire_t3(unsigned long _hfc) { + hfcusb_data *hfc = (hfcusb_data *)_hfc; + hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION, NULL); @@ -360,8 +362,10 @@ l1_timer_expire_t3(hfcusb_data *hfc) /* ISDN l1 timer T4 expires */ static void -l1_timer_expire_t4(hfcusb_data *hfc) +l1_timer_expire_t4(unsigned long _hfc) { + hfcusb_data *hfc = (hfcusb_data *)_hfc; + hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION, NULL); @@ -1167,12 +1171,12 @@ hfc_usb_init(hfcusb_data *hfc) /* init the t3 timer */ init_timer(&hfc->t3_timer); hfc->t3_timer.data = (long) hfc; - hfc->t3_timer.function = (void *) l1_timer_expire_t3; + hfc->t3_timer.function = l1_timer_expire_t3; /* init the t4 timer */ init_timer(&hfc->t4_timer); hfc->t4_timer.data = (long) hfc; - hfc->t4_timer.function = (void *) l1_timer_expire_t4; + hfc->t4_timer.function = l1_timer_expire_t4; /* init the background machinery for control requests */ hfc->ctrl_read.bRequestType = 0xc0; diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c index 394da646e..85f5f6313 100644 --- a/drivers/isdn/hisax/hfcscard.c +++ b/drivers/isdn/hisax/hfcscard.c @@ -41,8 +41,10 @@ hfcs_interrupt(int intno, void *dev_id) } static void -hfcs_Timer(struct IsdnCardState *cs) +hfcs_Timer(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; + cs->hw.hfcD.timer.expires = jiffies + 75; /* WD RESET */ /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt | 0x80); @@ -253,7 +255,7 @@ int setup_hfcs(struct IsdnCard *card) outb(0x57, cs->hw.hfcD.addr | 1); } set_cs_func(cs); - cs->hw.hfcD.timer.function = (void *) hfcs_Timer; + cs->hw.hfcD.timer.function = hfcs_Timer; cs->hw.hfcD.timer.data = (long) cs; init_timer(&cs->hw.hfcD.timer); cs->cardmsg = &hfcs_card_msg; diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 6ead6314e..338d0408b 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h @@ -1288,9 +1288,9 @@ int jiftime(char *s, long mark); int HiSax_command(isdn_ctrl *ic); int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb); __printf(3, 4) -void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...); +void HiSax_putstatus(struct 
IsdnCardState *cs, char *head, const char *fmt, ...); __printf(3, 0) -void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args); +void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, va_list args); void HiSax_reportcard(int cardnr, int sel); int QuickHex(char *txt, u_char *p, int cnt); void LogFrame(struct IsdnCardState *cs, u_char *p, int size); diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c index 96d1df050..77a05ee6c 100644 --- a/drivers/isdn/hisax/icc.c +++ b/drivers/isdn/hisax/icc.c @@ -580,8 +580,9 @@ DC_Close_icc(struct IsdnCardState *cs) { } static void -dbusy_timer_handler(struct IsdnCardState *cs) +dbusy_timer_handler(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; struct PStack *stptr; int rbch, star; @@ -676,7 +677,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs) void setup_icc(struct IsdnCardState *cs) { INIT_WORK(&cs->tqueue, icc_bh); - cs->dbusytimer.function = (void *) dbusy_timer_handler; + cs->dbusytimer.function = dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); } diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c index 9cc26b40a..d7fa04459 100644 --- a/drivers/isdn/hisax/ipacx.c +++ b/drivers/isdn/hisax/ipacx.c @@ -35,7 +35,7 @@ static void ph_command(struct IsdnCardState *cs, unsigned int command); static inline void cic_int(struct IsdnCardState *cs); static void dch_l2l1(struct PStack *st, int pr, void *arg); -static void dbusy_timer_handler(struct IsdnCardState *cs); +static void dbusy_timer_handler(unsigned long _cs); static void dch_empty_fifo(struct IsdnCardState *cs, int count); static void dch_fill_fifo(struct IsdnCardState *cs); static inline void dch_int(struct IsdnCardState *cs); @@ -198,8 +198,9 @@ dch_l2l1(struct PStack *st, int pr, void *arg) //---------------------------------------------------------- //---------------------------------------------------------- static void -dbusy_timer_handler(struct IsdnCardState *cs) +dbusy_timer_handler(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; struct PStack *st; int rbchd, stard; @@ -424,7 +425,7 @@ dch_init(struct IsdnCardState *cs) cs->setstack_d = dch_setstack; - cs->dbusytimer.function = (void *) dbusy_timer_handler; + cs->dbusytimer.function = dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c index df7e05ca8..0f7dca1b8 100644 --- a/drivers/isdn/hisax/isac.c +++ b/drivers/isdn/hisax/isac.c @@ -584,8 +584,9 @@ DC_Close_isac(struct IsdnCardState *cs) } static void -dbusy_timer_handler(struct IsdnCardState *cs) +dbusy_timer_handler(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; struct PStack *stptr; int rbch, star; @@ -677,7 +678,7 @@ void clear_pending_isac_ints(struct IsdnCardState *cs) void setup_isac(struct IsdnCardState *cs) { INIT_WORK(&cs->tqueue, isac_bh); - cs->dbusytimer.function = (void *) dbusy_timer_handler; + cs->dbusytimer.function = dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); } diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c index f4956c73a..122d24947 100644 --- a/drivers/isdn/hisax/isar.c +++ b/drivers/isdn/hisax/isar.c @@ -1267,7 +1267,8 @@ isar_int_main(struct IsdnCardState *cs) } static void -ftimer_handler(struct BCState *bcs) { +ftimer_handler(unsigned long _bcs) { + struct BCState *bcs = (struct BCState *)_bcs; if 
(bcs->cs->debug) debugl1(bcs->cs, "ftimer flags %04lx", bcs->Flag); @@ -1902,7 +1903,7 @@ void initisar(struct IsdnCardState *cs) cs->bcs[1].BC_SetStack = setstack_isar; cs->bcs[0].BC_Close = close_isarstate; cs->bcs[1].BC_Close = close_isarstate; - cs->bcs[0].hw.isar.ftimer.function = (void *) ftimer_handler; + cs->bcs[0].hw.isar.ftimer.function = ftimer_handler; cs->bcs[0].hw.isar.ftimer.data = (long) &cs->bcs[0]; init_timer(&cs->bcs[0].hw.isar.ftimer); cs->bcs[1].hw.isar.ftimer.function = (void *) ftimer_handler; diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c index c754706f8..8b1ffd5d6 100644 --- a/drivers/isdn/hisax/isdnl3.c +++ b/drivers/isdn/hisax/isdnl3.c @@ -160,8 +160,9 @@ newl3state(struct l3_process *pc, int state) } static void -L3ExpireTimer(struct L3Timer *t) +L3ExpireTimer(unsigned long _t) { + struct L3Timer *t = (struct L3Timer *)_t; t->pc->st->lli.l4l3(t->pc->st, t->event, t->pc); } @@ -169,7 +170,7 @@ void L3InitTimer(struct l3_process *pc, struct L3Timer *t) { t->pc = pc; - t->tl.function = (void *) L3ExpireTimer; + t->tl.function = L3ExpireTimer; t->tl.data = (long) t; init_timer(&t->tl); } diff --git a/drivers/isdn/hisax/saphir.c b/drivers/isdn/hisax/saphir.c index 6b2d0eccd..4bf5a9e50 100644 --- a/drivers/isdn/hisax/saphir.c +++ b/drivers/isdn/hisax/saphir.c @@ -159,8 +159,9 @@ saphir_interrupt(int intno, void *dev_id) } static void -SaphirWatchDog(struct IsdnCardState *cs) +SaphirWatchDog(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; u_long flags; spin_lock_irqsave(&cs->lock, flags); @@ -268,7 +269,7 @@ int setup_saphir(struct IsdnCard *card) cs->irq, cs->hw.saphir.cfg_reg); setup_isac(cs); - cs->hw.saphir.timer.function = (void *) SaphirWatchDog; + cs->hw.saphir.timer.function = SaphirWatchDog; cs->hw.saphir.timer.data = (long) cs; init_timer(&cs->hw.saphir.timer); cs->hw.saphir.timer.expires = jiffies + 4 * HZ; diff --git a/drivers/isdn/hisax/teleint.c b/drivers/isdn/hisax/teleint.c index bf647545c..e2a370924 100644 --- a/drivers/isdn/hisax/teleint.c +++ b/drivers/isdn/hisax/teleint.c @@ -179,8 +179,9 @@ TeleInt_interrupt(int intno, void *dev_id) } static void -TeleInt_Timer(struct IsdnCardState *cs) +TeleInt_Timer(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; int stat = 0; u_long flags; @@ -278,7 +279,7 @@ int setup_TeleInt(struct IsdnCard *card) cs->bcs[0].hw.hfc.send = NULL; cs->bcs[1].hw.hfc.send = NULL; cs->hw.hfc.fifosize = 7 * 1024 + 512; - cs->hw.hfc.timer.function = (void *) TeleInt_Timer; + cs->hw.hfc.timer.function = TeleInt_Timer; cs->hw.hfc.timer.data = (long) cs; init_timer(&cs->hw.hfc.timer); if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) { diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c index a85895585..908285bb0 100644 --- a/drivers/isdn/hisax/w6692.c +++ b/drivers/isdn/hisax/w6692.c @@ -681,8 +681,9 @@ DC_Close_W6692(struct IsdnCardState *cs) } static void -dbusy_timer_handler(struct IsdnCardState *cs) +dbusy_timer_handler(unsigned long _cs) { + struct IsdnCardState *cs = (struct IsdnCardState *)_cs; struct PStack *stptr; int rbch, star; u_long flags; @@ -901,7 +902,7 @@ static void initW6692(struct IsdnCardState *cs, int part) if (part & 1) { cs->setstack_d = setstack_W6692; cs->DC_Close = DC_Close_W6692; - cs->dbusytimer.function = (void *) dbusy_timer_handler; + cs->dbusytimer.function = dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); resetW6692(cs); diff --git 
a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 9b856e189..fa03c92e8 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) } else return -EINVAL; case IIOCDBGVAR: + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; if (arg) { if (copy_to_user(argp, &dev, sizeof(ulong))) return -EFAULT; diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c index 91d57304d..336523ec0 100644 --- a/drivers/isdn/i4l/isdn_concap.c +++ b/drivers/isdn/i4l/isdn_concap.c @@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap) } struct concap_device_ops isdn_concap_reliable_dl_dops = { - &isdn_concap_dl_data_req, - &isdn_concap_dl_connect_req, - &isdn_concap_dl_disconn_req + .data_req = &isdn_concap_dl_data_req, + .connect_req = &isdn_concap_dl_connect_req, + .disconn_req = &isdn_concap_dl_disconn_req }; /* The following should better go into a dedicated source file such that diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 63eaa0a9f..00a663cca 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -1499,9 +1499,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp) #ifdef ISDN_DEBUG_MODEM_OPEN printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name, - port->count); + atomic_read(&port->count)); #endif - port->count++; + atomic_inc(&port->count); port->tty = tty; /* * Start up serial port @@ -1545,7 +1545,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp) #endif return; } - if ((tty->count == 1) && (port->count != 1)) { + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always @@ -1554,15 +1554,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp) * serial port won't be shutdown. 
*/ printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, " - "info->count is %d\n", port->count); - port->count = 1; + "info->count is %d\n", atomic_read(&port->count)); + atomic_set(&port->count, 1); } - if (--port->count < 0) { + if (atomic_dec_return(&port->count) < 0) { printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n", - info->line, port->count); - port->count = 0; + info->line, atomic_read(&port->count)); + atomic_set(&port->count, 0); } - if (port->count) { + if (atomic_read(&port->count)) { #ifdef ISDN_DEBUG_MODEM_OPEN printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n"); #endif @@ -1617,7 +1617,7 @@ isdn_tty_hangup(struct tty_struct *tty) if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup")) return; isdn_tty_shutdown(info); - port->count = 0; + atomic_set(&port->count, 0); tty_port_set_active(port, 0); port->tty = NULL; wake_up_interruptible(&port->open_wait); @@ -1962,7 +1962,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup) for (i = 0; i < ISDN_MAX_CHANNELS; i++) { modem_info *info = &dev->mdm.info[i]; - if (info->port.count == 0) + if (atomic_read(&info->port.count) == 0) continue; if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */ (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */ diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c index 0c5d8de41..ba60076e0 100644 --- a/drivers/isdn/i4l/isdn_x25iface.c +++ b/drivers/isdn/i4l/isdn_x25iface.c @@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *); static struct concap_proto_ops ix25_pops = { - &isdn_x25iface_proto_new, - &isdn_x25iface_proto_del, - &isdn_x25iface_proto_restart, - &isdn_x25iface_proto_close, - &isdn_x25iface_xmit, - &isdn_x25iface_receive, - &isdn_x25iface_connect_ind, - &isdn_x25iface_disconn_ind + .proto_new = &isdn_x25iface_proto_new, + .proto_del = &isdn_x25iface_proto_del, + .restart = &isdn_x25iface_proto_restart, + .close = &isdn_x25iface_proto_close, + .encap_and_xmit = &isdn_x25iface_xmit, + .data_ind = &isdn_x25iface_receive, + .connect_ind = &isdn_x25iface_connect_ind, + .disconn_ind = &isdn_x25iface_disconn_ind }; /* error message helper function */ diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h index fc1733a08..27bf26161 100644 --- a/drivers/isdn/mISDN/dsp.h +++ b/drivers/isdn/mISDN/dsp.h @@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp); extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id); extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb); extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb); -extern void dsp_cmx_send(void *arg); +extern void dsp_cmx_send(unsigned long arg); extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb); extern int dsp_cmx_del_conf_member(struct dsp *dsp); extern int dsp_cmx_del_conf(struct dsp_conf *conf); @@ -259,7 +259,7 @@ extern u8 *dsp_dtmf_goertzel_decode(struct dsp *dsp, u8 *data, int len, extern int dsp_tone(struct dsp *dsp, int tone); extern void dsp_tone_copy(struct dsp *dsp, u8 *data, int len); -extern void dsp_tone_timeout(void *arg); +extern void dsp_tone_timeout(unsigned long arg); extern void dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len); extern void dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len); diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index 8e3aa0027..723faf82b 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -1625,8 +1625,8 @@ unsigned long 
dsp_spl_jiffies; /* calculate the next time to fire */ static u16 dsp_count; /* last sample count */ static int dsp_count_valid; /* if we have last sample count */ -void -dsp_cmx_send(void *arg) +void __intentional_overflow(-1) +dsp_cmx_send(unsigned long arg) { struct dsp_conf *conf; struct dsp_conf_member *member; diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 0222b1a35..67fb76a93 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -1092,7 +1092,7 @@ dspcreate(struct channel_req *crq) ndsp->pcm_bank_tx = -1; ndsp->hfc_conf = -1; /* current conference number */ /* set tone timer */ - ndsp->tone.tl.function = (void *)dsp_tone_timeout; + ndsp->tone.tl.function = dsp_tone_timeout; ndsp->tone.tl.data = (long) ndsp; init_timer(&ndsp->tone.tl); @@ -1204,7 +1204,7 @@ static int __init dsp_init(void) } /* set sample timer */ - dsp_spl_tl.function = (void *)dsp_cmx_send; + dsp_spl_tl.function = dsp_cmx_send; dsp_spl_tl.data = 0; init_timer(&dsp_spl_tl); dsp_spl_tl.expires = jiffies + dsp_tics; diff --git a/drivers/isdn/mISDN/dsp_tones.c b/drivers/isdn/mISDN/dsp_tones.c index 057e0d6a3..ed229b58f 100644 --- a/drivers/isdn/mISDN/dsp_tones.c +++ b/drivers/isdn/mISDN/dsp_tones.c @@ -457,9 +457,9 @@ dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len) * timer expires * *****************/ void -dsp_tone_timeout(void *arg) +dsp_tone_timeout(unsigned long arg) { - struct dsp *dsp = arg; + struct dsp *dsp = (struct dsp *)arg; struct dsp_tone *tone = &dsp->tone; struct pattern *pat = (struct pattern *)tone->pattern; int index = tone->index; diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c index 26477d48b..4fa38762c 100644 --- a/drivers/isdn/mISDN/fsm.c +++ b/drivers/isdn/mISDN/fsm.c @@ -97,8 +97,9 @@ mISDN_FsmChangeState(struct FsmInst *fi, int newstate) EXPORT_SYMBOL(mISDN_FsmChangeState); static void -FsmExpireTimer(struct FsmTimer *ft) +FsmExpireTimer(unsigned long _ft) { + struct FsmTimer *ft = (struct FsmTimer *)_ft; #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft); @@ -110,7 +111,7 @@ void mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft) { ft->fi = fi; - ft->tl.function = (void *) FsmExpireTimer; + ft->tl.function = FsmExpireTimer; ft->tl.data = (long) ft; #if FSM_TIMER_DEBUG if (ft->fi->debug) diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 67c21876c..fc71e33ba 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -840,7 +840,7 @@ l1oip_send_bh(struct work_struct *work) * timer stuff */ static void -l1oip_keepalive(void *data) +l1oip_keepalive(unsigned long data) { struct l1oip *hc = (struct l1oip *)data; @@ -848,7 +848,7 @@ l1oip_keepalive(void *data) } static void -l1oip_timeout(void *data) +l1oip_timeout(unsigned long data) { struct l1oip *hc = (struct l1oip *)data; struct dchannel *dch = hc->chan[hc->d_idx].dch; @@ -1435,13 +1435,13 @@ init_card(struct l1oip *hc, int pri, int bundle) if (ret) return ret; - hc->keep_tl.function = (void *)l1oip_keepalive; + hc->keep_tl.function = l1oip_keepalive; hc->keep_tl.data = (ulong)hc; init_timer(&hc->keep_tl); hc->keep_tl.expires = jiffies + 2 * HZ; /* two seconds first time */ add_timer(&hc->keep_tl); - hc->timeout_tl.function = (void *)l1oip_timeout; + hc->timeout_tl.function = l1oip_timeout; hc->timeout_tl.data = (ulong)hc; init_timer(&hc->timeout_tl); hc->timeout_on = 0; /* state that we have timer off */ diff --git 
a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c index 0f9ed1ea0..492789f56 100644 --- a/drivers/leds/leds-clevo-mail.c +++ b/drivers/leds/leds-clevo-mail.c @@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id) * detected as working, but in reality it is not) as low as * possible. */ -static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = { +static const struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = { { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo D410J", diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 732eb86bc..a9db8674c 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection"); * detected as working, but in reality it is not) as low as * possible. */ -static struct dmi_system_id nas_led_whitelist[] __initdata = { +static const struct dmi_system_id nas_led_whitelist[] __initconst = { { .callback = ss4200_led_dmi_callback, .ident = "Intel SS4200-E", diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 9e385b38d..70778823b 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -87,7 +87,7 @@ static __init int map_switcher(void) * Copy in the compiled-in Switcher code (from x86/switcher_32.S). * It goes in the first page, which we map in momentarily. */ - memcpy(kmap(lg_switcher_pages[0]), start_switcher_text, + memcpy(kmap(lg_switcher_pages[0]), (void *)ktla_ktva((unsigned long)start_switcher_text), end_switcher_text - start_switcher_text); kunmap(lg_switcher_pages[0]); @@ -106,9 +106,16 @@ static __init int map_switcher(void) * We want the switcher text to be read-only and executable, and * the stacks to be read-write and non-executable. */ + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + switcher_text_vma = __get_vm_area(PAGE_SIZE, VM_ALLOC|VM_NO_GUARD|VM_KERNEXEC, + switcher_addr, + switcher_addr + PAGE_SIZE); +#else switcher_text_vma = __get_vm_area(PAGE_SIZE, VM_ALLOC|VM_NO_GUARD, switcher_addr, switcher_addr + PAGE_SIZE); +#endif if (!switcher_text_vma) { err = -ENOMEM; diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index e3abebc91..6a353284a 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr) /*:*/ #ifdef CONFIG_X86_PAE -static void release_pmd(pmd_t *spmd) +static void __intentional_overflow(-1) release_pmd(pmd_t *spmd) { /* If the entry's not present, there's nothing to release. */ if (pmd_flags(*spmd) & _PAGE_PRESENT) { diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 6e9042e3d..befd030e3 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -60,7 +60,7 @@ static struct { /* Offset from where switcher.S was compiled to where we've copied it */ static unsigned long switcher_offset(void) { - return switcher_addr - (unsigned long)start_switcher_text; + return switcher_addr - ktla_ktva((unsigned long)start_switcher_text); } /* This cpu's struct lguest_pages (after the Switcher text page) */ @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) * These copies are pretty cheap, so we do them unconditionally: */ /* Save the current Host top-level page directory. 
*/ + +#ifdef CONFIG_PAX_PER_CPU_PGD + pages->state.host_cr3 = read_cr3(); +#else pages->state.host_cr3 = __pa(current->mm->pgd); +#endif + /* * Set up the Guest's page tables to see this CPU's pages (and no * other CPU's pages). @@ -498,7 +504,7 @@ void __init lguest_arch_host_init(void) * compiled-in switcher code and the high-mapped copy we just made. */ for (i = 0; i < IDT_ENTRIES; i++) - default_idt_entries[i] += switcher_offset(); + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset(); /* * Set up the Switcher's per-cpu areas. @@ -581,7 +587,7 @@ void __init lguest_arch_host_init(void) * it will be undisturbed when we switch. To change %cs and jump we * need this structure to feed to Intel's "lcall" instruction. */ - lguest_entry.offset = (long)switch_to_guest + switcher_offset(); + lguest_entry.offset = ktla_ktva((unsigned long)switch_to_guest) + switcher_offset(); lguest_entry.segment = LGUEST_CS; /* diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S index 40634b0db..4f5855e7c 100644 --- a/drivers/lguest/x86/switcher_32.S +++ b/drivers/lguest/x86/switcher_32.S @@ -87,6 +87,7 @@ #include #include #include +#include // We mark the start of the code to copy // It's placed in .text tho it's never run here @@ -149,6 +150,13 @@ ENTRY(switch_to_guest) // Changes type when we load it: damn Intel! // For after we switch over our page tables // That entry will be read-only: we'd crash. + +#ifdef CONFIG_PAX_KERNEXEC + mov %cr0, %edx + xor $X86_CR0_WP, %edx + mov %edx, %cr0 +#endif + movl $(GDT_ENTRY_TSS*8), %edx ltr %dx @@ -157,9 +165,15 @@ ENTRY(switch_to_guest) // Let's clear it again for our return. // The GDT descriptor of the Host // Points to the table after two "size" bytes - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax // Clear "used" from type field (byte 5, bit 2) - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax) + +#ifdef CONFIG_PAX_KERNEXEC + mov %cr0, %eax + xor $X86_CR0_WP, %eax + mov %eax, %cr0 +#endif // Once our page table's switched, the Guest is live! // The Host fades as we run this final step. @@ -295,13 +309,12 @@ deliver_to_host: // I consulted gcc, and it gave // These instructions, which I gladly credit: leal (%edx,%ebx,8), %eax - movzwl (%eax),%edx - movl 4(%eax), %eax - xorw %ax, %ax - orl %eax, %edx + movl 4(%eax), %edx + movw (%eax), %dx // Now the address of the handler's in %edx // We call it now: its "iret" drops us home. - jmp *%edx + ljmp $__KERNEL_CS, $1f +1: jmp *%edx // Every interrupt can come to us here // But we must truly tell each apart. 
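/*
 * Editor's note, not part of the patch: the ISDN hunks above repeatedly
 * apply one pattern on the pre-4.15 timer API (where timer_list.function
 * is void (*)(unsigned long)): give the handler the correct prototype and
 * recover the context from the .data word inside it, so the setup site no
 * longer needs a (void *) cast on the function pointer.  A minimal sketch
 * of that pattern follows; the names my_state, my_timeout and my_setup are
 * hypothetical and only illustrate the shape of the conversion.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_state {
	struct timer_list tl;
	int ticks;
};

/* The handler takes the opaque 'data' word and casts it back itself. */
static void my_timeout(unsigned long _st)
{
	struct my_state *st = (struct my_state *)_st;

	st->ticks++;
}

/* The setup site can then assign the handler without any cast. */
static void my_setup(struct my_state *st)
{
	init_timer(&st->tl);
	st->tl.function = my_timeout;
	st->tl.data = (unsigned long)st;
	st->tl.expires = jiffies + HZ;
	add_timer(&st->tl);
}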
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 37fcaadbf..e2be8ad3c 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -231,7 +231,7 @@ static void rrpc_put_blks(struct rrpc *rrpc) static struct rrpc_lun *get_next_lun(struct rrpc *rrpc) { - int next = atomic_inc_return(&rrpc->next_lun); + int next = atomic_inc_return_unchecked(&rrpc->next_lun); return &rrpc->luns[next % rrpc->nr_luns]; } @@ -1389,7 +1389,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk, rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns; /* simple round-robin strategy */ - atomic_set(&rrpc->next_lun, -1); + atomic_set_unchecked(&rrpc->next_lun, -1); ret = rrpc_area_init(rrpc, &soffset); if (ret < 0) { diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h index 5e87d52cb..26660404e 100644 --- a/drivers/lightnvm/rrpc.h +++ b/drivers/lightnvm/rrpc.h @@ -104,7 +104,7 @@ struct rrpc { /* Write strategy variables. Move these into each for structure for each * strategy */ - atomic_t next_lun; /* Whenever a page is written, this is updated + atomic_unchecked_t next_lun; /* Whenever a page is written, this is updated * to point to the next write lun */ diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index 4d200883c..de60cb294 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -20,6 +20,7 @@ config BCACHE_CLOSURES_DEBUG bool "Debug closures" depends on BCACHE select DEBUG_FS + depends on !GRKERNSEC_KMEM ---help--- Keeps all active closures in a linked list and provides a debugfs interface to list them, which makes it possible to see asynchronous diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index ca4abe1cc..0b029ef6f 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -631,7 +631,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, for (i = 0; i < KEY_PTRS(&b->key); i++) { SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); - atomic_long_add(sectors, + atomic_long_add_unchecked(sectors, &PTR_CACHE(c, &b->key, i)->sectors_written); } diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 6b420a55c..d5acb8fc5 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -433,12 +433,12 @@ struct cache { /* The rest of this all shows up in sysfs */ #define IO_ERROR_SHIFT 20 - atomic_t io_errors; - atomic_t io_count; + atomic_unchecked_t io_errors; + atomic_unchecked_t io_count; - atomic_long_t meta_sectors_written; - atomic_long_t btree_sectors_written; - atomic_long_t sectors_written; + atomic_long_unchecked_t meta_sectors_written; + atomic_long_unchecked_t btree_sectors_written; + atomic_long_unchecked_t sectors_written; }; struct gc_stat { diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 81d3db40c..46e8b6849 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -336,15 +336,17 @@ static void btree_complete_write(struct btree *b, struct btree_write *w) w->journal = NULL; } -static void btree_node_write_unlock(struct closure *cl) +static void btree_node_write_unlock(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct btree *b = container_of(cl, struct btree, io); up(&b->io_mutex); } -static void __btree_node_write_done(struct closure *cl) +static void __btree_node_write_done(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct btree *b = 
container_of(cl, struct btree, io); struct btree_write *w = btree_prev_write(b); @@ -358,12 +360,13 @@ static void __btree_node_write_done(struct closure *cl) closure_return_with_destructor(cl, btree_node_write_unlock); } -static void btree_node_write_done(struct closure *cl) +static void btree_node_write_done(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct btree *b = container_of(cl, struct btree, io); bio_free_pages(b->bio); - __btree_node_write_done(cl); + __btree_node_write_done(&cl->work); } static void btree_node_write_endio(struct bio *bio) @@ -463,7 +466,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent) do_btree_node_write(b); - atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, + atomic_long_add_unchecked(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); b->written += set_blocks(i, block_bytes(b->c)); diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 864e673ae..9c022d197 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -29,12 +29,12 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) closure_queue(cl); } else { struct closure *parent = cl->parent; - closure_fn *destructor = cl->fn; + work_func_t destructor = cl->fn; closure_debug_destroy(cl); if (destructor) - destructor(cl); + destructor(&cl->work); if (parent) closure_put(parent); diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 9b2fe2d3e..be17fd240 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -152,7 +152,7 @@ struct closure { struct workqueue_struct *wq; struct task_struct *task; struct llist_node list; - closure_fn *fn; + work_func_t fn; }; struct work_struct work; }; @@ -236,10 +236,10 @@ static inline void closure_set_stopped(struct closure *cl) atomic_sub(CLOSURE_RUNNING, &cl->remaining); } -static inline void set_closure_fn(struct closure *cl, closure_fn *fn, +static inline void set_closure_fn(struct closure *cl, work_func_t fn, struct workqueue_struct *wq) { - BUG_ON(object_is_on_stack(cl)); + BUG_ON(object_starts_on_stack(cl)); closure_set_ip(cl); cl->fn = fn; cl->wq = wq; @@ -254,7 +254,7 @@ static inline void closure_queue(struct closure *cl) INIT_WORK(&cl->work, cl->work.func); BUG_ON(!queue_work(wq, &cl->work)); } else - cl->fn(cl); + cl->fn(&cl->work); } /** @@ -373,7 +373,7 @@ do { \ * asynchronously out of a new closure - @parent will then wait for @cl to * finish. 
*/ -static inline void closure_call(struct closure *cl, closure_fn fn, +static inline void closure_call(struct closure *cl, work_func_t fn, struct workqueue_struct *wq, struct closure *parent) { diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index e97b0acf7..5aff0fa37 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -60,7 +60,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m) */ if (ca->set->error_decay) { - unsigned count = atomic_inc_return(&ca->io_count); + unsigned count = atomic_inc_return_unchecked(&ca->io_count); while (count > ca->set->error_decay) { unsigned errors; @@ -72,16 +72,16 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m) * succesfully do so, we rescale the errors once: */ - count = atomic_cmpxchg(&ca->io_count, old, new); + count = atomic_cmpxchg_unchecked(&ca->io_count, old, new); if (count == old) { count = new; - errors = atomic_read(&ca->io_errors); + errors = atomic_read_unchecked(&ca->io_errors); do { old = errors; new = ((uint64_t) errors * 127) / 128; - errors = atomic_cmpxchg(&ca->io_errors, + errors = atomic_cmpxchg_unchecked(&ca->io_errors, old, new); } while (old != errors); } @@ -90,7 +90,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m) if (error) { char buf[BDEVNAME_SIZE]; - unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT, + unsigned errors = atomic_add_return_unchecked(1 << IO_ERROR_SHIFT, &ca->io_errors); errors >>= IO_ERROR_SHIFT; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 6925023e1..bff91f00c 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -555,10 +555,11 @@ static void journal_write_endio(struct bio *bio) closure_put(&w->c->journal.io); } -static void journal_write(struct closure *); +static void journal_write(struct work_struct *); -static void journal_write_done(struct closure *cl) +static void journal_write_done(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct journal *j = container_of(cl, struct journal, io); struct journal_write *w = (j->cur == j->w) ? 
&j->w[1] @@ -568,17 +569,19 @@ static void journal_write_done(struct closure *cl) continue_at_nobarrier(cl, journal_write, system_wq); } -static void journal_write_unlock(struct closure *cl) +static void journal_write_unlock(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cache_set *c = container_of(cl, struct cache_set, journal.io); c->journal.io_in_flight = 0; spin_unlock(&c->journal.lock); } -static void journal_write_unlocked(struct closure *cl) +static void journal_write_unlocked(struct work_struct *work) __releases(c->journal.lock) { + struct closure *cl = container_of(work, struct closure, work); struct cache_set *c = container_of(cl, struct cache_set, journal.io); struct cache *ca; struct journal_write *w = c->journal.cur; @@ -621,7 +624,7 @@ static void journal_write_unlocked(struct closure *cl) ca = PTR_CACHE(c, k, i); bio = &ca->journal.bio; - atomic_long_add(sectors, &ca->meta_sectors_written); + atomic_long_add_unchecked(sectors, &ca->meta_sectors_written); bio_reset(bio); bio->bi_iter.bi_sector = PTR_OFFSET(k, i); @@ -654,12 +657,13 @@ static void journal_write_unlocked(struct closure *cl) continue_at(cl, journal_write_done, NULL); } -static void journal_write(struct closure *cl) +static void journal_write(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cache_set *c = container_of(cl, struct cache_set, journal.io); spin_lock(&c->journal.lock); - journal_write_unlocked(cl); + journal_write_unlocked(&cl->work); } static void journal_try_write(struct cache_set *c) diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 5c4bddecf..99659feff 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -34,14 +34,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) /* Moving GC - IO loop */ -static void moving_io_destructor(struct closure *cl) +static void moving_io_destructor(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct moving_io *io = container_of(cl, struct moving_io, cl); kfree(io); } -static void write_moving_finish(struct closure *cl) +static void write_moving_finish(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct moving_io *io = container_of(cl, struct moving_io, cl); struct bio *bio = &io->bio.bio; @@ -89,8 +91,9 @@ static void moving_init(struct moving_io *io) bch_bio_map(bio, NULL); } -static void write_moving(struct closure *cl) +static void write_moving(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct moving_io *io = container_of(cl, struct moving_io, cl); struct data_insert_op *op = &io->op; @@ -113,8 +116,9 @@ static void write_moving(struct closure *cl) continue_at(cl, write_moving_finish, op->wq); } -static void read_moving_submit(struct closure *cl) +static void read_moving_submit(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct moving_io *io = container_of(cl, struct moving_io, cl); struct bio *bio = &io->bio.bio; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 40ffe5e42..6757bd680 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -24,7 +24,7 @@ struct kmem_cache *bch_search_cache; -static void bch_data_insert_start(struct closure *); +static void bch_data_insert_start(struct work_struct *); static unsigned cache_mode(struct cached_dev *dc, struct 
bio *bio) { @@ -53,8 +53,9 @@ static void bio_csum(struct bio *bio, struct bkey *k) /* Insert data into cache */ -static void bch_data_insert_keys(struct closure *cl) +static void bch_data_insert_keys(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); atomic_t *journal_ref = NULL; struct bkey *replace_key = op->replace ? &op->replace_key : NULL; @@ -143,8 +144,9 @@ static void bch_data_invalidate(struct closure *cl) continue_at(cl, bch_data_insert_keys, op->wq); } -static void bch_data_insert_error(struct closure *cl) +static void bch_data_insert_error(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); /* @@ -170,7 +172,7 @@ static void bch_data_insert_error(struct closure *cl) op->insert_keys.top = dst; - bch_data_insert_keys(cl); + bch_data_insert_keys(&cl->work); } static void bch_data_insert_endio(struct bio *bio) @@ -191,8 +193,9 @@ static void bch_data_insert_endio(struct bio *bio) bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache"); } -static void bch_data_insert_start(struct closure *cl) +static void bch_data_insert_start(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); struct bio *bio = op->bio, *n; @@ -313,8 +316,9 @@ static void bch_data_insert_start(struct closure *cl) * If s->bypass is true, instead of inserting the data it invalidates the * region of the cache represented by s->cache_bio and op->inode. */ -void bch_data_insert(struct closure *cl) +void bch_data_insert(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); trace_bcache_write(op->c, op->inode, op->bio, @@ -322,7 +326,7 @@ void bch_data_insert(struct closure *cl) bch_keylist_init(&op->insert_keys); bio_get(op->bio); - bch_data_insert_start(cl); + bch_data_insert_start(&cl->work); } /* Congested? */ @@ -570,8 +574,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) return n == bio ? 
MAP_DONE : MAP_CONTINUE; } -static void cache_lookup(struct closure *cl) +static void cache_lookup(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, iop.cl); struct bio *bio = &s->bio.bio; int ret; @@ -631,8 +636,9 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio) bio_cnt_set(bio, 3); } -static void search_free(struct closure *cl) +static void search_free(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); bio_complete(s); @@ -676,19 +682,21 @@ static inline struct search *search_alloc(struct bio *bio, /* Cached devices */ -static void cached_dev_bio_complete(struct closure *cl) +static void cached_dev_bio_complete(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - search_free(cl); + search_free(&cl->work); cached_dev_put(dc); } /* Process reads */ -static void cached_dev_cache_miss_done(struct closure *cl) +static void cached_dev_cache_miss_done(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); if (s->iop.replace_collision) @@ -697,11 +705,12 @@ static void cached_dev_cache_miss_done(struct closure *cl) if (s->iop.bio) bio_free_pages(s->iop.bio); - cached_dev_bio_complete(cl); + cached_dev_bio_complete(&cl->work); } -static void cached_dev_read_error(struct closure *cl) +static void cached_dev_read_error(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; @@ -720,8 +729,9 @@ static void cached_dev_read_error(struct closure *cl) continue_at(cl, cached_dev_cache_miss_done, NULL); } -static void cached_dev_read_done(struct closure *cl) +static void cached_dev_read_done(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); @@ -760,8 +770,9 @@ static void cached_dev_read_done(struct closure *cl) continue_at(cl, cached_dev_cache_miss_done, NULL); } -static void cached_dev_read_done_bh(struct closure *cl) +static void cached_dev_read_done_bh(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); @@ -859,13 +870,14 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s) /* Process writes */ -static void cached_dev_write_complete(struct closure *cl) +static void cached_dev_write_complete(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); up_read_non_owner(&dc->writeback_lock); - cached_dev_bio_complete(cl); + cached_dev_bio_complete(&cl->work); } static void cached_dev_write(struct cached_dev *dc, struct search *s) @@ -937,8 +949,9 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) continue_at(cl, cached_dev_write_complete, NULL); } -static void cached_dev_nodata(struct closure *cl) 
+static void cached_dev_nodata(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; @@ -1058,8 +1071,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s, return MAP_CONTINUE; } -static void flash_dev_nodata(struct closure *cl) +static void flash_dev_nodata(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct search *s = container_of(cl, struct search, cl); if (s->iop.flush_journal) diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 1ff36875c..b8f4a05aa 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -33,7 +33,7 @@ struct data_insert_op { }; unsigned bch_get_congested(struct cache_set *); -void bch_data_insert(struct closure *cl); +void bch_data_insert(struct work_struct *work); void bch_cached_dev_request_init(struct cached_dev *dc); void bch_flash_dev_request_init(struct bcache_device *d); diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index 0ca072c20..5e6e5c36c 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -120,7 +120,7 @@ void bch_cache_accounting_destroy(struct cache_accounting *acc) kobject_put(&acc->hour.kobj); kobject_put(&acc->day.kobj); - atomic_set(&acc->closing, 1); + atomic_set_unchecked(&acc->closing, 1); if (del_timer_sync(&acc->timer)) closure_return(&acc->cl); } @@ -151,7 +151,7 @@ static void scale_accounting(unsigned long data) struct cache_accounting *acc = (struct cache_accounting *) data; #define move_stat(name) do { \ - unsigned t = atomic_xchg(&acc->collector.name, 0); \ + unsigned t = atomic_xchg_unchecked(&acc->collector.name, 0); \ t <<= 16; \ acc->five_minute.name += t; \ acc->hour.name += t; \ @@ -174,7 +174,7 @@ static void scale_accounting(unsigned long data) acc->timer.expires += accounting_delay; - if (!atomic_read(&acc->closing)) + if (!atomic_read_unchecked(&acc->closing)) add_timer(&acc->timer); else closure_return(&acc->cl); @@ -185,14 +185,14 @@ static void mark_cache_stats(struct cache_stat_collector *stats, { if (!bypass) if (hit) - atomic_inc(&stats->cache_hits); + atomic_inc_unchecked(&stats->cache_hits); else - atomic_inc(&stats->cache_misses); + atomic_inc_unchecked(&stats->cache_misses); else if (hit) - atomic_inc(&stats->cache_bypass_hits); + atomic_inc_unchecked(&stats->cache_bypass_hits); else - atomic_inc(&stats->cache_bypass_misses); + atomic_inc_unchecked(&stats->cache_bypass_misses); } void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, @@ -206,22 +206,22 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); - atomic_inc(&dc->accounting.collector.cache_readaheads); - atomic_inc(&c->accounting.collector.cache_readaheads); + atomic_inc_unchecked(&dc->accounting.collector.cache_readaheads); + atomic_inc_unchecked(&c->accounting.collector.cache_readaheads); } void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); - atomic_inc(&dc->accounting.collector.cache_miss_collisions); - atomic_inc(&c->accounting.collector.cache_miss_collisions); + atomic_inc_unchecked(&dc->accounting.collector.cache_miss_collisions); + 
atomic_inc_unchecked(&c->accounting.collector.cache_miss_collisions); } void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, int sectors) { - atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); - atomic_add(sectors, &c->accounting.collector.sectors_bypassed); + atomic_add_unchecked(sectors, &dc->accounting.collector.sectors_bypassed); + atomic_add_unchecked(sectors, &c->accounting.collector.sectors_bypassed); } void bch_cache_accounting_init(struct cache_accounting *acc, diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index adbff141c..018c2d2e3 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -2,13 +2,13 @@ #define _BCACHE_STATS_H_ struct cache_stat_collector { - atomic_t cache_hits; - atomic_t cache_misses; - atomic_t cache_bypass_hits; - atomic_t cache_bypass_misses; - atomic_t cache_readaheads; - atomic_t cache_miss_collisions; - atomic_t sectors_bypassed; + atomic_unchecked_t cache_hits; + atomic_unchecked_t cache_misses; + atomic_unchecked_t cache_bypass_hits; + atomic_unchecked_t cache_bypass_misses; + atomic_unchecked_t cache_readaheads; + atomic_unchecked_t cache_miss_collisions; + atomic_unchecked_t sectors_bypassed; }; struct cache_stats { @@ -28,7 +28,7 @@ struct cache_stats { struct cache_accounting { struct closure cl; struct timer_list timer; - atomic_t closing; + atomic_unchecked_t closing; struct cache_stat_collector collector; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 849ad441c..a9e695e27 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -240,8 +240,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) submit_bio(bio); } -static void bch_write_bdev_super_unlock(struct closure *cl) +static void bch_write_bdev_super_unlock(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); up(&dc->sb_write_mutex); @@ -274,8 +275,9 @@ static void write_super_endio(struct bio *bio) closure_put(&ca->set->sb_write); } -static void bcache_write_super_unlock(struct closure *cl) +static void bcache_write_super_unlock(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cache_set *c = container_of(cl, struct cache_set, sb_write); up(&c->sb_write_mutex); @@ -325,8 +327,9 @@ static void uuid_endio(struct bio *bio) closure_put(cl); } -static void uuid_io_unlock(struct closure *cl) +static void uuid_io_unlock(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cache_set *c = container_of(cl, struct cache_set, uuid_write); up(&c->uuid_write_mutex); @@ -531,7 +534,7 @@ void bch_prio_write(struct cache *ca) ca->disk_buckets->seq++; - atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), + atomic_long_add_unchecked(ca->sb.bucket_size * prio_buckets(ca), &ca->meta_sectors_written); //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), @@ -1051,8 +1054,9 @@ void bch_cached_dev_release(struct kobject *kobj) module_put(THIS_MODULE); } -static void cached_dev_free(struct closure *cl) +static void cached_dev_free(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); cancel_delayed_work_sync(&dc->writeback_rate_update); @@ -1076,8 +1080,9 @@ static void cached_dev_free(struct closure *cl) kobject_put(&dc->disk.kobj); } -static void 
cached_dev_flush(struct closure *cl) +static void cached_dev_flush(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); struct bcache_device *d = &dc->disk; @@ -1193,8 +1198,9 @@ void bch_flash_dev_release(struct kobject *kobj) kfree(d); } -static void flash_dev_free(struct closure *cl) +static void flash_dev_free(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct bcache_device *d = container_of(cl, struct bcache_device, cl); mutex_lock(&bch_register_lock); bcache_device_free(d); @@ -1202,8 +1208,9 @@ static void flash_dev_free(struct closure *cl) kobject_put(&d->kobj); } -static void flash_dev_flush(struct closure *cl) +static void flash_dev_flush(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct bcache_device *d = container_of(cl, struct bcache_device, cl); mutex_lock(&bch_register_lock); @@ -1322,8 +1329,9 @@ void bch_cache_set_release(struct kobject *kobj) module_put(THIS_MODULE); } -static void cache_set_free(struct closure *cl) +static void cache_set_free(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cache_set *c = container_of(cl, struct cache_set, cl); struct cache *ca; unsigned i; @@ -1368,8 +1376,9 @@ static void cache_set_free(struct closure *cl) kobject_put(&c->kobj); } -static void cache_set_flush(struct closure *cl) +static void cache_set_flush(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cache_set *c = container_of(cl, struct cache_set, caching); struct cache *ca; struct btree *b; @@ -1410,8 +1419,9 @@ static void cache_set_flush(struct closure *cl) closure_return(cl); } -static void __cache_set_unregister(struct closure *cl) +static void __cache_set_unregister(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct cache_set *c = container_of(cl, struct cache_set, caching); struct cached_dev *dc; size_t i; diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index b3ff57d61..b2e30fb99 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -739,15 +739,15 @@ SHOW(__bch_cache) sysfs_hprint(block_size, block_bytes(ca)); sysfs_print(nbuckets, ca->sb.nbuckets); sysfs_print(discard, ca->discard); - sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9); + sysfs_hprint(written, atomic_long_read_unchecked(&ca->sectors_written) << 9); sysfs_hprint(btree_written, - atomic_long_read(&ca->btree_sectors_written) << 9); + atomic_long_read_unchecked(&ca->btree_sectors_written) << 9); sysfs_hprint(metadata_written, - (atomic_long_read(&ca->meta_sectors_written) + - atomic_long_read(&ca->btree_sectors_written)) << 9); + (atomic_long_read_unchecked(&ca->meta_sectors_written) + + atomic_long_read_unchecked(&ca->btree_sectors_written)) << 9); sysfs_print(io_errors, - atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT); + atomic_read_unchecked(&ca->io_errors) >> IO_ERROR_SHIFT); if (attr == &sysfs_cache_replacement_policy) return bch_snprint_string_list(buf, PAGE_SIZE, @@ -870,11 +870,11 @@ STORE(__bch_cache) } if (attr == &sysfs_clear_stats) { - atomic_long_set(&ca->sectors_written, 0); - atomic_long_set(&ca->btree_sectors_written, 0); - atomic_long_set(&ca->meta_sectors_written, 0); - atomic_set(&ca->io_count, 0); - atomic_set(&ca->io_errors, 0); + atomic_long_set_unchecked(&ca->sectors_written, 
0); + atomic_long_set_unchecked(&ca->btree_sectors_written, 0); + atomic_long_set_unchecked(&ca->meta_sectors_written, 0); + atomic_set_unchecked(&ca->io_count, 0); + atomic_set_unchecked(&ca->io_errors, 0); } return size; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index e51644e50..5cc3c15a4 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -117,14 +117,16 @@ static void dirty_init(struct keybuf_key *w) bch_bio_map(bio, NULL); } -static void dirty_io_destructor(struct closure *cl) +static void dirty_io_destructor(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct dirty_io *io = container_of(cl, struct dirty_io, cl); kfree(io); } -static void write_dirty_finish(struct closure *cl) +static void write_dirty_finish(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct dirty_io *io = container_of(cl, struct dirty_io, cl); struct keybuf_key *w = io->bio.bi_private; struct cached_dev *dc = io->dc; @@ -173,8 +175,9 @@ static void dirty_endio(struct bio *bio) closure_put(&io->cl); } -static void write_dirty(struct closure *cl) +static void write_dirty(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct dirty_io *io = container_of(cl, struct dirty_io, cl); struct keybuf_key *w = io->bio.bi_private; @@ -200,8 +203,9 @@ static void read_dirty_endio(struct bio *bio) dirty_endio(bio); } -static void read_dirty_submit(struct closure *cl) +static void read_dirty_submit(struct work_struct *work) { + struct closure *cl = container_of(work, struct closure, work); struct dirty_io *io = container_of(cl, struct dirty_io, cl); closure_bio_submit(&io->bio, cl); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 2d826927a..3507386ac 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1963,7 +1963,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap) chunk_kb ? 
"KB" : "B"); if (bitmap->storage.file) { seq_printf(seq, ", file: "); - seq_file_path(seq, bitmap->storage.file, " \t\n"); + seq_file_path(seq, bitmap->storage.file, " \t\n\\"); } seq_printf(seq, "\n"); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 59b2c5056..60bca5313 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -118,7 +118,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len) */ struct dm_hook_info { bio_end_io_t *bi_end_io; -}; +} __no_const; static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, bio_end_io_t *bi_end_io, void *bi_private) @@ -182,16 +182,16 @@ struct cache_features { }; struct cache_stats { - atomic_t read_hit; - atomic_t read_miss; - atomic_t write_hit; - atomic_t write_miss; - atomic_t demotion; - atomic_t promotion; - atomic_t copies_avoided; - atomic_t cache_cell_clash; - atomic_t commit_count; - atomic_t discard_count; + atomic_unchecked_t read_hit; + atomic_unchecked_t read_miss; + atomic_unchecked_t write_hit; + atomic_unchecked_t write_miss; + atomic_unchecked_t demotion; + atomic_unchecked_t promotion; + atomic_unchecked_t copies_avoided; + atomic_unchecked_t cache_cell_clash; + atomic_unchecked_t commit_count; + atomic_unchecked_t discard_count; }; /* @@ -270,8 +270,8 @@ struct cache { atomic_t nr_io_migrations; wait_queue_head_t quiescing_wait; - atomic_t quiescing; - atomic_t quiescing_ack; + atomic_unchecked_t quiescing; + atomic_unchecked_t quiescing_ack; /* * cache_size entries, dirty if set @@ -395,8 +395,10 @@ static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); } -static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) +static void free_prison_cell(void *_cache, struct dm_bio_prison_cell *cell) { + struct cache *cache = _cache; + dm_bio_prison_free_cell(cache->prison, cell); } @@ -493,8 +495,10 @@ static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p) * You can't have more than two cells in a prealloc struct. BUG() will be * called if you try and overfill. 
*/ -static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell) +static void prealloc_put_cell(void *_p, struct dm_bio_prison_cell *cell) { + struct prealloc *p = _p; + if (!p->cell2) p->cell2 = cell; @@ -637,7 +641,7 @@ static void set_discard(struct cache *cache, dm_dblock_t b) unsigned long flags; BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); - atomic_inc(&cache->stats.discard_count); + atomic_inc_unchecked(&cache->stats.discard_count); spin_lock_irqsave(&cache->lock, flags); set_bit(from_dblock(b), cache->discard_bitset); @@ -685,10 +689,10 @@ static void load_stats(struct cache *cache) struct dm_cache_statistics stats; dm_cache_metadata_get_stats(cache->cmd, &stats); - atomic_set(&cache->stats.read_hit, stats.read_hits); - atomic_set(&cache->stats.read_miss, stats.read_misses); - atomic_set(&cache->stats.write_hit, stats.write_hits); - atomic_set(&cache->stats.write_miss, stats.write_misses); + atomic_set_unchecked(&cache->stats.read_hit, stats.read_hits); + atomic_set_unchecked(&cache->stats.read_miss, stats.read_misses); + atomic_set_unchecked(&cache->stats.write_hit, stats.write_hits); + atomic_set_unchecked(&cache->stats.write_miss, stats.write_misses); } static void save_stats(struct cache *cache) @@ -698,10 +702,10 @@ static void save_stats(struct cache *cache) if (get_cache_mode(cache) >= CM_READ_ONLY) return; - stats.read_hits = atomic_read(&cache->stats.read_hit); - stats.read_misses = atomic_read(&cache->stats.read_miss); - stats.write_hits = atomic_read(&cache->stats.write_hit); - stats.write_misses = atomic_read(&cache->stats.write_miss); + stats.read_hits = atomic_read_unchecked(&cache->stats.read_hit); + stats.read_misses = atomic_read_unchecked(&cache->stats.read_miss); + stats.write_hits = atomic_read_unchecked(&cache->stats.write_hit); + stats.write_misses = atomic_read_unchecked(&cache->stats.write_miss); dm_cache_metadata_set_stats(cache->cmd, &stats); } @@ -1326,7 +1330,7 @@ static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) static void avoid_copy(struct dm_cache_migration *mg) { - atomic_inc(&mg->cache->stats.copies_avoided); + atomic_inc_unchecked(&mg->cache->stats.copies_avoided); migration_success_pre_commit(mg); } @@ -1636,7 +1640,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs, cell_prealloc = prealloc_get_cell(structs); r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc, - (cell_free_fn) prealloc_put_cell, + prealloc_put_cell, structs, &new_ocell); if (r > 0) return; @@ -1653,13 +1657,13 @@ static bool spare_migration_bandwidth(struct cache *cache) static void inc_hit_counter(struct cache *cache, struct bio *bio) { - atomic_inc(bio_data_dir(bio) == READ ? + atomic_inc_unchecked(bio_data_dir(bio) == READ ? &cache->stats.read_hit : &cache->stats.write_hit); } static void inc_miss_counter(struct cache *cache, struct bio *bio) { - atomic_inc(bio_data_dir(bio) == READ ? + atomic_inc_unchecked(bio_data_dir(bio) == READ ? 
&cache->stats.read_miss : &cache->stats.write_miss); } @@ -1790,7 +1794,7 @@ static int cell_locker(struct policy_locker *locker, dm_oblock_t b) struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs); return bio_detain(l->cache, b, NULL, cell_prealloc, - (cell_free_fn) prealloc_put_cell, + prealloc_put_cell, l->structs, &l->cell); } @@ -1832,7 +1836,7 @@ static void process_cell(struct cache *cache, struct prealloc *structs, */ if (bio_data_dir(bio) == WRITE) { - atomic_inc(&cache->stats.demotion); + atomic_inc_unchecked(&cache->stats.demotion); invalidate(cache, structs, block, lookup_result.cblock, new_ocell); release_cell = false; @@ -1865,14 +1869,14 @@ static void process_cell(struct cache *cache, struct prealloc *structs, break; case POLICY_NEW: - atomic_inc(&cache->stats.promotion); + atomic_inc_unchecked(&cache->stats.promotion); promote(cache, structs, block, lookup_result.cblock, new_ocell); release_cell = false; break; case POLICY_REPLACE: - atomic_inc(&cache->stats.demotion); - atomic_inc(&cache->stats.promotion); + atomic_inc_unchecked(&cache->stats.demotion); + atomic_inc_unchecked(&cache->stats.promotion); demote_then_promote(cache, structs, lookup_result.old_oblock, block, lookup_result.cblock, ool.cell, new_ocell); @@ -1902,7 +1906,7 @@ static void process_bio(struct cache *cache, struct prealloc *structs, */ cell_prealloc = prealloc_get_cell(structs); r = bio_detain(cache, block, bio, cell_prealloc, - (cell_free_fn) prealloc_put_cell, + prealloc_put_cell, structs, &new_ocell); if (r > 0) return; @@ -1926,7 +1930,7 @@ static int commit(struct cache *cache, bool clean_shutdown) if (get_cache_mode(cache) >= CM_READ_ONLY) return -EINVAL; - atomic_inc(&cache->stats.commit_count); + atomic_inc_unchecked(&cache->stats.commit_count); r = dm_cache_commit(cache->cmd, clean_shutdown); if (r) metadata_operation_failed(cache, "dm_cache_commit", r); @@ -2157,32 +2161,32 @@ static void process_invalidation_requests(struct cache *cache) *--------------------------------------------------------------*/ static bool is_quiescing(struct cache *cache) { - return atomic_read(&cache->quiescing); + return atomic_read_unchecked(&cache->quiescing); } static void ack_quiescing(struct cache *cache) { if (is_quiescing(cache)) { - atomic_inc(&cache->quiescing_ack); + atomic_inc_unchecked(&cache->quiescing_ack); wake_up(&cache->quiescing_wait); } } static void wait_for_quiescing_ack(struct cache *cache) { - wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack)); + wait_event(cache->quiescing_wait, atomic_read_unchecked(&cache->quiescing_ack)); } static void start_quiescing(struct cache *cache) { - atomic_inc(&cache->quiescing); + atomic_inc_unchecked(&cache->quiescing); wait_for_quiescing_ack(cache); } static void stop_quiescing(struct cache *cache) { - atomic_set(&cache->quiescing, 0); - atomic_set(&cache->quiescing_ack, 0); + atomic_set_unchecked(&cache->quiescing, 0); + atomic_set_unchecked(&cache->quiescing_ack, 0); } static void wait_for_migrations(struct cache *cache) @@ -2869,8 +2873,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) init_waitqueue_head(&cache->migration_wait); init_waitqueue_head(&cache->quiescing_wait); - atomic_set(&cache->quiescing, 0); - atomic_set(&cache->quiescing_ack, 0); + atomic_set_unchecked(&cache->quiescing, 0); + atomic_set_unchecked(&cache->quiescing_ack, 0); r = -ENOMEM; atomic_set(&cache->nr_dirty, 0); @@ -2937,12 +2941,12 @@ static int cache_create(struct cache_args *ca, struct cache **result) 
load_stats(cache);
- atomic_set(&cache->stats.demotion, 0);
- atomic_set(&cache->stats.promotion, 0);
- atomic_set(&cache->stats.copies_avoided, 0);
- atomic_set(&cache->stats.cache_cell_clash, 0);
- atomic_set(&cache->stats.commit_count, 0);
- atomic_set(&cache->stats.discard_count, 0);
+ atomic_set_unchecked(&cache->stats.demotion, 0);
+ atomic_set_unchecked(&cache->stats.promotion, 0);
+ atomic_set_unchecked(&cache->stats.copies_avoided, 0);
+ atomic_set_unchecked(&cache->stats.cache_cell_clash, 0);
+ atomic_set_unchecked(&cache->stats.commit_count, 0);
+ atomic_set_unchecked(&cache->stats.discard_count, 0);
spin_lock_init(&cache->invalidation_lock);
INIT_LIST_HEAD(&cache->invalidation_requests);
@@ -3059,7 +3063,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
}
r = bio_detain(cache, block, bio, cell,
- (cell_free_fn) free_prison_cell,
+ free_prison_cell,
cache, &cell);
if (r) {
if (r < 0)
@@ -3553,12 +3557,12 @@ static void cache_status(struct dm_target *ti, status_type_t type,
cache->sectors_per_block,
(unsigned long long) from_cblock(residency),
(unsigned long long) from_cblock(cache->cache_size),
- (unsigned) atomic_read(&cache->stats.read_hit),
- (unsigned) atomic_read(&cache->stats.read_miss),
- (unsigned) atomic_read(&cache->stats.write_hit),
- (unsigned) atomic_read(&cache->stats.write_miss),
- (unsigned) atomic_read(&cache->stats.demotion),
- (unsigned) atomic_read(&cache->stats.promotion),
+ (unsigned) atomic_read_unchecked(&cache->stats.read_hit),
+ (unsigned) atomic_read_unchecked(&cache->stats.read_miss),
+ (unsigned) atomic_read_unchecked(&cache->stats.write_hit),
+ (unsigned) atomic_read_unchecked(&cache->stats.write_miss),
+ (unsigned) atomic_read_unchecked(&cache->stats.demotion),
+ (unsigned) atomic_read_unchecked(&cache->stats.promotion),
(unsigned long) atomic_read(&cache->nr_dirty));
if (writethrough_mode(&cache->features))
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 40ceba1fe..4141e1eb1 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -75,8 +75,8 @@ struct mapped_device {
* Event handling.
*/ wait_queue_head_t eventq; - atomic_t event_nr; - atomic_t uevent_seq; + atomic_unchecked_t event_nr; + atomic_unchecked_t uevent_seq; struct list_head uevent_list; spinlock_t uevent_lock; /* Protect access to uevent_list */ diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 966eb4b61..aca05a3b8 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1777,7 +1777,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param) cmd == DM_LIST_VERSIONS_CMD) return 0; - if ((cmd == DM_DEV_CREATE_CMD)) { + if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) { DMWARN("name not supplied when creating device"); return -EINVAL; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index e477af859..a5b1fce06 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -88,7 +88,7 @@ struct multipath { atomic_t nr_valid_paths; /* Total number of usable paths */ atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */ - atomic_t pg_init_count; /* Number of times pg_init called */ + atomic_unchecked_t pg_init_count;/* Number of times pg_init called */ unsigned queue_mode; @@ -203,7 +203,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti) set_bit(MPATHF_QUEUE_IO, &m->flags); atomic_set(&m->nr_valid_paths, 0); atomic_set(&m->pg_init_in_progress, 0); - atomic_set(&m->pg_init_count, 0); + atomic_set_unchecked(&m->pg_init_count, 0); m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; INIT_WORK(&m->trigger_event, trigger_event); init_waitqueue_head(&m->pg_init_wait); @@ -351,7 +351,7 @@ static int __pg_init_all_paths(struct multipath *m) if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) return 0; - atomic_inc(&m->pg_init_count); + atomic_inc_unchecked(&m->pg_init_count); clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); /* Check here to reset pg_init_required */ @@ -397,7 +397,7 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg) clear_bit(MPATHF_QUEUE_IO, &m->flags); } - atomic_set(&m->pg_init_count, 0); + atomic_set_unchecked(&m->pg_init_count, 0); } static struct pgpath *choose_path_in_pg(struct multipath *m, @@ -1418,7 +1418,7 @@ static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) spin_lock_irqsave(&m->lock, flags); - if (atomic_read(&m->pg_init_count) <= m->pg_init_retries && + if (atomic_read_unchecked(&m->pg_init_count) <= m->pg_init_retries && !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); else @@ -1736,7 +1736,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type, /* Features */ if (type == STATUSTYPE_INFO) DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags), - atomic_read(&m->pg_init_count)); + atomic_read_unchecked(&m->pg_init_count)); else { DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) + (m->pg_init_retries > 0) * 2 + diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index af2d79b52..d879687df 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3196,7 +3196,7 @@ static void raid_status(struct dm_target *ti, status_type_t type, mddev->resync_max_sectors : mddev->dev_sectors; progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync); resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? - atomic64_read(&mddev->resync_mismatches) : 0; + atomic64_read_unchecked(&mddev->resync_mismatches) : 0; sync_action = decipher_sync_action(&rs->md); /* HM FIXME: do we want another state char for raid0? 
It shows 'D' or 'A' now */ diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9a8b71067..ae1bf13e7 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -42,7 +42,7 @@ enum dm_raid1_error { struct mirror { struct mirror_set *ms; - atomic_t error_count; + atomic_unchecked_t error_count; unsigned long error_type; struct dm_dev *dev; sector_t offset; @@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms) struct mirror *m; for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) - if (!atomic_read(&m->error_count)) + if (!atomic_read_unchecked(&m->error_count)) return m; return NULL; @@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) * simple way to tell if a device has encountered * errors. */ - atomic_inc(&m->error_count); + atomic_inc_unchecked(&m->error_count); if (test_and_set_bit(error_type, &m->error_type)) return; @@ -378,7 +378,7 @@ static void reset_ms_flags(struct mirror_set *ms) ms->leg_failure = 0; for (m = 0; m < ms->nr_mirrors; m++) { - atomic_set(&(ms->mirror[m].error_count), 0); + atomic_set_unchecked(&(ms->mirror[m].error_count), 0); ms->mirror[m].error_type = 0; } } @@ -423,7 +423,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) struct mirror *m = get_default_mirror(ms); do { - if (likely(!atomic_read(&m->error_count))) + if (likely(!atomic_read_unchecked(&m->error_count))) return m; if (m-- == ms->mirror) @@ -437,7 +437,7 @@ static int default_ok(struct mirror *m) { struct mirror *default_mirror = get_default_mirror(m->ms); - return !atomic_read(&default_mirror->error_count); + return !atomic_read_unchecked(&default_mirror->error_count); } static int mirror_available(struct mirror_set *ms, struct bio *bio) @@ -577,7 +577,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) */ if (likely(region_in_sync(ms, region, 1))) m = choose_mirror(ms, bio->bi_iter.bi_sector); - else if (m && atomic_read(&m->error_count)) + else if (m && atomic_read_unchecked(&m->error_count)) m = NULL; if (likely(m)) @@ -962,7 +962,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, } ms->mirror[mirror].ms = ms; - atomic_set(&(ms->mirror[mirror].error_count), 0); + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0); ms->mirror[mirror].error_type = 0; ms->mirror[mirror].offset = offset; @@ -1372,7 +1372,7 @@ static void mirror_resume(struct dm_target *ti) */ static char device_status_char(struct mirror *m) { - if (!atomic_read(&(m->error_count))) + if (!atomic_read_unchecked(&(m->error_count))) return 'A'; return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 
'F' : diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 38b05f23b..4f9959526 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -435,7 +435,7 @@ static int dm_stats_delete(struct dm_stats *stats, int id) synchronize_rcu_expedited(); dm_stat_free(&s->rcu_head); } else { - ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1; + ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1; call_rcu(&s->rcu_head, dm_stat_free); } return 0; @@ -647,8 +647,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, ((bi_rw == WRITE) == (ACCESS_ONCE(last->last_rw) == WRITE)) )); - ACCESS_ONCE(last->last_sector) = end_sector; - ACCESS_ONCE(last->last_rw) = bi_rw; + ACCESS_ONCE_RW(last->last_sector) = end_sector; + ACCESS_ONCE_RW(last->last_rw) = bi_rw; } rcu_read_lock(); diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 28193a57b..0543cc938 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -21,7 +21,7 @@ struct stripe { struct dm_dev *dev; sector_t physical_start; - atomic_t error_count; + atomic_unchecked_t error_count; }; struct stripe_c { @@ -190,7 +190,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) kfree(sc); return r; } - atomic_set(&(sc->stripe[i].error_count), 0); + atomic_set_unchecked(&(sc->stripe[i].error_count), 0); } ti->private = sc; @@ -357,7 +357,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type, DMEMIT("%d ", sc->stripes); for (i = 0; i < sc->stripes; i++) { DMEMIT("%s ", sc->stripe[i].dev->name); - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ? + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ? 'D' : 'A'; } buffer[i] = '\0'; @@ -402,8 +402,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error) */ for (i = 0; i < sc->stripes; i++) if (!strcmp(sc->stripe[i].dev->name, major_minor)) { - atomic_inc(&(sc->stripe[i].error_count)); - if (atomic_read(&(sc->stripe[i].error_count)) < + atomic_inc_unchecked(&(sc->stripe[i].error_count)); + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) < DM_IO_ERROR_THRESHOLD) schedule_work(&sc->trigger_event); } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5ac239d0f..d91268c60 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -308,7 +308,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, if (!dev_size) return 0; - if ((start >= dev_size) || (start + len > dev_size)) { + if ((start >= dev_size) || (len > dev_size - start)) { DMWARN("%s: %s too small for target: " "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index a15091a0d..2d2020810 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -405,7 +405,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) { pmd->info.tm = pmd->tm; pmd->info.levels = 2; - pmd->info.value_type.context = pmd->data_sm; + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm; pmd->info.value_type.size = sizeof(__le64); pmd->info.value_type.inc = data_block_inc; pmd->info.value_type.dec = data_block_dec; @@ -424,7 +424,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) pmd->bl_info.tm = pmd->tm; pmd->bl_info.levels = 1; - pmd->bl_info.value_type.context = pmd->data_sm; + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm; pmd->bl_info.value_type.size = sizeof(__le64); 
pmd->bl_info.value_type.inc = data_block_inc; pmd->bl_info.value_type.dec = data_block_dec; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ef7bf1dd6..a86742045 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -569,14 +569,16 @@ static void queue_io(struct mapped_device *md, struct bio *bio) * function to access the md->map field, and make sure they call * dm_put_live_table() when finished. */ -struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) +struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(&md->io_barrier); +struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) { *srcu_idx = srcu_read_lock(&md->io_barrier); return srcu_dereference(md->map, &md->io_barrier); } -void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) +void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(&md->io_barrier); +void dm_put_live_table(struct mapped_device *md, int srcu_idx) { srcu_read_unlock(&md->io_barrier, srcu_idx); } @@ -591,13 +593,15 @@ void dm_sync_table(struct mapped_device *md) * A fast alternative to dm_get_live_table/dm_put_live_table. * The caller must not block between these two functions. */ -static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) +static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU); +static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) { rcu_read_lock(); return rcu_dereference(md->map); } -static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) +static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU); +static void dm_put_live_table_fast(struct mapped_device *md) { rcu_read_unlock(); } @@ -1484,8 +1488,8 @@ static struct mapped_device *alloc_dev(int minor) spin_lock_init(&md->deferred_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); - atomic_set(&md->event_nr, 0); - atomic_set(&md->uevent_seq, 0); + atomic_set_unchecked(&md->event_nr, 0); + atomic_set_unchecked(&md->uevent_seq, 0); INIT_LIST_HEAD(&md->uevent_list); INIT_LIST_HEAD(&md->table_devices); spin_lock_init(&md->uevent_lock); @@ -1624,7 +1628,7 @@ static void event_callback(void *context) dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); - atomic_inc(&md->event_nr); + atomic_inc_unchecked(&md->event_nr); wake_up(&md->eventq); } @@ -2412,18 +2416,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, uint32_t dm_next_uevent_seq(struct mapped_device *md) { - return atomic_add_return(1, &md->uevent_seq); + return atomic_add_return_unchecked(1, &md->uevent_seq); } uint32_t dm_get_event_nr(struct mapped_device *md) { - return atomic_read(&md->event_nr); + return atomic_read_unchecked(&md->event_nr); } int dm_wait_event(struct mapped_device *md, int event_nr) { return wait_event_interruptible(md->eventq, - (event_nr != atomic_read(&md->event_nr))); + (event_nr != atomic_read_unchecked(&md->event_nr))); } void dm_uevent_add(struct mapped_device *md, struct list_head *elist) diff --git a/drivers/md/md.c b/drivers/md/md.c index 24925f2aa..1ae5ca40c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -198,10 +198,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev); * start build, activate spare */ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); -static atomic_t md_event_count; +static atomic_unchecked_t md_event_count; void md_new_event(struct mddev *mddev) { - 
atomic_inc(&md_event_count); + atomic_inc_unchecked(&md_event_count); wake_up(&md_event_waiters); } EXPORT_SYMBOL_GPL(md_new_event); @@ -1434,7 +1434,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; @@ -1700,7 +1700,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) else sb->resync_offset = cpu_to_le64(0); - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors)); sb->raid_disks = cpu_to_le32(mddev->raid_disks); sb->size = cpu_to_le64(mddev->dev_sectors); @@ -2719,7 +2719,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); static ssize_t errors_show(struct md_rdev *rdev, char *page) { - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors)); } static ssize_t @@ -2731,7 +2731,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len) rv = kstrtouint(buf, 10, &n); if (rv < 0) return rv; - atomic_set(&rdev->corrected_errors, n); + atomic_set_unchecked(&rdev->corrected_errors, n); return len; } static struct rdev_sysfs_entry rdev_errors = @@ -3180,8 +3180,8 @@ int md_rdev_init(struct md_rdev *rdev) rdev->sb_loaded = 0; rdev->bb_page = NULL; atomic_set(&rdev->nr_pending, 0); - atomic_set(&rdev->read_errors, 0); - atomic_set(&rdev->corrected_errors, 0); + atomic_set_unchecked(&rdev->read_errors, 0); + atomic_set_unchecked(&rdev->corrected_errors, 0); INIT_LIST_HEAD(&rdev->same_set); init_waitqueue_head(&rdev->blocked_wait); @@ -4403,7 +4403,7 @@ mismatch_cnt_show(struct mddev *mddev, char *page) { return sprintf(page, "%llu\n", (unsigned long long) - atomic64_read(&mddev->resync_mismatches)); + atomic64_read_unchecked(&mddev->resync_mismatches)); } static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); @@ -5095,7 +5095,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) return NULL; } -static int add_named_array(const char *val, struct kernel_param *kp) +static int add_named_array(const char *val, const struct kernel_param *kp) { /* val must be "md_*" where * is not all digits. 
* We allocate an array with a large free minor number, and @@ -5460,7 +5460,7 @@ static void md_clean(struct mddev *mddev) mddev->new_layout = 0; mddev->new_chunk_sectors = 0; mddev->curr_resync = 0; - atomic64_set(&mddev->resync_mismatches, 0); + atomic64_set_unchecked(&mddev->resync_mismatches, 0); mddev->suspend_lo = mddev->suspend_hi = 0; mddev->sync_speed_min = mddev->sync_speed_max = 0; mddev->recovery = 0; @@ -5877,9 +5877,10 @@ static int get_array_info(struct mddev *mddev, void __user *arg) info.patch_version = MD_PATCHLEVEL_VERSION; info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); info.level = mddev->level; - info.size = mddev->dev_sectors / 2; - if (info.size != mddev->dev_sectors / 2) /* overflow */ + if (2 * (sector_t)INT_MAX < mddev->dev_sectors) /* overflow */ info.size = -1; + else + info.size = mddev->dev_sectors / 2; info.nr_disks = nr; info.raid_disks = mddev->raid_disks; info.md_minor = mddev->md_minor; @@ -7458,7 +7459,7 @@ static int md_seq_show(struct seq_file *seq, void *v) spin_unlock(&pers_lock); seq_printf(seq, "\n"); - seq->poll_event = atomic_read(&md_event_count); + seq->poll_event = atomic_read_unchecked(&md_event_count); return 0; } if (v == (void*)2) { @@ -7558,7 +7559,7 @@ static int md_seq_open(struct inode *inode, struct file *file) return error; seq = file->private_data; - seq->poll_event = atomic_read(&md_event_count); + seq->poll_event = atomic_read_unchecked(&md_event_count); return error; } @@ -7575,7 +7576,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait) /* always allow read */ mask = POLLIN | POLLRDNORM; - if (seq->poll_event != atomic_read(&md_event_count)) + if (seq->poll_event != atomic_read_unchecked(&md_event_count)) mask |= POLLERR | POLLPRI; return mask; } @@ -7671,7 +7672,7 @@ static int is_mddev_idle(struct mddev *mddev, int init) struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + (int)part_stat_read(&disk->part0, sectors[1]) - - atomic_read(&disk->sync_io); + atomic_read_unchecked(&disk->sync_io); /* sync IO will cause sync_io to increase before the disk_stats * as sync_io is counted when a request starts, and * disk_stats is counted when it completes. @@ -7941,7 +7942,7 @@ void md_do_sync(struct md_thread *thread) * which defaults to physical size, but can be virtual size */ max_sectors = mddev->resync_max_sectors; - atomic64_set(&mddev->resync_mismatches, 0); + atomic64_set_unchecked(&mddev->resync_mismatches, 0); /* we don't use the checkpoint if there's a bitmap */ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) j = mddev->resync_min; @@ -8960,11 +8961,11 @@ static __exit void md_exit(void) subsys_initcall(md_init); module_exit(md_exit) -static int get_ro(char *buffer, struct kernel_param *kp) +static int get_ro(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "%d", start_readonly); } -static int set_ro(const char *val, struct kernel_param *kp) +static int set_ro(const char *val, const struct kernel_param *kp) { return kstrtouint(val, 10, (unsigned int *)&start_readonly); } diff --git a/drivers/md/md.h b/drivers/md/md.h index 2b2041773..5369974ad 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -96,13 +96,13 @@ struct md_rdev { * only maintained for arrays that * support hot removal */ - atomic_t read_errors; /* number of consecutive read errors that + atomic_unchecked_t read_errors; /* number of consecutive read errors that * we have tried to ignore. 
*/ time64_t last_read_error; /* monotonic time since our * last read error */ - atomic_t corrected_errors; /* number of corrected read errors, + atomic_unchecked_t corrected_errors; /* number of corrected read errors, * for reporting to userspace and storing * in superblock. */ @@ -289,7 +289,7 @@ struct mddev { sector_t resync_max_sectors; /* may be set by personality */ - atomic64_t resync_mismatches; /* count of sectors where + atomic64_unchecked_t resync_mismatches; /* count of sectors where * parity/replica mismatch found */ @@ -468,7 +468,7 @@ extern void mddev_unlock(struct mddev *mddev); static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) { - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); } struct md_personality diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 20557e2c6..c5fa1ef73 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -700,7 +700,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) * Flick into a mode where all blocks get allocated in the new area. */ smm->begin = old_len; - memcpy(sm, &bootstrap_ops, sizeof(*sm)); + memcpy((void *)sm, &bootstrap_ops, sizeof(*sm)); /* * Extend. @@ -738,7 +738,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) /* * Switch back to normal behaviour. */ - memcpy(sm, &ops, sizeof(*sm)); + memcpy((void *)sm, &ops, sizeof(*sm)); return r; } diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h index 3e6d1153b..ffecdeb19 100644 --- a/drivers/md/persistent-data/dm-space-map.h +++ b/drivers/md/persistent-data/dm-space-map.h @@ -71,6 +71,7 @@ struct dm_space_map { dm_sm_threshold_fn fn, void *context); }; +typedef struct dm_space_map __no_const dm_space_map_no_const; /*----------------------------------------------------------------*/ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 29e2df5cd..c36732552 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1049,7 +1049,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio) struct blk_plug_cb *cb; struct raid1_plug_cb *plug = NULL; int first_clone; - int sectors_handled; + sector_t sectors_handled; int max_sectors; sector_t start_next_window; @@ -1879,7 +1879,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (r1_sync_page_io(rdev, sect, s, bio->bi_io_vec[idx].bv_page, READ) != 0) - atomic_add(s, &rdev->corrected_errors); + atomic_add_unchecked(s, &rdev->corrected_errors); } sectors -= s; sect += s; @@ -1970,7 +1970,7 @@ static void process_checks(struct r1bio *r1_bio) } else j = 0; if (j >= 0) - atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); + atomic64_add_unchecked(r1_bio->sectors, &mddev->resync_mismatches); if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && !error)) { /* No need to write to this device. 
*/
@@ -2121,7 +2121,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
rcu_read_unlock();
if (r1_sync_page_io(rdev, sect, s, conf->tmppage, READ)) {
- atomic_add(s, &rdev->corrected_errors);
+ atomic_add_unchecked(s, &rdev->corrected_errors);
printk(KERN_INFO "md/raid1:%s: read error corrected " "(%d sectors at %llu on %s)\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda2f..be1dd5433 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1063,7 +1063,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid10_plug_cb *plug = NULL;
- int sectors_handled;
+ sector_t sectors_handled;
int max_sectors;
int sectors;
@@ -1441,7 +1441,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
- int chunk_sects = chunk_mask + 1;
+ sector_t chunk_sects = chunk_mask + 1;
struct bio *split;
@@ -1829,7 +1829,7 @@ static void end_sync_read(struct bio *bio)
/* The write handler will notice the lack of
* R10BIO_Uptodate and record any errors etc
*/
- atomic_add(r10_bio->sectors,
+ atomic_add_unchecked(r10_bio->sectors,
&conf->mirrors[d].rdev->corrected_errors);
/* for reconstruct, we always reschedule after a read.
@@ -1978,7 +1978,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
if (j == vcnt)
continue;
- atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
+ atomic64_add_unchecked(r10_bio->sectors, &mddev->resync_mismatches);
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
/* Don't fix anything. */
continue;
@@ -2177,7 +2177,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
long cur_time_mon;
unsigned long hours_since_last;
- unsigned int read_errors = atomic_read(&rdev->read_errors);
+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
cur_time_mon = ktime_get_seconds();
@@ -2198,9 +2198,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
* overflowing the shift of read_errors by hours_since_last.
*/
if (hours_since_last >= 8 * sizeof(read_errors))
- atomic_set(&rdev->read_errors, 0);
+ atomic_set_unchecked(&rdev->read_errors, 0);
else
- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
}
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
@@ -2254,8 +2254,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
return;
check_decay_read_errors(mddev, rdev);
- atomic_inc(&rdev->read_errors);
- if (atomic_read(&rdev->read_errors) > max_read_errors) {
+ atomic_inc_unchecked(&rdev->read_errors);
+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b);
@@ -2263,7 +2263,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
"md/raid10:%s: %s: Raid device exceeded " "read_error threshold [cur %d:max %d]\n",
mdname(mddev), b,
- atomic_read(&rdev->read_errors), max_read_errors);
+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
printk(KERN_NOTICE "md/raid10:%s: %s: Failing raid device\n",
mdname(mddev), b);
@@ -2420,7 +2420,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
sect + choose_data_offset(r10_bio, rdev)), bdevname(rdev->bdev, b));
- atomic_add(s, &rdev->corrected_errors);
+ atomic_add_unchecked(s, &rdev->corrected_errors);
}
rdev_dec_pending(rdev, mddev);
@@ -3191,6 +3191,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
/* resync. Schedule a read for every block at this virt offset */
int count = 0;
+ sector_t sectors;
bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
@@ -3216,7 +3217,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sector = sector_nr;
set_bit(R10BIO_IsSync, &r10_bio->state);
raid10_find_phys(conf, r10_bio);
- r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
+ sectors = (sector_nr | chunk_mask) - sector_nr + 1;
+ r10_bio->sectors = sectors;
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cce6057b9..2c080d8b8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1120,23 +1120,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
struct bio_vec bvl;
struct bvec_iter iter;
struct page *bio_page;
- int page_offset;
+ s64 page_offset;
struct async_submit_ctl submit;
enum async_tx_flags flags = 0;
if (bio->bi_iter.bi_sector >= sector)
- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
else
- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
if (frombio)
flags |= ASYNC_TX_FENCE;
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
bio_for_each_segment(bvl, bio, iter) {
- int len = bvl.bv_len;
- int clen;
- int b_offset = 0;
+ s64 len = bvl.bv_len;
+ s64 clen;
+ s64 b_offset = 0;
if (page_offset < 0) {
b_offset = -page_offset;
@@ -2040,6 +2040,10 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
return 1;
}
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
+#endif
+
static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
@@ -2050,7 +2054,11 @@ static int grow_stripes(struct r5conf *conf, int num)
"raid%d-%s", conf->level, mdname(conf->mddev));
else
sprintf(conf->cache_name[0], +#ifdef CONFIG_GRKERNSEC_HIDESYM + "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id)); +#else "raid%d-%p", conf->level, conf->mddev); +#endif sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); conf->active_name = 0; @@ -2354,21 +2362,21 @@ static void raid5_end_read_request(struct bio * bi) mdname(conf->mddev), STRIPE_SECTORS, (unsigned long long)s, bdevname(rdev->bdev, b)); - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); - if (atomic_read(&rdev->read_errors)) - atomic_set(&rdev->read_errors, 0); + if (atomic_read_unchecked(&rdev->read_errors)) + atomic_set_unchecked(&rdev->read_errors, 0); } else { const char *bdn = bdevname(rdev->bdev, b); int retry = 0; int set_bad = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); - atomic_inc(&rdev->read_errors); + atomic_inc_unchecked(&rdev->read_errors); if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) printk_ratelimited( KERN_WARNING @@ -2396,7 +2404,7 @@ static void raid5_end_read_request(struct bio * bi) mdname(conf->mddev), (unsigned long long)s, bdn); - } else if (atomic_read(&rdev->read_errors) + } else if (atomic_read_unchecked(&rdev->read_errors) > conf->max_nr_stripes) printk(KERN_WARNING "md/raid:%s: Too many read errors, failing device %s.\n", @@ -3763,7 +3771,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, */ set_bit(STRIPE_INSYNC, &sh->state); else { - atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); + atomic64_add_unchecked(STRIPE_SECTORS, &conf->mddev->resync_mismatches); if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) /* don't try to repair!! */ set_bit(STRIPE_INSYNC, &sh->state); @@ -3915,7 +3923,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, */ } } else { - atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); + atomic64_add_unchecked(STRIPE_SECTORS, &conf->mddev->resync_mismatches); if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) /* don't try to repair!! 
*/ set_bit(STRIPE_INSYNC, &sh->state); diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 9914f69a4..177e48b7b 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -882,7 +882,7 @@ static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len, return 0; } -static int dvb_net_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t dvb_net_tx(struct sk_buff *skb, struct net_device *dev) { dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 75a3f4b57..06b70a336 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -428,7 +428,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, int demux_sink_pads) { struct dvb_device *dvbdev; - struct file_operations *dvbdevfops; + file_operations_no_const *dvbdevfops; struct device *clsdev; int minor; int id, ret; diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h index 5b83e4f96..e93b22382 100644 --- a/drivers/media/dvb-frontends/af9033.h +++ b/drivers/media/dvb-frontends/af9033.h @@ -94,6 +94,6 @@ struct af9033_ops { int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff); int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid, int onoff); -}; +} __no_const; #endif /* AF9033_H */ diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c index 5b461bb57..48d570895 100644 --- a/drivers/media/dvb-frontends/cx24116.c +++ b/drivers/media/dvb-frontends/cx24116.c @@ -1462,7 +1462,7 @@ static int cx24116_tune(struct dvb_frontend *fe, bool re_tune, return cx24116_read_status(fe, status); } -static int cx24116_get_algo(struct dvb_frontend *fe) +static enum dvbfe_algo cx24116_get_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c index 65a97c95b..6217471a8 100644 --- a/drivers/media/dvb-frontends/cx24117.c +++ b/drivers/media/dvb-frontends/cx24117.c @@ -1555,7 +1555,7 @@ static int cx24117_tune(struct dvb_frontend *fe, bool re_tune, return cx24117_read_status(fe, status); } -static int cx24117_get_algo(struct dvb_frontend *fe) +static enum dvbfe_algo cx24117_get_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c index 216788ddb..378308706 100644 --- a/drivers/media/dvb-frontends/cx24120.c +++ b/drivers/media/dvb-frontends/cx24120.c @@ -1492,7 +1492,7 @@ static int cx24120_tune(struct dvb_frontend *fe, bool re_tune, return cx24120_read_status(fe, status); } -static int cx24120_get_algo(struct dvb_frontend *fe) +static enum dvbfe_algo cx24120_get_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index 113b09494..c9424e6c2 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c @@ -1009,7 +1009,7 @@ static int cx24123_tune(struct dvb_frontend *fe, return retval; } -static int cx24123_get_algo(struct dvb_frontend *fe) +static enum dvbfe_algo cx24123_get_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c index 95267c6ed..479fdb51d 100644 --- a/drivers/media/dvb-frontends/cxd2820r_core.c +++ b/drivers/media/dvb-frontends/cxd2820r_core.c @@ 
-403,7 +403,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe) return DVBFE_ALGO_SEARCH_ERROR; } -static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe) +static enum dvbfe_algo cxd2820r_get_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_CUSTOM; } diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h index d5dfafb4e..b7ed9d9fb 100644 --- a/drivers/media/dvb-frontends/dib3000.h +++ b/drivers/media/dvb-frontends/dib3000.h @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff); int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff); int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl); -}; +} __no_const; #if IS_REACHABLE(CONFIG_DVB_DIB3000MB) extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config, diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h index 205fbbff6..73562a9fb 100644 --- a/drivers/media/dvb-frontends/dib7000p.h +++ b/drivers/media/dvb-frontends/dib7000p.h @@ -62,7 +62,7 @@ struct dib7000p_ops { int (*get_adc_power)(struct dvb_frontend *fe); int (*slave_reset)(struct dvb_frontend *fe); struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg); -}; +} __no_const; #if IS_REACHABLE(CONFIG_DVB_DIB7000P) void *dib7000p_attach(struct dib7000p_ops *ops); diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h index 2b8b4b165..8cef451b3 100644 --- a/drivers/media/dvb-frontends/dib8000.h +++ b/drivers/media/dvb-frontends/dib8000.h @@ -61,7 +61,7 @@ struct dib8000_ops { int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff); int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff); struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg); -}; +} __no_const; #if IS_REACHABLE(CONFIG_DVB_DIB8000) void *dib8000_attach(struct dib8000_ops *ops); diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c index 1c7eb477e..c1cd6b869 100644 --- a/drivers/media/dvb-frontends/hd29l2.c +++ b/drivers/media/dvb-frontends/hd29l2.c @@ -555,7 +555,7 @@ static enum dvbfe_search hd29l2_search(struct dvb_frontend *fe) return DVBFE_ALGO_SEARCH_ERROR; } -static int hd29l2_get_frontend_algo(struct dvb_frontend *fe) +static enum dvbfe_algo hd29l2_get_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_CUSTOM; } diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index 0ca4e810e..fa7261112 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -1734,7 +1734,7 @@ static int lgdt3306a_get_tune_settings(struct dvb_frontend *fe, return 0; } -static int lgdt3306a_search(struct dvb_frontend *fe) +static enum dvbfe_search lgdt3306a_search(struct dvb_frontend *fe) { enum fe_status status = 0; int ret; diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c index fe79358b0..6b9c49979 100644 --- a/drivers/media/dvb-frontends/mb86a20s.c +++ b/drivers/media/dvb-frontends/mb86a20s.c @@ -2054,7 +2054,7 @@ static void mb86a20s_release(struct dvb_frontend *fe) kfree(state); } -static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe) +static enum dvbfe_algo mb86a20s_get_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } diff --git a/drivers/media/dvb-frontends/mt312.c 
b/drivers/media/dvb-frontends/mt312.c index fc08429c9..c81669749 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -381,7 +381,7 @@ static int mt312_send_master_cmd(struct dvb_frontend *fe, } static int mt312_send_burst(struct dvb_frontend *fe, - const enum fe_sec_mini_cmd c) + enum fe_sec_mini_cmd c) { struct mt312_state *state = fe->demodulator_priv; const u8 mini_tab[2] = { 0x02, 0x03 }; @@ -405,7 +405,7 @@ static int mt312_send_burst(struct dvb_frontend *fe, } static int mt312_set_tone(struct dvb_frontend *fe, - const enum fe_sec_tone_mode t) + enum fe_sec_tone_mode t) { struct mt312_state *state = fe->demodulator_priv; const u8 tone_tab[2] = { 0x01, 0x00 }; @@ -429,7 +429,7 @@ static int mt312_set_tone(struct dvb_frontend *fe, } static int mt312_set_voltage(struct dvb_frontend *fe, - const enum fe_sec_voltage v) + enum fe_sec_voltage v) { struct mt312_state *state = fe->demodulator_priv; const u8 volt_tab[3] = { 0x00, 0x40, 0x00 }; diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c index b5e3d90eb..bd00dc60e 100644 --- a/drivers/media/dvb-frontends/s921.c +++ b/drivers/media/dvb-frontends/s921.c @@ -464,7 +464,7 @@ static int s921_tune(struct dvb_frontend *fe, return rc; } -static int s921_get_algo(struct dvb_frontend *fe) +static enum dvbfe_algo s921_get_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 2783531f9..e80f3f41e 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -58,9 +58,10 @@ static int media_device_close(struct file *filp) return 0; } -static int media_device_get_info(struct media_device *dev, - struct media_device_info *info) +static long media_device_get_info(struct media_device *dev, + void *_info) { + struct media_device_info *info = _info; memset(info, 0, sizeof(*info)); if (dev->driver_name[0]) @@ -98,8 +99,9 @@ static struct media_entity *find_entity(struct media_device *mdev, u32 id) } static long media_device_enum_entities(struct media_device *mdev, - struct media_entity_desc *entd) + void *_entd) { + struct media_entity_desc *entd = _entd; struct media_entity *ent; ent = find_entity(mdev, entd->id); @@ -151,8 +153,9 @@ static void media_device_kpad_to_upad(const struct media_pad *kpad, } static long media_device_enum_links(struct media_device *mdev, - struct media_links_enum *links) + void *_links) { + struct media_links_enum *links = _links; struct media_entity *entity; entity = find_entity(mdev, links->entity); @@ -199,8 +202,9 @@ static long media_device_enum_links(struct media_device *mdev, } static long media_device_setup_link(struct media_device *mdev, - struct media_link_desc *linkd) + void *_linkd) { + struct media_link_desc *linkd = _linkd; struct media_link *link = NULL; struct media_entity *source; struct media_entity *sink; @@ -227,8 +231,9 @@ static long media_device_setup_link(struct media_device *mdev, } static long media_device_get_topology(struct media_device *mdev, - struct media_v2_topology *topo) + void *_topo) { + struct media_v2_topology *topo = _topo; struct media_entity *entity; struct media_interface *intf; struct media_pad *pad; @@ -387,7 +392,7 @@ static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd) #define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user) \ [_IOC_NR(MEDIA_IOC_##__cmd)] = { \ .cmd = MEDIA_IOC_##__cmd, \ - .fn = (long (*)(struct media_device *, void *))func, \ + .fn = func, \ .flags = fl, \ 
.arg_from_user = from_user, \ .arg_to_user = to_user, \ diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c index 35bc9b228..d5072b11b 100644 --- a/drivers/media/pci/bt8xx/dst.c +++ b/drivers/media/pci/bt8xx/dst.c @@ -1683,7 +1683,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe, return 0; } -static int dst_get_tuning_algo(struct dvb_frontend *fe) +static enum dvbfe_algo dst_get_tuning_algo(struct dvb_frontend *fe) { return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW; } diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index d83eb3b10..68382bf89 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION); /* ------------------------------------------------------------------ */ -static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; -static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; -static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; +static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; +static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; +static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; module_param_array(video_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 22cfaff9d..8544beb73 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = { MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl); /* ivtv instance counter */ -static atomic_t ivtv_instance = ATOMIC_INIT(0); +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0); /* Parameter declarations */ static int cardtype[IVTV_MAX_CARDS]; diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c index d0e70dc0e..e4fee6846 100644 --- a/drivers/media/pci/pt1/va1j5jf8007s.c +++ b/drivers/media/pci/pt1/va1j5jf8007s.c @@ -102,7 +102,7 @@ static int va1j5jf8007s_read_snr(struct dvb_frontend *fe, u16 *snr) return 0; } -static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe) +static enum dvbfe_algo va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c index 0268f20b8..de9dff74a 100644 --- a/drivers/media/pci/pt1/va1j5jf8007t.c +++ b/drivers/media/pci/pt1/va1j5jf8007t.c @@ -92,7 +92,7 @@ static int va1j5jf8007t_read_snr(struct dvb_frontend *fe, u16 *snr) return 0; } -static int va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe) +static enum dvbfe_algo va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c index f50d07229..0214f25e1 100644 --- a/drivers/media/pci/solo6x10/solo6x10-core.c +++ b/drivers/media/pci/solo6x10/solo6x10-core.c @@ -411,7 +411,7 @@ static void solo_device_release(struct device *dev) static int solo_sysfs_init(struct solo_dev *solo_dev) { - struct bin_attribute *sdram_attr = &solo_dev->sdram_attr; + bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr; struct device *dev = &solo_dev->dev; const char *driver; int i; diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c index 6a35107ac..36e93540b 100644 --- a/drivers/media/pci/solo6x10/solo6x10-g723.c +++ 
b/drivers/media/pci/solo6x10/solo6x10-g723.c @@ -350,7 +350,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev) int solo_g723_init(struct solo_dev *solo_dev) { - static struct snd_device_ops ops = { NULL }; + static struct snd_device_ops ops = { }; struct snd_card *card; struct snd_kcontrol_new kctl; char name[32]; diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c index 8c8484674..27b4f83d5 100644 --- a/drivers/media/pci/solo6x10/solo6x10-p2m.c +++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c @@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev, /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */ if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) { - p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M; + p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M; if (p2m_id < 0) p2m_id = -p2m_id; } diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h index 3f8da5e8c..b1de8beae 100644 --- a/drivers/media/pci/solo6x10/solo6x10.h +++ b/drivers/media/pci/solo6x10/solo6x10.h @@ -216,7 +216,7 @@ struct solo_dev { /* P2M DMA Engine */ struct solo_p2m_dev p2m_dev[SOLO_NR_P2M]; - atomic_t p2m_count; + atomic_unchecked_t p2m_count; int p2m_jiffies; unsigned int p2m_timeouts; diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c index aeb2b4e2d..53420d11e 100644 --- a/drivers/media/pci/sta2x11/sta2x11_vip.c +++ b/drivers/media/pci/sta2x11/sta2x11_vip.c @@ -775,8 +775,9 @@ static struct video_device video_dev_template = { * * IRQ_HANDLED, interrupt done. */ -static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip) +static irqreturn_t vip_irq(int irq, void *_vip) { + struct sta2x11_vip *vip = _vip; unsigned int status; status = reg_read(vip, DVP_ITS); @@ -1058,7 +1059,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev, spin_lock_init(&vip->slock); ret = request_irq(pdev->irq, - (irq_handler_t) vip_irq, + vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip); if (ret) { dev_err(&pdev->dev, "request_irq failed\n"); diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c index 8474528be..6c4e44242 100644 --- a/drivers/media/pci/tw68/tw68-core.c +++ b/drivers/media/pci/tw68/tw68-core.c @@ -61,7 +61,7 @@ static unsigned int card[] = {[0 ... 
(TW68_MAXBOARDS - 1)] = UNSET }; module_param_array(card, int, NULL, 0444); MODULE_PARM_DESC(card, "card type"); -static atomic_t tw68_instance = ATOMIC_INIT(0); +static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0); /* ------------------------------------------------------------------ */ diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c index 71a0453b1..279d4478c 100644 --- a/drivers/media/pci/tw686x/tw686x-core.c +++ b/drivers/media/pci/tw686x/tw686x-core.c @@ -72,12 +72,12 @@ static const char *dma_mode_name(unsigned int mode) } } -static int tw686x_dma_mode_get(char *buffer, struct kernel_param *kp) +static int tw686x_dma_mode_get(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, dma_mode_name(dma_mode)); } -static int tw686x_dma_mode_set(const char *val, struct kernel_param *kp) +static int tw686x_dma_mode_set(const char *val, const struct kernel_param *kp) { if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_MEMCPY))) dma_mode = TW686X_DMA_MODE_MEMCPY; diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h index 4e7db8939..bd7ef95aa 100644 --- a/drivers/media/pci/zoran/zoran.h +++ b/drivers/media/pci/zoran/zoran.h @@ -178,7 +178,6 @@ struct zoran_fh; struct zoran_mapping { struct zoran_fh *fh; - atomic_t count; }; struct zoran_buffer { diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c index 9d2697f5b..65fb18f61 100644 --- a/drivers/media/pci/zoran/zoran_card.c +++ b/drivers/media/pci/zoran/zoran_card.c @@ -1356,7 +1356,7 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (zr->card.video_codec) { codec_name = codecid_to_modulename(zr->card.video_codec); if (codec_name) { - result = request_module(codec_name); + result = request_module("%s", codec_name); if (result) { dprintk(1, KERN_ERR @@ -1368,7 +1368,7 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (zr->card.video_vfe) { vfe_name = codecid_to_modulename(zr->card.video_vfe); if (vfe_name) { - result = request_module(vfe_name); + result = request_module("%s", vfe_name); if (result < 0) { dprintk(1, KERN_ERR diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c index d6b631add..5479ecac8 100644 --- a/drivers/media/pci/zoran/zoran_driver.c +++ b/drivers/media/pci/zoran/zoran_driver.c @@ -2593,8 +2593,6 @@ zoran_poll (struct file *file, static void zoran_vm_open (struct vm_area_struct *vma) { - struct zoran_mapping *map = vma->vm_private_data; - atomic_inc(&map->count); } static void @@ -2722,7 +2720,6 @@ zoran_mmap (struct file *file, return res; } map->fh = fh; - atomic_set(&map->count, 1); vma->vm_ops = &zoran_vm_ops; vma->vm_flags |= VM_DONTEXPAND; diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c index b76c80bdf..4eb3be37b 100644 --- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c +++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c @@ -665,10 +665,10 @@ static int h264_enc_deinit(unsigned long handle) } static const struct venc_common_if venc_h264_if = { - h264_enc_init, - h264_enc_encode, - h264_enc_set_param, - h264_enc_deinit, + .init = h264_enc_init, + .encode = h264_enc_encode, + .set_param = h264_enc_set_param, + .deinit = h264_enc_deinit, }; const struct venc_common_if *get_h264_enc_comm_if(void); diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c 
b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c index 544f57186..a6fa145f2 100644 --- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c +++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c @@ -470,10 +470,10 @@ static int vp8_enc_deinit(unsigned long handle) } static const struct venc_common_if venc_vp8_if = { - vp8_enc_init, - vp8_enc_encode, - vp8_enc_set_param, - vp8_enc_deinit, + .init = vp8_enc_init, + .encode = vp8_enc_encode, + .set_param = vp8_enc_set_param, + .deinit = vp8_enc_deinit, }; const struct venc_common_if *get_vp8_enc_comm_if(void); diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index a31b95cb3..485cc47c9 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -63,7 +63,6 @@ enum omap_vout_channels { OMAP_VIDEO2, }; -static struct videobuf_queue_ops video_vbq_ops; /* Variables configurable through module params*/ static u32 video1_numbuffers = 3; static u32 video2_numbuffers = 3; @@ -1001,6 +1000,12 @@ static int omap_vout_open(struct file *file) { struct videobuf_queue *q; struct omap_vout_device *vout = NULL; + static struct videobuf_queue_ops video_vbq_ops = { + .buf_setup = omap_vout_buffer_setup, + .buf_prepare = omap_vout_buffer_prepare, + .buf_release = omap_vout_buffer_release, + .buf_queue = omap_vout_buffer_queue, + }; vout = video_drvdata(file); v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__); @@ -1018,10 +1023,6 @@ static int omap_vout_open(struct file *file) vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; q = &vout->vbq; - video_vbq_ops.buf_setup = omap_vout_buffer_setup; - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare; - video_vbq_ops.buf_release = omap_vout_buffer_release; - video_vbq_ops.buf_queue = omap_vout_buffer_queue; spin_lock_init(&vout->vbq_lock); videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev, diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index edd1c1de4..fbec29e8f 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -1723,7 +1723,7 @@ static int soc_camera_probe(struct soc_camera_host *ici, goto eadd; if (shd->module_name) - ret = request_module(shd->module_name); + ret = request_module("%s", shd->module_name); ret = shd->add_device(icd); if (ret < 0) diff --git a/drivers/media/platform/sti/c8sectpfe/Kconfig b/drivers/media/platform/sti/c8sectpfe/Kconfig index 7420a5057..e6f31a097 100644 --- a/drivers/media/platform/sti/c8sectpfe/Kconfig +++ b/drivers/media/platform/sti/c8sectpfe/Kconfig @@ -4,6 +4,7 @@ config DVB_C8SECTPFE depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST select FW_LOADER select DEBUG_FS + depends on !GRKERNSEC_KMEM select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c index 82affaedf..42833ec00 100644 --- a/drivers/media/radio/radio-cadet.c +++ b/drivers/media/radio/radio-cadet.c @@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo unsigned char readbuf[RDS_BUFFER]; int i = 0; + if (count > RDS_BUFFER) + return -EFAULT; mutex_lock(&dev->lock); if (dev->rdsstat == 0) cadet_start_rds(dev); @@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo readbuf[i++] = dev->rdsbuf[dev->rdsout++]; 
mutex_unlock(&dev->lock); - if (i && copy_to_user(data, readbuf, i)) - return -EFAULT; + if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i))) + i = -EFAULT; + return i; } diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c index 8253f79d5..ca5f5792f 100644 --- a/drivers/media/radio/radio-maxiradio.c +++ b/drivers/media/radio/radio-maxiradio.c @@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number"); /* TEA5757 pin mappings */ static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16; -static atomic_t maxiradio_instance = ATOMIC_INIT(0); +static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0); #define PCI_VENDOR_ID_GUILLEMOT 0x5046 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001 diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index 85667a95f..ec4dc0a3a 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -79,7 +79,7 @@ struct shark_device { u32 last_val; }; -static atomic_t shark_instance = ATOMIC_INIT(0); +static atomic_unchecked_t shark_instance = ATOMIC_INIT(0); static void shark_write_val(struct snd_tea575x *tea, u32 val) { diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index 0e65a85d5..3fa6f5c57 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -74,7 +74,7 @@ struct shark_device { u8 *transfer_buffer; }; -static atomic_t shark_instance = ATOMIC_INIT(0); +static atomic_unchecked_t shark_instance = ATOMIC_INIT(0); static int shark_write_reg(struct radio_tea5777 *tea, u64 reg) { diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 271f725b1..35e8c8f81 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev) struct si476x_radio *radio; struct v4l2_ctrl *ctrl; - static atomic_t instance = ATOMIC_INIT(0); + static atomic_unchecked_t instance = ATOMIC_INIT(0); radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL); if (!radio) diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index 749f07910..9e4c89388 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444); MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries"); /* Radio Nr */ -static u32 radio_nr = -1; +static int radio_nr = -1; module_param(radio_nr, int, 0444); MODULE_PARM_DESC(radio_nr, "Radio Nr"); diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c index ec3a84be0..ec707fd62 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c @@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) { - struct hexline hx; - u8 reset; + struct hexline *hx; + u8 *reset; int ret,pos=0; + reset = kmalloc(1, GFP_KERNEL); + if (reset == NULL) + return -ENOMEM; + + hx = kmalloc(sizeof(struct hexline), GFP_KERNEL); + if (hx == NULL) { + kfree(reset); + return -ENOMEM; + } + /* stop the CPU */ - reset = 1; - if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) + reset[0] = 1; + if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) 
!= 1) err("could not stop the USB controller CPU."); - while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) { - deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk); - ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len); + while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) { + deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk); + ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len); - if (ret != hx.len) { + if (ret != hx->len) { err("error while transferring firmware " "(transferred size: %d, block size: %d)", - ret,hx.len); + ret,hx->len); ret = -EINVAL; break; } } if (ret < 0) { err("firmware download failed at %d with %d",pos,ret); + kfree(reset); + kfree(hx); return ret; } if (ret == 0) { /* restart the CPU */ - reset = 0; - if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { + reset[0] = 0; + if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } } else ret = -EIO; + kfree(reset); + kfree(hx); + return ret; } EXPORT_SYMBOL(usb_cypress_load_firmware); diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index d774f9374..8405f45f4 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c @@ -143,8 +143,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, /* handle tuner-i2c-nak */ if (!(b[0] == I2C_STATUS_NAK && device_addr == 0x60 - /* && device_is_technisat_usb2 */)) + /* && device_is_technisat_usb2 */)) { + ret = -ENODEV; goto err; + } } deb_i2c("status: %d, ", b[0]); diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c index c45f30715..7d7926180 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-context.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c @@ -103,8 +103,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp) } -static void pvr2_context_notify(struct pvr2_context *mp) +static void pvr2_context_notify(void *_mp) { + struct pvr2_context *mp = _mp; + pvr2_context_set_notify(mp,!0); } @@ -119,9 +121,7 @@ static void pvr2_context_check(struct pvr2_context *mp) pvr2_trace(PVR2_TRACE_CTXT, "pvr2_context %p (initialize)", mp); /* Finish hardware initialization */ - if (pvr2_hdw_initialize(mp->hdw, - (void (*)(void *))pvr2_context_notify, - mp)) { + if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) { mp->video_stream.stream = pvr2_hdw_get_video_stream(mp->hdw); /* Trigger interface initialization. 
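
The usb_cypress_load_firmware() rework above moves the hexline and reset buffers off the stack and onto the heap: buffers handed to the USB core may be used for DMA, and DMA to on-stack memory is not reliable (and breaks outright with virtually mapped stacks), so kmalloc()ed copies are allocated up front and freed on every exit path. A minimal sketch of the same rule follows; the helper name and the 0xa0 vendor request are placeholders, not taken from the driver above.

#include <linux/slab.h>
#include <linux/usb.h>

/*
 * Sketch only: write one byte to a device register over the control pipe.
 * The byte lives in a kmalloc()ed buffer because usb_control_msg() may DMA
 * from it; passing the address of a stack variable would be a bug.
 */
static int write_reg_byte(struct usb_device *udev, u16 addr, u8 value)
{
        u8 *buf = kmalloc(1, GFP_KERNEL);
        int ret;

        if (!buf)
                return -ENOMEM;

        *buf = value;
        ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                              0xa0, USB_TYPE_VENDOR | USB_DIR_OUT,
                              addr, 0, buf, 1, 5000 /* ms */);
        kfree(buf);
        return ret < 0 ? ret : 0;
}
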
By doing this diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c index 8c9579343..2309b9e06 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c @@ -101,8 +101,10 @@ static int pvr2_dvb_feed_thread(void *data) return stat; } -static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap) +static void pvr2_dvb_notify(void *_adap) { + struct pvr2_dvb_adapter *adap = _adap; + wake_up(&adap->buffer_wait_data); } @@ -161,8 +163,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap) if (!(adap->buffer_storage[idx])) return -ENOMEM; } - pvr2_stream_set_callback(pvr->video_stream.stream, - (pvr2_stream_callback) pvr2_dvb_notify, adap); + pvr2_stream_set_callback(pvr->video_stream.stream, pvr2_dvb_notify, adap); ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT); if (ret < 0) return ret; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index d7c54e08d..54becbacc 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -2097,7 +2097,7 @@ static void pvr2_hdw_load_modules(struct pvr2_hdw *hdw) cm = &hdw->hdw_desc->client_modules; for (idx = 0; idx < cm->cnt; idx++) { - request_module(cm->lst[idx]); + request_module("%s", cm->lst[idx]); } ct = &hdw->hdw_desc->client_table; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c index 9a596a3a4..38de071f8 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-std.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c @@ -216,7 +216,7 @@ unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize, bufSize -= c2; bufPtr += c2; c2 = scnprintf(bufPtr,bufSize, - ip->name); + "%s", ip->name); c1 += c2; bufSize -= c2; bufPtr += c2; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index 2cc4d2b6f..3a559c85b 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -1090,8 +1090,10 @@ static int pvr2_v4l2_open(struct file *file) } -static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp) +static void pvr2_v4l2_notify(void *_fhp) { + struct pvr2_v4l2_fh *fhp = _fhp; + wake_up(&fhp->wait_data); } @@ -1124,7 +1126,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh) hdw = fh->channel.mc_head->hdw; sp = fh->pdi->stream->stream; - pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh); + pvr2_stream_set_callback(sp,pvr2_v4l2_notify,fh); pvr2_hdw_set_stream_type(hdw,fh->pdi->config); if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret; return pvr2_ioread_set_enabled(fh->rhp,!0); diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 302e284a9..93781d63d 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2078,7 +2078,7 @@ static int uvc_reset_resume(struct usb_interface *intf) * Module parameters */ -static int uvc_clock_param_get(char *buffer, struct kernel_param *kp) +static int uvc_clock_param_get(char *buffer, const struct kernel_param *kp) { if (uvc_clock_param == CLOCK_MONOTONIC) return sprintf(buffer, "CLOCK_MONOTONIC"); @@ -2086,7 +2086,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp) return sprintf(buffer, "CLOCK_REALTIME"); } -static int uvc_clock_param_set(const char *val, struct kernel_param *kp) +static int uvc_clock_param_set(const char *val, const struct kernel_param *kp) { if (strncasecmp(val, 
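
The pvrusb2-hdw.c and pvrusb2-std.c hunks above are the usual format-string hardening: a string that only exists at run time is passed as an argument behind a constant "%s" format rather than being used as the format itself, so a stray '%' in it can never be interpreted as a conversion specifier. Sketch of the rule with invented helper names:

#include <linux/kmod.h>
#include <linux/kernel.h>

/* 'name' comes from a device table, not from a string literal. */
static void load_helper_module(const char *name)
{
        /* request_module(name) would treat any '%' in name as a format */
        request_module("%s", name);
}

static int append_name(char *buf, size_t size, const char *name)
{
        /* same rule for the printf family */
        return scnprintf(buf, size, "%s", name);
}
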
"clock_", strlen("clock_")) == 0) val += strlen("clock_"); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 57cfe26a3..b7ec2982e 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -268,7 +268,7 @@ struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, BUG_ON(!v4l2_dev); if (info->modalias[0]) - request_module(info->modalias); + request_module("%s", info->modalias); spi = spi_new_device(master, info); diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index bacecbd68..277d1f866 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -449,7 +449,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user * by passing a very big num_planes value */ uplane = compat_alloc_user_space(num_planes * sizeof(struct v4l2_plane)); - kp->m.planes = (__force struct v4l2_plane *)uplane; + kp->m.planes = (__force_kernel struct v4l2_plane *)uplane; while (--num_planes >= 0) { ret = get_v4l2_plane32(uplane, uplane32, kp->memory); @@ -519,7 +519,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user if (num_planes == 0) return 0; - uplane = (__force struct v4l2_plane __user *)kp->m.planes; + uplane = (struct v4l2_plane __force_user *)kp->m.planes; if (get_user(p, &up->m.planes)) return -EFAULT; uplane32 = compat_ptr(p); @@ -581,7 +581,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame get_user(kp->flags, &up->flags) || copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) return -EFAULT; - kp->base = (__force void *)compat_ptr(tmp); + kp->base = (__force_kernel void *)compat_ptr(tmp); return 0; } @@ -687,7 +687,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext n * sizeof(struct v4l2_ext_control32))) return -EFAULT; kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control)); - kp->controls = (__force struct v4l2_ext_control *)kcontrols; + kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols; while (--n >= 0) { u32 id; @@ -714,7 +714,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext { struct v4l2_ext_control32 __user *ucontrols; struct v4l2_ext_control __user *kcontrols = - (__force struct v4l2_ext_control __user *)kp->controls; + (struct v4l2_ext_control __force_user *)kp->controls; int n = kp->count; compat_caddr_t p; @@ -799,7 +799,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) get_user(tmp, &up->edid) || copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) return -EFAULT; - kp->edid = (__force u8 *)compat_ptr(tmp); + kp->edid = (__force_kernel u8 *)compat_ptr(tmp); return 0; } diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c index 62bbed76d..8bb0ce5f8 100644 --- a/drivers/media/v4l2-core/v4l2-device.c +++ b/drivers/media/v4l2-core/v4l2-device.c @@ -74,9 +74,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev) EXPORT_SYMBOL_GPL(v4l2_device_put); int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, - atomic_t *instance) + atomic_unchecked_t *instance) { - int num = atomic_inc_return(instance) - 1; + int num = atomic_inc_return_unchecked(instance) - 1; int len = strlen(basename); if (basename[len - 1] >= '0' && basename[len - 1] <= '9') diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c 
b/drivers/media/v4l2-core/v4l2-ioctl.c index c52d94c01..5edc58fbf 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2451,49 +2451,216 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, return -ENOTTY; } +static int v4l_vidioc_g_fbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_fbuf(file, fh, arg); +} + +static int v4l_vidioc_s_fbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_s_fbuf(file, fh, arg); +} + +static int v4l_vidioc_expbuf(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_expbuf(file, fh, arg); +} + +static int v4l_vidioc_g_std(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_std(file, fh, arg); +} + +static int v4l_vidioc_g_audio(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_audio(file, fh, arg); +} + +static int v4l_vidioc_s_audio(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_s_audio(file, fh, arg); +} + +static int v4l_vidioc_g_input(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_input(file, fh, arg); +} + +static int v4l_vidioc_g_edid(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_edid(file, fh, arg); +} + +static int v4l_vidioc_s_edid(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_s_edid(file, fh, arg); +} + +static int v4l_vidioc_g_output(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_output(file, fh, arg); +} + +static int v4l_vidioc_g_audout(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_audout(file, fh, arg); +} + +static int v4l_vidioc_s_audout(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_s_audout(file, fh, arg); +} + +static int v4l_vidioc_g_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_selection(file, fh, arg); +} + +static int v4l_vidioc_s_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_s_selection(file, fh, arg); +} + +static int v4l_vidioc_g_jpegcomp(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_jpegcomp(file, fh, arg); +} + +static int v4l_vidioc_s_jpegcomp(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_s_jpegcomp(file, fh, arg); +} + +static int v4l_vidioc_enumaudio(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_enumaudio(file, fh, arg); +} + +static int v4l_vidioc_enumaudout(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_enumaudout(file, fh, arg); +} + +static int v4l_vidioc_enum_framesizes(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_enum_framesizes(file, fh, arg); +} + +static int v4l_vidioc_enum_frameintervals(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_enum_frameintervals(file, fh, arg); +} + +static int 
v4l_vidioc_g_enc_index(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_enc_index(file, fh, arg); +} + +static int v4l_vidioc_encoder_cmd(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_encoder_cmd(file, fh, arg); +} + +static int v4l_vidioc_try_encoder_cmd(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_try_encoder_cmd(file, fh, arg); +} + +static int v4l_vidioc_decoder_cmd(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_decoder_cmd(file, fh, arg); +} + +static int v4l_vidioc_try_decoder_cmd(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_try_decoder_cmd(file, fh, arg); +} + +static int v4l_vidioc_s_dv_timings(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_s_dv_timings(file, fh, arg); +} + +static int v4l_vidioc_g_dv_timings(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_g_dv_timings(file, fh, arg); +} + +static int v4l_vidioc_enum_dv_timings(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_enum_dv_timings(file, fh, arg); +} + +static int v4l_vidioc_query_dv_timings(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_query_dv_timings(file, fh, arg); +} + +static int v4l_vidioc_dv_timings_cap(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + return ops->vidioc_dv_timings_cap(file, fh, arg); +} + struct v4l2_ioctl_info { unsigned int ioctl; u32 flags; const char * const name; - union { - u32 offset; - int (*func)(const struct v4l2_ioctl_ops *ops, - struct file *file, void *fh, void *p); - } u; + int (*func)(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *p); void (*debug)(const void *arg, bool write_only); -}; +} __do_const; +typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const; /* This control needs a priority check */ #define INFO_FL_PRIO (1 << 0) /* This control can be valid if the filehandle passes a control handler. 
*/ #define INFO_FL_CTRL (1 << 1) -/* This is a standard ioctl, no need for special code */ -#define INFO_FL_STD (1 << 2) /* This is ioctl has its own function */ -#define INFO_FL_FUNC (1 << 3) +#define INFO_FL_FUNC (1 << 2) /* Queuing ioctl */ -#define INFO_FL_QUEUE (1 << 4) +#define INFO_FL_QUEUE (1 << 3) /* Zero struct from after the field to the end */ #define INFO_FL_CLEAR(v4l2_struct, field) \ ((offsetof(struct v4l2_struct, field) + \ sizeof(((struct v4l2_struct *)0)->field)) << 16) #define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16) -#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags) \ - [_IOC_NR(_ioctl)] = { \ - .ioctl = _ioctl, \ - .flags = _flags | INFO_FL_STD, \ - .name = #_ioctl, \ - .u.offset = offsetof(struct v4l2_ioctl_ops, _vidioc), \ - .debug = _debug, \ - } - #define IOCTL_INFO_FNC(_ioctl, _func, _debug, _flags) \ [_IOC_NR(_ioctl)] = { \ .ioctl = _ioctl, \ .flags = _flags | INFO_FL_FUNC, \ .name = #_ioctl, \ - .u.func = _func, \ + .func = _func, \ .debug = _debug, \ } @@ -2504,17 +2671,17 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = { IOCTL_INFO_FNC(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE), IOCTL_INFO_FNC(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)), - IOCTL_INFO_STD(VIDIOC_G_FBUF, vidioc_g_fbuf, v4l_print_framebuffer, 0), - IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO), + IOCTL_INFO_FNC(VIDIOC_G_FBUF, v4l_vidioc_g_fbuf, v4l_print_framebuffer, 0), + IOCTL_INFO_FNC(VIDIOC_S_FBUF, v4l_vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE), - IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)), + IOCTL_INFO_FNC(VIDIOC_EXPBUF, v4l_vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)), IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE), IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE), IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE), IOCTL_INFO_FNC(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)), IOCTL_INFO_FNC(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO), - IOCTL_INFO_STD(VIDIOC_G_STD, vidioc_g_std, v4l_print_std, 0), + IOCTL_INFO_FNC(VIDIOC_G_STD, v4l_vidioc_g_std, v4l_print_std, 0), IOCTL_INFO_FNC(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)), IOCTL_INFO_FNC(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)), @@ -2522,19 +2689,19 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = { IOCTL_INFO_FNC(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL), IOCTL_INFO_FNC(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)), IOCTL_INFO_FNC(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO), - IOCTL_INFO_STD(VIDIOC_G_AUDIO, vidioc_g_audio, v4l_print_audio, 0), - IOCTL_INFO_STD(VIDIOC_S_AUDIO, vidioc_s_audio, v4l_print_audio, INFO_FL_PRIO), + IOCTL_INFO_FNC(VIDIOC_G_AUDIO, v4l_vidioc_g_audio, v4l_print_audio, 0), + IOCTL_INFO_FNC(VIDIOC_S_AUDIO, 
v4l_vidioc_s_audio, v4l_print_audio, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)), IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)), - IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0), + IOCTL_INFO_FNC(VIDIOC_G_INPUT, v4l_vidioc_g_input, v4l_print_u32, 0), IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO), - IOCTL_INFO_STD(VIDIOC_G_EDID, vidioc_g_edid, v4l_print_edid, 0), - IOCTL_INFO_STD(VIDIOC_S_EDID, vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO), - IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0), + IOCTL_INFO_FNC(VIDIOC_G_EDID, v4l_vidioc_g_edid, v4l_print_edid, 0), + IOCTL_INFO_FNC(VIDIOC_S_EDID, v4l_vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO), + IOCTL_INFO_FNC(VIDIOC_G_OUTPUT, v4l_vidioc_g_output, v4l_print_u32, 0), IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)), - IOCTL_INFO_STD(VIDIOC_G_AUDOUT, vidioc_g_audout, v4l_print_audioout, 0), - IOCTL_INFO_STD(VIDIOC_S_AUDOUT, vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO), + IOCTL_INFO_FNC(VIDIOC_G_AUDOUT, v4l_vidioc_g_audout, v4l_print_audioout, 0), + IOCTL_INFO_FNC(VIDIOC_S_AUDOUT, v4l_vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)), IOCTL_INFO_FNC(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)), @@ -2542,14 +2709,14 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = { IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)), IOCTL_INFO_FNC(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)), IOCTL_INFO_FNC(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO), - IOCTL_INFO_STD(VIDIOC_G_SELECTION, vidioc_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)), - IOCTL_INFO_STD(VIDIOC_S_SELECTION, vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)), - IOCTL_INFO_STD(VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp, v4l_print_jpegcompression, 0), - IOCTL_INFO_STD(VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO), + IOCTL_INFO_FNC(VIDIOC_G_SELECTION, v4l_vidioc_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO_FNC(VIDIOC_S_SELECTION, v4l_vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO_FNC(VIDIOC_G_JPEGCOMP, v4l_vidioc_g_jpegcomp, v4l_print_jpegcompression, 0), + IOCTL_INFO_FNC(VIDIOC_S_JPEGCOMP, v4l_vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0), IOCTL_INFO_FNC(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0), - IOCTL_INFO_STD(VIDIOC_ENUMAUDIO, vidioc_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)), - IOCTL_INFO_STD(VIDIOC_ENUMAUDOUT, vidioc_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)), + IOCTL_INFO_FNC(VIDIOC_ENUMAUDIO, v4l_vidioc_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)), + IOCTL_INFO_FNC(VIDIOC_ENUMAUDOUT, v4l_vidioc_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)), 
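
This block of v4l2-ioctl.c changes removes the INFO_FL_STD path, which looked the handler up by byte offset inside struct v4l2_ioctl_ops and called it through a cast. Each table entry now names a small, correctly typed wrapper, so the dispatch table contains only function pointers of one signature and can live in read-only memory (the __do_const annotation above). A stand-alone sketch of that table-of-wrappers shape, with invented names:

#include <stdio.h>

struct ops {
        int (*get_gain)(void *ctx, int *val);
};

/* every entry has the same wrapper signature, so the table can be const */
struct cmd_info {
        unsigned int cmd;
        int (*func)(const struct ops *ops, void *ctx, void *arg);
};

static int do_get_gain(const struct ops *ops, void *ctx, void *arg)
{
        return ops->get_gain(ctx, arg);
}

static const struct cmd_info table[] = {
        [0] = { .cmd = 0, .func = do_get_gain },
};

static int my_get_gain(void *ctx, int *val)
{
        (void)ctx;
        *val = 42;
        return 0;
}

int main(void)
{
        const struct ops my_ops = { .get_gain = my_get_gain };
        int gain = 0;

        table[0].func(&my_ops, NULL, &gain);
        printf("gain=%d\n", gain);
        return 0;
}

The cost is a page of trivial wrappers; the gain is that every indirect call in the dispatcher has a uniform, checkable type and no pointer arithmetic on function pointers.
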
IOCTL_INFO_FNC(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0), IOCTL_INFO_FNC(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)), @@ -2557,26 +2724,26 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = { IOCTL_INFO_FNC(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL), IOCTL_INFO_FNC(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL), IOCTL_INFO_FNC(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL), - IOCTL_INFO_STD(VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)), - IOCTL_INFO_STD(VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)), - IOCTL_INFO_STD(VIDIOC_G_ENC_INDEX, vidioc_g_enc_index, v4l_print_enc_idx, 0), - IOCTL_INFO_STD(VIDIOC_ENCODER_CMD, vidioc_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), - IOCTL_INFO_STD(VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), - IOCTL_INFO_STD(VIDIOC_DECODER_CMD, vidioc_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO), - IOCTL_INFO_STD(VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0), + IOCTL_INFO_FNC(VIDIOC_ENUM_FRAMESIZES, v4l_vidioc_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)), + IOCTL_INFO_FNC(VIDIOC_ENUM_FRAMEINTERVALS, v4l_vidioc_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)), + IOCTL_INFO_FNC(VIDIOC_G_ENC_INDEX, v4l_vidioc_g_enc_index, v4l_print_enc_idx, 0), + IOCTL_INFO_FNC(VIDIOC_ENCODER_CMD, v4l_vidioc_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), + IOCTL_INFO_FNC(VIDIOC_TRY_ENCODER_CMD, v4l_vidioc_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), + IOCTL_INFO_FNC(VIDIOC_DECODER_CMD, v4l_vidioc_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO), + IOCTL_INFO_FNC(VIDIOC_TRY_DECODER_CMD, v4l_vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0), IOCTL_INFO_FNC(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0), IOCTL_INFO_FNC(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0), IOCTL_INFO_FNC(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO), - IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)), - IOCTL_INFO_STD(VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings, v4l_print_dv_timings, 0), + IOCTL_INFO_FNC(VIDIOC_S_DV_TIMINGS, v4l_vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)), + IOCTL_INFO_FNC(VIDIOC_G_DV_TIMINGS, v4l_vidioc_g_dv_timings, v4l_print_dv_timings, 0), IOCTL_INFO_FNC(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0), IOCTL_INFO_FNC(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0), IOCTL_INFO_FNC(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0), IOCTL_INFO_FNC(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE), IOCTL_INFO_FNC(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE), - IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, 
INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)), - IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0), - IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)), + IOCTL_INFO_FNC(VIDIOC_ENUM_DV_TIMINGS, v4l_vidioc_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)), + IOCTL_INFO_FNC(VIDIOC_QUERY_DV_TIMINGS, v4l_vidioc_query_dv_timings, v4l_print_dv_timings, 0), + IOCTL_INFO_FNC(VIDIOC_DV_TIMINGS_CAP, v4l_vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)), IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0), IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)), IOCTL_INFO_FNC(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)), @@ -2645,7 +2812,7 @@ static long __video_do_ioctl(struct file *file, struct video_device *vfd = video_devdata(file); const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; bool write_only = false; - struct v4l2_ioctl_info default_info; + v4l2_ioctl_info_no_const default_info; const struct v4l2_ioctl_info *info; void *fh = file->private_data; struct v4l2_fh *vfh = NULL; @@ -2681,14 +2848,8 @@ static long __video_do_ioctl(struct file *file, } write_only = _IOC_DIR(cmd) == _IOC_WRITE; - if (info->flags & INFO_FL_STD) { - typedef int (*vidioc_op)(struct file *file, void *fh, void *p); - const void *p = vfd->ioctl_ops; - const vidioc_op *vidioc = p + info->u.offset; - - ret = (*vidioc)(file, fh, arg); - } else if (info->flags & INFO_FL_FUNC) { - ret = info->u.func(ops, file, fh, arg); + if (info->flags & INFO_FL_FUNC) { + ret = info->func(ops, file, fh, arg); } else if (!ops->vidioc_default) { ret = -ENOTTY; } else { @@ -2736,7 +2897,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, ret = -EINVAL; break; } - *user_ptr = (void __user *)buf->m.planes; + *user_ptr = (void __force_user *)buf->m.planes; *kernel_ptr = (void **)&buf->m.planes; *array_size = sizeof(struct v4l2_plane) * buf->length; ret = 1; @@ -2753,7 +2914,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, ret = -EINVAL; break; } - *user_ptr = (void __user *)edid->edid; + *user_ptr = (void __force_user *)edid->edid; *kernel_ptr = (void **)&edid->edid; *array_size = edid->blocks * 128; ret = 1; @@ -2771,7 +2932,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, ret = -EINVAL; break; } - *user_ptr = (void __user *)ctrls->controls; + *user_ptr = (void __force_user *)ctrls->controls; *kernel_ptr = (void **)&ctrls->controls; *array_size = sizeof(struct v4l2_ext_control) * ctrls->count; @@ -2872,7 +3033,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, } if (has_array_args) { - *kernel_ptr = (void __force *)user_ptr; + *kernel_ptr = (void __force_kernel *)user_ptr; if (copy_to_user(user_ptr, mbuf, array_size)) err = -EFAULT; goto out_array_args; diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 5457c361a..478c99911 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -233,7 +233,7 @@ struct omap3_gpmc_regs { struct gpmc_device { struct device *dev; int irq; - struct irq_chip irq_chip; + struct irq_chip *irq_chip; struct gpio_chip gpio_chip; int nirqs; }; @@ -1254,10 +1254,10 @@ static int 
gpmc_irq_map(struct irq_domain *d, unsigned int virq, irq_set_chip_data(virq, gpmc); if (hw < GPMC_NR_NAND_IRQS) { irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN); - irq_set_chip_and_handler(virq, &gpmc->irq_chip, + irq_set_chip_and_handler(virq, gpmc->irq_chip, handle_simple_irq); } else { - irq_set_chip_and_handler(virq, &gpmc->irq_chip, + irq_set_chip_and_handler(virq, gpmc->irq_chip, handle_edge_irq); } @@ -1303,6 +1303,16 @@ static irqreturn_t gpmc_handle_irq(int irq, void *data) return IRQ_HANDLED; } +static struct irq_chip gpmc_irq_chip = { + .name = "gpmc", + .irq_enable = gpmc_irq_enable, + .irq_disable = gpmc_irq_disable, + .irq_ack = gpmc_irq_ack, + .irq_mask = gpmc_irq_mask, + .irq_unmask = gpmc_irq_unmask, + .irq_set_type = gpmc_irq_set_type, +}; + static int gpmc_setup_irq(struct gpmc_device *gpmc) { u32 regval; @@ -1315,13 +1325,7 @@ static int gpmc_setup_irq(struct gpmc_device *gpmc) regval = gpmc_read_reg(GPMC_IRQSTATUS); gpmc_write_reg(GPMC_IRQSTATUS, regval); - gpmc->irq_chip.name = "gpmc"; - gpmc->irq_chip.irq_enable = gpmc_irq_enable; - gpmc->irq_chip.irq_disable = gpmc_irq_disable; - gpmc->irq_chip.irq_ack = gpmc_irq_ack; - gpmc->irq_chip.irq_mask = gpmc_irq_mask; - gpmc->irq_chip.irq_unmask = gpmc_irq_unmask; - gpmc->irq_chip.irq_set_type = gpmc_irq_set_type; + gpmc->irq_chip = &gpmc_irq_chip; gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node, gpmc->nirqs, diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 89c7ed16b..f2ca71f56 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -99,7 +99,7 @@ module_param(mpt_channel_mapping, int, 0); MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)"); static int mpt_debug_level; -static int mpt_set_debug_level(const char *val, struct kernel_param *kp); +static int mpt_set_debug_level(const char *val, const struct kernel_param *kp); module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int, &mpt_debug_level, 0600); MODULE_PARM_DESC(mpt_debug_level, @@ -242,7 +242,7 @@ pci_enable_io_access(struct pci_dev *pdev) pci_write_config_word(pdev, PCI_COMMAND, command_reg); } -static int mpt_set_debug_level(const char *val, struct kernel_param *kp) +static int mpt_set_debug_level(const char *val, const struct kernel_param *kp) { int ret = param_set_int(val, kp); MPT_ADAPTER *ioc; @@ -6749,8 +6749,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v) seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); +#ifdef CONFIG_GRKERNSEC_HIDESYM + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL); +#else seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma); +#endif + /* * Rounding UP to nearest 4-kB boundary here... 
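
The omap-gpmc hunk above stops embedding a writable struct irq_chip in each device: the callbacks are defined once in a static gpmc_irq_chip and the device keeps only a pointer to it, so nothing has to fill in the ops structure at probe time. The same shape, sketched with made-up names:

#include <linux/irq.h>

static void mydev_irq_mask(struct irq_data *d)   { /* quiesce the line */ }
static void mydev_irq_unmask(struct irq_data *d) { /* re-enable the line */ }

/* defined once, shared by every instance, never written after boot */
static struct irq_chip mydev_irq_chip = {
        .name       = "mydev",
        .irq_mask   = mydev_irq_mask,
        .irq_unmask = mydev_irq_unmask,
};

struct mydev {
        struct irq_chip *irq_chip;      /* was: an embedded, writable copy */
};

static void mydev_setup_irq(struct mydev *dev, unsigned int virq)
{
        dev->irq_chip = &mydev_irq_chip;
        irq_set_chip_and_handler(virq, dev->irq_chip, handle_simple_irq);
}
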
*/ @@ -6763,7 +6768,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v) ioc->facts.GlobalCredits); seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n", +#ifdef CONFIG_GRKERNSEC_HIDESYM + NULL, NULL); +#else (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma); +#endif sz = (ioc->reply_sz * ioc->reply_depth) + 128; seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n", ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz); diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index 6955c9e22..03bc46612 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -680,7 +680,7 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -static int +static netdev_tx_t mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) { struct mpt_lan_priv *priv = netdev_priv(dev); diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 7ee1667ac..c36740da9 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached) return 0; } +static inline void +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) +{ + if (phy_info->port_details) { + phy_info->port_details->rphy = rphy; + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", + ioc->name, rphy)); + } + + if (rphy) { + dsaswideprintk(ioc, dev_printk(KERN_DEBUG, + &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", + ioc->name, rphy, rphy->dev.release)); + } +} + /* no mutex */ static void mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details) @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info) return NULL; } -static inline void -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) -{ - if (phy_info->port_details) { - phy_info->port_details->rphy = rphy; - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", - ioc->name, rphy)); - } - - if (rphy) { - dsaswideprintk(ioc, dev_printk(KERN_DEBUG, - &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", - ioc->name, rphy, rphy->dev.release)); - } -} - static inline struct sas_port * mptsas_get_port(struct mptsas_phyinfo *phy_info) { diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c index acf6c00b1..481c92975 100644 --- a/drivers/mfd/ab8500-debugfs.c +++ b/drivers/mfd/ab8500-debugfs.c @@ -100,7 +100,7 @@ static int irq_last; static u32 *irq_count; static int num_irqs; -static struct device_attribute **dev_attr; +static device_attribute_no_const **dev_attr; static char **event_name; static u8 avg_sample = SAMPLE_16; diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index da5722d7c..d4050306f 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -494,7 +494,7 @@ static struct platform_driver kempld_driver = { .remove = kempld_remove, }; -static struct dmi_system_id kempld_dmi_table[] __initdata = { +static const struct dmi_system_id kempld_dmi_table[] __initconst = { { .ident = "BBL6", .matches = { diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c index 5c80aea32..100632361 100644 --- a/drivers/mfd/max8925-i2c.c +++ b/drivers/mfd/max8925-i2c.c @@ 
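
The mptlan.c change above (and the matching xpnet.c change later in this series) gives the transmit handler the prototype that .ndo_start_xmit actually expects, netdev_tx_t, instead of relying on int being close enough; a mismatched function-pointer type is exactly what RAP/CFI-style indirect-call checking rejects. In isolation, for a hypothetical driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* .ndo_start_xmit must return netdev_tx_t, not a bare int */
static netdev_tx_t mydev_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        /* placeholder body: consume the skb and report success */
        dev_kfree_skb_any(skb);
        dev->stats.tx_packets++;
        return NETDEV_TX_OK;
}

static const struct net_device_ops mydev_netdev_ops = {
        .ndo_start_xmit = mydev_start_xmit,
};
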
-151,7 +151,7 @@ static int max8925_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max8925_platform_data *pdata = dev_get_platdata(&client->dev); - static struct max8925_chip *chip; + struct max8925_chip *chip; struct device_node *node = client->dev.of_node; if (node && !pdata) { diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c index ee94080e1..e2a4a3d82 100644 --- a/drivers/mfd/rn5t618.c +++ b/drivers/mfd/rn5t618.c @@ -52,7 +52,6 @@ static const struct regmap_config rn5t618_regmap_config = { }; static struct rn5t618 *rn5t618_pm_power_off; -static struct notifier_block rn5t618_restart_handler; static void rn5t618_trigger_poweroff_sequence(bool repower) { @@ -84,6 +83,12 @@ static int rn5t618_restart(struct notifier_block *this, return NOTIFY_DONE; } +static struct notifier_block rn5t618_restart_handler = { + .notifier_call = rn5t618_restart, + .priority = 192, + +}; + static const struct of_device_id rn5t618_of_match[] = { { .compatible = "ricoh,rn5t567", .data = (void *)RN5T567 }, { .compatible = "ricoh,rn5t618", .data = (void *)RN5T618 }, @@ -133,9 +138,6 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c, dev_warn(&i2c->dev, "Poweroff callback already assigned\n"); } - rn5t618_restart_handler.notifier_call = rn5t618_restart; - rn5t618_restart_handler.priority = 192; - ret = register_restart_handler(&rn5t618_restart_handler); if (ret) { dev_err(&i2c->dev, "cannot register restart handler, %d\n", ret); diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c index 11cab1582..d144bd9ad 100644 --- a/drivers/mfd/tps65910.c +++ b/drivers/mfd/tps65910.c @@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq, struct tps65910_platform_data *pdata) { int ret = 0; - static struct regmap_irq_chip *tps6591x_irqs_chip; + struct regmap_irq_chip *tps6591x_irqs_chip; if (!irq) { dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n"); diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index b46c0cfc2..89e322b43 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "twl-core.h" @@ -720,10 +721,12 @@ int twl4030_init_irq(struct device *dev, int irq_num) * Install an irq handler for each of the SIH modules; * clone dummy irq_chip since PIH can't *do* anything */ - twl4030_irq_chip = dummy_irq_chip; - twl4030_irq_chip.name = "twl4030"; + pax_open_kernel(); + memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip); + const_cast(twl4030_irq_chip.name) = "twl4030"; - twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; + const_cast(twl4030_sih_irq_chip.irq_ack) = dummy_irq_chip.irq_ack; + pax_close_kernel(); for (i = irq_base; i < irq_end; i++) { irq_set_chip_and_handler(i, &twl4030_irq_chip, diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index 1922cb8f6..e14fb42cf 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c @@ -918,7 +918,9 @@ struct c2port_device *c2port_device_register(char *name, goto error_idr_alloc; c2dev->id = ret; - bin_attr_flash_data.size = ops->blocks_num * ops->block_size; + pax_open_kernel(); + const_cast(bin_attr_flash_data.size) = ops->blocks_num * ops->block_size; + pax_close_kernel(); c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, "c2port%d", c2dev->id); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 99635dd9d..9718bf1c4 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -834,7 +834,7 @@ static void 
run_plant_and_detach_test(int is_early) char before[BREAK_INSTR_SIZE]; char after[BREAK_INSTR_SIZE]; - probe_kernel_read(before, (char *)kgdbts_break_test, + probe_kernel_read(before, (void *)ktla_ktva((unsigned long)kgdbts_break_test), BREAK_INSTR_SIZE); init_simple_test(); ts.tst = plant_and_detach_test; @@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early) /* Activate test with initial breakpoint */ if (!is_early) kgdb_breakpoint(); - probe_kernel_read(after, (char *)kgdbts_break_test, + probe_kernel_read(after, (void *)ktla_ktva((unsigned long)kgdbts_break_test), BREAK_INSTR_SIZE); if (memcmp(before, after, BREAK_INSTR_SIZE)) { printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n"); @@ -1130,7 +1130,7 @@ static void kgdbts_put_char(u8 chr) ts.run_test(0, chr); } -static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp) +static int param_set_kgdbts_var(const char *kmessage, const struct kernel_param *kp) { int len = strlen(kmessage); @@ -1173,7 +1173,7 @@ static void kgdbts_post_exp_handler(void) module_put(THIS_MODULE); } -static struct kgdb_io kgdbts_io_ops = { +static struct kgdb_io kgdbts_io_ops __read_only = { .name = "kgdbts", .read_char = kgdbts_get_char, .write_char = kgdbts_put_char, diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index fb8705fc3..dc2f679d0 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data) * the lid is closed. This leads to interrupts as soon as a little move * is done. */ - atomic_inc(&lis3->count); + atomic_inc_unchecked(&lis3->count); wake_up_interruptible(&lis3->misc_wait); kill_fasync(&lis3->async_queue, SIGIO, POLL_IN); @@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file) if (lis3->pm_dev) pm_runtime_get_sync(lis3->pm_dev); - atomic_set(&lis3->count, 0); + atomic_set_unchecked(&lis3->count, 0); return 0; } @@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf, add_wait_queue(&lis3->misc_wait, &wait); while (true) { set_current_state(TASK_INTERRUPTIBLE); - data = atomic_xchg(&lis3->count, 0); + data = atomic_xchg_unchecked(&lis3->count, 0); if (data) break; @@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait) struct lis3lv02d, miscdev); poll_wait(file, &lis3->misc_wait, wait); - if (atomic_read(&lis3->count)) + if (atomic_read_unchecked(&lis3->count)) return POLLIN | POLLRDNORM; return 0; } diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h index c439c827e..1f20f5713 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.h +++ b/drivers/misc/lis3lv02d/lis3lv02d.h @@ -297,7 +297,7 @@ struct lis3lv02d { struct input_polled_dev *idev; /* input device */ struct platform_device *pdev; /* platform device */ struct regulator_bulk_data regulators[2]; - atomic_t count; /* interrupt count after last read */ + atomic_unchecked_t count; /* interrupt count after last read */ union axis_conversion ac; /* hw -> logical axis */ int mapped_btns[3]; diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c index f9154b8d6..bb13e3e9c 100644 --- a/drivers/misc/lkdtm_core.c +++ b/drivers/misc/lkdtm_core.c @@ -78,7 +78,7 @@ static irqreturn_t jp_handle_irq_event(unsigned int irq, return 0; } -static void jp_tasklet_action(struct softirq_action *a) +static void jp_tasklet_action(void) { lkdtm_handler(); jprobe_return(); diff --git 
a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c index ddc9e4b08..9e27f4100 100644 --- a/drivers/misc/mic/scif/scif_api.c +++ b/drivers/misc/mic/scif/scif_api.c @@ -1486,10 +1486,12 @@ int scif_client_register(struct scif_client *client) { struct subsys_interface *si = &client->si; - si->name = client->name; - si->subsys = &scif_peer_bus; - si->add_dev = scif_add_client_dev; - si->remove_dev = scif_remove_client_dev; + pax_open_kernel(); + const_cast(si->name) = client->name; + const_cast(si->subsys) = &scif_peer_bus; + const_cast(si->add_dev) = scif_add_client_dev; + const_cast(si->remove_dev) = scif_remove_client_dev; + pax_close_kernel(); return subsys_interface_register(&client->si); } diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c index 637cc4686..4fb1267a4 100644 --- a/drivers/misc/mic/scif/scif_rb.c +++ b/drivers/misc/mic/scif/scif_rb.c @@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb) * the read barrier in scif_rb_count(..) */ wmb(); - ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; + ACCESS_ONCE_RW(*rb->write_ptr) = rb->current_write_offset; #ifdef CONFIG_INTEL_MIC_CARD /* * X100 Si bug: For the case where a Core is performing an EXT_WR @@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb) * This way, if ordering is violated for the Interrupt Message, it will * fall just behind the first Posted associated with the first EXT_WR. */ - ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; + ACCESS_ONCE_RW(*rb->write_ptr) = rb->current_write_offset; #endif } @@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) * scif_rb_space(..) */ mb(); - ACCESS_ONCE(*rb->read_ptr) = new_offset; + ACCESS_ONCE_RW(*rb->read_ptr) = new_offset; #ifdef CONFIG_INTEL_MIC_CARD /* * X100 Si Bug: For the case where a Core is performing an EXT_WR @@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) * This way, if ordering is violated for the Interrupt Message, it will * fall just behind the first Posted associated with the first EXT_WR. 
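
scif_client_register() above shows the pattern used throughout the patch for writing to structures that are otherwise kept read-only: pax_open_kernel()/pax_close_kernel() briefly lift write protection and const_cast() removes the compiler-visible qualifier for the assignment. Those helpers are PaX-specific; the sketch below only illustrates the call shape and stubs them out for a stock tree (the stubs, the structure and the field names are all assumptions, not the patch's definitions):

#ifndef CONFIG_PAX_KERNEXEC
/* assumed stand-ins so the sketch compiles; the real pax_open_kernel()
 * and pax_close_kernel() toggle write protection, and the real
 * const_cast() also strips a const qualifier from the lvalue */
#define pax_open_kernel()       do { } while (0)
#define pax_close_kernel()      do { } while (0)
#define const_cast(x)           (x)
#endif

struct client_iface {
        const char *name;
        int (*add_dev)(void *dev);
};

static void register_client(struct client_iface *si, const char *name,
                             int (*add_dev)(void *dev))
{
        pax_open_kernel();
        const_cast(si->name) = name;
        const_cast(si->add_dev) = add_dev;
        pax_close_kernel();
}
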
*/ - ACCESS_ONCE(*rb->read_ptr) = new_offset; + ACCESS_ONCE_RW(*rb->read_ptr) = new_offset; #endif } diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c index 6030ac5b8..e498727e9 100644 --- a/drivers/misc/panel.c +++ b/drivers/misc/panel.c @@ -1983,7 +1983,7 @@ static void panel_process_inputs(void) } } -static void panel_scan_timer(void) +static void panel_scan_timer(unsigned long data) { if (keypad.enabled && keypad_initialized) { if (spin_trylock_irq(&pprt_lock)) { @@ -2019,7 +2019,7 @@ static void init_scan_timer(void) if (scan_timer.function) return; /* already started */ - setup_timer(&scan_timer, (void *)&panel_scan_timer, 0); + setup_timer(&scan_timer, &panel_scan_timer, 0); scan_timer.expires = jiffies + INPUT_POLL_TIME; add_timer(&scan_timer); } diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c index 1ee8e82ba..785f5280b 100644 --- a/drivers/misc/sgi-gru/gruhandles.c +++ b/drivers/misc/sgi-gru/gruhandles.c @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks) unsigned long nsec; nsec = CLKS2NSEC(clks); - atomic_long_inc(&mcs_op_statistics[op].count); - atomic_long_add(nsec, &mcs_op_statistics[op].total); + atomic_long_inc_unchecked(&mcs_op_statistics[op].count); + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total); if (mcs_op_statistics[op].max < nsec) mcs_op_statistics[op].max = nsec; } diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index 4f7635922..cdfcb2e35 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c @@ -32,9 +32,9 @@ #define printstat(s, f) printstat_val(s, &gru_stats.f, #f) -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id) { - unsigned long val = atomic_long_read(v); + unsigned long val = atomic_long_read_unchecked(v); seq_printf(s, "%16lu %s\n", val, id); } @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p) seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks"); for (op = 0; op < mcsop_last; op++) { - count = atomic_long_read(&mcs_op_statistics[op].count); - total = atomic_long_read(&mcs_op_statistics[op].total); + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count); + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total); max = mcs_op_statistics[op].max; seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, count ? total / count : 0, max); diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 5c3ce2459..4915ccbbe 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids; * GRU statistics. 
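
The panel.c hunk above is a callback-prototype fix rather than a hardening change: on kernels of this vintage a timer function takes the unsigned long cookie given to setup_timer(), so panel_scan_timer() gains that parameter and the (void *) cast at the setup_timer() call disappears. Sketched with placeholder names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list scan_timer;

/* the callback has the exact type setup_timer() expects: no casts needed */
static void scan_timer_fn(unsigned long data)
{
        /* ... poll the hardware ... */
        mod_timer(&scan_timer, jiffies + HZ / 10);
}

static void start_scan(void)
{
        setup_timer(&scan_timer, scan_timer_fn, 0);
        scan_timer.expires = jiffies + HZ / 10;
        add_timer(&scan_timer);
}
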
*/ struct gru_stats_s { - atomic_long_t vdata_alloc; - atomic_long_t vdata_free; - atomic_long_t gts_alloc; - atomic_long_t gts_free; - atomic_long_t gms_alloc; - atomic_long_t gms_free; - atomic_long_t gts_double_allocate; - atomic_long_t assign_context; - atomic_long_t assign_context_failed; - atomic_long_t free_context; - atomic_long_t load_user_context; - atomic_long_t load_kernel_context; - atomic_long_t lock_kernel_context; - atomic_long_t unlock_kernel_context; - atomic_long_t steal_user_context; - atomic_long_t steal_kernel_context; - atomic_long_t steal_context_failed; - atomic_long_t nopfn; - atomic_long_t asid_new; - atomic_long_t asid_next; - atomic_long_t asid_wrap; - atomic_long_t asid_reuse; - atomic_long_t intr; - atomic_long_t intr_cbr; - atomic_long_t intr_tfh; - atomic_long_t intr_spurious; - atomic_long_t intr_mm_lock_failed; - atomic_long_t call_os; - atomic_long_t call_os_wait_queue; - atomic_long_t user_flush_tlb; - atomic_long_t user_unload_context; - atomic_long_t user_exception; - atomic_long_t set_context_option; - atomic_long_t check_context_retarget_intr; - atomic_long_t check_context_unload; - atomic_long_t tlb_dropin; - atomic_long_t tlb_preload_page; - atomic_long_t tlb_dropin_fail_no_asid; - atomic_long_t tlb_dropin_fail_upm; - atomic_long_t tlb_dropin_fail_invalid; - atomic_long_t tlb_dropin_fail_range_active; - atomic_long_t tlb_dropin_fail_idle; - atomic_long_t tlb_dropin_fail_fmm; - atomic_long_t tlb_dropin_fail_no_exception; - atomic_long_t tfh_stale_on_fault; - atomic_long_t mmu_invalidate_range; - atomic_long_t mmu_invalidate_page; - atomic_long_t flush_tlb; - atomic_long_t flush_tlb_gru; - atomic_long_t flush_tlb_gru_tgh; - atomic_long_t flush_tlb_gru_zero_asid; - - atomic_long_t copy_gpa; - atomic_long_t read_gpa; - - atomic_long_t mesq_receive; - atomic_long_t mesq_receive_none; - atomic_long_t mesq_send; - atomic_long_t mesq_send_failed; - atomic_long_t mesq_noop; - atomic_long_t mesq_send_unexpected_error; - atomic_long_t mesq_send_lb_overflow; - atomic_long_t mesq_send_qlimit_reached; - atomic_long_t mesq_send_amo_nacked; - atomic_long_t mesq_send_put_nacked; - atomic_long_t mesq_page_overflow; - atomic_long_t mesq_qf_locked; - atomic_long_t mesq_qf_noop_not_full; - atomic_long_t mesq_qf_switch_head_failed; - atomic_long_t mesq_qf_unexpected_error; - atomic_long_t mesq_noop_unexpected_error; - atomic_long_t mesq_noop_lb_overflow; - atomic_long_t mesq_noop_qlimit_reached; - atomic_long_t mesq_noop_amo_nacked; - atomic_long_t mesq_noop_put_nacked; - atomic_long_t mesq_noop_page_overflow; + atomic_long_unchecked_t vdata_alloc; + atomic_long_unchecked_t vdata_free; + atomic_long_unchecked_t gts_alloc; + atomic_long_unchecked_t gts_free; + atomic_long_unchecked_t gms_alloc; + atomic_long_unchecked_t gms_free; + atomic_long_unchecked_t gts_double_allocate; + atomic_long_unchecked_t assign_context; + atomic_long_unchecked_t assign_context_failed; + atomic_long_unchecked_t free_context; + atomic_long_unchecked_t load_user_context; + atomic_long_unchecked_t load_kernel_context; + atomic_long_unchecked_t lock_kernel_context; + atomic_long_unchecked_t unlock_kernel_context; + atomic_long_unchecked_t steal_user_context; + atomic_long_unchecked_t steal_kernel_context; + atomic_long_unchecked_t steal_context_failed; + atomic_long_unchecked_t nopfn; + atomic_long_unchecked_t asid_new; + atomic_long_unchecked_t asid_next; + atomic_long_unchecked_t asid_wrap; + atomic_long_unchecked_t asid_reuse; + atomic_long_unchecked_t intr; + atomic_long_unchecked_t 
intr_cbr; + atomic_long_unchecked_t intr_tfh; + atomic_long_unchecked_t intr_spurious; + atomic_long_unchecked_t intr_mm_lock_failed; + atomic_long_unchecked_t call_os; + atomic_long_unchecked_t call_os_wait_queue; + atomic_long_unchecked_t user_flush_tlb; + atomic_long_unchecked_t user_unload_context; + atomic_long_unchecked_t user_exception; + atomic_long_unchecked_t set_context_option; + atomic_long_unchecked_t check_context_retarget_intr; + atomic_long_unchecked_t check_context_unload; + atomic_long_unchecked_t tlb_dropin; + atomic_long_unchecked_t tlb_preload_page; + atomic_long_unchecked_t tlb_dropin_fail_no_asid; + atomic_long_unchecked_t tlb_dropin_fail_upm; + atomic_long_unchecked_t tlb_dropin_fail_invalid; + atomic_long_unchecked_t tlb_dropin_fail_range_active; + atomic_long_unchecked_t tlb_dropin_fail_idle; + atomic_long_unchecked_t tlb_dropin_fail_fmm; + atomic_long_unchecked_t tlb_dropin_fail_no_exception; + atomic_long_unchecked_t tfh_stale_on_fault; + atomic_long_unchecked_t mmu_invalidate_range; + atomic_long_unchecked_t mmu_invalidate_page; + atomic_long_unchecked_t flush_tlb; + atomic_long_unchecked_t flush_tlb_gru; + atomic_long_unchecked_t flush_tlb_gru_tgh; + atomic_long_unchecked_t flush_tlb_gru_zero_asid; + + atomic_long_unchecked_t copy_gpa; + atomic_long_unchecked_t read_gpa; + + atomic_long_unchecked_t mesq_receive; + atomic_long_unchecked_t mesq_receive_none; + atomic_long_unchecked_t mesq_send; + atomic_long_unchecked_t mesq_send_failed; + atomic_long_unchecked_t mesq_noop; + atomic_long_unchecked_t mesq_send_unexpected_error; + atomic_long_unchecked_t mesq_send_lb_overflow; + atomic_long_unchecked_t mesq_send_qlimit_reached; + atomic_long_unchecked_t mesq_send_amo_nacked; + atomic_long_unchecked_t mesq_send_put_nacked; + atomic_long_unchecked_t mesq_page_overflow; + atomic_long_unchecked_t mesq_qf_locked; + atomic_long_unchecked_t mesq_qf_noop_not_full; + atomic_long_unchecked_t mesq_qf_switch_head_failed; + atomic_long_unchecked_t mesq_qf_unexpected_error; + atomic_long_unchecked_t mesq_noop_unexpected_error; + atomic_long_unchecked_t mesq_noop_lb_overflow; + atomic_long_unchecked_t mesq_noop_qlimit_reached; + atomic_long_unchecked_t mesq_noop_amo_nacked; + atomic_long_unchecked_t mesq_noop_put_nacked; + atomic_long_unchecked_t mesq_noop_page_overflow; }; @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, tghop_invalidate, mcsop_last}; struct mcs_op_statistic { - atomic_long_t count; - atomic_long_t total; + atomic_long_unchecked_t count; + atomic_long_unchecked_t total; unsigned long max; }; @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; #define STAT(id) do { \ if (gru_options & OPT_STATS) \ - atomic_long_inc(&gru_stats.id); \ + atomic_long_inc_unchecked(&gru_stats.id); \ } while (0) #ifdef CONFIG_SGI_GRU_DEBUG diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index c862cd458..0d176fed8 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -288,7 +288,7 @@ struct xpc_interface { xpc_notify_func, void *); void (*received) (short, int, void *); enum xp_retval (*partid_to_nasids) (short, void *); -}; +} __no_const; extern struct xpc_interface xpc_interface; diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 01be66d02..4a305b4de 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c @@ -71,20 +71,42 @@ EXPORT_SYMBOL_GPL(xpc_registrations); /* * Initialize the XPC interface to indicate that XPC 
isn't loaded. */ -static enum xp_retval -xpc_notloaded(void) +static void xpc_notloaded_connect(int ch_number) +{ +} + +static void xpc_notloaded_disconnect(int ch_number) +{ +} + +static enum xp_retval xpc_notloaded_send(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size) +{ + return xpNotLoaded; +} + +static enum xp_retval xpc_notloaded_send_notify(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size, xpc_notify_func func, void *key) +{ + return xpNotLoaded; +} + +static void xpc_notloaded_received(short partid, int ch_number, void *payload) +{ +} + +static enum xp_retval xpc_notloaded_partid_to_nasids(short partid, void *nasid_mask) { return xpNotLoaded; } struct xpc_interface xpc_interface = { - (void (*)(int))xpc_notloaded, - (void (*)(int))xpc_notloaded, - (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, - (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, - void *))xpc_notloaded, - (void (*)(short, int, void *))xpc_notloaded, - (enum xp_retval(*)(short, void *))xpc_notloaded + .connect = xpc_notloaded_connect, + .disconnect = xpc_notloaded_disconnect, + .send = xpc_notloaded_send, + .send_notify = xpc_notloaded_send_notify, + .received = xpc_notloaded_received, + .partid_to_nasids = xpc_notloaded_partid_to_nasids }; EXPORT_SYMBOL_GPL(xpc_interface); @@ -115,17 +137,12 @@ EXPORT_SYMBOL_GPL(xpc_set_interface); void xpc_clear_interface(void) { - xpc_interface.connect = (void (*)(int))xpc_notloaded; - xpc_interface.disconnect = (void (*)(int))xpc_notloaded; - xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16)) - xpc_notloaded; - xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *, - u16, xpc_notify_func, - void *))xpc_notloaded; - xpc_interface.received = (void (*)(short, int, void *)) - xpc_notloaded; - xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) - xpc_notloaded; + xpc_interface.connect = xpc_notloaded_connect; + xpc_interface.disconnect = xpc_notloaded_disconnect; + xpc_interface.send = xpc_notloaded_send; + xpc_interface.send_notify = xpc_notloaded_send_notify; + xpc_interface.received = xpc_notloaded_received; + xpc_interface.partid_to_nasids = xpc_notloaded_partid_to_nasids; } EXPORT_SYMBOL_GPL(xpc_clear_interface); diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index b94d5f767..7f494c516 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -835,6 +835,7 @@ struct xpc_arch_operations { void (*received_payload) (struct xpc_channel *, void *); void (*notify_senders_of_disconnect) (struct xpc_channel *); }; +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const; /* struct xpc_partition act_state values (for XPC HB) */ @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[]; /* found in xpc_main.c */ extern struct device *xpc_part; extern struct device *xpc_chan; -extern struct xpc_arch_operations xpc_arch_ops; +extern xpc_arch_operations_no_const xpc_arch_ops; extern int xpc_disengage_timelimit; extern int xpc_disengage_timedout; extern int xpc_activate_IRQ_rcvd; diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 7f327121e..8539ab27e 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = { .notifier_call = xpc_system_die, }; -struct xpc_arch_operations xpc_arch_ops; +xpc_arch_operations_no_const xpc_arch_ops; /* * Timer function to enforce the 
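
The xp_main.c hunk above replaces a single xpc_notloaded(), previously cast to half a dozen incompatible function-pointer types, with one correctly typed no-op per callback and a designated-initializer table. Calling through a mismatched pointer type is undefined behaviour and defeats indirect-call checking; the typed stubs cost nothing. A reduced sketch with invented callback names:

struct iface {
        void (*connect)(int ch);
        int  (*send)(short dst, int ch, const void *buf, unsigned short len);
};

/* one no-op per callback, each with the real signature */
static void notloaded_connect(int ch)
{
}

static int notloaded_send(short dst, int ch, const void *buf,
                          unsigned short len)
{
        return -1;      /* stand-in for "module not loaded" */
}

/* installed until the real implementation registers itself */
static struct iface the_iface = {
        .connect = notloaded_connect,
        .send    = notloaded_send,
};
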
timelimit on the partition disengage. diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 557f9782c..c8ce9fb11 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -421,7 +421,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, * destination partid. If the destination partid octets are 0xffff, * this packet is to be broadcast to all connected partitions. */ -static int +static netdev_tx_t xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xpnet_pending_msg *queued_msg; diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 3c4399251..8d173bf18 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c @@ -581,9 +581,10 @@ static int show_list(struct seq_file *s, void *unused) return 0; } -static ssize_t show_install(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_install(struct kobject *_dev, + struct kobj_attribute *attr, char *buf) { + struct device *dev = (struct device *)_dev; struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", kim_data->ldisc_install); } @@ -610,47 +611,50 @@ static ssize_t store_baud_rate(struct device *dev, } #endif /* if DEBUG */ -static ssize_t show_dev_name(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_dev_name(struct kobject *_dev, + struct kobj_attribute *attr, char *buf) { + struct device *dev = (struct device *)_dev; struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", kim_data->dev_name); } -static ssize_t show_baud_rate(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_baud_rate(struct kobject *_dev, + struct kobj_attribute *attr, char *buf) { + struct device *dev = (struct device *)_dev; struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", kim_data->baud_rate); } -static ssize_t show_flow_cntrl(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_flow_cntrl(struct kobject *_dev, + struct kobj_attribute *attr, char *buf) { + struct device *dev = (struct device *)_dev; struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", kim_data->flow_cntrl); } /* structures specific for sysfs entries */ static struct kobj_attribute ldisc_install = -__ATTR(install, 0444, (void *)show_install, NULL); +__ATTR(install, 0444, show_install, NULL); static struct kobj_attribute uart_dev_name = #ifdef DEBUG /* TODO: move this to debug-fs if possible */ -__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name); +__ATTR(dev_name, 0644, show_dev_name, store_dev_name); #else -__ATTR(dev_name, 0444, (void *)show_dev_name, NULL); +__ATTR(dev_name, 0444, show_dev_name, NULL); #endif static struct kobj_attribute uart_baud_rate = #ifdef DEBUG /* TODO: move to debugfs */ -__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate); +__ATTR(baud_rate, 0644, show_baud_rate, store_baud_rate); #else -__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL); +__ATTR(baud_rate, 0444, show_baud_rate, NULL); #endif static struct kobj_attribute uart_flow_cntrl = -__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL); +__ATTR(flow_cntrl, 0444, show_flow_cntrl, NULL); static struct attribute *uim_attrs[] = { &ldisc_install.attr, diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index df382be62..a16bfb7a0 100644 --- a/drivers/mmc/card/mmc_test.c +++ 
b/drivers/mmc/card/mmc_test.c @@ -2103,8 +2103,8 @@ static int mmc_test_rw_multiple_size(struct mmc_test_card *test, { int ret = 0; int i; - void *pre_req = test->card->host->ops->pre_req; - void *post_req = test->card->host->ops->post_req; + void (*pre_req)(struct mmc_host *, struct mmc_request *, bool) = test->card->host->ops->pre_req; + void (*post_req)(struct mmc_host *, struct mmc_request *, int) = test->card->host->ops->post_req; if (rw->do_nonblock_req && ((!pre_req && post_req) || (pre_req && !post_req))) { diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index e8cd2dec3..c1640f654 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -298,5 +298,5 @@ struct dw_mci_drv_data { struct mmc_ios *ios); int (*switch_voltage)(struct mmc_host *mmc, struct mmc_ios *ios); -}; +} __do_const; #endif /* _DW_MMC_H_ */ diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index df990bb8c..e647253e9 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1613,7 +1613,9 @@ static int mmci_probe(struct amba_device *dev, mmc->caps |= MMC_CAP_CMD23; if (variant->busy_detect) { - mmci_ops.card_busy = mmci_card_busy; + pax_open_kernel(); + const_cast(mmci_ops.card_busy) = mmci_card_busy; + pax_close_kernel(); mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE); mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; mmc->max_busy_timeout = 0; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 5f2f24a73..e80f6f358 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2076,7 +2076,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev) if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) { dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n"); - omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk; + pax_open_kernel(); + const_cast(omap_hsmmc_ops.multi_io_quirk) = omap_hsmmc_multi_io_quirk; + pax_close_kernel(); } device_init_wakeup(&pdev->dev, true); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 7123ef96e..758d07728 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1255,9 +1255,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); } - if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) - sdhci_esdhc_ops.platform_execute_tuning = + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { + pax_open_kernel(); + const_cast(sdhci_esdhc_ops.platform_execute_tuning) = esdhc_executing_tuning; + pax_close_kernel(); + } if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 784c5a848..356732858 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev) * we can use overriding functions instead of default. 
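The mmci, omap_hsmmc and sdhci-esdhc-imx hunks above, and the sdhci-s3c and tmio hunks that continue below, all apply one transformation: once an ops structure is constified (placed in a read-only section, either implicitly or via __do_const), a probe-time assignment to one of its members has to go through const_cast() inside a pax_open_kernel()/pax_close_kernel() pair. A minimal sketch of that shape, assuming the const_cast()/pax_open_kernel() helpers provided elsewhere in this patch and using made-up foo_* names rather than any driver touched here:

    /* Illustrative only; foo_ops, foo_probe and friends are hypothetical. */
    struct foo_ops {
            int (*start)(void);
            int (*busy)(void);      /* optional, only some hardware provides it */
    } __do_const;

    static int foo_start(void) { return 0; }
    static int foo_busy(void)  { return 0; }

    static struct foo_ops foo_ops = {
            .start = foo_start,
    };

    static int foo_probe(int has_busy_detect)
    {
            if (has_busy_detect) {
                    pax_open_kernel();              /* temporarily lift .rodata protection */
                    const_cast(foo_ops.busy) = foo_busy;
                    pax_close_kernel();             /* restore it immediately afterwards */
            }
            return 0;
    }

The write window is kept as narrow as possible; every other user continues to see the structure as read-only.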
*/ if (sc->no_divider) { - sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; - sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; - sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; + pax_open_kernel(); + const_cast(sdhci_s3c_ops.set_clock) = sdhci_cmu_set_clock; + const_cast(sdhci_s3c_ops.get_min_clock) = sdhci_cmu_get_min_clock; + const_cast(sdhci_s3c_ops.get_max_clock) = sdhci_cmu_get_max_clock; + pax_close_kernel(); } /* It supports additional host capabilities if needed */ diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 700567603..f431ee396 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -1069,8 +1069,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, goto host_free; } - tmio_mmc_ops.card_busy = _host->card_busy; - tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch; + pax_open_kernel(); + const_cast(tmio_mmc_ops.card_busy) = _host->card_busy; + const_cast(tmio_mmc_ops.start_signal_voltage_switch) = _host->start_signal_voltage_switch; + pax_close_kernel(); mmc->ops = &tmio_mmc_ops; mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index 94d3eb42c..7d342965f 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c @@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs, size_t totlen = 0, thislen; int ret = 0; size_t buflen = 0; - static char *buffer; + char *buffer; if (!ECCBUF_SIZE) { /* We should fall back to a general writev implementation. diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 7c887f111..62fd6905c 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -431,7 +431,7 @@ static int block2mtd_setup2(const char *val) } -static int block2mtd_setup(const char *val, struct kernel_param *kp) +static int block2mtd_setup(const char *val, const struct kernel_param *kp) { #ifdef MODULE return block2mtd_setup2(val); diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 8b66e52ca..7287696a2 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -266,7 +266,7 @@ static int phram_setup(const char *val) return ret; } -static int phram_param_call(const char *val, struct kernel_param *kp) +static int phram_param_call(const char *val, const struct kernel_param *kp) { #ifdef MODULE return phram_setup(val); diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c index 385305e66..8051e876d 100644 --- a/drivers/mtd/maps/gpio-addr-flash.c +++ b/drivers/mtd/maps/gpio-addr-flash.c @@ -128,7 +128,7 @@ static void gf_copy_from(struct map_info *map, void *to, unsigned long from, ssi * @map: MTD map state * @ofs: desired offset to write */ -static void gf_write(struct map_info *map, map_word d1, unsigned long ofs) +static void gf_write(struct map_info *map, const map_word d1, unsigned long ofs) { struct async_state *state = gf_map_info_to_state(map); uint16_t d; diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c index 6dc97aa66..c251b90ed 100644 --- a/drivers/mtd/maps/latch-addr-flash.c +++ b/drivers/mtd/maps/latch-addr-flash.c @@ -52,7 +52,7 @@ static map_word lf_read(struct map_info *map, unsigned long ofs) return datum; } -static void lf_write(struct map_info *map, map_word datum, unsigned long ofs) +static void lf_write(struct map_info *map, const map_word 
datum, unsigned long ofs) { struct latch_addr_flash_info *info; diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index eb0242e0b..1a4c5b9dd 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c @@ -59,13 +59,13 @@ static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from memcpy_fromio(to, map->base + map->translate(map, from), len); } -static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs) +static void mtd_pci_write8(struct map_info *_map, const map_word val, unsigned long ofs) { struct map_pci_info *map = (struct map_pci_info *)_map; writeb(val.x[0], map->base + map->translate(map, ofs)); } -static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs) +static void mtd_pci_write32(struct map_info *_map, const map_word val, unsigned long ofs) { struct map_pci_info *map = (struct map_pci_info *)_map; writel(val.x[0], map->base + map->translate(map, ofs)); diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c index 70bb403f6..3ae94c685 100644 --- a/drivers/mtd/maps/pcmciamtd.c +++ b/drivers/mtd/maps/pcmciamtd.c @@ -161,7 +161,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long } -static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr) +static void pcmcia_write8_remap(struct map_info *map, const map_word d, unsigned long adr) { void __iomem *addr = remap_window(map, adr); @@ -173,7 +173,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long } -static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr) +static void pcmcia_write16_remap(struct map_info *map, const map_word d, unsigned long adr) { void __iomem *addr = remap_window(map, adr); if(!addr) @@ -256,7 +256,7 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, } -static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr) +static void pcmcia_write8(struct map_info *map, const map_word d, unsigned long adr) { void __iomem *win_base = (void __iomem *)map->map_priv_2; @@ -269,7 +269,7 @@ static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr) } -static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr) +static void pcmcia_write16(struct map_info *map, const map_word d, unsigned long adr) { void __iomem *win_base = (void __iomem *)map->map_priv_2; diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c index 556a2dfe9..e77132943 100644 --- a/drivers/mtd/maps/sbc_gxx.c +++ b/drivers/mtd/maps/sbc_gxx.c @@ -138,7 +138,7 @@ static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from } } -static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr) +static void sbc_gxx_write8(struct map_info *map, const map_word d, unsigned long adr) { spin_lock(&sbc_gxx_spin); sbc_gxx_page(map, adr); diff --git a/drivers/mtd/nand/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/brcmnand/bcm63138_nand.c index 59444b3a6..b8fd6d529 100644 --- a/drivers/mtd/nand/brcmnand/bcm63138_nand.c +++ b/drivers/mtd/nand/brcmnand/bcm63138_nand.c @@ -81,8 +81,10 @@ static int bcm63138_nand_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + pax_open_kernel(); soc->ctlrdy_ack = bcm63138_nand_intc_ack; soc->ctlrdy_set_enabled = bcm63138_nand_intc_set; + pax_close_kernel(); return brcmnand_probe(pdev, soc); } diff --git a/drivers/mtd/nand/brcmnand/brcmnand.h 
b/drivers/mtd/nand/brcmnand/brcmnand.h index 5c44cd4ab..4b4ba7d3c 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.h +++ b/drivers/mtd/nand/brcmnand/brcmnand.h @@ -25,7 +25,7 @@ struct brcmnand_soc { void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en); void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare, bool is_param); -}; +} __no_const; static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc, bool is_param) diff --git a/drivers/mtd/nand/brcmnand/iproc_nand.c b/drivers/mtd/nand/brcmnand/iproc_nand.c index 4c6ae1136..6a09e5302 100644 --- a/drivers/mtd/nand/brcmnand/iproc_nand.c +++ b/drivers/mtd/nand/brcmnand/iproc_nand.c @@ -130,9 +130,11 @@ static int iproc_nand_probe(struct platform_device *pdev) if (IS_ERR(priv->ext_base)) return PTR_ERR(priv->ext_base); + pax_open_kernel(); soc->ctlrdy_ack = iproc_nand_intc_ack; soc->ctlrdy_set_enabled = iproc_nand_intc_set; soc->prepare_data_bus = iproc_nand_apb_access; + pax_close_kernel(); return brcmnand_probe(pdev, soc); } diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 0b0c93702..e3a9cca4b 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -345,7 +345,17 @@ static irqreturn_t cafe_nand_interrupt(int irq, void *id) return IRQ_HANDLED; } -static void cafe_nand_bug(struct mtd_info *mtd) +static void cafe_nand_bug_hwctl(struct mtd_info *mtd, int mode) +{ + BUG(); +} + +static int cafe_nand_bug_calculate(struct mtd_info *mtd, const uint8_t *dat, uint8_t *ecc_code) +{ + BUG(); +} + +static int cafe_nand_bug_correct(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) { BUG(); } @@ -780,9 +790,9 @@ static int cafe_nand_probe(struct pci_dev *pdev, cafe->nand.ecc.size = mtd->writesize; cafe->nand.ecc.bytes = 14; cafe->nand.ecc.strength = 4; - cafe->nand.ecc.hwctl = (void *)cafe_nand_bug; - cafe->nand.ecc.calculate = (void *)cafe_nand_bug; - cafe->nand.ecc.correct = (void *)cafe_nand_bug; + cafe->nand.ecc.hwctl = cafe_nand_bug_hwctl; + cafe->nand.ecc.calculate = cafe_nand_bug_calculate; + cafe->nand.ecc.correct = cafe_nand_bug_correct; cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel; cafe->nand.ecc.write_oob = cafe_nand_write_oob; cafe->nand.ecc.read_page = cafe_nand_read_page; diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 0476ae877..8d320ef65 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "denali.h" diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 6c062b825..828bdc036 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -415,7 +415,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr) /* first try to map the upper buffer directly */ if (virt_addr_valid(this->upper_buf) && - !object_is_on_stack(this->upper_buf)) { + !object_starts_on_stack(this->upper_buf)) { sg_init_one(sgl, this->upper_buf, this->upper_len); ret = dma_map_sg(this->dev, sgl, 1, dr); if (ret == 0) diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c index a5dfbfbeb..8042ab463 100644 --- a/drivers/mtd/nftlmount.c +++ b/drivers/mtd/nftlmount.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 3692dd547..b731a9b44 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -56,7 +56,7 @@ static ssize_t 
sm_attr_show(struct device *dev, struct device_attribute *attr, #define SM_CIS_VENDOR_OFFSET 0x59 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) { - struct attribute_group *attr_group; + attribute_group_no_const *attr_group; struct attribute **attributes; struct sm_sysfs_attribute *vendor_attribute; char *vendor; diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 85d54f37e..e1da78f40 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1389,7 +1389,7 @@ static int __init bytes_str_to_int(const char *str) * This function returns zero in case of success and a negative error code in * case of error. */ -static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) +static int __init ubi_mtd_param_parse(const char *val, const struct kernel_param *kp) { int i, len; struct mtd_dev_param *p; diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b8df0f5e8..0d64b6e69 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -666,7 +666,7 @@ static int bond_fill_info(struct sk_buff *skb, return -EMSGSIZE; } -struct rtnl_link_ops bond_link_ops __read_mostly = { +struct rtnl_link_ops bond_link_ops = { .kind = "bond", .priv_size = sizeof(struct bonding), .setup = bond_setup, diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index ddabce759..6583c2948 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1011,7 +1011,7 @@ static void cfhsi_aggregation_tout(unsigned long arg) cfhsi_start_tx(cfhsi); } -static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) { struct cfhsi *cfhsi = NULL; int start_xfer = 0; @@ -1441,7 +1441,7 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev, return -ENODEV; } -static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = { +static struct rtnl_link_ops caif_hsi_link_ops = { .kind = "cfhsi", .priv_size = sizeof(struct cfhsi), .setup = cfhsi_setup, diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index c2dea4916..4bf83b56c 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -277,7 +277,7 @@ static int handle_tx(struct ser_device *ser) return tty_wr; } -static int caif_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev) { struct ser_device *ser; diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 3a529fbe5..c55ad5e10 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -486,7 +486,7 @@ static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc) complete(&cfspi->comp); } -static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cfspi_xmit(struct sk_buff *skb, struct net_device *dev) { struct cfspi *cfspi = NULL; unsigned long flags; diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index b306210b0..c5345de11 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -519,7 +519,7 @@ static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv, } /* Put the CAIF packet on the virtio ring and kick the receiver */ -static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev) { struct cfv_info *cfv = 
netdev_priv(netdev); struct buf_info *buf_info; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 22570ea3a..c46237572 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -81,7 +81,7 @@ config CAN_BFIN config CAN_FLEXCAN tristate "Support for Freescale FLEXCAN based chips" - depends on ARM || PPC + depends on (ARM && CPU_LITTLE_ENDIAN) || PPC ---help--- Say Y here if you want to support for Freescale FlexCAN. diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 1deb8ff90..4e2b0c153 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -338,7 +338,7 @@ static int bfin_can_get_berr_counter(const struct net_device *dev, return 0; } -static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 8d6208c0b..7731e3c09 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -1053,7 +1053,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head) return; } -static struct rtnl_link_ops can_link_ops __read_mostly = { +static struct rtnl_link_ops can_link_ops = { .kind = "can", .maxtype = IFLA_CAN_MAX, .policy = can_policy, diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 16f7cadda..e643cf45e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -465,7 +465,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev, return err; } -static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index f13bb8d9b..26e4a4406 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1684,7 +1684,7 @@ static int ican3_stop(struct net_device *ndev) return 0; } -static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); struct can_frame *cf = (struct can_frame *)skb->data; diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c index c1b667675..50a8a51f9 100644 --- a/drivers/net/can/led.c +++ b/drivers/net/can/led.c @@ -128,7 +128,7 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg, } /* notifier block for netdevice event */ -static struct notifier_block can_netdev_notifier __read_mostly = { +static struct notifier_block can_netdev_notifier = { .notifier_call = can_led_notifier, }; diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 68ef0a4cd..9e4938b30 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -409,7 +409,7 @@ static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode) * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 * [ can_id ] [flags] [len] [can data (up to 8 bytes] */ -static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; diff --git a/drivers/net/can/vcan.c 
b/drivers/net/can/vcan.c index 674f36708..ec3a31f43 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev) dev->destructor = free_netdev; } -static struct rtnl_link_ops vcan_link_ops __read_mostly = { +static struct rtnl_link_ops vcan_link_ops = { .kind = "vcan", .setup = vcan_setup, }; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index c71a03593..08768cec4 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -386,7 +386,7 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) * * Return: 0 on success and failure value on error */ -static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 69fc8409a..77a32fc0f 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -167,7 +167,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } -static struct rtnl_link_ops dummy_link_ops __read_mostly = { +static struct rtnl_link_ops dummy_link_ops = { .kind = DRV_NAME, .setup = dummy_setup, .validate = dummy_validate, diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 39ca9350d..bd14a10c8 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -808,7 +808,7 @@ static int ax_probe(struct platform_device *pdev) struct ei_device *ei_local; struct ax_device *ax; struct resource *irq, *mem, *mem2; - unsigned long mem_size, mem2_size = 0; + resource_size_t mem_size, mem2_size = 0; int ret = 0; dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); @@ -852,9 +852,11 @@ static int ax_probe(struct platform_device *pdev) if (ax->plat->reg_offsets) ei_local->reg_offset = ax->plat->reg_offsets; else { + resource_size_t _mem_size = mem_size; + _mem_size /= 0x18; ei_local->reg_offset = ax->reg_offsets; for (ret = 0; ret < 0x18; ret++) - ax->reg_offsets[ret] = (mem_size / 0x18) * ret; + ax->reg_offsets[ret] = _mem_size * ret; } if (!request_mem_region(mem->start, mem_size, pdev->name)) { diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 4ea717d68..549ae6911 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -96,7 +96,7 @@ static void get_8390_hdr(struct net_device *, static void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void block_output(struct net_device *dev, int count, - const u_char *buf, const int start_page); + const u_char *buf, int start_page); static void axnet_detach(struct pcmcia_device *p_dev); @@ -667,7 +667,7 @@ static void block_input(struct net_device *dev, int count, /*====================================================================*/ static void block_output(struct net_device *dev, int count, - const u_char *buf, const int start_page) + const u_char *buf, int start_page) { unsigned int nic_base = dev->base_addr; diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 57e97910c..c93b6a03a 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -172,8 +172,8 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr * int ring_page); static void 
ne2k_pci_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); -static void ne2k_pci_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); +static void ne2k_pci_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); static const struct ethtool_ops ne2k_pci_ethtool_ops; @@ -563,7 +563,7 @@ static void ne2k_pci_block_input(struct net_device *dev, int count, } static void ne2k_pci_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) + const unsigned char *buf, int start_page) { long nic_base = NE_BASE; unsigned long dma_start; diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 2f79d29f1..ed5a64e05 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -1208,7 +1208,7 @@ static void dma_block_input(struct net_device *dev, int count, /*====================================================================*/ static void dma_block_output(struct net_device *dev, int count, - const u_char *buf, const int start_page) + const u_char *buf, int start_page) { unsigned int nic_base = dev->base_addr; struct pcnet_dev *info = PRIV(dev); @@ -1387,7 +1387,7 @@ static void shmem_block_input(struct net_device *dev, int count, /*====================================================================*/ static void shmem_block_output(struct net_device *dev, int count, - const u_char *buf, const int start_page) + const u_char *buf, int start_page) { void __iomem *shmem = ei_status.mem + (start_page << 8); shmem -= ei_status.tx_start_page << 8; diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 00f9ee3fc..dfc62eade 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1097,7 +1097,7 @@ static void tx_reclaim_skb_timeout(unsigned long lp) tx_reclaim_skb((struct bfin_mac_local *)lp); } -static int bfin_mac_hard_start_xmit(struct sk_buff *skb, +static netdev_tx_t bfin_mac_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bfin_mac_local *lp = netdev_priv(dev); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 6ffdff68b..8b96f60a4 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -412,7 +412,7 @@ static void emac_timeout(struct net_device *dev) /* Hardware start transmission. * Send a packet to media from the upper layer. */ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct emac_board_info *db = netdev_priv(dev); unsigned long channel; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index a0eee7218..9af876bbf 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -543,7 +543,7 @@ static irqreturn_t altera_isr(int irq, void *dev_id) * physically contiguous fragment starting at * skb->data, for length of skb_headlen(skb). 
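The tse_start_xmit conversion that follows, like the caif, CAN, bfin_mac and sun4i-emac conversions above and the many lance and xgbe ones further down, only changes the declared return type: .ndo_start_xmit is declared as returning netdev_tx_t, so a driver xmit function declared to return a bare int does not match that member's type exactly, and these hunks make the prototypes agree. A hedged sketch of the signature the patch converges on, with a hypothetical foo driver:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Illustrative only; foo_start_xmit is not one of the drivers patched here. */
    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* hand the frame to hardware here ... */
            dev_kfree_skb(skb);
            return NETDEV_TX_OK;    /* a netdev_tx_t value, not a plain int */
    }

    static const struct net_device_ops foo_netdev_ops = {
            .ndo_start_xmit = foo_start_xmit,       /* pointer types now match exactly */
    };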
*/ -static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); unsigned int txsize = priv->tx_ring_size; @@ -1233,7 +1233,7 @@ static int tse_shutdown(struct net_device *dev) return 0; } -static struct net_device_ops altera_tse_netdev_ops = { +static net_device_ops_no_const altera_tse_netdev_ops __read_only = { .ndo_open = tse_open, .ndo_stop = tse_shutdown, .ndo_start_xmit = tse_start_xmit, @@ -1470,11 +1470,13 @@ static int altera_tse_probe(struct platform_device *pdev) ndev->netdev_ops = &altera_tse_netdev_ops; altera_tse_set_ethtool_ops(ndev); + pax_open_kernel(); altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode; if (priv->hash_filter) altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode_hashfilter; + pax_close_kernel(); /* Scatter/gather IO is not supported, * so it is turned off diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index dc57f2759..4f8eae5e5 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -535,7 +535,7 @@ void lance_tx_timeout(struct net_device *dev) } EXPORT_SYMBOL_GPL(lance_tx_timeout); -int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h index e9e0be313..1b8e3af93 100644 --- a/drivers/net/ethernet/amd/7990.h +++ b/drivers/net/ethernet/amd/7990.h @@ -240,7 +240,7 @@ struct lance_private { /* Now the prototypes we export */ int lance_open(struct net_device *dev); int lance_close(struct net_device *dev); -int lance_start_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev); void lance_set_multicast(struct net_device *dev); void lance_tx_timeout(struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index f92cc9715..e357b9b20 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1690,8 +1690,9 @@ static int amd8111e_resume(struct pci_dev *pci_dev) return 0; } -static void amd8111e_config_ipg(struct net_device *dev) +static void amd8111e_config_ipg(unsigned long _dev) { + struct net_device *dev = (struct net_device *)_dev; struct amd8111e_priv *lp = netdev_priv(dev); struct ipg_info *ipg_data = &lp->ipg_data; void __iomem *mmio = lp->mmio; @@ -1904,7 +1905,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, if(lp->options & OPTION_DYN_IPG_ENABLE){ init_timer(&lp->ipg_data.ipg_timer); lp->ipg_data.ipg_timer.data = (unsigned long) dev; - lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg; + lp->ipg_data.ipg_timer.function = &amd8111e_config_ipg; lp->ipg_data.ipg_timer.expires = jiffies + IPG_CONVERGE_JIFFIES; lp->ipg_data.ipg = DEFAULT_IPG; diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index d2bc8e5dc..2285a75d4 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -339,7 +339,7 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr *init_rec ); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int 
lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); static irqreturn_t lance_interrupt( int irq, void *dev_id ); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -770,7 +770,7 @@ static void lance_tx_timeout (struct net_device *dev) /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) { struct lance_private *lp = netdev_priv(dev); struct lance_ioreg *IO = lp->iobase; diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index b799c7ac8..58bd5b38b 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -893,7 +893,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index c22bf52d3..a83f5f63c 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -318,7 +318,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *); static void pcnet32_load_multicast(struct net_device *dev); static void pcnet32_set_multicast_list(struct net_device *); static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); -static void pcnet32_watchdog(struct net_device *); +static void pcnet32_watchdog(unsigned long); static int mdio_read(struct net_device *dev, int phy_id, int reg_num); static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val); @@ -1915,7 +1915,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) init_timer(&lp->watchdog_timer); lp->watchdog_timer.data = (unsigned long)dev; - lp->watchdog_timer.function = (void *)&pcnet32_watchdog; + lp->watchdog_timer.function = &pcnet32_watchdog; /* The PCNET32-specific entries in the device structure. */ dev->netdev_ops = &pcnet32_netdev_ops; @@ -2837,8 +2837,9 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) * Could possibly be changed to use mii_check_media instead. 
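The amd8111e_config_ipg hunk above and the pcnet32_watchdog hunk that continues below make the timer callbacks take the unsigned long that the timer core in this kernel generation actually passes, instead of hiding the mismatch behind a (void *) cast on the .function assignment. A small sketch of the corrected shape, assuming the pre-timer_setup() init_timer() API used in this tree and hypothetical foo_* names:

    #include <linux/netdevice.h>
    #include <linux/timer.h>

    /* Illustrative only. */
    struct foo_priv {
            struct timer_list watchdog_timer;
    };

    static void foo_watchdog(unsigned long _dev)
    {
            struct net_device *dev = (struct net_device *)_dev;

            if (!netif_running(dev))
                    return;
            /* otherwise check link state and rearm the timer */
    }

    static void foo_timer_init(struct foo_priv *lp, struct net_device *dev)
    {
            init_timer(&lp->watchdog_timer);
            lp->watchdog_timer.data = (unsigned long)dev;
            lp->watchdog_timer.function = foo_watchdog;     /* no (void *) cast needed */
    }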
*/ -static void pcnet32_watchdog(struct net_device *dev) +static void pcnet32_watchdog(unsigned long _dev) { + struct net_device *dev = (struct net_device *)_dev; struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 3d8c6b2cd..35160ad4e 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -235,7 +235,7 @@ struct lance_private { static int lance_probe( struct net_device *dev); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); static irqreturn_t lance_interrupt( int irq, void *dev_id); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -511,7 +511,7 @@ static void lance_init_ring( struct net_device *dev ) } -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) { struct lance_private *lp = netdev_priv(dev); int entry, len; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 9b56b4025..f183a4d4d 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, skblen, len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index bbef95973..999ab1d1b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1283,14 +1283,14 @@ do { \ * operations, everything works on mask values. 
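From here on the xgbe changes are all one conversion: xgbe_prv_data used to embed writable hw_if/phy_if/desc_if structures that xgbe_init_all_fptrs() filled in at probe time, and the patch turns each into a pointer to a statically initialised const operations table (default_xgbe_hw_if and friends, defined further down), so every pdata->hw_if.foo() call site, starting with the XMDIO_READ/XMDIO_WRITE macros just below, becomes pdata->hw_if->foo(). A condensed sketch of the before/after shape, with hypothetical foo_* names:

    /* Illustrative only; a miniature version of the xgbe hw_if conversion. */
    struct foo_prv_data;

    struct foo_hw_if {
            int  (*init)(struct foo_prv_data *pdata);
            void (*exit)(struct foo_prv_data *pdata);
    };

    struct foo_prv_data {
            const struct foo_hw_if *hw_if;  /* was: struct foo_hw_if hw_if; */
    };

    static int  foo_hw_init(struct foo_prv_data *pdata) { return 0; }
    static void foo_hw_exit(struct foo_prv_data *pdata) { }

    /* replaces a foo_init_function_ptrs() helper that assigned each member at runtime */
    static const struct foo_hw_if default_foo_hw_if = {
            .init = foo_hw_init,
            .exit = foo_hw_exit,
    };

    static int foo_probe(struct foo_prv_data *pdata)
    {
            pdata->hw_if = &default_foo_hw_if;
            return pdata->hw_if->init(pdata);       /* call sites switch from '.' to '->' */
    }

The table can then live in .rodata, which is the point of the conversion.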
*/ #define XMDIO_READ(_pdata, _mmd, _reg) \ - ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \ + ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff))) #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \ (XMDIO_READ((_pdata), _mmd, _reg) & _mask) #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \ - ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \ + ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val))) #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c index 895d35639..b1c866e60 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -202,7 +202,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, pdata->num_tcs = max_tc + 1; memcpy(pdata->ets, ets, sizeof(*pdata->ets)); - pdata->hw_if.config_dcb_tc(pdata); + pdata->hw_if->config_dcb_tc(pdata); return 0; } @@ -249,7 +249,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev, memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc)); - pdata->hw_if.config_dcb_pfc(pdata); + pdata->hw_if->config_dcb_pfc(pdata); return 0; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index b3bc87fe3..5bdfdd34a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -353,7 +353,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) { - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_ring_data *rdata; @@ -394,7 +394,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) { - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_ring_desc *rdesc; @@ -628,17 +628,12 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) return 0; } -void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) -{ - DBGPR("-->xgbe_init_function_ptrs_desc\n"); - - desc_if->alloc_ring_resources = xgbe_alloc_ring_resources; - desc_if->free_ring_resources = xgbe_free_ring_resources; - desc_if->map_tx_skb = xgbe_map_tx_skb; - desc_if->map_rx_buffer = xgbe_map_rx_buffer; - desc_if->unmap_rdata = xgbe_unmap_rdata; - desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init; - desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init; - - DBGPR("<--xgbe_init_function_ptrs_desc\n"); -} +const struct xgbe_desc_if default_xgbe_desc_if = { + .alloc_ring_resources = xgbe_alloc_ring_resources, + .free_ring_resources = xgbe_free_ring_resources, + .map_tx_skb = xgbe_map_tx_skb, + .map_rx_buffer = xgbe_map_rx_buffer, + .unmap_rdata = xgbe_unmap_rdata, + .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init, + .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init, +}; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 1babcc11a..aa7f8f4ee 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -2816,7 +2816,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) static int xgbe_init(struct xgbe_prv_data *pdata) { - struct xgbe_desc_if 
*desc_if = &pdata->desc_if; + struct xgbe_desc_if *desc_if = pdata->desc_if; int ret; DBGPR("-->xgbe_init\n"); @@ -2882,107 +2882,102 @@ static int xgbe_init(struct xgbe_prv_data *pdata) return 0; } -void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) -{ - DBGPR("-->xgbe_init_function_ptrs\n"); - - hw_if->tx_complete = xgbe_tx_complete; +const struct xgbe_hw_if default_xgbe_hw_if = { + .tx_complete = xgbe_tx_complete, - hw_if->set_mac_address = xgbe_set_mac_address; - hw_if->config_rx_mode = xgbe_config_rx_mode; + .set_mac_address = xgbe_set_mac_address, + .config_rx_mode = xgbe_config_rx_mode, - hw_if->enable_rx_csum = xgbe_enable_rx_csum; - hw_if->disable_rx_csum = xgbe_disable_rx_csum; + .enable_rx_csum = xgbe_enable_rx_csum, + .disable_rx_csum = xgbe_disable_rx_csum, - hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; - hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; - hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; - hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; - hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; + .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping, + .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping, + .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering, + .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering, + .update_vlan_hash_table = xgbe_update_vlan_hash_table, - hw_if->read_mmd_regs = xgbe_read_mmd_regs; - hw_if->write_mmd_regs = xgbe_write_mmd_regs; + .read_mmd_regs = xgbe_read_mmd_regs, + .write_mmd_regs = xgbe_write_mmd_regs, - hw_if->set_gmii_speed = xgbe_set_gmii_speed; - hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed; - hw_if->set_xgmii_speed = xgbe_set_xgmii_speed; + .set_gmii_speed = xgbe_set_gmii_speed, + .set_gmii_2500_speed = xgbe_set_gmii_2500_speed, + .set_xgmii_speed = xgbe_set_xgmii_speed, - hw_if->enable_tx = xgbe_enable_tx; - hw_if->disable_tx = xgbe_disable_tx; - hw_if->enable_rx = xgbe_enable_rx; - hw_if->disable_rx = xgbe_disable_rx; + .enable_tx = xgbe_enable_tx, + .disable_tx = xgbe_disable_tx, + .enable_rx = xgbe_enable_rx, + .disable_rx = xgbe_disable_rx, - hw_if->powerup_tx = xgbe_powerup_tx; - hw_if->powerdown_tx = xgbe_powerdown_tx; - hw_if->powerup_rx = xgbe_powerup_rx; - hw_if->powerdown_rx = xgbe_powerdown_rx; + .powerup_tx = xgbe_powerup_tx, + .powerdown_tx = xgbe_powerdown_tx, + .powerup_rx = xgbe_powerup_rx, + .powerdown_rx = xgbe_powerdown_rx, - hw_if->dev_xmit = xgbe_dev_xmit; - hw_if->dev_read = xgbe_dev_read; - hw_if->enable_int = xgbe_enable_int; - hw_if->disable_int = xgbe_disable_int; - hw_if->init = xgbe_init; - hw_if->exit = xgbe_exit; + .dev_xmit = xgbe_dev_xmit, + .dev_read = xgbe_dev_read, + .enable_int = xgbe_enable_int, + .disable_int = xgbe_disable_int, + .init = xgbe_init, + .exit = xgbe_exit, /* Descriptor related Sequences have to be initialized here */ - hw_if->tx_desc_init = xgbe_tx_desc_init; - hw_if->rx_desc_init = xgbe_rx_desc_init; - hw_if->tx_desc_reset = xgbe_tx_desc_reset; - hw_if->rx_desc_reset = xgbe_rx_desc_reset; - hw_if->is_last_desc = xgbe_is_last_desc; - hw_if->is_context_desc = xgbe_is_context_desc; - hw_if->tx_start_xmit = xgbe_tx_start_xmit; + .tx_desc_init = xgbe_tx_desc_init, + .rx_desc_init = xgbe_rx_desc_init, + .tx_desc_reset = xgbe_tx_desc_reset, + .rx_desc_reset = xgbe_rx_desc_reset, + .is_last_desc = xgbe_is_last_desc, + .is_context_desc = xgbe_is_context_desc, + .tx_start_xmit = xgbe_tx_start_xmit, /* For FLOW ctrl */ - hw_if->config_tx_flow_control = 
xgbe_config_tx_flow_control; - hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; + .config_tx_flow_control = xgbe_config_tx_flow_control, + .config_rx_flow_control = xgbe_config_rx_flow_control, /* For RX coalescing */ - hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; - hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; - hw_if->usec_to_riwt = xgbe_usec_to_riwt; - hw_if->riwt_to_usec = xgbe_riwt_to_usec; + .config_rx_coalesce = xgbe_config_rx_coalesce, + .config_tx_coalesce = xgbe_config_tx_coalesce, + .usec_to_riwt = xgbe_usec_to_riwt, + .riwt_to_usec = xgbe_riwt_to_usec, /* For RX and TX threshold config */ - hw_if->config_rx_threshold = xgbe_config_rx_threshold; - hw_if->config_tx_threshold = xgbe_config_tx_threshold; + .config_rx_threshold = xgbe_config_rx_threshold, + .config_tx_threshold = xgbe_config_tx_threshold, /* For RX and TX Store and Forward Mode config */ - hw_if->config_rsf_mode = xgbe_config_rsf_mode; - hw_if->config_tsf_mode = xgbe_config_tsf_mode; + .config_rsf_mode = xgbe_config_rsf_mode, + .config_tsf_mode = xgbe_config_tsf_mode, /* For TX DMA Operating on Second Frame config */ - hw_if->config_osp_mode = xgbe_config_osp_mode; + .config_osp_mode = xgbe_config_osp_mode, /* For RX and TX PBL config */ - hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val; - hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val; - hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val; - hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val; - hw_if->config_pblx8 = xgbe_config_pblx8; + .config_rx_pbl_val = xgbe_config_rx_pbl_val, + .get_rx_pbl_val = xgbe_get_rx_pbl_val, + .config_tx_pbl_val = xgbe_config_tx_pbl_val, + .get_tx_pbl_val = xgbe_get_tx_pbl_val, + .config_pblx8 = xgbe_config_pblx8, /* For MMC statistics support */ - hw_if->tx_mmc_int = xgbe_tx_mmc_int; - hw_if->rx_mmc_int = xgbe_rx_mmc_int; - hw_if->read_mmc_stats = xgbe_read_mmc_stats; + .tx_mmc_int = xgbe_tx_mmc_int, + .rx_mmc_int = xgbe_rx_mmc_int, + .read_mmc_stats = xgbe_read_mmc_stats, /* For PTP config */ - hw_if->config_tstamp = xgbe_config_tstamp; - hw_if->update_tstamp_addend = xgbe_update_tstamp_addend; - hw_if->set_tstamp_time = xgbe_set_tstamp_time; - hw_if->get_tstamp_time = xgbe_get_tstamp_time; - hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; + .config_tstamp = xgbe_config_tstamp, + .update_tstamp_addend = xgbe_update_tstamp_addend, + .set_tstamp_time = xgbe_set_tstamp_time, + .get_tstamp_time = xgbe_get_tstamp_time, + .get_tx_tstamp = xgbe_get_tx_tstamp, /* For Data Center Bridging config */ - hw_if->config_tc = xgbe_config_tc; - hw_if->config_dcb_tc = xgbe_config_dcb_tc; - hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; + .config_tc = xgbe_config_tc, + .config_dcb_tc = xgbe_config_dcb_tc, + .config_dcb_pfc = xgbe_config_dcb_pfc, /* For Receive Side Scaling */ - hw_if->enable_rss = xgbe_enable_rss; - hw_if->disable_rss = xgbe_disable_rss; - hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; - hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; - - DBGPR("<--xgbe_init_function_ptrs\n"); -} + .enable_rss = xgbe_enable_rss, + .disable_rss = xgbe_disable_rss, + .set_rss_hash_key = xgbe_set_rss_hash_key, + .set_rss_lookup_table = xgbe_set_rss_lookup_table, +}; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7f9216db0..26872f65d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -245,7 +245,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, * support, tell it now */ if (ring->tx.xmit_more) - 
pdata->hw_if.tx_start_xmit(channel, ring); + pdata->hw_if->tx_start_xmit(channel, ring); return NETDEV_TX_BUSY; } @@ -273,7 +273,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata) { - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; struct xgbe_channel *channel; enum xgbe_int int_id; unsigned int i; @@ -295,7 +295,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata) static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) { - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; struct xgbe_channel *channel; enum xgbe_int int_id; unsigned int i; @@ -318,7 +318,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) static irqreturn_t xgbe_isr(int irq, void *data) { struct xgbe_prv_data *pdata = data; - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; struct xgbe_channel *channel; unsigned int dma_isr, dma_ch_isr; unsigned int mac_isr, mac_tssr; @@ -447,7 +447,7 @@ static void xgbe_service(struct work_struct *work) struct xgbe_prv_data, service_work); - pdata->phy_if.phy_status(pdata); + pdata->phy_if->phy_status(pdata); } static void xgbe_service_timer(unsigned long data) @@ -706,7 +706,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata) void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) { - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; DBGPR("-->xgbe_init_tx_coalesce\n"); @@ -720,7 +720,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) { - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; DBGPR("-->xgbe_init_rx_coalesce\n"); @@ -735,7 +735,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) { - struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_desc_if *desc_if = pdata->desc_if; struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_ring_data *rdata; @@ -760,7 +760,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) { - struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_desc_if *desc_if = pdata->desc_if; struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_ring_data *rdata; @@ -788,13 +788,13 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) pdata->phy_link = -1; pdata->phy_speed = SPEED_UNKNOWN; - return pdata->phy_if.phy_reset(pdata); + return pdata->phy_if->phy_reset(pdata); } int xgbe_powerdown(struct net_device *netdev, unsigned int caller) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; unsigned long flags; DBGPR("-->xgbe_powerdown\n"); @@ -833,7 +833,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller) int xgbe_powerup(struct net_device *netdev, unsigned int caller) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; unsigned long flags; DBGPR("-->xgbe_powerup\n"); @@ -870,8 +870,8 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller) static int xgbe_start(struct xgbe_prv_data *pdata) { - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_phy_if *phy_if = &pdata->phy_if; + struct xgbe_hw_if 
*hw_if = pdata->hw_if; + struct xgbe_phy_if *phy_if = pdata->phy_if; struct net_device *netdev = pdata->netdev; int ret; @@ -914,8 +914,8 @@ static int xgbe_start(struct xgbe_prv_data *pdata) static void xgbe_stop(struct xgbe_prv_data *pdata) { - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_phy_if *phy_if = &pdata->phy_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; + struct xgbe_phy_if *phy_if = pdata->phy_if; struct xgbe_channel *channel; struct net_device *netdev = pdata->netdev; struct netdev_queue *txq; @@ -1143,7 +1143,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, return -ERANGE; } - pdata->hw_if.config_tstamp(pdata, mac_tscr); + pdata->hw_if->config_tstamp(pdata, mac_tscr); memcpy(&pdata->tstamp_config, &config, sizeof(config)); @@ -1292,7 +1292,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, static int xgbe_open(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_desc_if *desc_if = pdata->desc_if; int ret; DBGPR("-->xgbe_open\n"); @@ -1364,7 +1364,7 @@ static int xgbe_open(struct net_device *netdev) static int xgbe_close(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_desc_if *desc_if = pdata->desc_if; DBGPR("-->xgbe_close\n"); @@ -1388,11 +1388,11 @@ static int xgbe_close(struct net_device *netdev) return 0; } -static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; + struct xgbe_desc_if *desc_if = pdata->desc_if; struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_packet_data *packet; @@ -1461,7 +1461,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) static void xgbe_set_rx_mode(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; DBGPR("-->xgbe_set_rx_mode\n"); @@ -1473,7 +1473,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev) static int xgbe_set_mac_address(struct net_device *netdev, void *addr) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; struct sockaddr *saddr = addr; DBGPR("-->xgbe_set_mac_address\n"); @@ -1548,7 +1548,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, DBGPR("-->%s\n", __func__); - pdata->hw_if.read_mmc_stats(pdata); + pdata->hw_if->read_mmc_stats(pdata); s->rx_packets = pstats->rxframecount_gb; s->rx_bytes = pstats->rxoctetcount_gb; @@ -1575,7 +1575,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; DBGPR("-->%s\n", __func__); @@ -1591,7 +1591,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; DBGPR("-->%s\n", __func__); @@ -1641,7 +1641,7 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 
proto, return -EINVAL; pdata->num_tcs = tc; - pdata->hw_if.config_tc(pdata); + pdata->hw_if->config_tc(pdata); return 0; } @@ -1650,7 +1650,7 @@ static int xgbe_set_features(struct net_device *netdev, netdev_features_t features) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; int ret = 0; @@ -1716,8 +1716,8 @@ const struct net_device_ops *xgbe_get_netdev_ops(void) static void xgbe_rx_refresh(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; + struct xgbe_desc_if *desc_if = pdata->desc_if; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; @@ -1794,8 +1794,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, static int xgbe_tx_poll(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; + struct xgbe_desc_if *desc_if = pdata->desc_if; struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; @@ -1865,7 +1865,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) { struct xgbe_prv_data *pdata = channel->pdata; - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; struct xgbe_packet_data *packet; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 4007b429c..57861614e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -206,7 +206,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev, u8 *stat; int i; - pdata->hw_if.read_mmc_stats(pdata); + pdata->hw_if->read_mmc_stats(pdata); for (i = 0; i < XGBE_STATS_COUNT; i++) { stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset; *data++ = *(u64 *)stat; @@ -267,7 +267,7 @@ static int xgbe_set_pauseparam(struct net_device *netdev, pdata->phy.advertising ^= ADVERTISED_Asym_Pause; if (netif_running(netdev)) - ret = pdata->phy_if.phy_config_aneg(pdata); + ret = pdata->phy_if->phy_config_aneg(pdata); return ret; } @@ -368,7 +368,7 @@ static int xgbe_set_settings(struct net_device *netdev, pdata->phy.advertising &= ~ADVERTISED_Autoneg; if (netif_running(netdev)) - ret = pdata->phy_if.phy_config_aneg(pdata); + ret = pdata->phy_if->phy_config_aneg(pdata); return ret; } @@ -422,7 +422,7 @@ static int xgbe_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; unsigned int rx_frames, rx_riwt, rx_usecs; unsigned int tx_frames; @@ -545,7 +545,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_hw_if *hw_if = pdata->hw_if; unsigned int ret; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c 
b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 4f7635178..96d3fb14c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -202,13 +202,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata) DBGPR("<--xgbe_default_config\n"); } -static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata) -{ - xgbe_init_function_ptrs_dev(&pdata->hw_if); - xgbe_init_function_ptrs_phy(&pdata->phy_if); - xgbe_init_function_ptrs_desc(&pdata->desc_if); -} - #ifdef CONFIG_ACPI static int xgbe_acpi_support(struct xgbe_prv_data *pdata) { @@ -647,10 +640,12 @@ static int xgbe_probe(struct platform_device *pdev) memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len); /* Set all the function pointers */ - xgbe_init_all_fptrs(pdata); + pdata->hw_if = &default_xgbe_hw_if; + pdata->phy_if = &default_xgbe_phy_if; + pdata->desc_if = &default_xgbe_desc_if; /* Issue software reset to device */ - pdata->hw_if.exit(pdata); + pdata->hw_if->exit(pdata); /* Populate the hardware features */ xgbe_get_all_hw_features(pdata); @@ -704,7 +699,7 @@ static int xgbe_probe(struct platform_device *pdev) XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Call MDIO/PHY initialization routine */ - pdata->phy_if.phy_init(pdata); + pdata->phy_if->phy_init(pdata); /* Set device operations */ netdev->netdev_ops = xgbe_get_netdev_ops(); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 84c5d296d..697b4f2ff 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -202,7 +202,7 @@ static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata) xgbe_an_enable_kr_training(pdata); /* Set MAC to 10G speed */ - pdata->hw_if.set_xgmii_speed(pdata); + pdata->hw_if->set_xgmii_speed(pdata); /* Set PCS to KR/10G speed */ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); @@ -250,7 +250,7 @@ static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata) xgbe_an_disable_kr_training(pdata); /* Set MAC to 2.5G speed */ - pdata->hw_if.set_gmii_2500_speed(pdata); + pdata->hw_if->set_gmii_2500_speed(pdata); /* Set PCS to KX/1G speed */ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); @@ -298,7 +298,7 @@ static void xgbe_gmii_mode(struct xgbe_prv_data *pdata) xgbe_an_disable_kr_training(pdata); /* Set MAC to 1G speed */ - pdata->hw_if.set_gmii_speed(pdata); + pdata->hw_if->set_gmii_speed(pdata); /* Set PCS to KX/1G speed */ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); @@ -877,13 +877,13 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) if (pdata->tx_pause != pdata->phy.tx_pause) { new_state = 1; - pdata->hw_if.config_tx_flow_control(pdata); + pdata->hw_if->config_tx_flow_control(pdata); pdata->tx_pause = pdata->phy.tx_pause; } if (pdata->rx_pause != pdata->phy.rx_pause) { new_state = 1; - pdata->hw_if.config_rx_flow_control(pdata); + pdata->hw_if->config_rx_flow_control(pdata); pdata->rx_pause = pdata->phy.rx_pause; } @@ -1348,14 +1348,13 @@ static void xgbe_phy_init(struct xgbe_prv_data *pdata) xgbe_dump_phy_registers(pdata); } -void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if) -{ - phy_if->phy_init = xgbe_phy_init; +const struct xgbe_phy_if default_xgbe_phy_if = { + .phy_init = xgbe_phy_init, - phy_if->phy_reset = xgbe_phy_reset; - phy_if->phy_start = xgbe_phy_start; - phy_if->phy_stop = xgbe_phy_stop; + .phy_reset = xgbe_phy_reset, + .phy_start = xgbe_phy_start, + .phy_stop = xgbe_phy_stop, - phy_if->phy_status = xgbe_phy_status; - phy_if->phy_config_aneg = 
xgbe_phy_config_aneg; -} + .phy_status = xgbe_phy_status, + .phy_config_aneg = xgbe_phy_config_aneg, +}; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c index b03e4f58d..78e4cc4e6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c @@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc) tstamp_cc); u64 nsec; - nsec = pdata->hw_if.get_tstamp_time(pdata); + nsec = pdata->hw_if->get_tstamp_time(pdata); return nsec; } @@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta) spin_lock_irqsave(&pdata->tstamp_lock, flags); - pdata->hw_if.update_tstamp_addend(pdata, addend); + pdata->hw_if->update_tstamp_addend(pdata, addend); spin_unlock_irqrestore(&pdata->tstamp_lock, flags); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 5dd17dcea..962ff6a2f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -786,9 +786,9 @@ struct xgbe_prv_data { int dev_irq; unsigned int per_channel_irq; - struct xgbe_hw_if hw_if; - struct xgbe_phy_if phy_if; - struct xgbe_desc_if desc_if; + const struct xgbe_hw_if *hw_if; + const struct xgbe_phy_if *phy_if; + const struct xgbe_desc_if *desc_if; /* AXI DMA settings */ unsigned int coherent; @@ -951,6 +951,10 @@ struct xgbe_prv_data { #endif }; +extern const struct xgbe_hw_if default_xgbe_hw_if; +extern const struct xgbe_phy_if default_xgbe_phy_if; +extern const struct xgbe_desc_if default_xgbe_desc_if; + /* Function prototypes*/ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 8158d4698..f22bcfcb4 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -122,7 +122,7 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) } } -static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) +static irqreturn_t xgene_enet_rx_irq(int irq, void *data) { struct xgene_enet_desc_ring *rx_ring = data; @@ -642,7 +642,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, return processed; } -static int xgene_enet_napi(struct napi_struct *napi, const int budget) +static int xgene_enet_napi(struct napi_struct *napi, int budget) { struct xgene_enet_desc_ring *ring; int processed; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index be865b4da..75a631223 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -611,7 +611,7 @@ static struct net_device_stats *arc_emac_stats(struct net_device *ndev) * * This function is invoked from upper layers to initiate transmission. 
*/ -static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); unsigned int len, *txbd_curr = &priv->txbd_curr; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c0f84b735..85e14335c 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1710,7 +1710,7 @@ static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct alx_priv *alx = pci_get_drvdata(pdev); struct net_device *netdev = alx->dev; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index a3200ea6d..d02b523a9 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2704,7 +2704,7 @@ static void atl1c_remove(struct pci_dev *pdev) * this device has been detected. */ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1c_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 974713b19..5e0112baf 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -2475,7 +2475,7 @@ static void atl1e_remove(struct pci_dev *pdev) * this device has been detected. */ static pci_ers_result_t -atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +atl1e_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index e078d8da9..f588e89ea 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -396,7 +396,7 @@ static void nb8800_tx_dma_start_irq(struct net_device *dev) spin_unlock(&priv->tx_lock); } -static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t nb8800_xmit(struct sk_buff *skb, struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); struct nb8800_tx_desc *txd; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 537090952..8e9962eb4 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -571,7 +571,7 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) /* * tx request callback */ -static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcm_enet_priv *priv; struct bcm_enet_desc *desc; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index dd06f62e1..a55eceeb4 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8728,7 +8728,7 @@ static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume); * this device has been detected. 
*/ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 243cb9748..d213a5f60 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1125,7 +1125,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) static inline void bnx2x_init_bp_objs(struct bnx2x *bp) { /* RX_MODE controlling object */ - bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); + bnx2x_init_rx_mode_obj(bp); /* multicast configuration controlling object */ bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 1fb80100e..0a8dc2050 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -5621,7 +5621,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, return 0; } -static int bnx2x_link_settings_status(struct bnx2x_phy *phy, +static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -5695,7 +5695,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, return rc; } -static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, +static u8 bnx2x_warpcore_read_status(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -7436,7 +7436,7 @@ static void bnx2x_8073_specific_func(struct bnx2x_phy *phy, } } -static int bnx2x_8073_config_init(struct bnx2x_phy *phy, +static void bnx2x_8073_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -7499,7 +7499,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, if (params->loopback_mode == LOOPBACK_EXT) { bnx2x_807x_force_10G(bp, phy); DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); - return 0; + return; } else { bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002); @@ -7581,7 +7581,6 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n", ((val & (1<<5)) > 0), ((val & (1<<7)) > 0)); - return 0; } static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, @@ -7748,7 +7747,7 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, /******************************************************************/ /* BCM8705 PHY SECTION */ /******************************************************************/ -static int bnx2x_8705_config_init(struct bnx2x_phy *phy, +static void bnx2x_8705_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -7772,7 +7771,6 @@ static int bnx2x_8705_config_init(struct bnx2x_phy *phy, MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1); /* BCM8705 doesn't have microcode, hence the 0 */ bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0); - return 0; } static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, @@ -8959,7 +8957,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, /******************************************************************/ /* BCM8706 PHY SECTION */ /******************************************************************/ -static u8 
bnx2x_8706_config_init(struct bnx2x_phy *phy, +static void bnx2x_8706_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -9061,11 +9059,9 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); } - - return 0; } -static int bnx2x_8706_read_status(struct bnx2x_phy *phy, +static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -9142,7 +9138,7 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy, } -static int bnx2x_8726_config_init(struct bnx2x_phy *phy, +static void bnx2x_8726_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -9223,8 +9219,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy, phy->tx_preemphasis[1]); } - return 0; - } static void bnx2x_8726_link_reset(struct bnx2x_phy *phy, @@ -9360,7 +9354,7 @@ static void bnx2x_8727_config_speed(struct bnx2x_phy *phy, } } -static int bnx2x_8727_config_init(struct bnx2x_phy *phy, +static void bnx2x_8727_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -9442,8 +9436,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, (tmp2 & 0x7fff)); } - - return 0; } static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, @@ -10018,7 +10010,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, return 0; } -static int bnx2x_8481_config_init(struct bnx2x_phy *phy, +static void bnx2x_8481_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -10032,7 +10024,7 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy, bnx2x_wait_reset_complete(bp, phy, params); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); - return bnx2x_848xx_cmn_config_init(phy, params, vars); + bnx2x_848xx_cmn_config_init(phy, params, vars); } #define PHY848xx_CMDHDLR_WAIT 300 @@ -10282,7 +10274,7 @@ static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, return reset_gpios; } -static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, +static void bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; @@ -10311,8 +10303,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, udelay(10); DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n", reset_gpios); - - return 0; } static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, @@ -10355,7 +10345,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, } #define PHY84833_CONSTANT_LATENCY 1193 -static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, +static void bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -10502,7 +10492,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, if (rc) { DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); bnx2x_8483x_disable_eee(phy, params, vars); - return rc; + return; } if ((phy->req_duplex == DUPLEX_FULL) && @@ -10514,7 +10504,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, rc = bnx2x_8483x_disable_eee(phy, params, vars); if (rc) { DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); - return rc; + return; } } else { vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; @@ -10553,7 +10543,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, MDIO_84833_TOP_CFG_XGPHY_STRAP1, (u16)~MDIO_84833_SUPER_ISOLATE); } - return rc; } static u8 
bnx2x_848xx_read_status(struct bnx2x_phy *phy, @@ -11113,7 +11102,7 @@ static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy, } } -static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, +static void bnx2x_54618se_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -11315,8 +11304,6 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, autoneg_val); - - return 0; } @@ -11540,7 +11527,7 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy, MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); } -static int bnx2x_7101_config_init(struct bnx2x_phy *phy, +static void bnx2x_7101_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -11577,7 +11564,6 @@ static int bnx2x_7101_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2); bnx2x_save_spirom_version(bp, params->port, (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr); - return 0; } static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, @@ -11746,9 +11732,9 @@ static const struct bnx2x_phy phy_serdes = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_xgxs_config_init, - .read_status = (read_status_t)bnx2x_link_settings_status, - .link_reset = (link_reset_t)bnx2x_int_link_reset, + .config_init = bnx2x_xgxs_config_init, + .read_status = bnx2x_link_settings_status, + .link_reset = bnx2x_int_link_reset, .config_loopback = (config_loopback_t)NULL, .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, @@ -11782,14 +11768,14 @@ static const struct bnx2x_phy phy_xgxs = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_xgxs_config_init, - .read_status = (read_status_t)bnx2x_link_settings_status, - .link_reset = (link_reset_t)bnx2x_int_link_reset, - .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, + .config_init = bnx2x_xgxs_config_init, + .read_status = bnx2x_link_settings_status, + .link_reset = bnx2x_int_link_reset, + .config_loopback = bnx2x_set_xgxs_loopback, .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func + .phy_specific_func = bnx2x_xgxs_specific_func }; static const struct bnx2x_phy phy_warpcore = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, @@ -11820,12 +11806,12 @@ static const struct bnx2x_phy phy_warpcore = { .speed_cap_mask = 0, /* req_duplex = */0, /* rsrv = */0, - .config_init = (config_init_t)bnx2x_warpcore_config_init, - .read_status = (read_status_t)bnx2x_warpcore_read_status, - .link_reset = (link_reset_t)bnx2x_warpcore_link_reset, - .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback, + .config_init = bnx2x_warpcore_config_init, + .read_status = bnx2x_warpcore_read_status, + .link_reset = bnx2x_warpcore_link_reset, + .config_loopback = bnx2x_set_warpcore_loopback, .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset, + .hw_reset = bnx2x_warpcore_hw_reset, .set_link_led = (set_link_led_t)NULL, .phy_specific_func = (phy_specific_func_t)NULL }; @@ -11851,13 +11837,13 @@ static const struct bnx2x_phy phy_7101 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_7101_config_init, - .read_status = (read_status_t)bnx2x_7101_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, - .config_loopback = 
(config_loopback_t)bnx2x_7101_config_loopback, - .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver, - .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led, + .config_init = bnx2x_7101_config_init, + .read_status = bnx2x_7101_read_status, + .link_reset = bnx2x_common_ext_link_reset, + .config_loopback = bnx2x_7101_config_loopback, + .format_fw_ver = bnx2x_7101_format_ver, + .hw_reset = bnx2x_7101_hw_reset, + .set_link_led = bnx2x_7101_set_link_led, .phy_specific_func = (phy_specific_func_t)NULL }; static const struct bnx2x_phy phy_8073 = { @@ -11882,14 +11868,14 @@ static const struct bnx2x_phy phy_8073 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8073_config_init, - .read_status = (read_status_t)bnx2x_8073_read_status, - .link_reset = (link_reset_t)bnx2x_8073_link_reset, + .config_init = bnx2x_8073_config_init, + .read_status = bnx2x_8073_read_status, + .link_reset = bnx2x_8073_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .format_fw_ver = bnx2x_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func + .phy_specific_func = bnx2x_8073_specific_func }; static const struct bnx2x_phy phy_8705 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, @@ -11910,11 +11896,11 @@ static const struct bnx2x_phy phy_8705 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8705_config_init, - .read_status = (read_status_t)bnx2x_8705_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, + .config_init = bnx2x_8705_config_init, + .read_status = bnx2x_8705_read_status, + .link_reset = bnx2x_common_ext_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver, + .format_fw_ver = bnx2x_null_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, .phy_specific_func = (phy_specific_func_t)NULL @@ -11939,11 +11925,11 @@ static const struct bnx2x_phy phy_8706 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8706_config_init, - .read_status = (read_status_t)bnx2x_8706_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, + .config_init = bnx2x_8706_config_init, + .read_status = bnx2x_8706_read_status, + .link_reset = bnx2x_common_ext_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .format_fw_ver = bnx2x_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, .phy_specific_func = (phy_specific_func_t)NULL @@ -11971,11 +11957,11 @@ static const struct bnx2x_phy phy_8726 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8726_config_init, - .read_status = (read_status_t)bnx2x_8726_read_status, - .link_reset = (link_reset_t)bnx2x_8726_link_reset, - .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .config_init = bnx2x_8726_config_init, + .read_status = bnx2x_8726_read_status, + .link_reset = bnx2x_8726_link_reset, + .config_loopback = bnx2x_8726_config_loopback, + .format_fw_ver = bnx2x_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, .phy_specific_func = (phy_specific_func_t)NULL @@ -12002,14 +11988,14 @@ static const struct bnx2x_phy phy_8727 = 
{ .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8727_config_init, - .read_status = (read_status_t)bnx2x_8727_read_status, - .link_reset = (link_reset_t)bnx2x_8727_link_reset, + .config_init = bnx2x_8727_config_init, + .read_status = bnx2x_8727_read_status, + .link_reset = bnx2x_8727_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func + .format_fw_ver = bnx2x_format_ver, + .hw_reset = bnx2x_8727_hw_reset, + .set_link_led = bnx2x_8727_set_link_led, + .phy_specific_func = bnx2x_8727_specific_func }; static const struct bnx2x_phy phy_8481 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, @@ -12037,13 +12023,13 @@ static const struct bnx2x_phy phy_8481 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8481_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_8481_link_reset, + .config_init = bnx2x_8481_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_8481_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .format_fw_ver = bnx2x_848xx_format_ver, + .hw_reset = bnx2x_8481_hw_reset, + .set_link_led = bnx2x_848xx_set_link_led, .phy_specific_func = (phy_specific_func_t)NULL }; @@ -12074,14 +12060,14 @@ static const struct bnx2x_phy phy_84823 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_init = bnx2x_848x3_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_848x3_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .format_fw_ver = bnx2x_848xx_format_ver, .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = bnx2x_848xx_specific_func }; static const struct bnx2x_phy phy_84833 = { @@ -12109,14 +12095,14 @@ static const struct bnx2x_phy phy_84833 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_init = bnx2x_848x3_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_848x3_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func + .format_fw_ver = bnx2x_848xx_format_ver, + .hw_reset = bnx2x_84833_hw_reset_phy, + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = bnx2x_848xx_specific_func }; static const struct bnx2x_phy phy_84834 = { @@ -12143,14 +12129,14 @@ static const struct bnx2x_phy phy_84834 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - 
.config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_init = bnx2x_848x3_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_848x3_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func + .format_fw_ver = bnx2x_848xx_format_ver, + .hw_reset = bnx2x_84833_hw_reset_phy, + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = bnx2x_848xx_specific_func }; static const struct bnx2x_phy phy_84858 = { @@ -12177,14 +12163,14 @@ static const struct bnx2x_phy phy_84858 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_init = bnx2x_848x3_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_848x3_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver, - .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func + .format_fw_ver = bnx2x_8485x_format_ver, + .hw_reset = bnx2x_84833_hw_reset_phy, + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = bnx2x_848xx_specific_func }; static const struct bnx2x_phy phy_54618se = { @@ -12211,14 +12197,14 @@ static const struct bnx2x_phy phy_54618se = { .speed_cap_mask = 0, /* req_duplex = */0, /* rsrv = */0, - .config_init = (config_init_t)bnx2x_54618se_config_init, - .read_status = (read_status_t)bnx2x_54618se_read_status, - .link_reset = (link_reset_t)bnx2x_54618se_link_reset, - .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, + .config_init = bnx2x_54618se_config_init, + .read_status = bnx2x_54618se_read_status, + .link_reset = bnx2x_54618se_link_reset, + .config_loopback = bnx2x_54618se_config_loopback, .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func + .set_link_led = bnx2x_5461x_set_link_led, + .phy_specific_func = bnx2x_54618se_specific_func }; /*****************************************************************/ /* */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index b7d251108..a625bae0f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -126,7 +126,7 @@ struct link_vars; struct link_params; struct bnx2x_phy; -typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params, +typedef void (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars); typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars); @@ -134,7 +134,7 @@ typedef void (*link_reset_t)(struct bnx2x_phy *phy, struct link_params *params); typedef void (*config_loopback_t)(struct bnx2x_phy *phy, struct link_params *params); -typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len); +typedef int 
(*format_fw_ver_t)(u32 raw, u8 *str, u16 *len); typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params); typedef void (*set_link_led_t)(struct bnx2x_phy *phy, struct link_params *params, u8 mode); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index bf5e497e0..ad9dc95e0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14163,7 +14163,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) * this device has been detected. */ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *dev = pci_get_drvdata(pdev); struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index cea6bdcde..c9fbf14fb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2576,15 +2576,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp, return rc; } -void bnx2x_init_rx_mode_obj(struct bnx2x *bp, - struct bnx2x_rx_mode_obj *o) +void bnx2x_init_rx_mode_obj(struct bnx2x *bp) { if (CHIP_IS_E1x(bp)) { - o->wait_comp = bnx2x_empty_rx_mode_wait; - o->config_rx_mode = bnx2x_set_rx_mode_e1x; + bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait; + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x; } else { - o->wait_comp = bnx2x_wait_rx_mode_comp_e2; - o->config_rx_mode = bnx2x_set_rx_mode_e2; + bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2; + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 0bf2fd470..e28af4307 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1446,8 +1446,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp, /********************* RX MODE ****************/ -void bnx2x_init_rx_mode_obj(struct bnx2x *bp, - struct bnx2x_rx_mode_obj *o); +void bnx2x_init_rx_mode_obj(struct bnx2x *bp); /** * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f08a20b92..6f9f924ee 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6970,7 +6970,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * this device has been detected. */ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(netdev); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index b84339952..a17660850 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18115,7 +18115,7 @@ static void tg3_shutdown(struct pci_dev *pdev) * this device has been detected. 
*/ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(netdev); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 3b5e98ecb..52b391656 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -150,6 +150,7 @@ #define CHIPREV_ID_5750_A0 0x4000 #define CHIPREV_ID_5750_A1 0x4001 #define CHIPREV_ID_5750_A3 0x4003 +#define CHIPREV_ID_5750_C1 0x4201 #define CHIPREV_ID_5750_C2 0x4202 #define CHIPREV_ID_5752_A0_HW 0x5000 #define CHIPREV_ID_5752_A0 0x6000 diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h index 1d11d666d..8f7a3cb9c 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -34,10 +34,19 @@ struct bfa_sm_table { int state; /*!< state machine encoding */ char *name; /*!< state name for display */ }; -#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) +#define BFA_SM(_sm) (_sm) + +#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (_state)) +#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (_state)) /* State machine with entry actions. */ -typedef void (*bfa_fsm_t)(void *fsm, int event); +struct bfa_ioc; +enum ioc_event; +struct bfa_iocpf; +enum iocpf_event; + +typedef void (*bfa_fsm_ioc_t)(struct bfa_ioc *fsm, enum ioc_event event); +typedef void (*bfa_fsm_iocpf_t)(struct bfa_iocpf *fsm, enum iocpf_event event); /* oc - object class eg. bfa_ioc * st - state, eg. reset @@ -49,16 +58,37 @@ typedef void (*bfa_fsm_t)(void *fsm, int event); static void oc ## _sm_ ## st ## _entry(otype * fsm) #define bfa_fsm_set_state(_fsm, _state) do { \ - (_fsm)->fsm = (bfa_fsm_t)(_state); \ + (_fsm)->fsm = (_state); \ _state ## _entry(_fsm); \ } while (0) #define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event))) -#define bfa_fsm_cmp_state(_fsm, _state) \ - ((_fsm)->fsm == (bfa_fsm_t)(_state)) +#define bfa_fsm_cmp_state(_fsm, _state) ((_fsm)->fsm == (_state)) + +/* For converting from state machine function to state encoding. 
*/ +struct iocpf_sm_table { + bfa_fsm_iocpf_t sm; /*!< state machine function */ + int state; /*!< state machine encoding */ + char *name; /*!< state name for display */ +}; +struct ioc_sm_table { + bfa_fsm_ioc_t sm; /*!< state machine function */ + int state; /*!< state machine encoding */ + char *name; /*!< state name for display */ +}; + +static inline int +iocpf_sm_to_state(const struct iocpf_sm_table *smt, bfa_fsm_iocpf_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} static inline int -bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm) +ioc_sm_to_state(const struct ioc_sm_table *smt, bfa_fsm_ioc_t sm) { int i = 0; diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 9e59663a6..3564807e8 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -122,7 +122,7 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event); -static struct bfa_sm_table ioc_sm_table[] = { +static struct ioc_sm_table ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, @@ -191,7 +191,7 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event); -static struct bfa_sm_table iocpf_sm_table[] = { +static struct iocpf_sm_table iocpf_sm_table[] = { {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, @@ -2862,12 +2862,12 @@ static enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc *ioc) { enum bfa_iocpf_state iocpf_st; - enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); + enum bfa_ioc_state ioc_st = ioc_sm_to_state(ioc_sm_table, ioc->fsm); if (ioc_st == BFA_IOC_ENABLING || ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { - iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); switch (iocpf_st) { case BFA_IOCPF_SEMWAIT: @@ -2985,7 +2985,7 @@ bfa_nw_iocpf_timeout(struct bfa_ioc *ioc) { enum bfa_iocpf_state iocpf_st; - iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); if (iocpf_st == BFA_IOCPF_HWINIT) bfa_ioc_poll_fwinit(ioc); diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 2c0b4c076..97873eb3c 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -156,7 +156,7 @@ struct bfa_ioc_notify { } while (0) struct bfa_iocpf { - bfa_fsm_t fsm; + bfa_fsm_iocpf_t fsm; struct bfa_ioc *ioc; bool fw_mismatch_notified; bool auto_recover; @@ -164,7 +164,7 @@ struct bfa_iocpf { }; struct bfa_ioc { - bfa_fsm_t fsm; + bfa_fsm_ioc_t fsm; struct bfa *bfa; struct bfa_pcidev pcidev; struct timer_list ioc_timer; diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h index 66bc8b5ac..bf6446676 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h @@ -63,8 +63,10 @@ enum bfa_msgq_cmdq_flags { BFA_MSGQ_CMDQ_F_DB_UPDATE = 1, }; +enum cmdq_event; + struct 
bfa_msgq_cmdq { - bfa_fsm_t fsm; + void (*fsm)(struct bfa_msgq_cmdq *, enum cmdq_event); enum bfa_msgq_cmdq_flags flags; u16 producer_index; @@ -89,8 +91,10 @@ enum bfa_msgq_rspq_flags { typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr); +enum rspq_event; + struct bfa_msgq_rspq { - bfa_fsm_t fsm; + void (*fsm)(struct bfa_msgq_rspq *, enum rspq_event); enum bfa_msgq_rspq_flags flags; u16 producer_index; diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 4e5c3874a..0791dab5b 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1265,7 +1265,7 @@ bna_enet_mtu_get(struct bna_enet *enet) void bna_enet_enable(struct bna_enet *enet) { - if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped) + if (enet->fsm != bna_enet_sm_stopped) return; enet->flags |= BNA_ENET_F_ENABLED; @@ -1676,10 +1676,10 @@ bna_cb_ioceth_reset(void *arg) } static struct bfa_ioc_cbfn bna_ioceth_cbfn = { - bna_cb_ioceth_enable, - bna_cb_ioceth_disable, - bna_cb_ioceth_hbfail, - bna_cb_ioceth_reset + .enable_cbfn = bna_cb_ioceth_enable, + .disable_cbfn = bna_cb_ioceth_disable, + .hbfail_cbfn = bna_cb_ioceth_hbfail, + .reset_cbfn = bna_cb_ioceth_reset }; static void bna_attr_init(struct bna_ioceth *ioceth) @@ -1759,12 +1759,12 @@ bna_ioceth_uninit(struct bna_ioceth *ioceth) void bna_ioceth_enable(struct bna_ioceth *ioceth) { - if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) { + if (ioceth->fsm == bna_ioceth_sm_ready) { bnad_cb_ioceth_ready(ioceth->bna->bnad); return; } - if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped) + if (ioceth->fsm == bna_ioceth_sm_stopped) bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE); } diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 95bc470ae..c12be9f97 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -1964,7 +1964,7 @@ static void bna_rx_stop(struct bna_rx *rx) { rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; - if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped) + if (rx->fsm == bna_rx_sm_stopped) bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); else { rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; @@ -2543,7 +2543,7 @@ bna_rx_destroy(struct bna_rx *rx) void bna_rx_enable(struct bna_rx *rx) { - if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped) + if (rx->fsm != bna_rx_sm_stopped) return; rx->rx_flags |= BNA_RX_F_ENABLED; @@ -3531,7 +3531,7 @@ bna_tx_destroy(struct bna_tx *tx) void bna_tx_enable(struct bna_tx *tx) { - if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) + if (tx->fsm != bna_tx_sm_stopped) return; tx->flags |= BNA_TX_F_ENABLED; diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index c438d032e..4653f43e8 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -320,8 +320,10 @@ struct bna_attr { /* IOCEth */ +enum bna_ioceth_event; + struct bna_ioceth { - bfa_fsm_t fsm; + void (*fsm)(struct bna_ioceth *, enum bna_ioceth_event); struct bfa_ioc ioc; struct bna_attr attr; @@ -342,8 +344,10 @@ struct bna_pause_config { enum bna_status rx_pause; }; +enum bna_enet_event; + struct bna_enet { - bfa_fsm_t fsm; + void (*fsm)(struct bna_enet *, enum bna_enet_event); enum bna_enet_flags flags; enum bna_enet_type type; @@ -368,8 +372,10 @@ struct bna_enet { /* Ethport */ +enum bna_ethport_event; + struct bna_ethport { - bfa_fsm_t fsm; + void (*fsm)(struct bna_ethport *, 
enum bna_ethport_event); enum bna_ethport_flags flags; enum bna_link_status link_status; @@ -462,13 +468,15 @@ struct bna_txq { }; /* Tx object */ +enum bna_tx_event; + struct bna_tx { /* This should be the first one */ struct list_head qe; int rid; int hw_id; - bfa_fsm_t fsm; + void (*fsm)(struct bna_tx *, enum bna_tx_event); enum bna_tx_flags flags; enum bna_tx_type type; @@ -706,8 +714,10 @@ struct bna_rxp { }; /* RxF structure (hardware Rx Function) */ +enum bna_rxf_event; + struct bna_rxf { - bfa_fsm_t fsm; + void (*fsm)(struct bna_rxf *, enum bna_rxf_event); struct bfa_msgq_cmd_entry msgq_cmd; union { @@ -777,13 +787,15 @@ struct bna_rxf { }; /* Rx object */ +enum bna_rx_event; + struct bna_rx { /* This should be the first one */ struct list_head qe; int rid; int hw_id; - bfa_fsm_t fsm; + void (*fsm)(struct bna_rx *, enum bna_rx_event); enum bna_rx_type type; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 69f505bbc..c19bc0a64 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1100,8 +1100,9 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. */ static void -bnad_tx_cleanup(struct delayed_work *work) +bnad_tx_cleanup(struct work_struct *_work) { + struct delayed_work *work = (struct delayed_work *)_work; struct bnad_tx_info *tx_info = container_of(work, struct bnad_tx_info, tx_cleanup_work); struct bnad *bnad = NULL; @@ -1179,7 +1180,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. */ static void -bnad_rx_cleanup(void *work) +bnad_rx_cleanup(struct work_struct *work) { struct bnad_rx_info *rx_info = container_of(work, struct bnad_rx_info, rx_cleanup_work); @@ -2003,8 +2004,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) } tx_info->tx = tx; - INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, - (work_func_t)bnad_tx_cleanup); + INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup); /* Register ISR for the Tx object */ if (intr_info->intr_type == BNA_INTR_T_MSIX) { @@ -2260,8 +2260,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) rx_info->rx = rx; spin_unlock_irqrestore(&bnad->bna_lock, flags); - INIT_WORK(&rx_info->rx_cleanup_work, - (work_func_t)(bnad_rx_cleanup)); + INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup); /* * Init NAPI, so that state is set to NAPI_STATE_SCHED, diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ec09fcece..b6ab1fddf 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1359,7 +1359,7 @@ static inline int macb_clear_csum(struct sk_buff *skb) return 0; } -static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) { u16 queue_index = skb_get_queue_mapping(skb); struct macb *bp = netdev_priv(dev); @@ -2661,7 +2661,7 @@ static int at91ether_close(struct net_device *dev) } /* Transmit packet */ -static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct macb *lp = netdev_priv(dev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index ea75d600f..c21f5098b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ 
b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -394,7 +394,7 @@ static void stop_pci_io(struct octeon_device *oct) * this device has been detected. */ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct octeon_device *oct = pci_get_drvdata(pdev); @@ -3079,7 +3079,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, * @returns whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY) */ -static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) { struct lio *lio; struct octnet_buf_free_info *finfo; @@ -3565,7 +3565,7 @@ static void liquidio_del_vxlan_port(struct net_device *netdev, OCTNET_CMD_VXLAN_PORT_DEL); } -static struct net_device_ops lionetdevops = { +static net_device_ops_no_const lionetdevops __read_only = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, .ndo_start_xmit = liquidio_xmit, @@ -3814,8 +3814,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); - if (num_iqueues > 1) + if (num_iqueues > 1) { + pax_open_kernel(); lionetdevops.ndo_select_queue = select_q; + pax_close_kernel(); + } /* Associate the routines that will handle different * netdev tasks. diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 79570e781..64de71405 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2990,7 +2990,7 @@ void t3_fatal_err(struct adapter *adapter) * this device has been detected. */ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct adapter *adapter = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index 8cffcdfd5..aadf0431e 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev, */ struct l2t_skb_cb { arp_failure_handler_func arp_failure_handler; -}; +} __no_const; #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5cd4f3b11..dd9d206dc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3859,7 +3859,7 @@ static int adap_init0(struct adapter *adap) /* EEH callbacks */ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { int i; struct adapter *adap = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 109bc6304..646ff4d8b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -558,7 +558,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *, unsigned int); void t4vf_free_sge_resources(struct adapter *); -int t4vf_eth_xmit(struct sk_buff *, struct net_device *); +netdev_tx_t t4vf_eth_xmit(struct sk_buff *, struct net_device *); int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *, const struct pkt_gl *); diff --git 
a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f3ed9ce99..9b80a26ee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1159,7 +1159,7 @@ static inline void txq_advance(struct sge_txq *tq, unsigned int n) * * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. */ -int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) { u32 wr_mid; u64 cntrl, *end; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index f45385f5c..24f6c11ee 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1021,7 +1021,7 @@ static void dm9000_send_packet(struct net_device *dev, * Hardware start transmission. * Send a packet to media from the upper layer. */ -static int +static netdev_tx_t dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 6620fc861..24405bc05 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -912,7 +912,7 @@ static int de4x5_init(struct net_device *dev); static int de4x5_sw_reset(struct net_device *dev); static int de4x5_rx(struct net_device *dev); static int de4x5_tx(struct net_device *dev); -static void de4x5_ast(struct net_device *dev); +static void de4x5_ast(unsigned long _dev); static int de4x5_txur(struct net_device *dev); static int de4x5_rx_ovfc(struct net_device *dev); @@ -1149,7 +1149,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) lp->gendev = gendev; spin_lock_init(&lp->lock); init_timer(&lp->timer); - lp->timer.function = (void (*)(unsigned long))de4x5_ast; + lp->timer.function = de4x5_ast; lp->timer.data = (unsigned long)dev; de4x5_parse_params(dev); @@ -1743,8 +1743,9 @@ de4x5_tx(struct net_device *dev) } static void -de4x5_ast(struct net_device *dev) +de4x5_ast(unsigned long _dev) { + struct net_device *dev = (struct net_device *)_dev; struct de4x5_private *lp = netdev_priv(dev); int next_tick = DE4X5_AUTOSENSE_MS; int dt; @@ -2371,7 +2372,7 @@ autoconf_media(struct net_device *dev) lp->media = INIT; lp->tcount = 0; - de4x5_ast(dev); + de4x5_ast((unsigned long)dev); return lp->media; } @@ -5376,7 +5377,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) for (i=0; i<ETH_ALEN; i++) { tmp.addr[i] = dev->dev_addr[i]; } - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; break; case DE4X5_SET_HWADDR: /* Set the hardware address */ @@ -5416,7 +5417,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) spin_lock_irqsave(&lp->lock, flags); memcpy(&statbuf, &lp->pktStats, ioc->len); spin_unlock_irqrestore(&lp->lock, flags); - if (copy_to_user(ioc->data, &statbuf, ioc->len)) + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len)) return -EFAULT; break; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 93aa29391..5bc45aece 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -597,7 +597,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) if (wrapped) newacc += 65536; - ACCESS_ONCE(*acc) = newacc; + ACCESS_ONCE_RW(*acc) = newacc; } static void
populate_erx_stats(struct be_adapter *adapter, @@ -6017,7 +6017,7 @@ static void be_shutdown(struct pci_dev *pdev) } static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct be_adapter *adapter = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 262587240..9f3ad7225 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -1200,7 +1201,7 @@ static int ftgmac100_stop(struct net_device *netdev) return 0; } -static int ftgmac100_hard_start_xmit(struct sk_buff *skb, +static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index dce5f7b7f..222e70996 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -31,6 +31,8 @@ #include #include #include +#include +#include #include "ftmac100.h" @@ -1009,7 +1011,7 @@ static int ftmac100_stop(struct net_device *netdev) return 0; } -static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); dma_addr_t map; diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 446ae9d60..79d1d754a 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -305,7 +305,7 @@ static int mpc52xx_fec_close(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. 
*/ -static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); struct bcom_fec_bd *bd; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 4b8626058..2a5ecb380 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -481,7 +481,7 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, } #endif -static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); cbd_t __iomem *bdp; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 9061c2f82..d7df4ea3e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -112,7 +112,7 @@ const char gfar_driver_version[] = "2.0"; static int gfar_enet_open(struct net_device *dev); -static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); @@ -2324,7 +2324,7 @@ static inline bool gfar_csum_errata_76(struct gfar_private *priv, /* This is called by the kernel when a frame is ready for transmission. * It is pointed to by the dev->hard_start_xmit function pointer */ -static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_tx_q *tx_queue = NULL; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index f76d33279..8b96df876 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3085,7 +3085,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* This is called by the kernel when a frame is ready for transmission. 
*/ /* It is pointed to by the dev->hard_start_xmit function pointer */ -static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ucc_geth_private *ugeth = netdev_priv(dev); #ifdef CONFIG_UGETH_TX_ON_DEMAND diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 39778892b..e496763e0 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -422,7 +422,7 @@ static void hip04_start_tx_timer(struct hip04_priv *priv) ns, HRTIMER_MODE_REL); } -static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct hip04_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index e69a6bed3..231423a7b 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -600,7 +600,7 @@ static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); struct hix5hd2_desc *desc; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 2d0cb609a..9dd45c9c3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -845,16 +845,18 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; static atomic_t id = ATOMIC_INIT(-1); + pax_open_kernel(); switch (dsaf_dev->dsaf_ver) { case AE_VERSION_1: - hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq; + const_cast(hns_dsaf_ops.toggle_ring_irq) = hns_ae_toggle_ring_irq; break; case AE_VERSION_2: - hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq; + const_cast(hns_dsaf_ops.toggle_ring_irq) = hns_aev2_toggle_ring_irq; break; default: break; } + pax_close_kernel(); snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME, (int)atomic_inc_return(&id)); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index c494fc52b..ad40d2f88 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -347,7 +347,7 @@ struct dsaf_device { struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM]; struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM]; struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM]; - struct dsaf_misc_op *misc_op; + const struct dsaf_misc_op *misc_op; struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM]; struct dsaf_int_stat int_stat; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 67accce1d..c7c747cbf 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -582,56 +582,54 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en) return 0; } -struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) +const struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) { - struct dsaf_misc_op *misc_op; 
- - misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL); - if (!misc_op) - return NULL; - - if (dev_of_node(dsaf_dev->dev)) { - misc_op->cpld_set_led = hns_cpld_set_led; - misc_op->cpld_reset_led = cpld_led_reset; - misc_op->cpld_set_led_id = cpld_set_led_id; - - misc_op->dsaf_reset = hns_dsaf_rst; - misc_op->xge_srst = hns_dsaf_xge_srst_by_port; - misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port; - misc_op->ge_srst = hns_dsaf_ge_srst_by_port; - misc_op->ppe_srst = hns_ppe_srst_by_port; - misc_op->ppe_comm_srst = hns_ppe_com_srst; - misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns; - misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst; - - misc_op->get_phy_if = hns_mac_get_phy_if; - misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; - - misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback; - } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { - misc_op->cpld_set_led = hns_cpld_set_led; - misc_op->cpld_reset_led = cpld_led_reset; - misc_op->cpld_set_led_id = cpld_set_led_id; - - misc_op->dsaf_reset = hns_dsaf_rst_acpi; - misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi; - misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi; - misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi; - misc_op->ppe_srst = hns_ppe_srst_by_port_acpi; - misc_op->ppe_comm_srst = hns_ppe_com_srst; - misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi; - misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi; - - misc_op->get_phy_if = hns_mac_get_phy_if_acpi; - misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; - - misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi; - } else { - devm_kfree(dsaf_dev->dev, (void *)misc_op); - misc_op = NULL; - } + static const struct dsaf_misc_op dsaf_misc_ops = { + .cpld_set_led = hns_cpld_set_led, + .cpld_reset_led = cpld_led_reset, + .cpld_set_led_id = cpld_set_led_id, + + .dsaf_reset = hns_dsaf_rst, + .xge_srst = hns_dsaf_xge_srst_by_port, + .xge_core_srst = hns_dsaf_xge_core_srst_by_port, + .ge_srst = hns_dsaf_ge_srst_by_port, + .ppe_srst = hns_ppe_srst_by_port, + .ppe_comm_srst = hns_ppe_com_srst, + .hns_dsaf_srst_chns = hns_dsaf_srst_chns, + .hns_dsaf_roce_srst = hns_dsaf_roce_srst, + + .get_phy_if = hns_mac_get_phy_if, + .get_sfp_prsnt = hns_mac_get_sfp_prsnt, + + .cfg_serdes_loopback = hns_mac_config_sds_loopback, + }; + + static const struct dsaf_misc_op dsaf_misc_ops_acpi = { + .cpld_set_led = hns_cpld_set_led, + .cpld_reset_led = cpld_led_reset, + .cpld_set_led_id = cpld_set_led_id, + + .dsaf_reset = hns_dsaf_rst_acpi, + .xge_srst = hns_dsaf_xge_srst_by_port_acpi, + .xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi, + .ge_srst = hns_dsaf_ge_srst_by_port_acpi, + .ppe_srst = hns_ppe_srst_by_port_acpi, + .ppe_comm_srst = hns_ppe_com_srst, + .hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi, + .hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi, + + .get_phy_if = hns_mac_get_phy_if_acpi, + .get_sfp_prsnt = hns_mac_get_sfp_prsnt, + + .cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi, + }; + + if (dev_of_node(dsaf_dev->dev)) + return &dsaf_misc_ops; + else if (is_acpi_node(dsaf_dev->dev->fwnode)) + return &dsaf_misc_ops_acpi; - return (void *)misc_op; + return NULL; } static int hns_dsaf_dev_match(struct device *dev, void *fwnode) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h index 310e80261..5e0cca857 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h @@ -33,7 +33,7 @@ #define DSAF_LED_DATA_B 4 #define 
DSAF_LED_ANCHOR_B 5 -struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev); +const struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev); struct platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode); #endif diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index 3dbc53c21..fa08fb88e 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -347,7 +347,7 @@ static const char init_setup[] = 0x7f /* *multi IA */ }; static int i596_open(struct net_device *dev); -static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t i596_interrupt(int irq, void *dev_id); static int i596_close(struct net_device *dev); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); @@ -965,7 +965,7 @@ static void i596_tx_timeout (struct net_device *dev) } -static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct i596_private *lp = netdev_priv(dev); struct tx_cmd *tx_cmd; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index bd719e25d..3c8d84a94 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2047,7 +2047,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, dev_consume_skb_any(skb); } -static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); struct ehea_swqe *swqe; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 8f139197f..d346cedaf 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1415,7 +1415,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len) } /* Tx lock BH */ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); unsigned int len = skb->len; @@ -1473,7 +1473,7 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot, } /* Tx lock BH disabled (SG version for TAH equipped EMACs) */ -static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); int nr_frags = skb_shinfo(skb)->nr_frags; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 8c8363dac..25e112563 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -3105,7 +3105,7 @@ static void e100_shutdown(struct pci_dev *pdev) * @pdev: Pointer to PCI device * @state: The current pci connection state */ -static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index f42129d09..d2e3932b2 100644 --- 
a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -5272,7 +5272,7 @@ static void e1000_netpoll(struct net_device *netdev) * this device has been detected. */ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7017281ba..6bbf47ec4 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6784,7 +6784,7 @@ static void e1000_netpoll(struct net_device *netdev) * this device has been detected. */ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index b1a2f8437..d64732814 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2261,7 +2261,7 @@ static int fm10k_suspend(struct pci_dev *pdev, * this device has been detected. */ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index f1feceab7..3f54fcb69 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -417,7 +417,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); /* Update the base adjustement value. */ - ACCESS_ONCE(pf->ptp_base_adj) = incval; + ACCESS_ONCE_RW(pf->ptp_base_adj) = incval; smp_mb(); /* Force the above update. */ } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 9affd7c19..2248c78dc 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7837,7 +7837,7 @@ static void igb_netpoll(struct net_device *netdev) * this device has been detected. **/ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 7dff7f623..fc020eb1d 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2515,7 +2515,7 @@ static void igbvf_netpoll(struct net_device *netdev) * this device has been detected. 
*/ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fee1f2918..57fce92cb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9912,7 +9912,7 @@ static void ixgbe_remove(struct pci_dev *pdev) * this device has been detected. */ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index a92277683..bd7a80042 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1122,7 +1122,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) } /* update the base incval used to calculate frequency adjustment */ - ACCESS_ONCE(adapter->base_incval) = incval; + ACCESS_ONCE_RW(adapter->base_incval) = incval; smp_mb(); /* need lock to prevent incorrect read while modifying cyclecounter */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index cbf70fe40..c75e433e8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3629,7 +3629,7 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) return __ixgbevf_maybe_stop_tx(tx_ring, size); } -static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_tx_buffer *first; @@ -4221,7 +4221,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) * this device has been detected. 
**/ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 707bc4680..14bf9a56f 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2292,7 +2292,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, } /* Main tx processing */ -static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 930c8165f..1686d310e 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5237,7 +5237,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, } /* Main tx processing */ -static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_queue *txq, *aggr_txq; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 5d5000c8e..7437949ff 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1271,7 +1271,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) return work_done; } -static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index e2509bba3..8357fefcc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -495,8 +495,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, wmb(); /* we want to dirty this cache line once */ - ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; - ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb; + ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped; if (ring->free_tx_desc == mlx4_en_recycle_tx_desc) return done < budget; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 75d07fa9d..d766d8ef1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4069,7 +4069,7 @@ static const struct pci_device_id mlx4_pci_table[] = { MODULE_DEVICE_TABLE(pci, mlx4_pci_table); static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 0c9ef8729..c10ec5067 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1312,7 +1312,7 @@ static void remove_one(struct pci_dev *pdev) } static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, - 
pci_channel_state_t state) + enum pci_channel_state state) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_priv *priv = &dev->priv; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f902c4d3d..de8ce4ec3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4640,16 +4640,16 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, return notifier_from_errno(err); } -static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { +static struct notifier_block mlxsw_sp_netdevice_nb = { .notifier_call = mlxsw_sp_netdevice_event, }; -static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { +static struct notifier_block mlxsw_sp_inetaddr_nb = { .notifier_call = mlxsw_sp_inetaddr_event, .priority = 10, /* Must be called before FIB notifier block */ }; -static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { +static struct notifier_block mlxsw_sp_router_netevent_nb = { .notifier_call = mlxsw_sp_router_netevent_event, }; diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index 20cb85bc0..6135d90f3 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1156,7 +1156,7 @@ ks8695_timeout(struct net_device *ndev) * sk_buff and adds it to the TX ring. It then kicks the TX DMA * engine to ensure transmission begins. */ -static int +static netdev_tx_t ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ks8695_priv *ksp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 2fc5cd56c..6c6108a40 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1020,7 +1020,7 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) * spin_lock_irqsave is required because tx and rx should be mutual exclusive. * So while tx is in-progress, prevent IRQ interrupt from happenning. */ -static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) { int retv = NETDEV_TX_OK; struct ks_net *ks = netdev_priv(netdev); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 4367dd687..c32f151b8 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -319,7 +319,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); void *desc; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index eaa37c079..8295b08c2 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8556,7 +8556,7 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, * this device has been detected. 
*/ static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct s2io_nic *sp = netdev_priv(netdev); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 6223930a8..975033d7d 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, struct __vxge_hw_fifo *fifo; struct vxge_hw_fifo_config *config; u32 txdl_size, txdl_per_memblock; - struct vxge_hw_mempool_cbs fifo_mp_callback; + static struct vxge_hw_mempool_cbs fifo_mp_callback = { + .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc, + }; + struct __vxge_hw_virtualpath *vpath; if ((vp == NULL) || (attr == NULL)) { @@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, goto exit; } - fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc; - fifo->mempool = __vxge_hw_mempool_create(vpath->hldev, fifo->config->memblock_size, diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 850d2b539..5161bca24 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -4043,7 +4043,7 @@ static int vxge_pm_resume(struct pci_dev *pdev) * this device has been detected. */ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); struct net_device *netdev = hldev->ndev; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index aee3fd2b6..af5b0db94 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -752,7 +752,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, * * Return: NETDEV_TX_OK on success. 
*/ -static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); const struct skb_frag_struct *frag; diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index 0c49be93f..7b926e2ef 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -107,7 +107,7 @@ static void netx_eth_set_multicast_list(struct net_device *ndev) /* implement me */ } -static int +static netdev_tx_t netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 712d8bcb7..52b4cde30 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -633,7 +633,7 @@ static int w90p910_send_frame(struct net_device *dev, return 0; } -static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 9b0d7f463..c29155fc0 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -357,8 +357,8 @@ struct ring_desc { }; struct ring_desc_ex { - __le32 bufhigh; - __le32 buflow; + __le32 bufhigh __intentional_overflow(0); + __le32 buflow __intentional_overflow(0); __le32 txvlan; __le32 flaglen; }; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8e13ec84c..b654ea067 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1053,7 +1053,7 @@ static int lpc_eth_close(struct net_device *ndev) return 0; } -static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); u32 len, txidx; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 3cd87a41a..3eb33e7d2 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2130,7 +2130,7 @@ static int pch_gbe_stop(struct net_device *netdev) * - NETDEV_TX_OK: Normal end * - NETDEV_TX_BUSY: Error end */ -static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; @@ -2439,7 +2439,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = { }; static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct net_device *netdev = pci_get_drvdata(pdev); struct pch_gbe_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1b8136496..80d665b2d 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1757,7 +1757,7 @@ static int netxen_nic_attach_func(struct pci_dev *pdev) } 
static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct netxen_adapter *adapter = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 74105c6d0..cf93de650 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2320,7 +2320,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; } else if (ret == QLC_83XX_DEFAULT_OPMODE) { ahw->nic_mode = QLCNIC_DEFAULT_MODE; - adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; + pax_open_kernel(); + const_cast(adapter->nic_ops->init_driver) = qlcnic_83xx_init_default_driver; + pax_close_kernel(); ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; max_sds_rings = QLCNIC_MAX_SDS_RINGS; max_tx_rings = QLCNIC_MAX_TX_RINGS; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index 34906750b..cf148ea45 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) case QLCNIC_NON_PRIV_FUNC: ahw->op_mode = QLCNIC_NON_PRIV_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; - nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; + pax_open_kernel(); + const_cast(nic_ops->init_driver) = qlcnic_83xx_init_non_privileged_vnic; + pax_close_kernel(); break; case QLCNIC_PRIV_FUNC: ahw->op_mode = QLCNIC_PRIV_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry; - nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; + pax_open_kernel(); + const_cast(nic_ops->init_driver) = qlcnic_83xx_init_privileged_vnic; + pax_close_kernel(); break; case QLCNIC_MGMT_FUNC: ahw->op_mode = QLCNIC_MGMT_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; - nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; + pax_open_kernel(); + const_cast(nic_ops->init_driver) = qlcnic_83xx_init_mgmt_vnic; + pax_close_kernel(); break; default: dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 49447b595..1bb555c03 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3975,7 +3975,7 @@ static void qlcnic_82xx_io_resume(struct pci_dev *pdev) } static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 0844b7c75..afa10a163 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1285,7 +1285,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) int qlcnic_dump_fw(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - static const struct qlcnic_dump_operations *fw_dump_ops; + const struct qlcnic_dump_operations *fw_dump_ops; struct qlcnic_83xx_dump_template_hdr *hdr_83xx; u32 entry_offset, 
dump, no_entries, buf_offset = 0; int i, k, ops_cnt, ops_index, dump_size = 0; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 0b4deb31e..61fbc9e02 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1493,7 +1493,7 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, } /* Transmit the packet using specified transmit queue */ -int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, +netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, struct sk_buff *skb) { struct emac_tpd tpd; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h index f3aa24dc4..cb6b701c1 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h @@ -235,7 +235,7 @@ void emac_mac_stop(struct emac_adapter *adpt); void emac_mac_mode_config(struct emac_adapter *adpt); void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, int *num_pkts, int max_pkts); -int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, +netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, struct sk_buff *skb); void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q); void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev, diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 57b35aeac..fe76b1f08 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -139,7 +139,7 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget) } /* Transmit the packet */ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 363ca653c..875062711 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -798,22 +798,22 @@ struct rtl8169_private { struct mdio_ops { void (*write)(struct rtl8169_private *, int, int); int (*read)(struct rtl8169_private *, int); - } mdio_ops; + } __no_const mdio_ops; struct pll_power_ops { void (*down)(struct rtl8169_private *); void (*up)(struct rtl8169_private *); - } pll_power_ops; + } __no_const pll_power_ops; struct jumbo_ops { void (*enable)(struct rtl8169_private *); void (*disable)(struct rtl8169_private *); - } jumbo_ops; + } __no_const jumbo_ops; struct csi_ops { void (*write)(struct rtl8169_private *, int, int); u32 (*read)(struct rtl8169_private *, int); - } csi_ops; + } __no_const csi_ops; int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); int (*get_settings)(struct net_device *, struct ethtool_cmd *); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 1a92de705..cfda428c8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2297,7 +2297,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev) } /* Packet transmit function */ -static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct sh_eth_private *mdp = 
netdev_priv(ndev); struct sh_eth_txdesc *txdesc; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 24b746406..1a1cbdff3 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2890,7 +2890,7 @@ static int rocker_netdevice_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block rocker_netdevice_nb __read_mostly = { +static struct notifier_block rocker_netdevice_nb = { .notifier_call = rocker_netdevice_event, }; @@ -2924,7 +2924,7 @@ static int rocker_netevent_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block rocker_netevent_nb __read_mostly = { +static struct notifier_block rocker_netevent_nb = { .notifier_call = rocker_netevent_event, }; diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index c2bd5378f..540a981bd 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -578,7 +578,7 @@ static inline int sgiseeq_reset(struct net_device *dev) return 0; } -static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct hpc3_ethregs *hregs = sp->hregs; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 77a5364f7..4a8d1f824 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -832,7 +832,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) ptp->start.dma_addr); /* Clear flag that signals MC ready */ - ACCESS_ONCE(*start) = 0; + ACCESS_ONCE_RW(*start) = 0; rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, MC_CMD_PTP_IN_SYNCHRONIZE_LEN); EFX_BUG_ON_PARANOID(rc); diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index cd38b44ae..bfcf4b9b0 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -82,8 +82,8 @@ struct efx_loopback_state { int packet_count; struct sk_buff **skbs; bool offload_csum; - atomic_t rx_good; - atomic_t rx_bad; + atomic_unchecked_t rx_good; + atomic_unchecked_t rx_bad; struct efx_loopback_payload payload; }; @@ -357,12 +357,12 @@ void efx_loopback_rx_packet(struct efx_nic *efx, netif_vdbg(efx, drv, efx->net_dev, "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); - atomic_inc(&state->rx_good); + atomic_inc_unchecked(&state->rx_good); return; err: #ifdef DEBUG - if (atomic_read(&state->rx_bad) == 0) { + if (atomic_read_unchecked(&state->rx_bad) == 0) { netif_err(efx, drv, efx->net_dev, "received packet:\n"); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, buf_ptr, pkt_len, 0); @@ -371,7 +371,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx, &state->payload, sizeof(state->payload), 0); } #endif - atomic_inc(&state->rx_bad); + atomic_inc_unchecked(&state->rx_bad); } /* Initialise an efx_selftest_state for a new iteration */ @@ -405,8 +405,8 @@ static void efx_iterate_state(struct efx_nic *efx) memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); /* Fill out remaining state members */ - atomic_set(&state->rx_good, 0); - atomic_set(&state->rx_bad, 0); + atomic_set_unchecked(&state->rx_good, 0); + atomic_set_unchecked(&state->rx_bad, 0); smp_wmb(); } @@ -464,7 +464,7 @@ static int efx_poll_loopback(struct efx_nic *efx) { struct efx_loopback_state *state = efx->loopback_selftest; - return 
atomic_read(&state->rx_good) == state->packet_count; + return atomic_read_unchecked(&state->rx_good) == state->packet_count; } static int efx_end_loopback(struct efx_tx_queue *tx_queue, @@ -490,8 +490,8 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue, netif_tx_unlock_bh(efx->net_dev); /* Check TX completion and received packet counts */ - rx_good = atomic_read(&state->rx_good); - rx_bad = atomic_read(&state->rx_bad); + rx_good = atomic_read_unchecked(&state->rx_good); + rx_bad = atomic_read_unchecked(&state->rx_bad); if (tx_done != state->packet_count) { /* Don't free the skbs; they will be picked up on TX * overflow or channel teardown. diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 7a254da85..0693a2b4c 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -103,7 +103,7 @@ static inline struct net_device *priv_netdev(struct ioc3_private *dev) static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void ioc3_set_multicast_list(struct net_device *dev); -static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); static void ioc3_timeout(struct net_device *dev); static inline unsigned int ioc3_hash(const unsigned char *addr); static inline void ioc3_stop(struct ioc3_private *ip); @@ -1397,7 +1397,7 @@ static struct pci_driver ioc3_driver = { .remove = ioc3_remove_one, }; -static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long data; struct ioc3_private *ip = netdev_priv(dev); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index cb49c9654..c1498cc78 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -514,7 +514,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) * now, or set the card to generates an interrupt when ready * for the packet. */ -static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int free; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 73212590d..ac4007538 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -637,7 +637,7 @@ done: if (!THROTTLE_TX_PKTS) * now, or set the card to generates an interrupt when ready * for the packet. 
*/ -static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 8b0016a78..443f4c1af 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1776,7 +1776,7 @@ static int smsc911x_stop(struct net_device *dev) } /* Entry point for transmitting a packet */ -static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); unsigned int freespace; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index ce9aa7928..ad1831f41 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) writel(value, mmcaddr + MMC_CNTRL); - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", - MMC_CNTRL, value); +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", +// MMC_CNTRL, value); } /* To mask all all interrupts.*/ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b2893fbe2..4d6caf351 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1177,8 +1177,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) if (!priv->rx_skbuff) goto err_rx_skbuff; - priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, - sizeof(*priv->tx_skbuff_dma), + priv->tx_skbuff_dma = kmalloc_array(sizeof(*priv->tx_skbuff_dma), + DMA_TX_SIZE, GFP_KERNEL); if (!priv->tx_skbuff_dma) goto err_tx_skbuff_dma; diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 02f452730..7f02f30b6 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -951,7 +951,7 @@ static void bigmac_tx_timeout(struct net_device *dev) } /* Put a packet on the wire. */ -static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); int len, entry; diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 958294814..7f70dc81f 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -569,7 +569,7 @@ static void qe_tx_timeout(struct net_device *dev) } /* Get a packet queued to go onto the wire. 
*/ -static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index a2f9b47de..05a914785 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -131,7 +131,7 @@ static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, } /* Wrappers to common functions */ -static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find); } diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 904a5a12a..6ef5cff9e 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1126,7 +1126,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) return skb; } -static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, +static netdev_tx_t vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, struct vnet_port *(*vnet_tx_port) (struct sk_buff *, struct net_device *)) { @@ -1134,7 +1134,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct sk_buff *segs; int maclen, datalen; - int status; + netdev_tx_t status; int gso_size, gso_type, gso_segs; int hlen = skb_transport_header(skb) - skb_mac_header(skb); int proto = IPPROTO_IP; @@ -1190,7 +1190,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, skb_push(skb, maclen); skb_reset_mac_header(skb); - status = 0; + status = NETDEV_TX_OK; while (segs) { struct sk_buff *curr = segs; diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 97d64bfed..630f69b61 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -2176,7 +2176,7 @@ static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx) lp->gso_size = tx->prev_gso_size; } -static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); struct dwceqos_tx trans; diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 28097be2f..8efd8c217 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -544,7 +544,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget) } -static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { int queue; unsigned int len; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 481c7bf03..2a6ed4c56 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -943,7 +943,7 @@ static void emac_tx_handler(void *token, int len, int status) * * Returns success(NETDEV_TX_OK) or error code (typically out of desc's) */ -static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t emac_dev_xmit(struct sk_buff *skb, struct net_device 
*ndev) { struct device *emac_dev = &ndev->dev; int ret_code; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 32516661f..6eb86ae91 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1237,7 +1237,7 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, } /* Submit the packet */ -static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); int subqueue = skb_get_queue_mapping(skb); diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 9d14731cd..7d6ad9182 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2600,7 +2600,7 @@ static struct platform_driver rhine_driver_platform = { } }; -static struct dmi_system_id rhine_dmi_table[] __initdata = { +static const struct dmi_system_id rhine_dmi_table[] __initconst = { { .ident = "EPIA-M", .matches = { diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index d2349a1bc..04c86482a 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work) w5100_tx_skb(priv->ndev, skb); } -static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) { struct w5100_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index ca31a57db..c61514b2d 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev) netif_wake_queue(ndev); } -static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) { struct w5300_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index a9bd665fd..2fc292425 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -673,7 +673,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) return 0; } -static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct cdmac_bd *cur_p; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c688d68c3..b7b42ac4a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -651,7 +651,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, * start the transmission. Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values. 
*/ -static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { u32 ii; u32 num_frag; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 93dc10b10..65986711e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -995,7 +995,7 @@ static int xemaclite_close(struct net_device *dev) * * Return: 0, always. */ -static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) +static netdev_tx_t xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *new_skb; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 8b4822ad2..e99c1c413 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1467,7 +1467,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) return -EMSGSIZE; } -static struct rtnl_link_ops geneve_link_ops __read_mostly = { +static struct rtnl_link_ops geneve_link_ops = { .kind = "geneve", .maxtype = IFLA_GENEVE_MAX, .policy = geneve_policy, @@ -1533,7 +1533,7 @@ static int geneve_netdevice_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block geneve_notifier_block __read_mostly = { +static struct notifier_block geneve_notifier_block = { .notifier_call = geneve_netdevice_event, }; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 97e0cbca0..3aec9e578 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -58,7 +58,7 @@ struct pdp_ctx { struct in_addr ms_addr_ip4; struct in_addr sgsn_addr_ip4; - atomic_t tx_seq; + atomic_unchecked_t tx_seq; struct rcu_head rcu_head; }; @@ -407,7 +407,7 @@ static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) gtp0->flags = 0x1e; /* v0, GTP-non-prime. 
*/ gtp0->type = GTP_TPDU; gtp0->length = htons(payload_len); - gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff); + gtp0->seq = htons((atomic_inc_return_unchecked(&pctx->tx_seq) - 1) % 0xffff); gtp0->flow = htons(pctx->u.v0.flow); gtp0->number = 0xff; gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff; @@ -751,7 +751,7 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev) return -EMSGSIZE; } -static struct rtnl_link_ops gtp_link_ops __read_mostly = { +static struct rtnl_link_ops gtp_link_ops = { .kind = "gtp", .maxtype = IFLA_GTP_MAX, .policy = gtp_policy, @@ -959,7 +959,7 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info) return -ENOMEM; ipv4_pdp_fill(pctx, info); - atomic_set(&pctx->tx_seq, 0); + atomic_set_unchecked(&pctx->tx_seq, 0); switch (pctx->gtp_version) { case GTP_V0: diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 78dbc4454..b7831d0fd 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -768,7 +768,7 @@ static void epp_bh(struct work_struct *work) * ===================== network driver interface ========================= */ -static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index f4fbcb5aa..8830bbbbf 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -160,7 +160,7 @@ struct rndis_device { enum rndis_device_state state; bool link_state; - atomic_t new_req_id; + atomic_unchecked_t new_req_id; spinlock_t request_lock; struct list_head req_list; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ff038e507..740da409f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -352,7 +352,7 @@ static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off) return ret_val; } -static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) +static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_netvsc_packet *packet = NULL; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9195d5da8..1d570d156 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -101,7 +101,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev, * template */ set = &rndis_msg->msg.set_req; - set->req_id = atomic_inc_return(&dev->new_req_id); + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id); /* Add to the request list */ spin_lock_irqsave(&dev->request_lock, flags); @@ -881,7 +881,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev) /* Setup the rndis set */ halt = &request->request_msg.msg.halt_req; - halt->req_id = atomic_inc_return(&dev->new_req_id); + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id); /* Ignore return since this msg is optional. 
*/ rndis_filter_send_request(dev, request); @@ -1099,8 +1099,7 @@ int rndis_filter_device_add(struct hv_device *dev, if (net_device->num_chn == 1) goto out; - net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) * - NETVSC_PACKET_SIZE); + net_device->sub_cb_buf = vzalloc(net_device->num_sc_offered * NETVSC_PACKET_SIZE); if (!net_device->sub_cb_buf) { net_device->num_chn = 1; dev_info(&dev->device, "No memory for subchannels.\n"); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 66c0eeafc..27486de4a 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -290,7 +290,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } -static struct rtnl_link_ops ifb_link_ops __read_mostly = { +static struct rtnl_link_ops ifb_link_ops = { .kind = "ifb", .priv_size = sizeof(struct ifb_dev_private), .setup = ifb_setup, diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index b4e990743..20efd7849 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -484,7 +484,7 @@ static void ipvlan_multicast_enqueue(struct ipvl_port *port, schedule_work(&port->wq); } else { spin_unlock(&port->backlog.lock); - atomic_long_inc(&skb->dev->rx_dropped); + atomic_long_inc_unchecked(&skb->dev->rx_dropped); kfree_skb(skb); } } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index dfbc4ef6d..47e795de2 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -26,7 +26,7 @@ static struct nf_hook_ops ipvl_nfops[] __read_mostly = { }, }; -static struct l3mdev_ops ipvl_l3mdev_ops __read_mostly = { +static struct l3mdev_ops ipvl_l3mdev_ops = { .l3mdev_l3_rcv = ipvlan_l3_rcv, }; @@ -816,15 +816,15 @@ static int ipvlan_addr4_event(struct notifier_block *unused, return NOTIFY_OK; } -static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = { +static struct notifier_block ipvlan_addr4_notifier_block = { .notifier_call = ipvlan_addr4_event, }; -static struct notifier_block ipvlan_notifier_block __read_mostly = { +static struct notifier_block ipvlan_notifier_block = { .notifier_call = ipvlan_device_event, }; -static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = { +static struct notifier_block ipvlan_addr6_notifier_block = { .notifier_call = ipvlan_addr6_event, }; diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index a0849f49b..147a4a618 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -142,7 +142,7 @@ static void vlsi_ring_debug(struct vlsi_ring *r) printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n", __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw); printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__, - atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask); + atomic_read_unchecked(&r->head) & r->mask, atomic_read_unchecked(&r->tail) & r->mask); for (i = 0; i < r->size; i++) { rd = &r->rd[i]; printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i); @@ -301,8 +301,8 @@ static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n", r->size, r->mask, r->len, r->dir, r->rd[0].hw); - h = atomic_read(&r->head) & r->mask; - t = atomic_read(&r->tail) & r->mask; + h = atomic_read_unchecked(&r->head) & r->mask; + t = atomic_read_unchecked(&r->tail) & r->mask; seq_printf(seq, "head = %d / tail = %d ", h, t); if (h == t) seq_printf(seq, "(empty)\n"); @@ -410,8 +410,8 
@@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr r->rd = (struct ring_descr *)(r+1); r->mask = size - 1; r->size = size; - atomic_set(&r->head, 0); - atomic_set(&r->tail, 0); + atomic_set_unchecked(&r->head, 0); + atomic_set_unchecked(&r->tail, 0); for (i = 0; i < size; i++) { rd = r->rd + i; @@ -1268,10 +1268,10 @@ static int vlsi_init_chip(struct pci_dev *pdev) iobase+VLSI_PIO_RINGSIZE); ptr = inw(iobase+VLSI_PIO_RINGPTR); - atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr)); - atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr)); - atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr)); - atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr)); + atomic_set_unchecked(&idev->rx_ring->head, RINGPTR_GET_RX(ptr)); + atomic_set_unchecked(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr)); + atomic_set_unchecked(&idev->tx_ring->head, RINGPTR_GET_TX(ptr)); + atomic_set_unchecked(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr)); vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */ diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h index f9db2ce4c..6cd460c4d 100644 --- a/drivers/net/irda/vlsi_ir.h +++ b/drivers/net/irda/vlsi_ir.h @@ -671,7 +671,7 @@ struct vlsi_ring { unsigned len; unsigned size; unsigned mask; - atomic_t head, tail; + atomic_unchecked_t head, tail; struct ring_descr *rd; }; @@ -681,13 +681,13 @@ static inline struct ring_descr *ring_last(struct vlsi_ring *r) { int t; - t = atomic_read(&r->tail) & r->mask; - return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t]; + t = atomic_read_unchecked(&r->tail) & r->mask; + return (((t+1) & r->mask) == (atomic_read_unchecked(&r->head) & r->mask)) ? NULL : &r->rd[t]; } static inline struct ring_descr *ring_put(struct vlsi_ring *r) { - atomic_inc(&r->tail); + atomic_inc_unchecked(&r->tail); return ring_last(r); } @@ -695,13 +695,13 @@ static inline struct ring_descr *ring_first(struct vlsi_ring *r) { int h; - h = atomic_read(&r->head) & r->mask; - return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h]; + h = atomic_read_unchecked(&r->head) & r->mask; + return (h == (atomic_read_unchecked(&r->tail) & r->mask)) ? 
NULL : &r->rd[h]; } static inline struct ring_descr *ring_get(struct vlsi_ring *r) { - atomic_inc(&r->head); + atomic_inc_unchecked(&r->head); return ring_first(r); } diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 6255973e3..7ae59f57c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -216,6 +216,6 @@ static __net_init int loopback_net_init(struct net *net) } /* Registered in net/core/dev.c */ -struct pernet_operations __net_initdata loopback_net_ops = { +struct pernet_operations __net_initconst loopback_net_ops = { .init = loopback_net_init, }; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index d2e61e002..8c0d77ac7 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3389,7 +3389,7 @@ static int macsec_fill_info(struct sk_buff *skb, return -EMSGSIZE; } -static struct rtnl_link_ops macsec_link_ops __read_mostly = { +static struct rtnl_link_ops macsec_link_ops = { .kind = "macsec", .priv_size = sizeof(struct macsec_dev), .maxtype = IFLA_MACSEC_MAX, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 26d6f0bbe..af4d2ad76 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -343,7 +343,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, free_nskb: kfree_skb(nskb); err: - atomic_long_inc(&skb->dev->rx_dropped); + atomic_long_inc_unchecked(&skb->dev->rx_dropped); } static void macvlan_flush_sources(struct macvlan_port *port, @@ -1522,13 +1522,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { int macvlan_link_register(struct rtnl_link_ops *ops) { /* common fields */ - ops->priv_size = sizeof(struct macvlan_dev); - ops->validate = macvlan_validate; - ops->maxtype = IFLA_MACVLAN_MAX; - ops->policy = macvlan_policy; - ops->changelink = macvlan_changelink; - ops->get_size = macvlan_get_size; - ops->fill_info = macvlan_fill_info; + pax_open_kernel(); + const_cast(ops->priv_size) = sizeof(struct macvlan_dev); + const_cast(ops->validate) = macvlan_validate; + const_cast(ops->maxtype) = IFLA_MACVLAN_MAX; + const_cast(ops->policy) = macvlan_policy; + const_cast(ops->changelink) = macvlan_changelink; + const_cast(ops->get_size) = macvlan_get_size; + const_cast(ops->fill_info) = macvlan_fill_info; + pax_close_kernel(); return rtnl_link_register(ops); }; @@ -1616,7 +1618,7 @@ static int macvlan_device_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block macvlan_notifier_block __read_mostly = { +static struct notifier_block macvlan_notifier_block = { .notifier_call = macvlan_device_event, }; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 6f38daf2d..5a5bedd76 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -514,7 +514,7 @@ static void macvtap_setup(struct net_device *dev) dev->tx_queue_len = TUN_READQ_SIZE; } -static struct rtnl_link_ops macvtap_link_ops __read_mostly = { +static struct rtnl_link_ops macvtap_link_ops = { .kind = "macvtap", .setup = macvtap_setup, .newlink = macvtap_newlink, @@ -1054,7 +1054,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, ret = 0; u = q->flags; - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || + if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || put_user(u, &ifr->ifr_flags)) ret = -EFAULT; macvtap_put_vlan(vlan); @@ -1137,8 +1137,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, } ret = 0; u = vlan->dev->type; - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || - 
copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) || + if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || + copy_to_user(ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) || put_user(u, &ifr->ifr_hwaddr.sa_family)) ret = -EFAULT; macvtap_put_vlan(vlan); @@ -1316,7 +1316,7 @@ static int macvtap_device_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block macvtap_notifier_block __read_mostly = { +static struct notifier_block macvtap_notifier_block = { .notifier_call = macvtap_device_event, }; diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index 7b7c70e23..a92dc8393 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } -static struct rtnl_link_ops nlmon_link_ops __read_mostly = { +static struct rtnl_link_ops nlmon_link_ops = { .kind = "nlmon", .priv_size = sizeof(struct nlmon), .setup = nlmon_setup, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c4ceb082e..46c9044f6 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -411,7 +411,7 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, * zero on success. * */ -static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, +static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id, struct phy_c45_device_ids *c45_ids) { int phy_reg; int i, reg_addr; @@ -482,7 +482,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, * its return value is in turn returned. * */ -static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, +static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) { int phy_reg; @@ -520,7 +520,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) { struct phy_c45_device_ids c45_ids = {0}; - u32 phy_id = 0; + int phy_id = 0; int r; r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids); diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 9c4b41a4d..03da80ba2 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -950,7 +950,7 @@ plip_interrupt(void *dev_id) spin_unlock_irqrestore(&nl->lock, flags); } -static int +static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *nl = netdev_priv(dev); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 5489c0ec1..50a54df70 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1144,7 +1144,7 @@ static struct net *ppp_nl_get_link_net(const struct net_device *dev) return ppp->ppp_net; } -static struct rtnl_link_ops ppp_link_ops __read_mostly = { +static struct rtnl_link_ops ppp_link_ops = { .kind = "ppp", .maxtype = IFLA_PPP_MAX, .policy = ppp_nl_policy, @@ -1262,7 +1262,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; struct ppp_stats stats; struct ppp_comp_stats cstats; - char *vers; switch (cmd) { case SIOCGPPPSTATS: @@ -1284,8 +1283,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; case SIOCGPPPVER: - vers = PPP_VERSION; - if (copy_to_user(addr, vers, strlen(vers) + 1)) + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION))) break; err = 0; break; diff --git a/drivers/net/ppp/pptp.c 
b/drivers/net/ppp/pptp.c index 1951b1085..b02adc9b1 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -332,7 +332,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) } skb->ip_summed = CHECKSUM_NONE; - skb_set_network_header(skb, skb->head-skb->data); + skb->network_header = 0; ppp_input(&po->chan, skb); return NET_RX_SUCCESS; diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index a31f4610b..949a77af0 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -170,7 +170,7 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev, return 0; } -static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { int i; struct rionet_private *rnet = netdev_priv(ndev); diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index 27ed25252..80cffdeac 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -491,7 +491,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) register struct tcphdr *thp; register struct iphdr *ip; register struct cstate *cs; - int len, hdrlen; + long len, hdrlen; unsigned char *cp = icp; /* We've got a compressed packet; read the change byte */ diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a380649bf..fd8fe79cf 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2135,7 +2135,7 @@ static unsigned int team_get_num_rx_queues(void) return TEAM_DEFAULT_NUM_RX_QUEUES; } -static struct rtnl_link_ops team_link_ops __read_mostly = { +static struct rtnl_link_ops team_link_ops = { .kind = DRV_NAME, .priv_size = sizeof(struct team), .setup = team_setup, @@ -2930,7 +2930,7 @@ static int team_device_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block team_notifier_block __read_mostly = { +static struct notifier_block team_notifier_block = { .notifier_call = team_device_event, }; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 18402d795..61603d095 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -966,7 +966,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr) { struct tun_struct *tun = netdev_priv(dev); - if (new_hr < NET_SKB_PAD) + if (new_hr < 0 || new_hr < NET_SKB_PAD) new_hr = NET_SKB_PAD; tun->align = new_hr; @@ -1548,7 +1548,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[]) return -EINVAL; } -static struct rtnl_link_ops tun_link_ops __read_mostly = { +static struct rtnl_link_ops tun_link_ops = { .kind = DRV_NAME, .priv_size = sizeof(struct tun_struct), .setup = tun_setup, @@ -1977,7 +1977,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) } static long __tun_chr_ioctl(struct file *file, unsigned int cmd, - unsigned long arg, int ifreq_len) + unsigned long arg, size_t ifreq_len) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; @@ -1991,6 +1991,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, int le; int ret; + if (ifreq_len > sizeof ifr) + return -EFAULT; + if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; @@ -2506,7 +2509,7 @@ static int tun_device_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block tun_notifier_block __read_mostly = { +static struct notifier_block tun_notifier_block = { .notifier_call = tun_device_event, }; diff --git a/drivers/net/usb/hso.c 
b/drivers/net/usb/hso.c index e7b516342..0d30ed856 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -72,7 +72,7 @@ #include #include #include - +#include #define MOD_AUTHOR "Option Wireless" #define MOD_DESCRIPTION "USB High Speed Option driver" @@ -1175,7 +1175,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial) struct urb *urb; urb = serial->rx_urb[0]; - if (serial->port.count > 0) { + if (atomic_read(&serial->port.count) > 0) { count = put_rxbuf_data(urb, serial); if (count == -1) return; @@ -1213,7 +1213,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) DUMP1(urb->transfer_buffer, urb->actual_length); /* Anyone listening? */ - if (serial->port.count == 0) + if (atomic_read(&serial->port.count) == 0) return; if (serial->parent->port_spec & HSO_INFO_CRC_BUG) @@ -1229,8 +1229,9 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) * This needs to be a tasklet otherwise we will * end up recursively calling this function. */ -static void hso_unthrottle_tasklet(struct hso_serial *serial) +static void hso_unthrottle_tasklet(unsigned long _serial) { + struct hso_serial *serial = (struct hso_serial *)_serial; unsigned long flags; spin_lock_irqsave(&serial->serial_lock, flags); @@ -1274,18 +1275,17 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) tty_port_tty_set(&serial->port, tty); /* check for port already opened, if not set the termios */ - serial->port.count++; - if (serial->port.count == 1) { + if (atomic_inc_return(&serial->port.count) == 1) { serial->rx_state = RX_IDLE; /* Force default termio settings */ _hso_serial_set_termios(tty, NULL); tasklet_init(&serial->unthrottle_tasklet, - (void (*)(unsigned long))hso_unthrottle_tasklet, + hso_unthrottle_tasklet, (unsigned long)serial); result = hso_start_serial_device(serial->parent, GFP_KERNEL); if (result) { hso_stop_serial_device(serial->parent); - serial->port.count--; + atomic_dec(&serial->port.count); } else { kref_get(&serial->parent->ref); } @@ -1323,10 +1323,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) /* reset the rts and dtr */ /* do the actual close */ - serial->port.count--; + atomic_dec(&serial->port.count); - if (serial->port.count <= 0) { - serial->port.count = 0; + if (atomic_read(&serial->port.count) <= 0) { + atomic_set(&serial->port.count, 0); tty_port_tty_set(&serial->port, NULL); if (!usb_gone) hso_stop_serial_device(serial->parent); @@ -1409,7 +1409,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); - if (serial->port.count) + if (atomic_read(&serial->port.count)) _hso_serial_set_termios(tty, old); else tty->termios = *old; @@ -1884,7 +1884,7 @@ static void intr_callback(struct urb *urb) i); spin_lock(&serial->serial_lock); if (serial->rx_state == RX_IDLE && - serial->port.count > 0) { + atomic_read(&serial->port.count) > 0) { /* Setup and send a ctrl req read on * port i */ if (!serial->rx_urb_filled[0]) { @@ -3041,7 +3041,7 @@ static int hso_resume(struct usb_interface *iface) /* Start all serial ports */ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == iface)) { - if (dev2ser(serial_table[i])->port.count) { + if (atomic_read(&dev2ser(serial_table[i])->port.count)) { result = hso_start_serial_device(serial_table[i], GFP_NOIO); hso_kick_transmit(dev2ser(serial_table[i])); diff --git a/drivers/net/usb/ipheth.c 
b/drivers/net/usb/ipheth.c index 76465b117..2d72355fb 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -400,7 +400,7 @@ static int ipheth_close(struct net_device *net) return 0; } -static int ipheth_tx(struct sk_buff *skb, struct net_device *net) +static netdev_tx_t ipheth_tx(struct sk_buff *skb, struct net_device *net) { struct ipheth_device *dev = netdev_priv(net); struct usb_device *udev = dev->udev; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 90b426c5f..4a6209cbe 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -632,7 +632,7 @@ struct r8152 { bool (*in_nway)(struct r8152 *); void (*hw_phy_cfg)(struct r8152 *); void (*autosuspend_en)(struct r8152 *tp, bool enable); - } rtl_ops; + } __no_const rtl_ops; int intr_interval; u32 saved_wolopts; diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index a25158876..6d1323305 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net"; /* atomic counter partially included in MAC address to make sure 2 devices * do not end up with the same MAC - concept breaks in case of > 255 ifaces */ -static atomic_t iface_counter = ATOMIC_INIT(0); +static atomic_unchecked_t iface_counter = ATOMIC_INIT(0); /* * SYNC Timer Delay definition used to set the expiry time @@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->netdev_ops = &sierra_net_device_ops; /* change MAC addr to include, ifacenum, and to be unique */ - dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); + dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter); dev->net->dev_addr[ETH_ALEN-1] = ifacenum; /* we will have to manufacture ethernet headers, prepare template */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 51fc0c33a..6cc1baa7a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -48,7 +48,7 @@ module_param(gso, bool, 0444); DECLARE_EWMA(pkt_len, 1, 64) /* Minimum alignment for mergeable packet buffers. 
*/ -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) +#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL) #define VIRTNET_DRIVER_VERSION "1.0.0" diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 95cf1d844..b2a00f60a 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1296,7 +1296,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { [IFLA_VRF_TABLE] = { .type = NLA_U32 }, }; -static struct rtnl_link_ops vrf_link_ops __read_mostly = { +static struct rtnl_link_ops vrf_link_ops = { .kind = DRV_NAME, .priv_size = sizeof(struct net_vrf), @@ -1333,7 +1333,7 @@ static int vrf_device_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block vrf_notifier_block __read_mostly = { +static struct notifier_block vrf_notifier_block = { .notifier_call = vrf_device_event, }; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0fafaa9d9..0227536bd 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3193,7 +3193,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev) return vxlan->net; } -static struct rtnl_link_ops vxlan_link_ops __read_mostly = { +static struct rtnl_link_ops vxlan_link_ops = { .kind = "vxlan", .maxtype = IFLA_VXLAN_MAX, .policy = vxlan_policy, @@ -3277,7 +3277,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct notifier_block vxlan_notifier_block __read_mostly = { +static struct notifier_block vxlan_notifier_block = { .notifier_call = vxlan_netdevice_event, }; diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c index 5920c996f..ff2e4a565 100644 --- a/drivers/net/wan/lmc/lmc_media.c +++ b/drivers/net/wan/lmc/lmc_media.c @@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int); static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); lmc_media_t lmc_ds3_media = { - lmc_ds3_init, /* special media init stuff */ - lmc_ds3_default, /* reset to default state */ - lmc_ds3_set_status, /* reset status to state provided */ - lmc_dummy_set_1, /* set clock source */ - lmc_dummy_set2_1, /* set line speed */ - lmc_ds3_set_100ft, /* set cable length */ - lmc_ds3_set_scram, /* set scrambler */ - lmc_ds3_get_link_status, /* get link status */ - lmc_dummy_set_1, /* set link status */ - lmc_ds3_set_crc_length, /* set CRC length */ - lmc_dummy_set_1, /* set T1 or E1 circuit type */ - lmc_ds3_watchdog + .init = lmc_ds3_init, /* special media init stuff */ + .defaults = lmc_ds3_default, /* reset to default state */ + .set_status = lmc_ds3_set_status, /* reset status to state provided */ + .set_clock_source = lmc_dummy_set_1, /* set clock source */ + .set_speed = lmc_dummy_set2_1, /* set line speed */ + .set_cable_length = lmc_ds3_set_100ft, /* set cable length */ + .set_scrambler = lmc_ds3_set_scram, /* set scrambler */ + .get_link_status = lmc_ds3_get_link_status, /* get link status */ + .set_link_status = lmc_dummy_set_1, /* set link status */ + .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */ + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ + .watchdog = lmc_ds3_watchdog }; lmc_media_t lmc_hssi_media = { - lmc_hssi_init, /* special media init stuff */ - lmc_hssi_default, /* reset to default state */ - lmc_hssi_set_status, /* reset status to state provided */ - lmc_hssi_set_clock, /* set clock source */ - lmc_dummy_set2_1, /* set line speed */ - lmc_dummy_set_1, /* set cable length */ - lmc_dummy_set_1, /* set scrambler */ - lmc_hssi_get_link_status, /* 
get link status */ - lmc_hssi_set_link_status, /* set link status */ - lmc_hssi_set_crc_length, /* set CRC length */ - lmc_dummy_set_1, /* set T1 or E1 circuit type */ - lmc_hssi_watchdog + .init = lmc_hssi_init, /* special media init stuff */ + .defaults = lmc_hssi_default, /* reset to default state */ + .set_status = lmc_hssi_set_status, /* reset status to state provided */ + .set_clock_source = lmc_hssi_set_clock, /* set clock source */ + .set_speed = lmc_dummy_set2_1, /* set line speed */ + .set_cable_length = lmc_dummy_set_1, /* set cable length */ + .set_scrambler = lmc_dummy_set_1, /* set scrambler */ + .get_link_status = lmc_hssi_get_link_status, /* get link status */ + .set_link_status = lmc_hssi_set_link_status, /* set link status */ + .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */ + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ + .watchdog = lmc_hssi_watchdog }; -lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */ - lmc_ssi_default, /* reset to default state */ - lmc_ssi_set_status, /* reset status to state provided */ - lmc_ssi_set_clock, /* set clock source */ - lmc_ssi_set_speed, /* set line speed */ - lmc_dummy_set_1, /* set cable length */ - lmc_dummy_set_1, /* set scrambler */ - lmc_ssi_get_link_status, /* get link status */ - lmc_ssi_set_link_status, /* set link status */ - lmc_ssi_set_crc_length, /* set CRC length */ - lmc_dummy_set_1, /* set T1 or E1 circuit type */ - lmc_ssi_watchdog +lmc_media_t lmc_ssi_media = { + .init = lmc_ssi_init, /* special media init stuff */ + .defaults = lmc_ssi_default, /* reset to default state */ + .set_status = lmc_ssi_set_status, /* reset status to state provided */ + .set_clock_source = lmc_ssi_set_clock, /* set clock source */ + .set_speed = lmc_ssi_set_speed, /* set line speed */ + .set_cable_length = lmc_dummy_set_1, /* set cable length */ + .set_scrambler = lmc_dummy_set_1, /* set scrambler */ + .get_link_status = lmc_ssi_get_link_status, /* get link status */ + .set_link_status = lmc_ssi_set_link_status, /* set link status */ + .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */ + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ + .watchdog = lmc_ssi_watchdog }; lmc_media_t lmc_t1_media = { - lmc_t1_init, /* special media init stuff */ - lmc_t1_default, /* reset to default state */ - lmc_t1_set_status, /* reset status to state provided */ - lmc_t1_set_clock, /* set clock source */ - lmc_dummy_set2_1, /* set line speed */ - lmc_dummy_set_1, /* set cable length */ - lmc_dummy_set_1, /* set scrambler */ - lmc_t1_get_link_status, /* get link status */ - lmc_dummy_set_1, /* set link status */ - lmc_t1_set_crc_length, /* set CRC length */ - lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */ - lmc_t1_watchdog + .init = lmc_t1_init, /* special media init stuff */ + .defaults = lmc_t1_default, /* reset to default state */ + .set_status = lmc_t1_set_status, /* reset status to state provided */ + .set_clock_source = lmc_t1_set_clock, /* set clock source */ + .set_speed = lmc_dummy_set2_1, /* set line speed */ + .set_cable_length = lmc_dummy_set_1, /* set cable length */ + .set_scrambler = lmc_dummy_set_1, /* set scrambler */ + .get_link_status = lmc_t1_get_link_status, /* get link status */ + .set_link_status = lmc_dummy_set_1, /* set link status */ + .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */ + .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */ + .watchdog = lmc_t1_watchdog }; static void diff 
--git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 2f0bd6955..e46ed7bba 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan) struct z8530_irqhandler z8530_sync = { - z8530_rx, - z8530_tx, - z8530_status + .rx = z8530_rx, + .tx = z8530_tx, + .status = z8530_status }; EXPORT_SYMBOL(z8530_sync); @@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan) } static struct z8530_irqhandler z8530_dma_sync = { - z8530_dma_rx, - z8530_dma_tx, - z8530_dma_status + .rx = z8530_dma_rx, + .tx = z8530_dma_tx, + .status = z8530_dma_status }; static struct z8530_irqhandler z8530_txdma_sync = { - z8530_rx, - z8530_dma_tx, - z8530_dma_status + .rx = z8530_rx, + .tx = z8530_dma_tx, + .status = z8530_dma_status }; /** @@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan) struct z8530_irqhandler z8530_nop= { - z8530_rx_clear, - z8530_tx_clear, - z8530_status_clear + .rx = z8530_rx_clear, + .tx = z8530_tx_clear, + .status = z8530_status_clear }; diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 0b602951f..b8bfa5b2b 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m) if (i2400m->rx_roq == NULL) goto error_roq_alloc; - rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log), + rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1, GFP_KERNEL); if (rd == NULL) { result = -ENOMEM; diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 0b4d79659..28ef2242e 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -887,12 +887,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, return 0; } -static struct ath10k_ce_ring * +static struct ath10k_ce_ring * __intentional_overflow(-1) ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { struct ath10k_ce_ring *src_ring; - u32 nentries = attr->src_nentries; + unsigned long nentries = attr->src_nentries; dma_addr_t base_addr; nentries = roundup_pow_of_two(nentries); @@ -938,7 +938,7 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { struct ath10k_ce_ring *dest_ring; - u32 nentries; + unsigned long nentries; dma_addr_t base_addr; nentries = roundup_pow_of_two(attr->dest_nentries); diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 0c55cd92a..7fc013b37 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -269,13 +269,13 @@ enum ath10k_htc_ep_id { struct ath10k_htc_ops { void (*target_send_suspend_complete)(struct ath10k *ar); -}; +} __no_const; struct ath10k_htc_ep_ops { void (*ep_tx_complete)(struct ath10k *, struct sk_buff *); void (*ep_rx_complete)(struct ath10k *, struct sk_buff *); void (*ep_tx_credits)(struct ath10k *); -}; +} __no_const; /* service connection information */ struct ath10k_htc_svc_conn_req { diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index f2e85eb22..5e10c3e60 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -8006,8 +8006,11 @@ int ath10k_mac_register(struct ath10k *ar) * supports the pull-push mechanism. 
*/ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, - ar->running_fw->fw_file.fw_features)) - ar->ops->wake_tx_queue = NULL; + ar->running_fw->fw_file.fw_features)) { + pax_open_kernel(); + const_cast(ar->ops->wake_tx_queue) = NULL; + pax_close_kernel(); + } ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, ath10k_reg_notifier); diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index 0bf4592a3..11a7ae1e3 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -915,7 +915,7 @@ void ath6kl_tx_data_cleanup(struct ath6kl *ar); struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar); void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie); -int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev); struct aggr_info *aggr_init(struct ath6kl_vif *vif); void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info, diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index 9df41d5e3..fb12f17f1 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -353,7 +353,7 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb, return status; } -int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) { struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_cookie *cookie = NULL; diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 8f231c67d..48902b842 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -3,7 +3,6 @@ config ATH9K_HW config ATH9K_COMMON tristate select ATH_COMMON - select DEBUG_FS select RELAY config ATH9K_DFS_DEBUGFS def_bool y diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index f816909d9..e56cd8b02 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ads->ds_txstatus6 = ads->ds_txstatus7 = 0; ads->ds_txstatus8 = ads->ds_txstatus9 = 0; - ACCESS_ONCE(ads->ds_link) = i->link; - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; + ACCESS_ONCE_RW(ads->ds_link) = i->link; + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0]; ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); ctl6 = SM(i->keytype, AR_EncrType); @@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) if ((i->is_first || i->is_last) && i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0) | set11nTries(i->rates, 1) | set11nTries(i->rates, 2) | set11nTries(i->rates, 3) | (i->dur_update ? 
AR_DurUpdateEna : 0) | SM(0, AR_BurstDur); - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0) | set11nRate(i->rates, 1) | set11nRate(i->rates, 2) | set11nRate(i->rates, 3); } else { - ACCESS_ONCE(ads->ds_ctl2) = 0; - ACCESS_ONCE(ads->ds_ctl3) = 0; + ACCESS_ONCE_RW(ads->ds_ctl2) = 0; + ACCESS_ONCE_RW(ads->ds_ctl3) = 0; } if (!i->is_first) { - ACCESS_ONCE(ads->ds_ctl0) = 0; - ACCESS_ONCE(ads->ds_ctl1) = ctl1; - ACCESS_ONCE(ads->ds_ctl6) = ctl6; + ACCESS_ONCE_RW(ads->ds_ctl0) = 0; + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; return; } @@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) break; } - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) | SM(i->txpower[0], AR_XmitPower0) | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) @@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)); - ACCESS_ONCE(ads->ds_ctl1) = ctl1; - ACCESS_ONCE(ads->ds_ctl6) = ctl6; + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) return; - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) | set11nPktDurRTSCTS(i->rates, 1); - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) | set11nPktDurRTSCTS(i->rates, 3); - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) | set11nRateFlags(i->rates, 1) | set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 3) | SM(i->rtscts_rate, AR_RTSCTSRate); - ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1); - ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2); - ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3); + ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1); + ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2); + ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3); } static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index da84b705c..83e497821 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) (i->qcu << AR_TxQcuNum_S) | desc_len; checksum += val; - ACCESS_ONCE(ads->info) = val; + ACCESS_ONCE_RW(ads->info) = val; checksum += i->link; - ACCESS_ONCE(ads->link) = i->link; + ACCESS_ONCE_RW(ads->link) = i->link; checksum += i->buf_addr[0]; - ACCESS_ONCE(ads->data0) = i->buf_addr[0]; + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0]; checksum += i->buf_addr[1]; - ACCESS_ONCE(ads->data1) = i->buf_addr[1]; + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1]; checksum += i->buf_addr[2]; - ACCESS_ONCE(ads->data2) = i->buf_addr[2]; + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2]; checksum += i->buf_addr[3]; - ACCESS_ONCE(ads->data3) = i->buf_addr[3]; + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3]; checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl3) = val; + 
ACCESS_ONCE_RW(ads->ctl3) = val; checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl5) = val; + ACCESS_ONCE_RW(ads->ctl5) = val; checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl7) = val; + ACCESS_ONCE_RW(ads->ctl7) = val; checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl9) = val; + ACCESS_ONCE_RW(ads->ctl9) = val; checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff); - ACCESS_ONCE(ads->ctl10) = checksum; + ACCESS_ONCE_RW(ads->ctl10) = checksum; if (i->is_first || i->is_last) { - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0) + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0) | set11nTries(i->rates, 1) | set11nTries(i->rates, 2) | set11nTries(i->rates, 3) | (i->dur_update ? AR_DurUpdateEna : 0) | SM(0, AR_BurstDur); - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0) + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0) | set11nRate(i->rates, 1) | set11nRate(i->rates, 2) | set11nRate(i->rates, 3); } else { - ACCESS_ONCE(ads->ctl13) = 0; - ACCESS_ONCE(ads->ctl14) = 0; + ACCESS_ONCE_RW(ads->ctl13) = 0; + ACCESS_ONCE_RW(ads->ctl14) = 0; } ads->ctl20 = 0; @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ctl17 = SM(i->keytype, AR_EncrType); if (!i->is_first) { - ACCESS_ONCE(ads->ctl11) = 0; - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore; - ACCESS_ONCE(ads->ctl15) = 0; - ACCESS_ONCE(ads->ctl16) = 0; - ACCESS_ONCE(ads->ctl17) = ctl17; - ACCESS_ONCE(ads->ctl18) = 0; - ACCESS_ONCE(ads->ctl19) = 0; + ACCESS_ONCE_RW(ads->ctl11) = 0; + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore; + ACCESS_ONCE_RW(ads->ctl15) = 0; + ACCESS_ONCE_RW(ads->ctl16) = 0; + ACCESS_ONCE_RW(ads->ctl17) = ctl17; + ACCESS_ONCE_RW(ads->ctl18) = 0; + ACCESS_ONCE_RW(ads->ctl19) = 0; return; } - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) | SM(i->txpower[0], AR_XmitPower0) | (i->flags & ATH9K_TXDESC_VEOL ? 
AR_VEOL : 0) @@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S; ctl12 |= SM(val, AR_PAPRDChainMask); - ACCESS_ONCE(ads->ctl12) = ctl12; - ACCESS_ONCE(ads->ctl17) = ctl17; + ACCESS_ONCE_RW(ads->ctl12) = ctl12; + ACCESS_ONCE_RW(ads->ctl17) = ctl17; - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) | set11nPktDurRTSCTS(i->rates, 1); - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) | set11nPktDurRTSCTS(i->rates, 3); - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0) + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0) | set11nRateFlags(i->rates, 1) | set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 3) | SM(i->rtscts_rate, AR_RTSCTSRate); - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding; - ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1); - ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2); - ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3); + ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1); + ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2); + ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3); } static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9cbca1229..eae7c7961 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -672,7 +672,7 @@ struct ath_hw_private_ops { #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT bool (*is_aic_enabled)(struct ath_hw *ah); #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ -}; +} __no_const; /** * struct ath_spec_scan - parameters for Atheros spectral scan @@ -748,7 +748,7 @@ struct ath_hw_ops { #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable); #endif -}; +} __no_const; struct ath_nf_limits { s16 max; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index e9f32b52f..d394d930e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2622,16 +2622,18 @@ void ath9k_fill_chanctx_ops(void) if (!ath9k_is_chanctx_enabled()) return; - ath9k_ops.hw_scan = ath9k_hw_scan; - ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan; - ath9k_ops.remain_on_channel = ath9k_remain_on_channel; - ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel; - ath9k_ops.add_chanctx = ath9k_add_chanctx; - ath9k_ops.remove_chanctx = ath9k_remove_chanctx; - ath9k_ops.change_chanctx = ath9k_change_chanctx; - ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx; - ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx; - ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx; + pax_open_kernel(); + const_cast(ath9k_ops.hw_scan) = ath9k_hw_scan; + const_cast(ath9k_ops.cancel_hw_scan) = ath9k_cancel_hw_scan; + const_cast(ath9k_ops.remain_on_channel) = ath9k_remain_on_channel; + const_cast(ath9k_ops.cancel_remain_on_channel) = ath9k_cancel_remain_on_channel; + const_cast(ath9k_ops.add_chanctx) = ath9k_add_chanctx; + const_cast(ath9k_ops.remove_chanctx) = ath9k_remove_chanctx; + const_cast(ath9k_ops.change_chanctx) = ath9k_change_chanctx; + const_cast(ath9k_ops.assign_vif_chanctx) = ath9k_assign_vif_chanctx; + const_cast(ath9k_ops.unassign_vif_chanctx) = 
ath9k_unassign_vif_chanctx; + const_cast(ath9k_ops.mgd_prepare_tx) = ath9k_mgd_prepare_tx; + pax_close_kernel(); } #endif diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 237d0cda1..6c094fd17 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -297,7 +297,7 @@ struct ar9170 { unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ]; bool needs_full_reset; bool force_usb_reset; - atomic_t pending_restarts; + atomic_unchecked_t pending_restarts; /* interface mode settings */ struct list_head vif_list; @@ -400,7 +400,7 @@ struct ar9170 { struct carl9170_sta_tid __rcu *tx_ampdu_iter; struct list_head tx_ampdu_list; atomic_t tx_ampdu_upload; - atomic_t tx_ampdu_scheduler; + atomic_unchecked_t tx_ampdu_scheduler; atomic_t tx_total_pending; atomic_t tx_total_queued; unsigned int tx_ampdu_list_len; @@ -412,7 +412,7 @@ struct ar9170 { spinlock_t mem_lock; unsigned long *mem_bitmap; atomic_t mem_free_blocks; - atomic_t mem_allocs; + atomic_unchecked_t mem_allocs; /* rxstream mpdu merge */ struct ar9170_rx_head rx_plcp; diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c index ec3a64e5d..4d4a4e2b2 100644 --- a/drivers/net/wireless/ath/carl9170/debug.c +++ b/drivers/net/wireless/ath/carl9170/debug.c @@ -223,7 +223,7 @@ static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf, ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n", bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks), - ar->fw.mem_blocks, atomic_read(&ar->mem_allocs)); + ar->fw.mem_blocks, atomic_read_unchecked(&ar->mem_allocs)); ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n", atomic_read(&ar->mem_free_blocks), @@ -674,7 +674,7 @@ static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf, ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n", ar->fw.bug_counter); ADD(buf, *ret, bufsize, "pending restart requests:%d\n", - atomic_read(&ar->pending_restarts)); + atomic_read_unchecked(&ar->pending_restarts)); return buf; } __DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED); @@ -781,7 +781,7 @@ DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d", DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d", atomic_read(&ar->tx_total_queued)); DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d", - atomic_read(&ar->tx_ampdu_scheduler)); + atomic_read_unchecked(&ar->tx_ampdu_scheduler)); DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d", atomic_read(&ar->tx_total_pending)); diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index ffb22a04b..231c7bcaf 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -320,7 +320,7 @@ static void carl9170_zap_queues(struct ar9170 *ar) rcu_read_unlock(); atomic_set(&ar->tx_ampdu_upload, 0); - atomic_set(&ar->tx_ampdu_scheduler, 0); + atomic_set_unchecked(&ar->tx_ampdu_scheduler, 0); atomic_set(&ar->tx_total_pending, 0); atomic_set(&ar->tx_total_queued, 0); atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks); @@ -370,7 +370,7 @@ static int carl9170_op_start(struct ieee80211_hw *hw) ar->max_queue_stop_timeout[i] = 0; } - atomic_set(&ar->mem_allocs, 0); + atomic_set_unchecked(&ar->mem_allocs, 0); err = carl9170_usb_open(ar); if (err) @@ -490,7 +490,7 @@ static void carl9170_restart_work(struct work_struct *work) if (!err && !ar->force_usb_reset) { ar->restart_counter++; - atomic_set(&ar->pending_restarts, 
0); + atomic_set_unchecked(&ar->pending_restarts, 0); ieee80211_restart_hw(ar->hw); } else { @@ -513,7 +513,7 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r) * By ignoring these *surplus* reset events, the device won't be * killed again, right after it has recovered. */ - if (atomic_inc_return(&ar->pending_restarts) > 1) { + if (atomic_inc_return_unchecked(&ar->pending_restarts) > 1) { dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r); return; } @@ -1820,7 +1820,7 @@ void *carl9170_alloc(size_t priv_size) spin_lock_init(&ar->tx_ampdu_list_lock); spin_lock_init(&ar->mem_lock); spin_lock_init(&ar->state_lock); - atomic_set(&ar->pending_restarts, 0); + atomic_set_unchecked(&ar->pending_restarts, 0); ar->vifs = 0; for (i = 0; i < ar->hw->queues; i++) { skb_queue_head_init(&ar->tx_status[i]); diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 2bf04c9ed..ae059574f 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -193,7 +193,7 @@ static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb) unsigned int chunks; int cookie = -1; - atomic_inc(&ar->mem_allocs); + atomic_inc_unchecked(&ar->mem_allocs); chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size); if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) { @@ -1130,7 +1130,7 @@ static void carl9170_tx_ampdu(struct ar9170 *ar) unsigned int i = 0, done_ampdus = 0; u16 seq, queue, tmpssn; - atomic_inc(&ar->tx_ampdu_scheduler); + atomic_inc_unchecked(&ar->tx_ampdu_scheduler); ar->tx_ampdu_schedule = false; if (atomic_read(&ar->tx_ampdu_upload)) diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 44746ca0d..6e17b3b2c 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -164,7 +164,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct wil6210_priv *wil; struct device *dev = &pdev->dev; int rc; - const struct wil_platform_rops rops = { + static const struct wil_platform_rops rops = { .ramdump = wil_platform_rop_ramdump, .fw_recovery = wil_platform_rop_fw_recovery, }; diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h index f8c41172a..72c917e0d 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -37,7 +37,7 @@ struct wil_platform_ops { int (*resume)(void *handle); void (*uninit)(void *handle); int (*notify)(void *handle, enum wil_platform_event evt); -}; +} __no_const; /** * struct wil_platform_rops - wil platform module callbacks from diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 2e3ece345..b135b076d 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -346,7 +346,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state) } /* Convert timeout from the DFU status to jiffies */ -static inline unsigned long at76_get_timeout(struct dfu_status *s) +static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s) { return msecs_to_jiffies((s->poll_timeout[2] << 16) | (s->poll_timeout[1] << 8) diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index b6107aea6..817004938 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ 
b/drivers/net/wireless/atmel/atmel.c @@ -1648,9 +1648,10 @@ EXPORT_SYMBOL(stop_atmel_card); static int atmel_set_essid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->essid; struct atmel_private *priv = netdev_priv(dev); /* Check if we asked for `any' */ @@ -1676,9 +1677,10 @@ static int atmel_set_essid(struct net_device *dev, static int atmel_get_essid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->essid; struct atmel_private *priv = netdev_priv(dev); /* Get the current SSID */ @@ -1697,9 +1699,10 @@ static int atmel_get_essid(struct net_device *dev, static int atmel_get_wap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *awrq, + union iwreq_data *wrqu, char *extra) { + struct sockaddr *awrq = &wrqu->ap_addr; struct atmel_private *priv = netdev_priv(dev); memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN); awrq->sa_family = ARPHRD_ETHER; @@ -1709,9 +1712,10 @@ static int atmel_get_wap(struct net_device *dev, static int atmel_set_encode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->encoding; struct atmel_private *priv = netdev_priv(dev); /* Basic checking: do we have a key to set ? @@ -1798,9 +1802,10 @@ static int atmel_set_encode(struct net_device *dev, static int atmel_get_encode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->encoding; struct atmel_private *priv = netdev_priv(dev); int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; @@ -2008,18 +2013,20 @@ static int atmel_get_auth(struct net_device *dev, static int atmel_get_name(struct net_device *dev, struct iw_request_info *info, - char *cwrq, + union iwreq_data *wrqu, char *extra) { + char *cwrq = wrqu->name; strcpy(cwrq, "IEEE 802.11-DS"); return 0; } static int atmel_set_rate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->bitrate; struct atmel_private *priv = netdev_priv(dev); if (vwrq->fixed == 0) { @@ -2058,9 +2065,10 @@ static int atmel_set_rate(struct net_device *dev, static int atmel_set_mode(struct net_device *dev, struct iw_request_info *info, - __u32 *uwrq, + union iwreq_data *wrqu, char *extra) { + __u32 *uwrq = &wrqu->mode; struct atmel_private *priv = netdev_priv(dev); if (*uwrq != IW_MODE_ADHOC && *uwrq != IW_MODE_INFRA) @@ -2072,9 +2080,10 @@ static int atmel_set_mode(struct net_device *dev, static int atmel_get_mode(struct net_device *dev, struct iw_request_info *info, - __u32 *uwrq, + union iwreq_data *wrqu, char *extra) { + __u32 *uwrq = &wrqu->mode; struct atmel_private *priv = netdev_priv(dev); *uwrq = priv->operating_mode; @@ -2083,9 +2092,10 @@ static int atmel_get_mode(struct net_device *dev, static int atmel_get_rate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->bitrate; struct atmel_private *priv = netdev_priv(dev); if (priv->auto_tx_rate) { @@ -2113,9 +2123,10 @@ static int atmel_get_rate(struct net_device *dev, static int atmel_set_power(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + 
struct iw_param *vwrq = &wrqu->power; struct atmel_private *priv = netdev_priv(dev); priv->power_mode = vwrq->disabled ? 0 : 1; return -EINPROGRESS; @@ -2123,9 +2134,10 @@ static int atmel_set_power(struct net_device *dev, static int atmel_get_power(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->power; struct atmel_private *priv = netdev_priv(dev); vwrq->disabled = priv->power_mode ? 0 : 1; vwrq->flags = IW_POWER_ON; @@ -2134,9 +2146,10 @@ static int atmel_get_power(struct net_device *dev, static int atmel_set_retry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->retry; struct atmel_private *priv = netdev_priv(dev); if (!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) { @@ -2157,9 +2170,10 @@ static int atmel_set_retry(struct net_device *dev, static int atmel_get_retry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->retry; struct atmel_private *priv = netdev_priv(dev); vwrq->disabled = 0; /* Can't be disabled */ @@ -2180,9 +2194,10 @@ static int atmel_get_retry(struct net_device *dev, static int atmel_set_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->rts; struct atmel_private *priv = netdev_priv(dev); int rthr = vwrq->value; @@ -2198,9 +2213,10 @@ static int atmel_set_rts(struct net_device *dev, static int atmel_get_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->rts; struct atmel_private *priv = netdev_priv(dev); vwrq->value = priv->rts_threshold; @@ -2212,9 +2228,10 @@ static int atmel_get_rts(struct net_device *dev, static int atmel_set_frag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->frag; struct atmel_private *priv = netdev_priv(dev); int fthr = vwrq->value; @@ -2231,9 +2248,10 @@ static int atmel_set_frag(struct net_device *dev, static int atmel_get_frag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->frag; struct atmel_private *priv = netdev_priv(dev); vwrq->value = priv->frag_threshold; @@ -2245,9 +2263,10 @@ static int atmel_get_frag(struct net_device *dev, static int atmel_set_freq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *fwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_freq *fwrq = &wrqu->freq; struct atmel_private *priv = netdev_priv(dev); int rc = -EINPROGRESS; /* Call commit handler */ @@ -2275,9 +2294,10 @@ static int atmel_set_freq(struct net_device *dev, static int atmel_get_freq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *fwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_freq *fwrq = &wrqu->freq; struct atmel_private *priv = netdev_priv(dev); fwrq->m = priv->channel; @@ -2287,7 +2307,7 @@ static int atmel_get_freq(struct net_device *dev, static int atmel_set_scan(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *dwrq, char *extra) { struct atmel_private *priv = netdev_priv(dev); @@ -2325,9 +2345,10 @@ static 
int atmel_set_scan(struct net_device *dev, static int atmel_get_scan(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; struct atmel_private *priv = netdev_priv(dev); int i; char *current_ev = extra; @@ -2396,9 +2417,10 @@ static int atmel_get_scan(struct net_device *dev, static int atmel_get_range(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; struct atmel_private *priv = netdev_priv(dev); struct iw_range *range = (struct iw_range *) extra; int k, i, j; @@ -2470,9 +2492,10 @@ static int atmel_get_range(struct net_device *dev, static int atmel_set_wap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *awrq, + union iwreq_data *wrqu, char *extra) { + struct sockaddr *awrq = &wrqu->ap_addr; struct atmel_private *priv = netdev_priv(dev); int i; static const u8 any[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; @@ -2512,7 +2535,7 @@ static int atmel_set_wap(struct net_device *dev, static int atmel_config_commit(struct net_device *dev, struct iw_request_info *info, /* NULL */ - void *zwrq, /* NULL */ + union iwreq_data *zwrq, /* NULL */ char *extra) /* NULL */ { return atmel_open(dev); @@ -2520,61 +2543,61 @@ static int atmel_config_commit(struct net_device *dev, static const iw_handler atmel_handler[] = { - (iw_handler) atmel_config_commit, /* SIOCSIWCOMMIT */ - (iw_handler) atmel_get_name, /* SIOCGIWNAME */ - (iw_handler) NULL, /* SIOCSIWNWID */ - (iw_handler) NULL, /* SIOCGIWNWID */ - (iw_handler) atmel_set_freq, /* SIOCSIWFREQ */ - (iw_handler) atmel_get_freq, /* SIOCGIWFREQ */ - (iw_handler) atmel_set_mode, /* SIOCSIWMODE */ - (iw_handler) atmel_get_mode, /* SIOCGIWMODE */ - (iw_handler) NULL, /* SIOCSIWSENS */ - (iw_handler) NULL, /* SIOCGIWSENS */ - (iw_handler) NULL, /* SIOCSIWRANGE */ - (iw_handler) atmel_get_range, /* SIOCGIWRANGE */ - (iw_handler) NULL, /* SIOCSIWPRIV */ - (iw_handler) NULL, /* SIOCGIWPRIV */ - (iw_handler) NULL, /* SIOCSIWSTATS */ - (iw_handler) NULL, /* SIOCGIWSTATS */ - (iw_handler) NULL, /* SIOCSIWSPY */ - (iw_handler) NULL, /* SIOCGIWSPY */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) atmel_set_wap, /* SIOCSIWAP */ - (iw_handler) atmel_get_wap, /* SIOCGIWAP */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* SIOCGIWAPLIST */ - (iw_handler) atmel_set_scan, /* SIOCSIWSCAN */ - (iw_handler) atmel_get_scan, /* SIOCGIWSCAN */ - (iw_handler) atmel_set_essid, /* SIOCSIWESSID */ - (iw_handler) atmel_get_essid, /* SIOCGIWESSID */ - (iw_handler) NULL, /* SIOCSIWNICKN */ - (iw_handler) NULL, /* SIOCGIWNICKN */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) atmel_set_rate, /* SIOCSIWRATE */ - (iw_handler) atmel_get_rate, /* SIOCGIWRATE */ - (iw_handler) atmel_set_rts, /* SIOCSIWRTS */ - (iw_handler) atmel_get_rts, /* SIOCGIWRTS */ - (iw_handler) atmel_set_frag, /* SIOCSIWFRAG */ - (iw_handler) atmel_get_frag, /* SIOCGIWFRAG */ - (iw_handler) NULL, /* SIOCSIWTXPOW */ - (iw_handler) NULL, /* SIOCGIWTXPOW */ - (iw_handler) atmel_set_retry, /* SIOCSIWRETRY */ - (iw_handler) atmel_get_retry, /* SIOCGIWRETRY */ - (iw_handler) atmel_set_encode, /* SIOCSIWENCODE */ - (iw_handler) atmel_get_encode, /* SIOCGIWENCODE */ - (iw_handler) atmel_set_power, /* SIOCSIWPOWER */ - (iw_handler) atmel_get_power, /* SIOCGIWPOWER */ - (iw_handler) NULL, /* -- 
hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* SIOCSIWGENIE */ - (iw_handler) NULL, /* SIOCGIWGENIE */ - (iw_handler) atmel_set_auth, /* SIOCSIWAUTH */ - (iw_handler) atmel_get_auth, /* SIOCGIWAUTH */ - (iw_handler) atmel_set_encodeext, /* SIOCSIWENCODEEXT */ - (iw_handler) atmel_get_encodeext, /* SIOCGIWENCODEEXT */ - (iw_handler) NULL, /* SIOCSIWPMKSA */ + atmel_config_commit, /* SIOCSIWCOMMIT */ + atmel_get_name, /* SIOCGIWNAME */ + NULL, /* SIOCSIWNWID */ + NULL, /* SIOCGIWNWID */ + atmel_set_freq, /* SIOCSIWFREQ */ + atmel_get_freq, /* SIOCGIWFREQ */ + atmel_set_mode, /* SIOCSIWMODE */ + atmel_get_mode, /* SIOCGIWMODE */ + NULL, /* SIOCSIWSENS */ + NULL, /* SIOCGIWSENS */ + NULL, /* SIOCSIWRANGE */ + atmel_get_range, /* SIOCGIWRANGE */ + NULL, /* SIOCSIWPRIV */ + NULL, /* SIOCGIWPRIV */ + NULL, /* SIOCSIWSTATS */ + NULL, /* SIOCGIWSTATS */ + NULL, /* SIOCSIWSPY */ + NULL, /* SIOCGIWSPY */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + atmel_set_wap, /* SIOCSIWAP */ + atmel_get_wap, /* SIOCGIWAP */ + NULL, /* -- hole -- */ + NULL, /* SIOCGIWAPLIST */ + atmel_set_scan, /* SIOCSIWSCAN */ + atmel_get_scan, /* SIOCGIWSCAN */ + atmel_set_essid, /* SIOCSIWESSID */ + atmel_get_essid, /* SIOCGIWESSID */ + NULL, /* SIOCSIWNICKN */ + NULL, /* SIOCGIWNICKN */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + atmel_set_rate, /* SIOCSIWRATE */ + atmel_get_rate, /* SIOCGIWRATE */ + atmel_set_rts, /* SIOCSIWRTS */ + atmel_get_rts, /* SIOCGIWRTS */ + atmel_set_frag, /* SIOCSIWFRAG */ + atmel_get_frag, /* SIOCGIWFRAG */ + NULL, /* SIOCSIWTXPOW */ + NULL, /* SIOCGIWTXPOW */ + atmel_set_retry, /* SIOCSIWRETRY */ + atmel_get_retry, /* SIOCGIWRETRY */ + atmel_set_encode, /* SIOCSIWENCODE */ + atmel_get_encode, /* SIOCGIWENCODE */ + atmel_set_power, /* SIOCSIWPOWER */ + atmel_get_power, /* SIOCGIWPOWER */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + NULL, /* SIOCSIWGENIE */ + NULL, /* SIOCGIWGENIE */ + atmel_set_auth, /* SIOCSIWAUTH */ + atmel_get_auth, /* SIOCGIWAUTH */ + atmel_set_encodeext, /* SIOCSIWENCODEEXT */ + atmel_get_encodeext, /* SIOCGIWENCODEEXT */ + NULL, /* SIOCSIWPMKSA */ }; static const iw_handler atmel_private_handler[] = diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c index 6922cbb99..c45026c0d 100644 --- a/drivers/net/wireless/broadcom/b43/phy_lp.c +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c @@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev, { struct ssb_bus *bus = dev->dev->sdev->bus; - static const struct b206x_channel *chandata = NULL; + const struct b206x_channel *chandata = NULL; u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count; u16 old_comm15, scale; diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index a061fa705..3a5ddc523 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -1303,8 +1303,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev) } /* Interrupt handler bottom-half */ -static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev) +static void b43legacy_interrupt_tasklet(unsigned long _dev) { + struct b43legacy_wldev *dev = (struct b43legacy_wldev *)_dev; u32 reason; u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; u32 merged_dma_reason = 0; @@ -3772,7 +3773,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev, 
b43legacy_set_status(wldev, B43legacy_STAT_UNINIT); wldev->bad_frames_preempt = modparam_bad_frames_preempt; tasklet_init(&wldev->isr_tasklet, - (void (*)(unsigned long))b43legacy_interrupt_tasklet, + b43legacy_interrupt_tasklet, (unsigned long)wldev); if (modparam_pio) wldev->__using_pio = true; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 78d9966a3..2808cb5ef 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5229,6 +5229,50 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .tdls_oper = brcmf_cfg80211_tdls_oper, }; +static struct cfg80211_ops brcmf_cfg80211_ops2 = { + .add_virtual_intf = brcmf_cfg80211_add_iface, + .del_virtual_intf = brcmf_cfg80211_del_iface, + .change_virtual_intf = brcmf_cfg80211_change_iface, + .scan = brcmf_cfg80211_scan, + .set_wiphy_params = brcmf_cfg80211_set_wiphy_params, + .join_ibss = brcmf_cfg80211_join_ibss, + .leave_ibss = brcmf_cfg80211_leave_ibss, + .get_station = brcmf_cfg80211_get_station, + .dump_station = brcmf_cfg80211_dump_station, + .set_tx_power = brcmf_cfg80211_set_tx_power, + .get_tx_power = brcmf_cfg80211_get_tx_power, + .add_key = brcmf_cfg80211_add_key, + .del_key = brcmf_cfg80211_del_key, + .get_key = brcmf_cfg80211_get_key, + .set_default_key = brcmf_cfg80211_config_default_key, + .set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key, + .set_power_mgmt = brcmf_cfg80211_set_power_mgmt, + .connect = brcmf_cfg80211_connect, + .disconnect = brcmf_cfg80211_disconnect, + .suspend = brcmf_cfg80211_suspend, + .resume = brcmf_cfg80211_resume, + .set_pmksa = brcmf_cfg80211_set_pmksa, + .del_pmksa = brcmf_cfg80211_del_pmksa, + .flush_pmksa = brcmf_cfg80211_flush_pmksa, + .start_ap = brcmf_cfg80211_start_ap, + .stop_ap = brcmf_cfg80211_stop_ap, + .change_beacon = brcmf_cfg80211_change_beacon, + .del_station = brcmf_cfg80211_del_station, + .change_station = brcmf_cfg80211_change_station, + .sched_scan_start = brcmf_cfg80211_sched_scan_start, + .sched_scan_stop = brcmf_cfg80211_sched_scan_stop, + .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register, + .mgmt_tx = brcmf_cfg80211_mgmt_tx, + .remain_on_channel = brcmf_p2p_remain_on_channel, + .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel, + .start_p2p_device = brcmf_p2p_start_device, + .stop_p2p_device = brcmf_p2p_stop_device, + .crit_proto_start = brcmf_cfg80211_crit_proto_start, + .crit_proto_stop = brcmf_cfg80211_crit_proto_stop, + .tdls_oper = brcmf_cfg80211_tdls_oper, + .set_rekey_data = brcmf_cfg80211_set_rekey_data, +}; + struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, enum nl80211_iftype type) { @@ -6845,7 +6889,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev; struct brcmf_cfg80211_info *cfg; struct wiphy *wiphy; - struct cfg80211_ops *ops; + struct cfg80211_ops *ops = &brcmf_cfg80211_ops; struct brcmf_cfg80211_vif *vif; struct brcmf_if *ifp; s32 err = 0; @@ -6857,14 +6901,10 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, return NULL; } - ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL); - if (!ops) - return NULL; - ifp = netdev_priv(ndev); #ifdef CONFIG_PM if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) - ops->set_rekey_data = brcmf_cfg80211_set_rekey_data; + ops = &brcmf_cfg80211_ops2; #endif wiphy = wiphy_new(ops, 
sizeof(struct brcmf_cfg80211_info)); if (!wiphy) { @@ -7003,7 +7043,6 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, ifp->vif = NULL; wiphy_out: brcmf_free_wiphy(wiphy); - kfree(ops); return NULL; } @@ -7014,7 +7053,6 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) brcmf_btcoex_detach(cfg); wiphy_unregister(cfg->wiphy); - kfree(cfg->ops); wl_deinit_priv(cfg); brcmf_free_wiphy(cfg->wiphy); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c index 1c4e9dd57..a6388e7af 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c @@ -394,8 +394,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp) return sh; } -static void wlc_phy_timercb_phycal(struct brcms_phy *pi) +static void wlc_phy_timercb_phycal(void *_pi) { + struct brcms_phy *pi = _pi; uint delay = 5; if (PHY_PERICAL_MPHASE_PENDING(pi)) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c index a0de5db0c..b72381791 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c @@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim) } struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, - void (*fn)(struct brcms_phy *pi), + void (*fn)(void *pi), void *arg, const char *name) { return (struct wlapi_timer *) - brcms_init_timer(physhim->wl, (void (*)(void *))fn, - arg, name); + brcms_init_timer(physhim->wl, fn, arg, name); } void wlapi_free_timer(struct wlapi_timer *t) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h index dd8774717..27d0934e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h @@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim); /* PHY to WL utility functions */ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, - void (*fn)(struct brcms_phy *pi), + void (*fn)(void *pi), void *arg, const char *name); void wlapi_free_timer(struct wlapi_timer *t); void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic); diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 69b826d22..669a1e0e0 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -4779,7 +4779,7 @@ static int get_dec_u16( char *buffer, int *start, int limit ) { } static int airo_config_commit(struct net_device *dev, - struct iw_request_info *info, void *zwrq, + struct iw_request_info *info, union iwreq_data *zwrq, char *extra); static inline int sniffing_mode(struct airo_info *ai) @@ -5766,9 +5766,11 @@ static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid) */ static int airo_get_name(struct net_device *dev, struct iw_request_info *info, - char *cwrq, + union iwreq_data *wrqu, char *extra) { + char *cwrq = wrqu->name; + strcpy(cwrq, "IEEE 802.11-DS"); return 0; } @@ -5779,9 +5781,10 @@ static int airo_get_name(struct net_device *dev, */ static int airo_set_freq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *fwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_freq *fwrq = &wrqu->freq; struct airo_info 
*local = dev->ml_priv; int rc = -EINPROGRESS; /* Call commit handler */ @@ -5820,9 +5823,10 @@ static int airo_set_freq(struct net_device *dev, */ static int airo_get_freq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *fwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_freq *fwrq = &wrqu->freq; struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ int ch; @@ -5852,9 +5856,10 @@ static int airo_get_freq(struct net_device *dev, */ static int airo_set_essid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->essid; struct airo_info *local = dev->ml_priv; SsidRid SSID_rid; /* SSIDs */ @@ -5897,9 +5902,10 @@ static int airo_set_essid(struct net_device *dev, */ static int airo_get_essid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->essid; struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ @@ -5925,9 +5931,10 @@ static int airo_get_essid(struct net_device *dev, */ static int airo_set_wap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *awrq, + union iwreq_data *wrqu, char *extra) { + struct sockaddr *awrq = &wrqu->ap_addr; struct airo_info *local = dev->ml_priv; Cmd cmd; Resp rsp; @@ -5960,9 +5967,10 @@ static int airo_set_wap(struct net_device *dev, */ static int airo_get_wap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *awrq, + union iwreq_data *wrqu, char *extra) { + struct sockaddr *awrq = &wrqu->ap_addr; struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ @@ -5981,9 +5989,10 @@ static int airo_get_wap(struct net_device *dev, */ static int airo_set_nick(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; struct airo_info *local = dev->ml_priv; /* Check the size of the string */ @@ -6004,9 +6013,10 @@ static int airo_set_nick(struct net_device *dev, */ static int airo_get_nick(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); @@ -6023,9 +6033,10 @@ static int airo_get_nick(struct net_device *dev, */ static int airo_set_rate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->bitrate; struct airo_info *local = dev->ml_priv; CapabilityRid cap_rid; /* Card capability info */ u8 brate = 0; @@ -6093,9 +6104,10 @@ static int airo_set_rate(struct net_device *dev, */ static int airo_get_rate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->bitrate; struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ @@ -6115,9 +6127,10 @@ static int airo_get_rate(struct net_device *dev, */ static int airo_set_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->rts; struct airo_info *local = dev->ml_priv; int rthr = vwrq->value; @@ -6139,9 +6152,10 @@ static int airo_set_rts(struct net_device *dev, */ static int 
airo_get_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->rts; struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); @@ -6158,9 +6172,10 @@ static int airo_get_rts(struct net_device *dev, */ static int airo_set_frag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->frag; struct airo_info *local = dev->ml_priv; int fthr = vwrq->value; @@ -6183,9 +6198,10 @@ static int airo_set_frag(struct net_device *dev, */ static int airo_get_frag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->frag; struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); @@ -6202,9 +6218,10 @@ static int airo_get_frag(struct net_device *dev, */ static int airo_set_mode(struct net_device *dev, struct iw_request_info *info, - __u32 *uwrq, + union iwreq_data *wrqu, char *extra) { + __u32 *uwrq = &wrqu->mode; struct airo_info *local = dev->ml_priv; int reset = 0; @@ -6265,9 +6282,10 @@ static int airo_set_mode(struct net_device *dev, */ static int airo_get_mode(struct net_device *dev, struct iw_request_info *info, - __u32 *uwrq, + union iwreq_data *wrqu, char *extra) { + __u32 *uwrq = &wrqu->mode; struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); @@ -6300,9 +6318,10 @@ static inline int valid_index(struct airo_info *ai, int index) */ static int airo_set_encode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->encoding; struct airo_info *local = dev->ml_priv; int perm = (dwrq->flags & IW_ENCODE_TEMP ? 
0 : 1); __le16 currentAuthType = local->config.authType; @@ -6399,9 +6418,10 @@ static int airo_set_encode(struct net_device *dev, */ static int airo_get_encode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->encoding; struct airo_info *local = dev->ml_priv; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; int wep_key_len; @@ -6746,9 +6766,10 @@ static int airo_get_auth(struct net_device *dev, */ static int airo_set_txpow(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->txpower; struct airo_info *local = dev->ml_priv; CapabilityRid cap_rid; /* Card capability info */ int i; @@ -6783,9 +6804,10 @@ static int airo_set_txpow(struct net_device *dev, */ static int airo_get_txpow(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->txpower; struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); @@ -6803,9 +6825,10 @@ static int airo_get_txpow(struct net_device *dev, */ static int airo_set_retry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->retry; struct airo_info *local = dev->ml_priv; int rc = -EINVAL; @@ -6841,9 +6864,10 @@ static int airo_set_retry(struct net_device *dev, */ static int airo_get_retry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->retry; struct airo_info *local = dev->ml_priv; vwrq->disabled = 0; /* Can't be disabled */ @@ -6872,9 +6896,10 @@ static int airo_get_retry(struct net_device *dev, */ static int airo_get_range(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; struct airo_info *local = dev->ml_priv; struct iw_range *range = (struct iw_range *) extra; CapabilityRid cap_rid; /* Card capability info */ @@ -6998,9 +7023,10 @@ static int airo_get_range(struct net_device *dev, */ static int airo_set_power(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->power; struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); @@ -7055,9 +7081,10 @@ static int airo_set_power(struct net_device *dev, */ static int airo_get_power(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->power; struct airo_info *local = dev->ml_priv; __le16 mode; @@ -7086,9 +7113,10 @@ static int airo_get_power(struct net_device *dev, */ static int airo_set_sens(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->sens; struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); @@ -7105,9 +7133,10 @@ static int airo_set_sens(struct net_device *dev, */ static int airo_get_sens(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->sens; struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); @@ -7125,9 +7154,10 @@ static int airo_get_sens(struct net_device *dev, */ 
static int airo_get_aplist(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; struct airo_info *local = dev->ml_priv; struct sockaddr *address = (struct sockaddr *) extra; struct iw_quality *qual; @@ -7203,7 +7233,7 @@ static int airo_get_aplist(struct net_device *dev, */ static int airo_set_scan(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *dwrq, char *extra) { struct airo_info *ai = dev->ml_priv; @@ -7434,9 +7464,10 @@ static inline char *airo_translate_scan(struct net_device *dev, */ static int airo_get_scan(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; struct airo_info *ai = dev->ml_priv; BSSListElement *net; int err = 0; @@ -7478,7 +7509,7 @@ static int airo_get_scan(struct net_device *dev, */ static int airo_config_commit(struct net_device *dev, struct iw_request_info *info, /* NULL */ - void *zwrq, /* NULL */ + union iwreq_data *zwrq, /* NULL */ char *extra) /* NULL */ { struct airo_info *local = dev->ml_priv; @@ -7528,61 +7559,61 @@ static const struct iw_priv_args airo_private_args[] = { static const iw_handler airo_handler[] = { - (iw_handler) airo_config_commit, /* SIOCSIWCOMMIT */ - (iw_handler) airo_get_name, /* SIOCGIWNAME */ - (iw_handler) NULL, /* SIOCSIWNWID */ - (iw_handler) NULL, /* SIOCGIWNWID */ - (iw_handler) airo_set_freq, /* SIOCSIWFREQ */ - (iw_handler) airo_get_freq, /* SIOCGIWFREQ */ - (iw_handler) airo_set_mode, /* SIOCSIWMODE */ - (iw_handler) airo_get_mode, /* SIOCGIWMODE */ - (iw_handler) airo_set_sens, /* SIOCSIWSENS */ - (iw_handler) airo_get_sens, /* SIOCGIWSENS */ - (iw_handler) NULL, /* SIOCSIWRANGE */ - (iw_handler) airo_get_range, /* SIOCGIWRANGE */ - (iw_handler) NULL, /* SIOCSIWPRIV */ - (iw_handler) NULL, /* SIOCGIWPRIV */ - (iw_handler) NULL, /* SIOCSIWSTATS */ - (iw_handler) NULL, /* SIOCGIWSTATS */ + airo_config_commit, /* SIOCSIWCOMMIT */ + airo_get_name, /* SIOCGIWNAME */ + NULL, /* SIOCSIWNWID */ + NULL, /* SIOCGIWNWID */ + airo_set_freq, /* SIOCSIWFREQ */ + airo_get_freq, /* SIOCGIWFREQ */ + airo_set_mode, /* SIOCSIWMODE */ + airo_get_mode, /* SIOCGIWMODE */ + airo_set_sens, /* SIOCSIWSENS */ + airo_get_sens, /* SIOCGIWSENS */ + NULL, /* SIOCSIWRANGE */ + airo_get_range, /* SIOCGIWRANGE */ + NULL, /* SIOCSIWPRIV */ + NULL, /* SIOCGIWPRIV */ + NULL, /* SIOCSIWSTATS */ + NULL, /* SIOCGIWSTATS */ iw_handler_set_spy, /* SIOCSIWSPY */ iw_handler_get_spy, /* SIOCGIWSPY */ iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ - (iw_handler) airo_set_wap, /* SIOCSIWAP */ - (iw_handler) airo_get_wap, /* SIOCGIWAP */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) airo_get_aplist, /* SIOCGIWAPLIST */ - (iw_handler) airo_set_scan, /* SIOCSIWSCAN */ - (iw_handler) airo_get_scan, /* SIOCGIWSCAN */ - (iw_handler) airo_set_essid, /* SIOCSIWESSID */ - (iw_handler) airo_get_essid, /* SIOCGIWESSID */ - (iw_handler) airo_set_nick, /* SIOCSIWNICKN */ - (iw_handler) airo_get_nick, /* SIOCGIWNICKN */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) airo_set_rate, /* SIOCSIWRATE */ - (iw_handler) airo_get_rate, /* SIOCGIWRATE */ - (iw_handler) airo_set_rts, /* SIOCSIWRTS */ - (iw_handler) airo_get_rts, /* SIOCGIWRTS */ - (iw_handler) airo_set_frag, /* SIOCSIWFRAG */ - (iw_handler) airo_get_frag, /* SIOCGIWFRAG */ - 
(iw_handler) airo_set_txpow, /* SIOCSIWTXPOW */ - (iw_handler) airo_get_txpow, /* SIOCGIWTXPOW */ - (iw_handler) airo_set_retry, /* SIOCSIWRETRY */ - (iw_handler) airo_get_retry, /* SIOCGIWRETRY */ - (iw_handler) airo_set_encode, /* SIOCSIWENCODE */ - (iw_handler) airo_get_encode, /* SIOCGIWENCODE */ - (iw_handler) airo_set_power, /* SIOCSIWPOWER */ - (iw_handler) airo_get_power, /* SIOCGIWPOWER */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* SIOCSIWGENIE */ - (iw_handler) NULL, /* SIOCGIWGENIE */ - (iw_handler) airo_set_auth, /* SIOCSIWAUTH */ - (iw_handler) airo_get_auth, /* SIOCGIWAUTH */ - (iw_handler) airo_set_encodeext, /* SIOCSIWENCODEEXT */ - (iw_handler) airo_get_encodeext, /* SIOCGIWENCODEEXT */ - (iw_handler) NULL, /* SIOCSIWPMKSA */ + airo_set_wap, /* SIOCSIWAP */ + airo_get_wap, /* SIOCGIWAP */ + NULL, /* -- hole -- */ + airo_get_aplist, /* SIOCGIWAPLIST */ + airo_set_scan, /* SIOCSIWSCAN */ + airo_get_scan, /* SIOCGIWSCAN */ + airo_set_essid, /* SIOCSIWESSID */ + airo_get_essid, /* SIOCGIWESSID */ + airo_set_nick, /* SIOCSIWNICKN */ + airo_get_nick, /* SIOCGIWNICKN */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + airo_set_rate, /* SIOCSIWRATE */ + airo_get_rate, /* SIOCGIWRATE */ + airo_set_rts, /* SIOCSIWRTS */ + airo_get_rts, /* SIOCGIWRTS */ + airo_set_frag, /* SIOCSIWFRAG */ + airo_get_frag, /* SIOCGIWFRAG */ + airo_set_txpow, /* SIOCSIWTXPOW */ + airo_get_txpow, /* SIOCGIWTXPOW */ + airo_set_retry, /* SIOCSIWRETRY */ + airo_get_retry, /* SIOCGIWRETRY */ + airo_set_encode, /* SIOCSIWENCODE */ + airo_get_encode, /* SIOCGIWENCODE */ + airo_set_power, /* SIOCSIWPOWER */ + airo_get_power, /* SIOCGIWPOWER */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + NULL, /* SIOCSIWGENIE */ + NULL, /* SIOCGIWGENIE */ + airo_set_auth, /* SIOCSIWAUTH */ + airo_get_auth, /* SIOCGIWAUTH */ + airo_set_encodeext, /* SIOCSIWENCODEEXT */ + airo_get_encodeext, /* SIOCGIWENCODEEXT */ + NULL, /* SIOCSIWPMKSA */ }; /* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here. 
@@ -7845,7 +7876,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { struct airo_info *ai = dev->ml_priv; int ridcode; int enabled; - static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); + int (* writer)(struct airo_info *, u16 rid, const void *, int, int); unsigned char *iobuf; /* Only super-user can write RIDs */ diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 02a299a89..5afdac405 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -3220,8 +3220,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) } } -static void ipw2100_irq_tasklet(struct ipw2100_priv *priv) +static void ipw2100_irq_tasklet(unsigned long _priv) { + struct ipw2100_priv *priv = (struct ipw2100_priv *)_priv; struct net_device *dev = priv->net_dev; unsigned long flags; u32 inta, tmp; @@ -6029,7 +6030,7 @@ static void ipw2100_rf_kill(struct work_struct *work) spin_unlock_irqrestore(&priv->low_lock, flags); } -static void ipw2100_irq_tasklet(struct ipw2100_priv *priv); +static void ipw2100_irq_tasklet(unsigned long _priv); static const struct net_device_ops ipw2100_netdev_ops = { .ndo_open = ipw2100_open, @@ -6158,8 +6159,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event); - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) - ipw2100_irq_tasklet, (unsigned long)priv); + tasklet_init(&priv->irq_tasklet, ipw2100_irq_tasklet, (unsigned long)priv); /* NOTE: We do not start the deferred work for status checks yet */ priv->stop_rf_kill = 1; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 85d91de1e..f709be4f8 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -1968,8 +1968,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv) wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); } -static void ipw_irq_tasklet(struct ipw_priv *priv) +static void ipw_irq_tasklet(unsigned long _priv) { + struct ipw_priv *priv = (struct ipw_priv *)_priv; u32 inta, inta_mask, handled = 0; unsigned long flags; int rc = 0; @@ -10705,8 +10706,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); #endif /* CONFIG_IPW2200_QOS */ - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) - ipw_irq_tasklet, (unsigned long)priv); + tasklet_init(&priv->irq_tasklet, ipw_irq_tasklet, (unsigned long)priv); return ret; } diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 260d6a3a4..91b948537 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -1399,8 +1399,9 @@ il3945_dump_nic_error_log(struct il_priv *il) } static void -il3945_irq_tasklet(struct il_priv *il) +il3945_irq_tasklet(unsigned long _il) { + struct il_priv *il = (struct il_priv *)_il; u32 inta, handled = 0; u32 inta_fh; unsigned long flags; @@ -3432,7 +3433,7 @@ il3945_setup_deferred_work(struct il_priv *il) setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il); tasklet_init(&il->irq_tasklet, - (void (*)(unsigned long))il3945_irq_tasklet, + il3945_irq_tasklet, (unsigned long)il); } @@ -3469,7 +3470,7 @@ static struct attribute_group il3945_attribute_group = 
{ .attrs = il3945_sysfs_entries, }; -static struct ieee80211_ops il3945_mac_ops __read_mostly = { +static struct ieee80211_ops il3945_mac_ops = { .tx = il3945_mac_tx, .start = il3945_mac_start, .stop = il3945_mac_stop, @@ -3633,7 +3634,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (il3945_mod_params.disable_hw_scan) { D_INFO("Disabling hw_scan\n"); - il3945_mac_ops.hw_scan = NULL; + pax_open_kernel(); + const_cast(il3945_mac_ops.hw_scan) = NULL; + pax_close_kernel(); } D_INFO("*** LOAD DRIVER ***\n"); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index a27559854..ca3e41c20 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4361,8 +4361,9 @@ il4965_synchronize_irq(struct il_priv *il) } static void -il4965_irq_tasklet(struct il_priv *il) +il4965_irq_tasklet(unsigned long _il) { + struct il_priv *il = (struct il_priv *)_il; u32 inta, handled = 0; u32 inta_fh; unsigned long flags; @@ -6259,9 +6260,7 @@ il4965_setup_deferred_work(struct il_priv *il) setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il); - tasklet_init(&il->irq_tasklet, - (void (*)(unsigned long))il4965_irq_tasklet, - (unsigned long)il); + tasklet_init(&il->irq_tasklet, il4965_irq_tasklet, (unsigned long)il); } static void diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index affe760c8..9f6cbdd57 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -190,7 +190,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file, { struct iwl_priv *priv = file->private_data; char buf[64]; - int buf_size; + size_t buf_size; u32 offset, len; memset(buf, 0, sizeof(buf)); @@ -456,7 +456,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file, struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; u32 reset_flag; memset(buf, 0, sizeof(buf)); @@ -537,7 +537,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, { struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; int ht40; memset(buf, 0, sizeof(buf)); @@ -589,7 +589,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file, { struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; int value; memset(buf, 0, sizeof(buf)); @@ -681,10 +681,10 @@ DEBUGFS_READ_FILE_OPS(temperature); DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); DEBUGFS_READ_FILE_OPS(current_sleep_command); -static const char *fmt_value = " %-30s %10u\n"; -static const char *fmt_hex = " %-30s 0x%02X\n"; -static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; -static const char *fmt_header = +static const char fmt_value[] = " %-30s %10u\n"; +static const char fmt_hex[] = " %-30s 0x%02X\n"; +static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n"; +static const char fmt_header[] = "%-32s current cumulative delta max\n"; static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) @@ -1854,7 +1854,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file, { struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; int clear; memset(buf, 0, sizeof(buf)); @@ -1899,7 +1899,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file, { struct iwl_priv *priv = file->private_data; char buf[8]; - int 
buf_size; + size_t buf_size; int trace; memset(buf, 0, sizeof(buf)); @@ -1970,7 +1970,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file, { struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; int missed; memset(buf, 0, sizeof(buf)); @@ -2011,7 +2011,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; int plcp; memset(buf, 0, sizeof(buf)); @@ -2071,7 +2071,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file, struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; int flush; memset(buf, 0, sizeof(buf)); @@ -2161,7 +2161,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; int rts; if (!priv->cfg->ht_params) @@ -2202,7 +2202,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file, { struct iwl_priv *priv = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); @@ -2236,7 +2236,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file, struct iwl_priv *priv = file->private_data; u32 event_log_flag; char buf[8]; - int buf_size; + size_t buf_size; /* check that the interface is up */ if (!iwl_is_ready(priv)) @@ -2290,7 +2290,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file, struct iwl_priv *priv = file->private_data; char buf[8]; u32 calib_disabled; - int buf_size; + size_t buf_size; memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 6c2d6da7e..4660f3926 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -933,7 +933,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, rx_p1ks = data->tkip->rx_uni; - pn64 = atomic64_read(&key->tx_pn); + pn64 = atomic64_read_unchecked(&key->tx_pn); tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); @@ -986,7 +986,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - pn64 = atomic64_read(&key->tx_pn); + pn64 = atomic64_read_unchecked(&key->tx_pn); aes_tx_sc->pn = cpu_to_le64(pn64); } else aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index b88e2048a..9d505c77d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -258,7 +258,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, rx_p1ks = data->tkip->rx_uni; - pn64 = atomic64_read(&key->tx_pn); + pn64 = atomic64_read_unchecked(&key->tx_pn); tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); @@ -313,7 +313,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - pn64 = atomic64_read(&key->tx_pn); + pn64 = atomic64_read_unchecked(&key->tx_pn); aes_tx_sc->pn = cpu_to_le64(pn64); } else { aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; @@ -1622,12 
+1622,12 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_CCMP: iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, sta, key); - atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); + atomic64_set_unchecked(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); - atomic64_set(&key->tx_pn, + atomic64_set_unchecked(&key->tx_pn, (u64)seq.tkip.iv16 | ((u64)seq.tkip.iv32 << 16)); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 66957ac12..23446ed00 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -385,7 +385,7 @@ static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, struct ieee80211_key_conf *keyconf = info->control.hw_key; u64 pn; - pn = atomic64_inc_return(&keyconf->tx_pn); + pn = atomic64_inc_return_unchecked(&keyconf->tx_pn); crypto_hdr[0] = pn; crypto_hdr[2] = 0; crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); @@ -418,7 +418,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; - pn = atomic64_inc_return(&keyconf->tx_pn); + pn = atomic64_inc_return_unchecked(&keyconf->tx_pn); ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn); ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); break; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index ae95533e5..ace0cd32e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2420,7 +2420,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file, struct isr_statistics *isr_stats = &trans_pcie->isr_stats; char buf[8]; - int buf_size; + size_t buf_size; u32 reset_flag; memset(buf, 0, sizeof(buf)); @@ -2441,7 +2441,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file, { struct iwl_trans *trans = file->private_data; char buf[8]; - int buf_size; + size_t buf_size; int csr; memset(buf, 0, sizeof(buf)); diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index a5656bc0e..7401f47d0 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -101,8 +101,9 @@ static int prism2_get_datarates(struct net_device *dev, u8 *rates) static int prism2_get_name(struct net_device *dev, struct iw_request_info *info, - char *name, char *extra) + union iwreq_data *wrqu, char *extra) { + char *name = wrqu->name; u8 rates[10]; int len, i, over2 = 0; @@ -123,8 +124,9 @@ static int prism2_get_name(struct net_device *dev, static int prism2_ioctl_siwencode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *erq, char *keybuf) + union iwreq_data *wrqu, char *keybuf) { + struct iw_point *erq = &wrqu->encoding; struct hostap_interface *iface; local_info_t *local; int i; @@ -225,8 +227,9 @@ static int prism2_ioctl_siwencode(struct net_device *dev, static int prism2_ioctl_giwencode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *erq, char *key) + union iwreq_data *wrqu, char *key) { + struct iw_point *erq = &wrqu->encoding; struct hostap_interface *iface; local_info_t *local; int i, len; @@ -331,8 +334,9 @@ static int hostap_set_rate(struct net_device *dev) static int prism2_ioctl_siwrate(struct 
net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; struct hostap_interface *iface; local_info_t *local; @@ -391,8 +395,9 @@ static int prism2_ioctl_siwrate(struct net_device *dev, static int prism2_ioctl_giwrate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; u16 val; struct hostap_interface *iface; local_info_t *local; @@ -450,8 +455,9 @@ static int prism2_ioctl_giwrate(struct net_device *dev, static int prism2_ioctl_siwsens(struct net_device *dev, struct iw_request_info *info, - struct iw_param *sens, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *sens = &wrqu->sens; struct hostap_interface *iface; local_info_t *local; @@ -471,8 +477,9 @@ static int prism2_ioctl_siwsens(struct net_device *dev, static int prism2_ioctl_giwsens(struct net_device *dev, struct iw_request_info *info, - struct iw_param *sens, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *sens = &wrqu->sens; struct hostap_interface *iface; local_info_t *local; __le16 val; @@ -495,8 +502,9 @@ static int prism2_ioctl_giwsens(struct net_device *dev, /* Deprecated in new wireless extension API */ static int prism2_ioctl_giwaplist(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; struct sockaddr *addr; @@ -536,8 +544,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev, static int prism2_ioctl_siwrts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rts, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rts = &wrqu->rts; struct hostap_interface *iface; local_info_t *local; __le16 val; @@ -563,8 +572,9 @@ static int prism2_ioctl_siwrts(struct net_device *dev, static int prism2_ioctl_giwrts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rts, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rts = &wrqu->rts; struct hostap_interface *iface; local_info_t *local; __le16 val; @@ -586,8 +596,9 @@ static int prism2_ioctl_giwrts(struct net_device *dev, static int prism2_ioctl_siwfrag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rts, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rts = &wrqu->rts; struct hostap_interface *iface; local_info_t *local; __le16 val; @@ -613,8 +624,9 @@ static int prism2_ioctl_siwfrag(struct net_device *dev, static int prism2_ioctl_giwfrag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rts, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rts = &wrqu->rts; struct hostap_interface *iface; local_info_t *local; __le16 val; @@ -679,11 +691,12 @@ static int hostap_join_ap(struct net_device *dev) static int prism2_ioctl_siwap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *ap_addr, char *extra) + union iwreq_data *wrqu, char *extra) { #ifdef PRISM2_NO_STATION_MODES return -EOPNOTSUPP; #else /* PRISM2_NO_STATION_MODES */ + struct sockaddr *ap_addr = &wrqu->ap_addr; struct hostap_interface *iface; local_info_t *local; @@ -719,8 +732,9 @@ static int prism2_ioctl_siwap(struct net_device *dev, static int prism2_ioctl_giwap(struct 
net_device *dev, struct iw_request_info *info, - struct sockaddr *ap_addr, char *extra) + union iwreq_data *wrqu, char *extra) { + struct sockaddr *ap_addr = &wrqu->ap_addr; struct hostap_interface *iface; local_info_t *local; @@ -755,8 +769,9 @@ static int prism2_ioctl_giwap(struct net_device *dev, static int prism2_ioctl_siwnickn(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *nickname) + union iwreq_data *wrqu, char *nickname) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; @@ -776,8 +791,9 @@ static int prism2_ioctl_siwnickn(struct net_device *dev, static int prism2_ioctl_giwnickn(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *nickname) + union iwreq_data *wrqu, char *nickname) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; int len; @@ -803,8 +819,9 @@ static int prism2_ioctl_giwnickn(struct net_device *dev, static int prism2_ioctl_siwfreq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *freq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_freq *freq = &wrqu->freq; struct hostap_interface *iface; local_info_t *local; @@ -840,8 +857,9 @@ static int prism2_ioctl_siwfreq(struct net_device *dev, static int prism2_ioctl_giwfreq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *freq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_freq *freq = &wrqu->freq; struct hostap_interface *iface; local_info_t *local; u16 val; @@ -884,8 +902,9 @@ static void hostap_monitor_set_type(local_info_t *local) static int prism2_ioctl_siwessid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *ssid) + union iwreq_data *wrqu, char *ssid) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; @@ -920,8 +939,9 @@ static int prism2_ioctl_siwessid(struct net_device *dev, static int prism2_ioctl_giwessid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *essid) + union iwreq_data *wrqu, char *essid) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; u16 val; @@ -956,8 +976,9 @@ static int prism2_ioctl_giwessid(struct net_device *dev, static int prism2_ioctl_giwrange(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; struct iw_range *range = (struct iw_range *) extra; @@ -1131,8 +1152,9 @@ static int hostap_monitor_mode_disable(local_info_t *local) static int prism2_ioctl_siwmode(struct net_device *dev, struct iw_request_info *info, - __u32 *mode, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 *mode = &wrqu->mode; struct hostap_interface *iface; local_info_t *local; int double_reset = 0; @@ -1207,8 +1229,9 @@ static int prism2_ioctl_siwmode(struct net_device *dev, static int prism2_ioctl_giwmode(struct net_device *dev, struct iw_request_info *info, - __u32 *mode, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 *mode = &wrqu->mode; struct hostap_interface *iface; local_info_t *local; @@ -1232,11 +1255,12 @@ static int prism2_ioctl_giwmode(struct net_device *dev, static int prism2_ioctl_siwpower(struct net_device *dev, struct iw_request_info *info, - struct iw_param *wrq, char *extra) + union iwreq_data *wrqu, char *extra) { 
#ifdef PRISM2_NO_STATION_MODES return -EOPNOTSUPP; #else /* PRISM2_NO_STATION_MODES */ + struct iw_param *wrq = &wrqu->power; int ret = 0; if (wrq->disabled) @@ -1291,11 +1315,12 @@ static int prism2_ioctl_siwpower(struct net_device *dev, static int prism2_ioctl_giwpower(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, char *extra) + union iwreq_data *wrqu, char *extra) { #ifdef PRISM2_NO_STATION_MODES return -EOPNOTSUPP; #else /* PRISM2_NO_STATION_MODES */ + struct iw_param *rrq = &wrqu->power; struct hostap_interface *iface; local_info_t *local; __le16 enable, mcast; @@ -1349,8 +1374,9 @@ static int prism2_ioctl_giwpower(struct net_device *dev, static int prism2_ioctl_siwretry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; struct hostap_interface *iface; local_info_t *local; @@ -1410,8 +1436,9 @@ static int prism2_ioctl_siwretry(struct net_device *dev, static int prism2_ioctl_giwretry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; struct hostap_interface *iface; local_info_t *local; __le16 shortretry, longretry, lifetime, altretry; @@ -1504,8 +1531,9 @@ static u16 prism2_txpower_dBm_to_hfa386x(int val) static int prism2_ioctl_siwtxpow(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; struct hostap_interface *iface; local_info_t *local; #ifdef RAW_TXPOWER_SETTING @@ -1585,9 +1613,10 @@ static int prism2_ioctl_siwtxpow(struct net_device *dev, static int prism2_ioctl_giwtxpow(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, char *extra) + union iwreq_data *wrqu, char *extra) { #ifdef RAW_TXPOWER_SETTING + struct iw_param *rrq = &wrqu->bitrate; struct hostap_interface *iface; local_info_t *local; u16 resp0; @@ -1720,8 +1749,9 @@ static inline int prism2_request_scan(struct net_device *dev) static int prism2_ioctl_siwscan(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; int ret; @@ -2068,8 +2098,9 @@ static inline int prism2_ioctl_giwscan_sta(struct net_device *dev, static int prism2_ioctl_giwscan(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; int res; @@ -2314,7 +2345,7 @@ static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i) static int prism2_ioctl_priv_prism2_param(struct net_device *dev, struct iw_request_info *info, - void *wrqu, char *extra) + union iwreq_data *wrqu, char *extra) { struct hostap_interface *iface; local_info_t *local; @@ -2665,7 +2696,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev, static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev, struct iw_request_info *info, - void *wrqu, char *extra) + union iwreq_data *wrqu, char *extra) { struct hostap_interface *iface; local_info_t *local; @@ -2852,7 +2883,7 @@ static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev, static int prism2_ioctl_priv_readmif(struct net_device *dev, struct 
iw_request_info *info, - void *wrqu, char *extra) + union iwreq_data *wrqu, char *extra) { struct hostap_interface *iface; local_info_t *local; @@ -2873,7 +2904,7 @@ static int prism2_ioctl_priv_readmif(struct net_device *dev, static int prism2_ioctl_priv_writemif(struct net_device *dev, struct iw_request_info *info, - void *wrqu, char *extra) + union iwreq_data *wrqu, char *extra) { struct hostap_interface *iface; local_info_t *local; @@ -2911,7 +2942,7 @@ static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i) /* Disable monitor mode - old mode was not saved, so go to * Master mode */ mode = IW_MODE_MASTER; - ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL); + ret = prism2_ioctl_siwmode(dev, NULL, (union iwreq_data *)&mode, NULL); } else if (*i == 1) { /* netlink socket mode is not supported anymore since it did * not separate different devices from each other and was not @@ -2928,7 +2959,7 @@ static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i) break; } mode = IW_MODE_MONITOR; - ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL); + ret = prism2_ioctl_siwmode(dev, NULL, (union iwreq_data *)&mode, NULL); hostap_monitor_mode_enable(local); } else ret = -EINVAL; @@ -3090,8 +3121,9 @@ static int prism2_set_genericelement(struct net_device *dev, u8 *elem, static int prism2_ioctl_siwauth(struct net_device *dev, struct iw_request_info *info, - struct iw_param *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *data = &wrqu->param; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; @@ -3156,8 +3188,9 @@ static int prism2_ioctl_siwauth(struct net_device *dev, static int prism2_ioctl_giwauth(struct net_device *dev, struct iw_request_info *info, - struct iw_param *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *data = &wrqu->param; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; @@ -3195,8 +3228,9 @@ static int prism2_ioctl_giwauth(struct net_device *dev, static int prism2_ioctl_siwencodeext(struct net_device *dev, struct iw_request_info *info, - struct iw_point *erq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *erq = &wrqu->encoding; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; @@ -3369,8 +3403,9 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev, static int prism2_ioctl_giwencodeext(struct net_device *dev, struct iw_request_info *info, - struct iw_point *erq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *erq = &wrqu->encoding; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct lib80211_crypt_data **crypt; @@ -3677,16 +3712,19 @@ static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local, static int prism2_ioctl_siwgenie(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; + return prism2_set_genericelement(dev, extra, data->length); } static int prism2_ioctl_giwgenie(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; int len = local->generic_elem_len - 2; @@ -3724,7 +3762,7 @@ static int 
prism2_ioctl_set_generic_element(local_info_t *local, static int prism2_ioctl_siwmlme(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *data, char *extra) { struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; @@ -3875,70 +3913,70 @@ const struct ethtool_ops prism2_ethtool_ops = { static const iw_handler prism2_handler[] = { - (iw_handler) NULL, /* SIOCSIWCOMMIT */ - (iw_handler) prism2_get_name, /* SIOCGIWNAME */ - (iw_handler) NULL, /* SIOCSIWNWID */ - (iw_handler) NULL, /* SIOCGIWNWID */ - (iw_handler) prism2_ioctl_siwfreq, /* SIOCSIWFREQ */ - (iw_handler) prism2_ioctl_giwfreq, /* SIOCGIWFREQ */ - (iw_handler) prism2_ioctl_siwmode, /* SIOCSIWMODE */ - (iw_handler) prism2_ioctl_giwmode, /* SIOCGIWMODE */ - (iw_handler) prism2_ioctl_siwsens, /* SIOCSIWSENS */ - (iw_handler) prism2_ioctl_giwsens, /* SIOCGIWSENS */ - (iw_handler) NULL /* not used */, /* SIOCSIWRANGE */ - (iw_handler) prism2_ioctl_giwrange, /* SIOCGIWRANGE */ - (iw_handler) NULL /* not used */, /* SIOCSIWPRIV */ - (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */ - (iw_handler) NULL /* not used */, /* SIOCSIWSTATS */ - (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */ - iw_handler_set_spy, /* SIOCSIWSPY */ - iw_handler_get_spy, /* SIOCGIWSPY */ - iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ - iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ - (iw_handler) prism2_ioctl_siwap, /* SIOCSIWAP */ - (iw_handler) prism2_ioctl_giwap, /* SIOCGIWAP */ - (iw_handler) prism2_ioctl_siwmlme, /* SIOCSIWMLME */ - (iw_handler) prism2_ioctl_giwaplist, /* SIOCGIWAPLIST */ - (iw_handler) prism2_ioctl_siwscan, /* SIOCSIWSCAN */ - (iw_handler) prism2_ioctl_giwscan, /* SIOCGIWSCAN */ - (iw_handler) prism2_ioctl_siwessid, /* SIOCSIWESSID */ - (iw_handler) prism2_ioctl_giwessid, /* SIOCGIWESSID */ - (iw_handler) prism2_ioctl_siwnickn, /* SIOCSIWNICKN */ - (iw_handler) prism2_ioctl_giwnickn, /* SIOCGIWNICKN */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) prism2_ioctl_siwrate, /* SIOCSIWRATE */ - (iw_handler) prism2_ioctl_giwrate, /* SIOCGIWRATE */ - (iw_handler) prism2_ioctl_siwrts, /* SIOCSIWRTS */ - (iw_handler) prism2_ioctl_giwrts, /* SIOCGIWRTS */ - (iw_handler) prism2_ioctl_siwfrag, /* SIOCSIWFRAG */ - (iw_handler) prism2_ioctl_giwfrag, /* SIOCGIWFRAG */ - (iw_handler) prism2_ioctl_siwtxpow, /* SIOCSIWTXPOW */ - (iw_handler) prism2_ioctl_giwtxpow, /* SIOCGIWTXPOW */ - (iw_handler) prism2_ioctl_siwretry, /* SIOCSIWRETRY */ - (iw_handler) prism2_ioctl_giwretry, /* SIOCGIWRETRY */ - (iw_handler) prism2_ioctl_siwencode, /* SIOCSIWENCODE */ - (iw_handler) prism2_ioctl_giwencode, /* SIOCGIWENCODE */ - (iw_handler) prism2_ioctl_siwpower, /* SIOCSIWPOWER */ - (iw_handler) prism2_ioctl_giwpower, /* SIOCGIWPOWER */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) prism2_ioctl_siwgenie, /* SIOCSIWGENIE */ - (iw_handler) prism2_ioctl_giwgenie, /* SIOCGIWGENIE */ - (iw_handler) prism2_ioctl_siwauth, /* SIOCSIWAUTH */ - (iw_handler) prism2_ioctl_giwauth, /* SIOCGIWAUTH */ - (iw_handler) prism2_ioctl_siwencodeext, /* SIOCSIWENCODEEXT */ - (iw_handler) prism2_ioctl_giwencodeext, /* SIOCGIWENCODEEXT */ - (iw_handler) NULL, /* SIOCSIWPMKSA */ - (iw_handler) NULL, /* -- hole -- */ + NULL, /* SIOCSIWCOMMIT */ + prism2_get_name, /* SIOCGIWNAME */ + NULL, /* SIOCSIWNWID */ + NULL, /* SIOCGIWNWID */ + prism2_ioctl_siwfreq, /* SIOCSIWFREQ */ + prism2_ioctl_giwfreq, /* SIOCGIWFREQ */ + 
prism2_ioctl_siwmode, /* SIOCSIWMODE */ + prism2_ioctl_giwmode, /* SIOCGIWMODE */ + prism2_ioctl_siwsens, /* SIOCSIWSENS */ + prism2_ioctl_giwsens, /* SIOCGIWSENS */ + NULL /* not used */, /* SIOCSIWRANGE */ + prism2_ioctl_giwrange, /* SIOCGIWRANGE */ + NULL /* not used */, /* SIOCSIWPRIV */ + NULL /* kernel code */, /* SIOCGIWPRIV */ + NULL /* not used */, /* SIOCSIWSTATS */ + NULL /* kernel code */, /* SIOCGIWSTATS */ + iw_handler_set_spy, /* SIOCSIWSPY */ + iw_handler_get_spy, /* SIOCGIWSPY */ + iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ + iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ + prism2_ioctl_siwap, /* SIOCSIWAP */ + prism2_ioctl_giwap, /* SIOCGIWAP */ + prism2_ioctl_siwmlme, /* SIOCSIWMLME */ + prism2_ioctl_giwaplist, /* SIOCGIWAPLIST */ + prism2_ioctl_siwscan, /* SIOCSIWSCAN */ + prism2_ioctl_giwscan, /* SIOCGIWSCAN */ + prism2_ioctl_siwessid, /* SIOCSIWESSID */ + prism2_ioctl_giwessid, /* SIOCGIWESSID */ + prism2_ioctl_siwnickn, /* SIOCSIWNICKN */ + prism2_ioctl_giwnickn, /* SIOCGIWNICKN */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + prism2_ioctl_siwrate, /* SIOCSIWRATE */ + prism2_ioctl_giwrate, /* SIOCGIWRATE */ + prism2_ioctl_siwrts, /* SIOCSIWRTS */ + prism2_ioctl_giwrts, /* SIOCGIWRTS */ + prism2_ioctl_siwfrag, /* SIOCSIWFRAG */ + prism2_ioctl_giwfrag, /* SIOCGIWFRAG */ + prism2_ioctl_siwtxpow, /* SIOCSIWTXPOW */ + prism2_ioctl_giwtxpow, /* SIOCGIWTXPOW */ + prism2_ioctl_siwretry, /* SIOCSIWRETRY */ + prism2_ioctl_giwretry, /* SIOCGIWRETRY */ + prism2_ioctl_siwencode, /* SIOCSIWENCODE */ + prism2_ioctl_giwencode, /* SIOCGIWENCODE */ + prism2_ioctl_siwpower, /* SIOCSIWPOWER */ + prism2_ioctl_giwpower, /* SIOCGIWPOWER */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + prism2_ioctl_siwgenie, /* SIOCSIWGENIE */ + prism2_ioctl_giwgenie, /* SIOCGIWGENIE */ + prism2_ioctl_siwauth, /* SIOCSIWAUTH */ + prism2_ioctl_giwauth, /* SIOCGIWAUTH */ + prism2_ioctl_siwencodeext, /* SIOCSIWENCODEEXT */ + prism2_ioctl_giwencodeext, /* SIOCGIWENCODEEXT */ + NULL, /* SIOCSIWPMKSA */ + NULL, /* -- hole -- */ }; static const iw_handler prism2_private_handler[] = { /* SIOCIWFIRSTPRIV + */ - (iw_handler) prism2_ioctl_priv_prism2_param, /* 0 */ - (iw_handler) prism2_ioctl_priv_get_prism2_param, /* 1 */ - (iw_handler) prism2_ioctl_priv_writemif, /* 2 */ - (iw_handler) prism2_ioctl_priv_readmif, /* 3 */ + prism2_ioctl_priv_prism2_param, /* 0 */ + prism2_ioctl_priv_get_prism2_param, /* 1 */ + prism2_ioctl_priv_writemif, /* 2 */ + prism2_ioctl_priv_readmif, /* 3 */ }; const struct iw_handler_def hostap_iw_handler_def = @@ -3946,8 +3984,8 @@ const struct iw_handler_def hostap_iw_handler_def = .num_standard = ARRAY_SIZE(prism2_handler), .num_private = ARRAY_SIZE(prism2_private_handler), .num_private_args = ARRAY_SIZE(prism2_priv), - .standard = (iw_handler *) prism2_handler, - .private = (iw_handler *) prism2_private_handler, + .standard = prism2_handler, + .private = prism2_private_handler, .private_args = (struct iw_priv_args *) prism2_priv, .get_wireless_stats = hostap_get_wireless_stats, }; diff --git a/drivers/net/wireless/intersil/orinoco/wext.c b/drivers/net/wireless/intersil/orinoco/wext.c index 1d4dae422..0508fc162 100644 --- a/drivers/net/wireless/intersil/orinoco/wext.c +++ b/drivers/net/wireless/intersil/orinoco/wext.c @@ -154,9 +154,10 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev) static int orinoco_ioctl_setwap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *ap_addr, + union iwreq_data *wrqu, char *extra) { + struct 
sockaddr *ap_addr = &wrqu->ap_addr; struct orinoco_private *priv = ndev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; @@ -213,9 +214,10 @@ static int orinoco_ioctl_setwap(struct net_device *dev, static int orinoco_ioctl_getwap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *ap_addr, + union iwreq_data *wrqu, char *extra) { + struct sockaddr *ap_addr = &wrqu->ap_addr; struct orinoco_private *priv = ndev_priv(dev); int err = 0; @@ -234,9 +236,10 @@ static int orinoco_ioctl_getwap(struct net_device *dev, static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *erq, + union iwreq_data *wrqu, char *keybuf) { + struct iw_point *erq = &wrqu->encoding; struct orinoco_private *priv = ndev_priv(dev); int index = (erq->flags & IW_ENCODE_INDEX) - 1; int setindex = priv->tx_key; @@ -325,9 +328,10 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, static int orinoco_ioctl_getiwencode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *erq, + union iwreq_data *wrqu, char *keybuf) { + struct iw_point *erq = &wrqu->encoding; struct orinoco_private *priv = ndev_priv(dev); int index = (erq->flags & IW_ENCODE_INDEX) - 1; unsigned long flags; @@ -361,9 +365,10 @@ static int orinoco_ioctl_getiwencode(struct net_device *dev, static int orinoco_ioctl_setessid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *erq, + union iwreq_data *wrqu, char *essidbuf) { + struct iw_point *erq = &wrqu->essid; struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; @@ -392,9 +397,10 @@ static int orinoco_ioctl_setessid(struct net_device *dev, static int orinoco_ioctl_getessid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *erq, + union iwreq_data *wrqu, char *essidbuf) { + struct iw_point *erq = &wrqu->essid; struct orinoco_private *priv = ndev_priv(dev); int active; int err = 0; @@ -420,9 +426,10 @@ static int orinoco_ioctl_getessid(struct net_device *dev, static int orinoco_ioctl_setfreq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *frq, + union iwreq_data *wrqu, char *extra) { + struct iw_freq *frq = &wrqu->freq; struct orinoco_private *priv = ndev_priv(dev); int chan = -1; unsigned long flags; @@ -469,9 +476,10 @@ static int orinoco_ioctl_setfreq(struct net_device *dev, static int orinoco_ioctl_getfreq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *frq, + union iwreq_data *wrqu, char *extra) { + struct iw_freq *frq = &wrqu->freq; struct orinoco_private *priv = ndev_priv(dev); int tmp; @@ -488,9 +496,10 @@ static int orinoco_ioctl_getfreq(struct net_device *dev, static int orinoco_ioctl_getsens(struct net_device *dev, struct iw_request_info *info, - struct iw_param *srq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *srq = &wrqu->sens; struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; u16 val; @@ -517,9 +526,10 @@ static int orinoco_ioctl_getsens(struct net_device *dev, static int orinoco_ioctl_setsens(struct net_device *dev, struct iw_request_info *info, - struct iw_param *srq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *srq = &wrqu->sens; struct orinoco_private *priv = ndev_priv(dev); int val = srq->value; unsigned long flags; @@ -540,9 +550,10 @@ static int orinoco_ioctl_setsens(struct net_device *dev, static int orinoco_ioctl_setrate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, + 
union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; struct orinoco_private *priv = ndev_priv(dev); int ratemode; int bitrate; /* 100s of kilobits */ @@ -574,9 +585,10 @@ static int orinoco_ioctl_setrate(struct net_device *dev, static int orinoco_ioctl_getrate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rrq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; struct orinoco_private *priv = ndev_priv(dev); int err = 0; int bitrate, automatic; @@ -610,9 +622,10 @@ static int orinoco_ioctl_getrate(struct net_device *dev, static int orinoco_ioctl_setpower(struct net_device *dev, struct iw_request_info *info, - struct iw_param *prq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *prq = &wrqu->power; struct orinoco_private *priv = ndev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; @@ -664,9 +677,10 @@ static int orinoco_ioctl_setpower(struct net_device *dev, static int orinoco_ioctl_getpower(struct net_device *dev, struct iw_request_info *info, - struct iw_param *prq, + union iwreq_data *wrqu, char *extra) { + struct iw_param *prq = &wrqu->power; struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; int err = 0; @@ -1097,7 +1111,7 @@ static int orinoco_ioctl_set_mlme(struct net_device *dev, static int orinoco_ioctl_reset(struct net_device *dev, struct iw_request_info *info, - void *wrqu, + union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); @@ -1121,7 +1135,7 @@ static int orinoco_ioctl_reset(struct net_device *dev, static int orinoco_ioctl_setibssport(struct net_device *dev, struct iw_request_info *info, - void *wrqu, + union iwreq_data *wrqu, char *extra) { @@ -1143,7 +1157,7 @@ static int orinoco_ioctl_setibssport(struct net_device *dev, static int orinoco_ioctl_getibssport(struct net_device *dev, struct iw_request_info *info, - void *wrqu, + union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); @@ -1155,7 +1169,7 @@ static int orinoco_ioctl_getibssport(struct net_device *dev, static int orinoco_ioctl_setport3(struct net_device *dev, struct iw_request_info *info, - void *wrqu, + union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); @@ -1201,7 +1215,7 @@ static int orinoco_ioctl_setport3(struct net_device *dev, static int orinoco_ioctl_getport3(struct net_device *dev, struct iw_request_info *info, - void *wrqu, + union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); @@ -1213,7 +1227,7 @@ static int orinoco_ioctl_getport3(struct net_device *dev, static int orinoco_ioctl_setpreamble(struct net_device *dev, struct iw_request_info *info, - void *wrqu, + union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); @@ -1245,7 +1259,7 @@ static int orinoco_ioctl_setpreamble(struct net_device *dev, static int orinoco_ioctl_getpreamble(struct net_device *dev, struct iw_request_info *info, - void *wrqu, + union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); @@ -1265,9 +1279,10 @@ static int orinoco_ioctl_getpreamble(struct net_device *dev, * For Wireless Tools 25 and 26 append "dummy" are the end. 
*/ static int orinoco_ioctl_getrid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; int rid = data->flags; @@ -1303,7 +1318,7 @@ static int orinoco_ioctl_getrid(struct net_device *dev, /* Commit handler, called after set operations */ static int orinoco_ioctl_commit(struct net_device *dev, struct iw_request_info *info, - void *wrqu, + union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); @@ -1347,36 +1362,36 @@ static const struct iw_priv_args orinoco_privtab[] = { */ static const iw_handler orinoco_handler[] = { - IW_HANDLER(SIOCSIWCOMMIT, (iw_handler)orinoco_ioctl_commit), - IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname), - IW_HANDLER(SIOCSIWFREQ, (iw_handler)orinoco_ioctl_setfreq), - IW_HANDLER(SIOCGIWFREQ, (iw_handler)orinoco_ioctl_getfreq), - IW_HANDLER(SIOCSIWMODE, (iw_handler)cfg80211_wext_siwmode), - IW_HANDLER(SIOCGIWMODE, (iw_handler)cfg80211_wext_giwmode), - IW_HANDLER(SIOCSIWSENS, (iw_handler)orinoco_ioctl_setsens), - IW_HANDLER(SIOCGIWSENS, (iw_handler)orinoco_ioctl_getsens), - IW_HANDLER(SIOCGIWRANGE, (iw_handler)cfg80211_wext_giwrange), + IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit), + IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname), + IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq), + IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq), + IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode), + IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode), + IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens), + IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens), + IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange), IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy), IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy), IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy), IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy), - IW_HANDLER(SIOCSIWAP, (iw_handler)orinoco_ioctl_setwap), - IW_HANDLER(SIOCGIWAP, (iw_handler)orinoco_ioctl_getwap), - IW_HANDLER(SIOCSIWSCAN, (iw_handler)cfg80211_wext_siwscan), - IW_HANDLER(SIOCGIWSCAN, (iw_handler)cfg80211_wext_giwscan), - IW_HANDLER(SIOCSIWESSID, (iw_handler)orinoco_ioctl_setessid), - IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid), - IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate), - IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate), - IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts), - IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts), - IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag), - IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag), - IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry), - IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode), - IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode), - IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower), - IW_HANDLER(SIOCGIWPOWER, (iw_handler)orinoco_ioctl_getpower), + IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap), + IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap), + IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan), + IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan), + IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid), + IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid), + IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate), + IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate), + IW_HANDLER(SIOCSIWRTS, cfg80211_wext_siwrts), + IW_HANDLER(SIOCGIWRTS, cfg80211_wext_giwrts), + IW_HANDLER(SIOCSIWFRAG, cfg80211_wext_siwfrag), + 
IW_HANDLER(SIOCGIWFRAG, cfg80211_wext_giwfrag), + IW_HANDLER(SIOCGIWRETRY, cfg80211_wext_giwretry), + IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode), + IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode), + IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower), + IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower), IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie), IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie), IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme), @@ -1391,15 +1406,15 @@ static const iw_handler orinoco_handler[] = { Added typecasting since we no longer use iwreq_data -- Moustafa */ static const iw_handler orinoco_private_handler[] = { - [0] = (iw_handler)orinoco_ioctl_reset, - [1] = (iw_handler)orinoco_ioctl_reset, - [2] = (iw_handler)orinoco_ioctl_setport3, - [3] = (iw_handler)orinoco_ioctl_getport3, - [4] = (iw_handler)orinoco_ioctl_setpreamble, - [5] = (iw_handler)orinoco_ioctl_getpreamble, - [6] = (iw_handler)orinoco_ioctl_setibssport, - [7] = (iw_handler)orinoco_ioctl_getibssport, - [9] = (iw_handler)orinoco_ioctl_getrid, + [0] = orinoco_ioctl_reset, + [1] = orinoco_ioctl_reset, + [2] = orinoco_ioctl_setport3, + [3] = orinoco_ioctl_getport3, + [4] = orinoco_ioctl_setpreamble, + [5] = orinoco_ioctl_getpreamble, + [6] = orinoco_ioctl_setibssport, + [7] = orinoco_ioctl_getibssport, + [9] = orinoco_ioctl_getrid, }; const struct iw_handler_def orinoco_handler_def = { diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c index 48e8a978a..3499ec810 100644 --- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c +++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c @@ -45,7 +45,7 @@ static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, u8 *wpa_ie, size_t wpa_ie_len); static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie); static int prism54_set_wpa(struct net_device *, struct iw_request_info *, - __u32 *, char *); + union iwreq_data *, char *); /* In 500 kbps */ static const unsigned char scan_rate_list[] = { 2, 4, 11, 22, @@ -240,7 +240,7 @@ prism54_get_wireless_stats(struct net_device *ndev) static int prism54_commit(struct net_device *ndev, struct iw_request_info *info, - char *cwrq, char *extra) + union iwreq_data *cwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); @@ -256,8 +256,9 @@ prism54_commit(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_name(struct net_device *ndev, struct iw_request_info *info, - char *cwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + char *cwrq = wrqu->name; islpci_private *priv = netdev_priv(ndev); char *capabilities; union oid_res_t r; @@ -287,8 +288,9 @@ prism54_get_name(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_freq(struct net_device *ndev, struct iw_request_info *info, - struct iw_freq *fwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_freq *fwrq = &wrqu->freq; islpci_private *priv = netdev_priv(ndev); int rvalue; u32 c; @@ -307,8 +309,9 @@ prism54_set_freq(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_freq(struct net_device *ndev, struct iw_request_info *info, - struct iw_freq *fwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_freq *fwrq = &wrqu->freq; islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; @@ -324,8 +327,9 @@ prism54_get_freq(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_mode(struct net_device *ndev, struct 
iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 *uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); u32 mlmeautolevel = CARD_DEFAULT_MLME_MODE; @@ -368,8 +372,9 @@ prism54_set_mode(struct net_device *ndev, struct iw_request_info *info, /* Use mib cache */ static int prism54_get_mode(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 *uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); BUG_ON((priv->iw_mode < IW_MODE_AUTO) || (priv->iw_mode > @@ -386,8 +391,9 @@ prism54_get_mode(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_sens(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->sens; islpci_private *priv = netdev_priv(ndev); u32 sens; @@ -399,8 +405,9 @@ prism54_set_sens(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_sens(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->sens; islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; @@ -416,8 +423,9 @@ prism54_get_sens(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_range(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; struct iw_range *range = (struct iw_range *) extra; islpci_private *priv = netdev_priv(ndev); u8 *data; @@ -521,8 +529,9 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_wap(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct sockaddr *awrq = &wrqu->ap_addr; islpci_private *priv = netdev_priv(ndev); char bssid[6]; int rvalue; @@ -543,8 +552,9 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_wap(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct sockaddr *awrq = &wrqu->ap_addr; islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; @@ -559,7 +569,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_scan(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *vwrq, char *extra) { /* hehe the device does this automagicaly */ return 0; @@ -679,8 +689,9 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_scan(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; islpci_private *priv = netdev_priv(ndev); int i, rvalue; struct obj_bsslist *bsslist; @@ -733,8 +744,9 @@ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_essid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; islpci_private *priv = netdev_priv(ndev); struct obj_ssid essid; @@ -760,8 +772,9 @@ prism54_set_essid(struct net_device 
*ndev, struct iw_request_info *info, static int prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; islpci_private *priv = netdev_priv(ndev); struct obj_ssid *essid; union oid_res_t r; @@ -790,8 +803,9 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, */ static int prism54_set_nick(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; islpci_private *priv = netdev_priv(ndev); if (dwrq->length > IW_ESSID_MAX_SIZE) @@ -807,8 +821,9 @@ prism54_set_nick(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_nick(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; islpci_private *priv = netdev_priv(ndev); dwrq->length = 0; @@ -826,9 +841,9 @@ prism54_get_nick(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_rate(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { - + struct iw_param *vwrq = &wrqu->bitrate; islpci_private *priv = netdev_priv(ndev); u32 rate, profile; char *data; @@ -899,8 +914,9 @@ prism54_set_rate(struct net_device *ndev, static int prism54_get_rate(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->bitrate; islpci_private *priv = netdev_priv(ndev); int rvalue; char *data; @@ -926,8 +942,9 @@ prism54_get_rate(struct net_device *ndev, static int prism54_set_rts(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->rts; islpci_private *priv = netdev_priv(ndev); return mgt_set_request(priv, DOT11_OID_RTSTHRESH, 0, &vwrq->value); @@ -935,8 +952,9 @@ prism54_set_rts(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_rts(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->rts; islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; @@ -950,8 +968,9 @@ prism54_get_rts(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_frag(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->frag; islpci_private *priv = netdev_priv(ndev); return mgt_set_request(priv, DOT11_OID_FRAGTHRESH, 0, &vwrq->value); @@ -959,8 +978,9 @@ prism54_set_frag(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_frag(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->frag; islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; @@ -980,8 +1000,9 @@ prism54_get_frag(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_retry(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = 
&wrqu->retry; islpci_private *priv = netdev_priv(ndev); u32 slimit = 0, llimit = 0; /* short and long limit */ u32 lifetime = 0; @@ -1022,8 +1043,9 @@ prism54_set_retry(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_retry(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->retry; islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue = 0; @@ -1054,8 +1076,9 @@ prism54_get_retry(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_encode(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; islpci_private *priv = netdev_priv(ndev); int rvalue = 0, force = 0; int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0; @@ -1155,8 +1178,9 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_encode(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; islpci_private *priv = netdev_priv(ndev); struct obj_key *key; u32 devindex, index = (dwrq->flags & IW_ENCODE_INDEX) - 1; @@ -1203,8 +1227,9 @@ prism54_get_encode(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->txpower; islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; @@ -1223,8 +1248,9 @@ prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->txpower; islpci_private *priv = netdev_priv(ndev); s32 u = vwrq->value; @@ -1249,8 +1275,9 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_genie(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; islpci_private *priv = netdev_priv(ndev); int alen, ret = 0; struct obj_attachment *attach; @@ -1298,8 +1325,9 @@ static int prism54_set_genie(struct net_device *ndev, static int prism54_get_genie(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; islpci_private *priv = netdev_priv(ndev); int len = priv->wpa_ie_len; @@ -1739,7 +1767,7 @@ static int prism54_get_encodeext(struct net_device *ndev, static int prism54_reset(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data * uwrq, char *extra) { islpci_reset(netdev_priv(ndev), 0); @@ -1748,8 +1776,9 @@ prism54_reset(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_oid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; union oid_res_t r; int rvalue; enum oid_num_t n = dwrq->flags; @@ -1763,8 +1792,9 @@ prism54_get_oid(struct net_device *ndev, struct iw_request_info *info, static 
int prism54_set_u32(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 * uwrq = &wrqu->mode; u32 oid = uwrq[0], u = uwrq[1]; return mgt_set_request(netdev_priv(ndev), oid, 0, &u); @@ -1772,8 +1802,9 @@ prism54_set_u32(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_raw(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; u32 oid = dwrq->flags; return mgt_set_request(netdev_priv(ndev), oid, 0, extra); @@ -1819,7 +1850,7 @@ prism54_acl_clean(struct islpci_acl *acl) static int prism54_add_mac(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) + union iwreq_data *awrq, char *extra) { islpci_private *priv = netdev_priv(ndev); struct islpci_acl *acl = &priv->acl; @@ -1848,7 +1879,7 @@ prism54_add_mac(struct net_device *ndev, struct iw_request_info *info, static int prism54_del_mac(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) + union iwreq_data *awrq, char *extra) { islpci_private *priv = netdev_priv(ndev); struct islpci_acl *acl = &priv->acl; @@ -1875,8 +1906,9 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_mac(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *dwrq = &wrqu->data; islpci_private *priv = netdev_priv(ndev); struct islpci_acl *acl = &priv->acl; struct mac_entry *entry; @@ -1903,8 +1935,9 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_policy(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 * uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); struct islpci_acl *acl = &priv->acl; u32 mlmeautolevel; @@ -1939,8 +1972,9 @@ prism54_set_policy(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_policy(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 * uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); struct islpci_acl *acl = &priv->acl; @@ -1979,7 +2013,7 @@ prism54_mac_accept(struct islpci_acl *acl, char *mac) static int prism54_kick_all(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *dwrq, char *extra) { struct obj_mlme *mlme; int rvalue; @@ -1999,7 +2033,7 @@ prism54_kick_all(struct net_device *ndev, struct iw_request_info *info, static int prism54_kick_mac(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) + union iwreq_data *awrq, char *extra) { struct obj_mlme *mlme; struct sockaddr *addr = (struct sockaddr *) extra; @@ -2085,8 +2119,7 @@ link_changed(struct net_device *ndev, u32 bitrate) netif_carrier_on(ndev); if (priv->iw_mode == IW_MODE_INFRA) { union iwreq_data uwrq; - prism54_get_wap(ndev, NULL, (struct sockaddr *) &uwrq, - NULL); + prism54_get_wap(ndev, NULL, &uwrq, NULL); wireless_send_event(ndev, SIOCGIWAP, &uwrq, NULL); } else send_simple_event(netdev_priv(ndev), @@ -2498,8 +2531,9 @@ prism54_set_mac_address(struct net_device *ndev, void *addr) static int prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, 
char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 * uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); u32 mlme, authen, dot1x, filter, wep; @@ -2542,8 +2576,9 @@ prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 * uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); *uwrq = priv->wpa; return 0; @@ -2551,8 +2586,9 @@ prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info, static int prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 * uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); priv->monitor_type = (*uwrq ? ARPHRD_IEEE80211_PRISM : ARPHRD_IEEE80211); @@ -2564,8 +2600,9 @@ prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info, static int prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 * uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); *uwrq = (priv->monitor_type == ARPHRD_IEEE80211_PRISM); return 0; @@ -2573,8 +2610,9 @@ prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info, static int prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *wrqu, char *extra) { + __u32 * uwrq = &wrqu->mode; islpci_private *priv = netdev_priv(ndev); priv->priv_oid = *uwrq; @@ -2585,8 +2623,9 @@ prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info, static int prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; islpci_private *priv = netdev_priv(ndev); struct islpci_mgmtframe *response; int ret = -EIO; @@ -2621,8 +2660,9 @@ prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info, static int prism54_debug_set_oid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *data, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_point *data = &wrqu->data; islpci_private *priv = netdev_priv(ndev); struct islpci_mgmtframe *response; int ret = 0, response_op = PIMFOR_OP_ERROR; @@ -2682,60 +2722,60 @@ prism54_set_spy(struct net_device *ndev, } static const iw_handler prism54_handler[] = { - (iw_handler) prism54_commit, /* SIOCSIWCOMMIT */ - (iw_handler) prism54_get_name, /* SIOCGIWNAME */ - (iw_handler) NULL, /* SIOCSIWNWID */ - (iw_handler) NULL, /* SIOCGIWNWID */ - (iw_handler) prism54_set_freq, /* SIOCSIWFREQ */ - (iw_handler) prism54_get_freq, /* SIOCGIWFREQ */ - (iw_handler) prism54_set_mode, /* SIOCSIWMODE */ - (iw_handler) prism54_get_mode, /* SIOCGIWMODE */ - (iw_handler) prism54_set_sens, /* SIOCSIWSENS */ - (iw_handler) prism54_get_sens, /* SIOCGIWSENS */ - (iw_handler) NULL, /* SIOCSIWRANGE */ - (iw_handler) prism54_get_range, /* SIOCGIWRANGE */ - (iw_handler) NULL, /* SIOCSIWPRIV */ - (iw_handler) NULL, /* SIOCGIWPRIV */ - (iw_handler) NULL, /* SIOCSIWSTATS */ - (iw_handler) NULL, /* SIOCGIWSTATS */ + prism54_commit, /* SIOCSIWCOMMIT */ + prism54_get_name, /* SIOCGIWNAME */ + NULL, /* SIOCSIWNWID */ + NULL, /* SIOCGIWNWID */ + prism54_set_freq, /* SIOCSIWFREQ */ + prism54_get_freq, /* SIOCGIWFREQ */ + prism54_set_mode, /* 
SIOCSIWMODE */ + prism54_get_mode, /* SIOCGIWMODE */ + prism54_set_sens, /* SIOCSIWSENS */ + prism54_get_sens, /* SIOCGIWSENS */ + NULL, /* SIOCSIWRANGE */ + prism54_get_range, /* SIOCGIWRANGE */ + NULL, /* SIOCSIWPRIV */ + NULL, /* SIOCGIWPRIV */ + NULL, /* SIOCSIWSTATS */ + NULL, /* SIOCGIWSTATS */ prism54_set_spy, /* SIOCSIWSPY */ iw_handler_get_spy, /* SIOCGIWSPY */ iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ - (iw_handler) prism54_set_wap, /* SIOCSIWAP */ - (iw_handler) prism54_get_wap, /* SIOCGIWAP */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* SIOCGIWAPLIST deprecated */ - (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */ - (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */ - (iw_handler) prism54_set_essid, /* SIOCSIWESSID */ - (iw_handler) prism54_get_essid, /* SIOCGIWESSID */ - (iw_handler) prism54_set_nick, /* SIOCSIWNICKN */ - (iw_handler) prism54_get_nick, /* SIOCGIWNICKN */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) prism54_set_rate, /* SIOCSIWRATE */ - (iw_handler) prism54_get_rate, /* SIOCGIWRATE */ - (iw_handler) prism54_set_rts, /* SIOCSIWRTS */ - (iw_handler) prism54_get_rts, /* SIOCGIWRTS */ - (iw_handler) prism54_set_frag, /* SIOCSIWFRAG */ - (iw_handler) prism54_get_frag, /* SIOCGIWFRAG */ - (iw_handler) prism54_set_txpower, /* SIOCSIWTXPOW */ - (iw_handler) prism54_get_txpower, /* SIOCGIWTXPOW */ - (iw_handler) prism54_set_retry, /* SIOCSIWRETRY */ - (iw_handler) prism54_get_retry, /* SIOCGIWRETRY */ - (iw_handler) prism54_set_encode, /* SIOCSIWENCODE */ - (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */ - (iw_handler) NULL, /* SIOCSIWPOWER */ - (iw_handler) NULL, /* SIOCGIWPOWER */ + prism54_set_wap, /* SIOCSIWAP */ + prism54_get_wap, /* SIOCGIWAP */ + NULL, /* -- hole -- */ + NULL, /* SIOCGIWAPLIST deprecated */ + prism54_set_scan, /* SIOCSIWSCAN */ + prism54_get_scan, /* SIOCGIWSCAN */ + prism54_set_essid, /* SIOCSIWESSID */ + prism54_get_essid, /* SIOCGIWESSID */ + prism54_set_nick, /* SIOCSIWNICKN */ + prism54_get_nick, /* SIOCGIWNICKN */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + prism54_set_rate, /* SIOCSIWRATE */ + prism54_get_rate, /* SIOCGIWRATE */ + prism54_set_rts, /* SIOCSIWRTS */ + prism54_get_rts, /* SIOCGIWRTS */ + prism54_set_frag, /* SIOCSIWFRAG */ + prism54_get_frag, /* SIOCGIWFRAG */ + prism54_set_txpower, /* SIOCSIWTXPOW */ + prism54_get_txpower, /* SIOCGIWTXPOW */ + prism54_set_retry, /* SIOCSIWRETRY */ + prism54_get_retry, /* SIOCGIWRETRY */ + prism54_set_encode, /* SIOCSIWENCODE */ + prism54_get_encode, /* SIOCGIWENCODE */ + NULL, /* SIOCSIWPOWER */ + NULL, /* SIOCGIWPOWER */ NULL, /* -- hole -- */ NULL, /* -- hole -- */ - (iw_handler) prism54_set_genie, /* SIOCSIWGENIE */ - (iw_handler) prism54_get_genie, /* SIOCGIWGENIE */ - (iw_handler) prism54_set_auth, /* SIOCSIWAUTH */ - (iw_handler) prism54_get_auth, /* SIOCGIWAUTH */ - (iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */ - (iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */ + prism54_set_genie, /* SIOCSIWGENIE */ + prism54_get_genie, /* SIOCGIWGENIE */ + prism54_set_auth, /* SIOCSIWAUTH */ + prism54_get_auth, /* SIOCGIWAUTH */ + prism54_set_encodeext, /* SIOCSIWENCODEEXT */ + prism54_get_encodeext, /* SIOCGIWENCODEEXT */ NULL, /* SIOCSIWPMKSA */ }; @@ -2872,31 +2912,31 @@ static const struct iw_priv_args prism54_private_args[] = { }; static const iw_handler prism54_private_handler[] = { - (iw_handler) prism54_reset, - (iw_handler) prism54_get_policy, - 
(iw_handler) prism54_set_policy, - (iw_handler) prism54_get_mac, - (iw_handler) prism54_add_mac, - (iw_handler) NULL, - (iw_handler) prism54_del_mac, - (iw_handler) NULL, - (iw_handler) prism54_kick_mac, - (iw_handler) NULL, - (iw_handler) prism54_kick_all, - (iw_handler) prism54_get_wpa, - (iw_handler) prism54_set_wpa, - (iw_handler) NULL, - (iw_handler) prism54_debug_oid, - (iw_handler) prism54_debug_get_oid, - (iw_handler) prism54_debug_set_oid, - (iw_handler) prism54_get_oid, - (iw_handler) prism54_set_u32, - (iw_handler) NULL, - (iw_handler) prism54_set_raw, - (iw_handler) NULL, - (iw_handler) prism54_set_raw, - (iw_handler) prism54_get_prismhdr, - (iw_handler) prism54_set_prismhdr, + prism54_reset, + prism54_get_policy, + prism54_set_policy, + prism54_get_mac, + prism54_add_mac, + NULL, + prism54_del_mac, + NULL, + prism54_kick_mac, + NULL, + prism54_kick_all, + prism54_get_wpa, + prism54_set_wpa, + NULL, + prism54_debug_oid, + prism54_debug_get_oid, + prism54_debug_set_oid, + prism54_get_oid, + prism54_set_u32, + NULL, + prism54_set_raw, + NULL, + prism54_set_raw, + prism54_get_prismhdr, + prism54_set_prismhdr, }; const struct iw_handler_def prism54_handler_def = { diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index d3bad5779..198911396 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3360,20 +3360,20 @@ static int __init init_mac80211_hwsim(void) if (channels < 1) return -EINVAL; - mac80211_hwsim_mchan_ops = mac80211_hwsim_ops; - mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan; - mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan; - mac80211_hwsim_mchan_ops.sw_scan_start = NULL; - mac80211_hwsim_mchan_ops.sw_scan_complete = NULL; - mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc; - mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc; - mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx; - mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx; - mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx; - mac80211_hwsim_mchan_ops.assign_vif_chanctx = - mac80211_hwsim_assign_vif_chanctx; - mac80211_hwsim_mchan_ops.unassign_vif_chanctx = - mac80211_hwsim_unassign_vif_chanctx; + pax_open_kernel(); + memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops); + const_cast(mac80211_hwsim_mchan_ops.hw_scan) = mac80211_hwsim_hw_scan; + const_cast(mac80211_hwsim_mchan_ops.cancel_hw_scan) = mac80211_hwsim_cancel_hw_scan; + const_cast(mac80211_hwsim_mchan_ops.sw_scan_start) = NULL; + const_cast(mac80211_hwsim_mchan_ops.sw_scan_complete) = NULL; + const_cast(mac80211_hwsim_mchan_ops.remain_on_channel) = mac80211_hwsim_roc; + const_cast(mac80211_hwsim_mchan_ops.cancel_remain_on_channel) = mac80211_hwsim_croc; + const_cast(mac80211_hwsim_mchan_ops.add_chanctx) = mac80211_hwsim_add_chanctx; + const_cast(mac80211_hwsim_mchan_ops.remove_chanctx) = mac80211_hwsim_remove_chanctx; + const_cast(mac80211_hwsim_mchan_ops.change_chanctx) = mac80211_hwsim_change_chanctx; + const_cast(mac80211_hwsim_mchan_ops.assign_vif_chanctx) = mac80211_hwsim_assign_vif_chanctx; + const_cast(mac80211_hwsim_mchan_ops.unassign_vif_chanctx) = mac80211_hwsim_unassign_vif_chanctx; + pax_close_kernel(); spin_lock_init(&hwsim_radio_lock); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 645f031e0..5122cca41 100644 --- 
a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -853,7 +853,7 @@ mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, /* * CFG802.11 network device handler for data transmission. */ -static int +static netdev_tx_t mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); @@ -1539,14 +1539,10 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem, */ void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare) { - struct mwifiex_if_ops if_ops; - if (!prepare) { - mwifiex_reinit_sw(adapter, adapter->card_sem, &if_ops, + mwifiex_reinit_sw(adapter, adapter->card_sem, NULL, adapter->iface_type); } else { - memcpy(&if_ops, &adapter->if_ops, - sizeof(struct mwifiex_if_ops)); mwifiex_shutdown_sw(adapter, adapter->card_sem); } } diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 18fbb96a4..db73632c2 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -751,7 +751,7 @@ void mwifiex_hist_data_reset(struct mwifiex_private *priv) void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags) { struct sk_buff *skb; - int buf_len, pad; + long buf_len, pad; buf_len = rx_len + MWIFIEX_RX_HEADROOM + MWIFIEX_DMA_ALIGN_SZ; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 155f34398..5db43e7ae 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -54,7 +54,7 @@ rt2x00mmio_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg)) static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u8 value) + const unsigned int word, u8 value) { u32 reg; @@ -109,7 +109,7 @@ static void rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev, } static void rt2400pci_rf_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u32 value) + const unsigned int word, u32 value) { u32 reg; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 2553cdd74..6a60ef96f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -54,7 +54,7 @@ rt2x00mmio_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg)) static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u8 value) + const unsigned int word, u8 value) { u32 reg; @@ -109,7 +109,7 @@ static void rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev, } static void rt2500pci_rf_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u32 value) + const unsigned int word, u32 value) { u32 reg; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index 2d64611de..66754f4e0 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -142,7 +142,7 @@ static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev, rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg)) static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u8 value) + const unsigned int word, u8 value) { u16 reg; @@ -196,7 +196,7 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, } static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev, - const unsigned int 
word, const u32 value) + const unsigned int word, u32 value) { u16 reg; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index bf3f0a399..9d2a6d000 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -83,7 +83,7 @@ static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev) } static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u8 value) + const unsigned int word, u8 value) { u32 reg; @@ -140,7 +140,7 @@ static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev, } static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u8 value) + const unsigned int word, u8 value) { u32 reg; @@ -195,7 +195,7 @@ static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev, } static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u32 value) + const unsigned int word, u32 value) { u32 reg; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index f68d49212..38ba52d16 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -378,7 +378,7 @@ struct rt2x00_intf { * for hardware which doesn't support hardware * sequence counting. */ - atomic_t seqno; + atomic_unchecked_t seqno; }; static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 68b620b24..92ecd9ef7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, * sequence counter given by mac80211. 
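The rt2x00.h hunk just above converts the per-interface sequence counter from atomic_t to this patch set's atomic_unchecked_t, and the rt2x00queue.c hunk that follows switches to the matching atomic_add_return_unchecked()/atomic_read_unchecked() helpers. Under the PaX/grsecurity reference-counter hardening, ordinary atomic_t arithmetic is overflow-checked, so a counter that is meant to wrap, such as an 802.11 sequence number, has to opt out through the *_unchecked variants. The sketch below is a stand-alone illustration of why wrapping is harmless here; the 0x10 step mirrors the hunk that follows, while the 0xfff0 sequence mask is the usual ieee80211.h value and is only assumed for the demo.

	#include <stdint.h>
	#include <stdio.h>

	#define IEEE80211_SCTL_SEQ 0xfff0 /* 12-bit sequence field (assumed value) */

	int main(void)
	{
		uint32_t seqno = 0;               /* models intf->seqno                */
		unsigned int frame;

		for (frame = 0; frame < 8192; frame++) {
			uint16_t seq_ctrl;

			seqno += 0x10;            /* atomic_add_return_unchecked(0x10) */
			seq_ctrl = (uint16_t)(seqno & IEEE80211_SCTL_SEQ);

			if (frame % 2048 == 0)
				printf("frame %4u -> seq_ctrl 0x%04x\n", frame, seq_ctrl);
		}

		/* Only the masked 12 bits ever reach the frame header, and they wrap
		 * every 0x1000 steps by design.  The 32-bit counter behind them will
		 * wrap too eventually, which is harmless for a sequence number but
		 * would look like a reference-count overflow to an overflow-checked
		 * atomic_t, hence the *_unchecked variant. */
		return 0;
	}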
*/ if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) - seqno = atomic_add_return(0x10, &intf->seqno); + seqno = atomic_add_return_unchecked(0x10, &intf->seqno); else - seqno = atomic_read(&intf->seqno); + seqno = atomic_read_unchecked(&intf->seqno); hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seqno); diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 1c4226701..e80c54bed 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -63,7 +63,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); H2M_MAILBOX_CSR_OWNER, (__reg)) static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u8 value) + const unsigned int word, u8 value) { u32 reg; @@ -118,7 +118,7 @@ static void rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev, } static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u32 value) + const unsigned int word, u32 value) { u32 reg; diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index 903cc6f67..d2029ec11 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -61,7 +61,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); rt2x00usb_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg)) static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u8 value) + const unsigned int word, u8 value) { u32 reg; @@ -116,7 +116,7 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, } static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev, - const unsigned int word, const u32 value) + const unsigned int word, u32 value) { u32 reg; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 4ac928bf1..9832ba83b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -467,15 +467,15 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) rtlpriv->works.hw = hw; rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, - (void *)rtl_watchdog_wq_callback); + rtl_watchdog_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, - (void *)rtl_ips_nic_off_wq_callback); + rtl_ips_nic_off_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.ps_work, - (void *)rtl_swlps_wq_callback); + rtl_swlps_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq, - (void *)rtl_swlps_rfon_wq_callback); + rtl_swlps_rfon_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, - (void *)rtl_fwevt_wq_callback); + rtl_fwevt_wq_callback); } @@ -1559,7 +1559,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb) } EXPORT_SYMBOL_GPL(rtl_beacon_statistic); -void rtl_watchdog_wq_callback(void *data) +void rtl_watchdog_wq_callback(struct work_struct *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, struct rtl_works, @@ -1722,7 +1722,7 @@ void rtl_watch_dog_timer_callback(unsigned long data) mod_timer(&rtlpriv->works.watchdog_timer, jiffies + MSECS(RTL_WATCH_DOG_TIME)); } -void rtl_fwevt_wq_callback(void *data) +void rtl_fwevt_wq_callback(struct work_struct *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, struct rtl_works, fwevt_wq); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h 
b/drivers/net/wireless/realtek/rtlwifi/base.h index 74233d601..482e49556 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -134,8 +134,8 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid); int rtl_rx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid); -void rtl_watchdog_wq_callback(void *data); -void rtl_fwevt_wq_callback(void *data); +void rtl_watchdog_wq_callback(struct work_struct *data); +void rtl_fwevt_wq_callback(struct work_struct *data); void rtl_get_tcb_desc(struct ieee80211_hw *hw, struct ieee80211_tx_info *info, diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 5be4fc960..05d33701c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1095,13 +1095,16 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) return ret; } -static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) +static void _rtl_pci_irq_tasklet(unsigned long _hw) { + struct ieee80211_hw *hw = (struct ieee80211_hw *)_hw; + _rtl_pci_tx_chk_waitq(hw); } -static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) +static void _rtl_pci_prepare_bcn_tasklet(unsigned long _hw) { + struct ieee80211_hw *hw = (struct ieee80211_hw *)_hw; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -1222,12 +1225,8 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw, rtlpci->acm_method = EACMWAY2_SW; /*task */ - tasklet_init(&rtlpriv->works.irq_tasklet, - (void (*)(unsigned long))_rtl_pci_irq_tasklet, - (unsigned long)hw); - tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, - (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet, - (unsigned long)hw); + tasklet_init(&rtlpriv->works.irq_tasklet, _rtl_pci_irq_tasklet, (unsigned long)hw); + tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, _rtl_pci_prepare_bcn_tasklet, (unsigned long)hw); INIT_WORK(&rtlpriv->works.lps_change_work, rtl_lps_change_work_callback); } diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index d0ffc4d50..5f197b42c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -198,7 +198,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) ppsc->swrf_processing = false; } -void rtl_ips_nic_off_wq_callback(void *data) +void rtl_ips_nic_off_wq_callback(struct work_struct *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq); @@ -582,7 +582,7 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } -void rtl_swlps_rfon_wq_callback(void *data) +void rtl_swlps_rfon_wq_callback(struct work_struct *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq); @@ -696,7 +696,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw) } EXPORT_SYMBOL_GPL(rtl_lps_leave); -void rtl_swlps_wq_callback(void *data) +void rtl_swlps_wq_callback(struct work_struct *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, struct rtl_works, diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h index 0df2b5203..0607d337a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.h +++ b/drivers/net/wireless/realtek/rtlwifi/ps.h @@ -32,15 +32,15 @@ bool 
rtl_ps_enable_nic(struct ieee80211_hw *hw); bool rtl_ps_disable_nic(struct ieee80211_hw *hw); void rtl_ips_nic_off(struct ieee80211_hw *hw); void rtl_ips_nic_on(struct ieee80211_hw *hw); -void rtl_ips_nic_off_wq_callback(void *data); +void rtl_ips_nic_off_wq_callback(struct work_struct *data); void rtl_lps_enter(struct ieee80211_hw *hw); void rtl_lps_leave(struct ieee80211_hw *hw); void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode); void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len); -void rtl_swlps_wq_callback(void *data); -void rtl_swlps_rfon_wq_callback(void *data); +void rtl_swlps_wq_callback(struct work_struct *data); +void rtl_swlps_rfon_wq_callback(struct work_struct *data); void rtl_swlps_rf_awake(struct ieee80211_hw *hw); void rtl_swlps_rf_sleep(struct ieee80211_hw *hw); void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state); diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c index b661f896e..ebea675a3 100644 --- a/drivers/net/wireless/ti/wl1251/sdio.c +++ b/drivers/net/wireless/ti/wl1251/sdio.c @@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func, irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); - wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; - wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq; + pax_open_kernel(); + const_cast(wl1251_sdio_ops.enable_irq) = wl1251_enable_line_irq; + const_cast(wl1251_sdio_ops.disable_irq) = wl1251_disable_line_irq; + pax_close_kernel(); wl1251_info("using dedicated interrupt line"); } else { - wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq; - wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq; + pax_open_kernel(); + const_cast(wl1251_sdio_ops.enable_irq) = wl1251_sdio_enable_irq; + const_cast(wl1251_sdio_ops.disable_irq) = wl1251_sdio_disable_irq; + pax_close_kernel(); wl1251_info("using SDIO interrupt"); } diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index 29f13f890..adfe3cf80 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl) sizeof(wl->conf.mem)); /* read data preparation is only needed by wl127x */ - wl->ops->prepare_read = wl127x_prepare_read; + pax_open_kernel(); + const_cast(wl->ops->prepare_read) = wl127x_prepare_read; + pax_close_kernel(); wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER, @@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl) sizeof(wl->conf.mem)); /* read data preparation is only needed by wl127x */ - wl->ops->prepare_read = wl127x_prepare_read; + pax_open_kernel(); + const_cast(wl->ops->prepare_read) = wl127x_prepare_read; + pax_close_kernel(); wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER, diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 7b367f70d..06f47e058 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -2033,8 +2033,10 @@ static int wl18xx_setup(struct wl1271 *wl) } if (!checksum_param) { - wl18xx_ops.set_rx_csum = NULL; - wl18xx_ops.init_vif = NULL; + pax_open_kernel(); + const_cast(wl18xx_ops.set_rx_csum) = NULL; + const_cast(wl18xx_ops.init_vif) = NULL; + pax_close_kernel(); } /* Enable 11a Band only if we have 5G antennas */ diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index 799f70f2d..0c4b5d173 100644 
--- a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -890,7 +890,7 @@ static void zd1201_set_multicast(struct net_device *dev) } static int zd1201_config_commit(struct net_device *dev, - struct iw_request_info *info, struct iw_point *data, char *essid) + struct iw_request_info *info, union iwreq_data *data, char *essid) { struct zd1201 *zd = netdev_priv(dev); @@ -898,15 +898,18 @@ static int zd1201_config_commit(struct net_device *dev, } static int zd1201_get_name(struct net_device *dev, - struct iw_request_info *info, char *name, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + char *name = wrqu->name; + strcpy(name, "IEEE 802.11b"); return 0; } static int zd1201_set_freq(struct net_device *dev, - struct iw_request_info *info, struct iw_freq *freq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_freq *freq = &wrqu->freq; struct zd1201 *zd = netdev_priv(dev); short channel = 0; int err; @@ -926,8 +929,9 @@ static int zd1201_set_freq(struct net_device *dev, } static int zd1201_get_freq(struct net_device *dev, - struct iw_request_info *info, struct iw_freq *freq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_freq *freq = &wrqu->freq; struct zd1201 *zd = netdev_priv(dev); short channel; int err; @@ -942,8 +946,9 @@ static int zd1201_get_freq(struct net_device *dev, } static int zd1201_set_mode(struct net_device *dev, - struct iw_request_info *info, __u32 *mode, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + __u32 *mode = &wrqu->mode; struct zd1201 *zd = netdev_priv(dev); short porttype, monitor = 0; unsigned char buffer[IW_ESSID_MAX_SIZE+2]; @@ -1004,8 +1009,9 @@ static int zd1201_set_mode(struct net_device *dev, } static int zd1201_get_mode(struct net_device *dev, - struct iw_request_info *info, __u32 *mode, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + __u32 *mode = &wrqu->mode; struct zd1201 *zd = netdev_priv(dev); short porttype; int err; @@ -1041,8 +1047,9 @@ static int zd1201_get_mode(struct net_device *dev, } static int zd1201_get_range(struct net_device *dev, - struct iw_request_info *info, struct iw_point *wrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_point *wrq = &wrqu->data; struct iw_range *range = (struct iw_range *)extra; wrq->length = sizeof(struct iw_range); @@ -1080,8 +1087,9 @@ static int zd1201_get_range(struct net_device *dev, * the stats after asking the bssid. 
*/ static int zd1201_get_wap(struct net_device *dev, - struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct sockaddr *ap_addr = &wrqu->ap_addr; struct zd1201 *zd = netdev_priv(dev); unsigned char buffer[6]; @@ -1101,15 +1109,16 @@ static int zd1201_get_wap(struct net_device *dev, } static int zd1201_set_scan(struct net_device *dev, - struct iw_request_info *info, struct iw_point *srq, char *extra) + struct iw_request_info *info, union iwreq_data *srq, char *extra) { /* We do everything in get_scan */ return 0; } static int zd1201_get_scan(struct net_device *dev, - struct iw_request_info *info, struct iw_point *srq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_point *srq = &wrqu->data; struct zd1201 *zd = netdev_priv(dev); int err, i, j, enabled_save; struct iw_event iwe; @@ -1200,8 +1209,9 @@ static int zd1201_get_scan(struct net_device *dev, } static int zd1201_set_essid(struct net_device *dev, - struct iw_request_info *info, struct iw_point *data, char *essid) + struct iw_request_info *info, union iwreq_data *wrqu, char *essid) { + struct iw_point *data = &wrqu->essid; struct zd1201 *zd = netdev_priv(dev); if (data->length > IW_ESSID_MAX_SIZE) @@ -1215,8 +1225,9 @@ static int zd1201_set_essid(struct net_device *dev, } static int zd1201_get_essid(struct net_device *dev, - struct iw_request_info *info, struct iw_point *data, char *essid) + struct iw_request_info *info, union iwreq_data *wrqu, char *essid) { + struct iw_point *data = &wrqu->essid; struct zd1201 *zd = netdev_priv(dev); memcpy(essid, zd->essid, zd->essidlen); @@ -1227,8 +1238,10 @@ static int zd1201_get_essid(struct net_device *dev, } static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info, - struct iw_point *data, char *nick) + union iwreq_data *wrqu, char *nick) { + struct iw_point *data = &wrqu->data; + strcpy(nick, "zd1201"); data->flags = 1; data->length = strlen(nick); @@ -1236,8 +1249,9 @@ static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info, } static int zd1201_set_rate(struct net_device *dev, - struct iw_request_info *info, struct iw_param *rrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; struct zd1201 *zd = netdev_priv(dev); short rate; int err; @@ -1269,8 +1283,9 @@ static int zd1201_set_rate(struct net_device *dev, } static int zd1201_get_rate(struct net_device *dev, - struct iw_request_info *info, struct iw_param *rrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->bitrate; struct zd1201 *zd = netdev_priv(dev); short rate; int err; @@ -1302,8 +1317,9 @@ static int zd1201_get_rate(struct net_device *dev, } static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rts, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rts = &wrqu->rts; struct zd1201 *zd = netdev_priv(dev); int err; short val = rts->value; @@ -1322,8 +1338,9 @@ static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info, } static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *rts, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *rts = &wrqu->rts; struct zd1201 *zd = netdev_priv(dev); short rtst; int err; @@ -1339,8 +1356,9 @@ static int zd1201_get_rts(struct 
net_device *dev, struct iw_request_info *info, } static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *frag, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *frag = &wrqu->frag; struct zd1201 *zd = netdev_priv(dev); int err; short val = frag->value; @@ -1360,8 +1378,9 @@ static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info, } static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *frag, char *extra) + union iwreq_data *wrqu, char *extra) { + struct iw_param *frag = &wrqu->frag; struct zd1201 *zd = netdev_priv(dev); short fragt; int err; @@ -1377,20 +1396,21 @@ static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info, } static int zd1201_set_retry(struct net_device *dev, - struct iw_request_info *info, struct iw_param *rrq, char *extra) + struct iw_request_info *info, union iwreq_data *rrq, char *extra) { return 0; } static int zd1201_get_retry(struct net_device *dev, - struct iw_request_info *info, struct iw_param *rrq, char *extra) + struct iw_request_info *info, union iwreq_data *rrq, char *extra) { return 0; } static int zd1201_set_encode(struct net_device *dev, - struct iw_request_info *info, struct iw_point *erq, char *key) + struct iw_request_info *info, union iwreq_data *wrqu, char *key) { + struct iw_point *erq = &wrqu->encoding; struct zd1201 *zd = netdev_priv(dev); short i; int err, rid; @@ -1446,8 +1466,9 @@ static int zd1201_set_encode(struct net_device *dev, } static int zd1201_get_encode(struct net_device *dev, - struct iw_request_info *info, struct iw_point *erq, char *key) + struct iw_request_info *info, union iwreq_data *wrqu, char *key) { + struct iw_point *erq = &wrqu->encoding; struct zd1201 *zd = netdev_priv(dev); short i; int err; @@ -1479,8 +1500,9 @@ static int zd1201_get_encode(struct net_device *dev, } static int zd1201_set_power(struct net_device *dev, - struct iw_request_info *info, struct iw_param *vwrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->power; struct zd1201 *zd = netdev_priv(dev); short enabled, duration, level; int err; @@ -1518,8 +1540,9 @@ static int zd1201_set_power(struct net_device *dev, } static int zd1201_get_power(struct net_device *dev, - struct iw_request_info *info, struct iw_param *vwrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_param *vwrq = &wrqu->power; struct zd1201 *zd = netdev_priv(dev); short enabled, level, duration; int err; @@ -1556,57 +1579,58 @@ static int zd1201_get_power(struct net_device *dev, static const iw_handler zd1201_iw_handler[] = { - (iw_handler) zd1201_config_commit, /* SIOCSIWCOMMIT */ - (iw_handler) zd1201_get_name, /* SIOCGIWNAME */ - (iw_handler) NULL, /* SIOCSIWNWID */ - (iw_handler) NULL, /* SIOCGIWNWID */ - (iw_handler) zd1201_set_freq, /* SIOCSIWFREQ */ - (iw_handler) zd1201_get_freq, /* SIOCGIWFREQ */ - (iw_handler) zd1201_set_mode, /* SIOCSIWMODE */ - (iw_handler) zd1201_get_mode, /* SIOCGIWMODE */ - (iw_handler) NULL, /* SIOCSIWSENS */ - (iw_handler) NULL, /* SIOCGIWSENS */ - (iw_handler) NULL, /* SIOCSIWRANGE */ - (iw_handler) zd1201_get_range, /* SIOCGIWRANGE */ - (iw_handler) NULL, /* SIOCSIWPRIV */ - (iw_handler) NULL, /* SIOCGIWPRIV */ - (iw_handler) NULL, /* SIOCSIWSTATS */ - (iw_handler) NULL, /* SIOCGIWSTATS */ - (iw_handler) NULL, /* SIOCSIWSPY */ - (iw_handler) NULL, /* SIOCGIWSPY */ - (iw_handler) NULL, /* 
-- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL/*zd1201_set_wap*/, /* SIOCSIWAP */ - (iw_handler) zd1201_get_wap, /* SIOCGIWAP */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* SIOCGIWAPLIST */ - (iw_handler) zd1201_set_scan, /* SIOCSIWSCAN */ - (iw_handler) zd1201_get_scan, /* SIOCGIWSCAN */ - (iw_handler) zd1201_set_essid, /* SIOCSIWESSID */ - (iw_handler) zd1201_get_essid, /* SIOCGIWESSID */ - (iw_handler) NULL, /* SIOCSIWNICKN */ - (iw_handler) zd1201_get_nick, /* SIOCGIWNICKN */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) zd1201_set_rate, /* SIOCSIWRATE */ - (iw_handler) zd1201_get_rate, /* SIOCGIWRATE */ - (iw_handler) zd1201_set_rts, /* SIOCSIWRTS */ - (iw_handler) zd1201_get_rts, /* SIOCGIWRTS */ - (iw_handler) zd1201_set_frag, /* SIOCSIWFRAG */ - (iw_handler) zd1201_get_frag, /* SIOCGIWFRAG */ - (iw_handler) NULL, /* SIOCSIWTXPOW */ - (iw_handler) NULL, /* SIOCGIWTXPOW */ - (iw_handler) zd1201_set_retry, /* SIOCSIWRETRY */ - (iw_handler) zd1201_get_retry, /* SIOCGIWRETRY */ - (iw_handler) zd1201_set_encode, /* SIOCSIWENCODE */ - (iw_handler) zd1201_get_encode, /* SIOCGIWENCODE */ - (iw_handler) zd1201_set_power, /* SIOCSIWPOWER */ - (iw_handler) zd1201_get_power, /* SIOCGIWPOWER */ + zd1201_config_commit, /* SIOCSIWCOMMIT */ + zd1201_get_name, /* SIOCGIWNAME */ + NULL, /* SIOCSIWNWID */ + NULL, /* SIOCGIWNWID */ + zd1201_set_freq, /* SIOCSIWFREQ */ + zd1201_get_freq, /* SIOCGIWFREQ */ + zd1201_set_mode, /* SIOCSIWMODE */ + zd1201_get_mode, /* SIOCGIWMODE */ + NULL, /* SIOCSIWSENS */ + NULL, /* SIOCGIWSENS */ + NULL, /* SIOCSIWRANGE */ + zd1201_get_range, /* SIOCGIWRANGE */ + NULL, /* SIOCSIWPRIV */ + NULL, /* SIOCGIWPRIV */ + NULL, /* SIOCSIWSTATS */ + NULL, /* SIOCGIWSTATS */ + NULL, /* SIOCSIWSPY */ + NULL, /* SIOCGIWSPY */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + NULL/*zd1201_set_wap*/, /* SIOCSIWAP */ + zd1201_get_wap, /* SIOCGIWAP */ + NULL, /* -- hole -- */ + NULL, /* SIOCGIWAPLIST */ + zd1201_set_scan, /* SIOCSIWSCAN */ + zd1201_get_scan, /* SIOCGIWSCAN */ + zd1201_set_essid, /* SIOCSIWESSID */ + zd1201_get_essid, /* SIOCGIWESSID */ + NULL, /* SIOCSIWNICKN */ + zd1201_get_nick, /* SIOCGIWNICKN */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + zd1201_set_rate, /* SIOCSIWRATE */ + zd1201_get_rate, /* SIOCGIWRATE */ + zd1201_set_rts, /* SIOCSIWRTS */ + zd1201_get_rts, /* SIOCGIWRTS */ + zd1201_set_frag, /* SIOCSIWFRAG */ + zd1201_get_frag, /* SIOCGIWFRAG */ + NULL, /* SIOCSIWTXPOW */ + NULL, /* SIOCGIWTXPOW */ + zd1201_set_retry, /* SIOCSIWRETRY */ + zd1201_get_retry, /* SIOCGIWRETRY */ + zd1201_set_encode, /* SIOCSIWENCODE */ + zd1201_get_encode, /* SIOCGIWENCODE */ + zd1201_set_power, /* SIOCSIWPOWER */ + zd1201_get_power, /* SIOCGIWPOWER */ }; static int zd1201_set_hostauth(struct net_device *dev, - struct iw_request_info *info, struct iw_param *rrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->param; struct zd1201 *zd = netdev_priv(dev); if (!zd->ap) @@ -1616,8 +1640,9 @@ static int zd1201_set_hostauth(struct net_device *dev, } static int zd1201_get_hostauth(struct net_device *dev, - struct iw_request_info *info, struct iw_param *rrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->param; struct zd1201 *zd = netdev_priv(dev); short hostauth; int err; @@ -1635,8 +1660,9 @@ static int zd1201_get_hostauth(struct net_device *dev, } static 
int zd1201_auth_sta(struct net_device *dev, - struct iw_request_info *info, struct sockaddr *sta, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct sockaddr *sta = &wrqu->addr; struct zd1201 *zd = netdev_priv(dev); unsigned char buffer[10]; @@ -1651,8 +1677,9 @@ static int zd1201_auth_sta(struct net_device *dev, } static int zd1201_set_maxassoc(struct net_device *dev, - struct iw_request_info *info, struct iw_param *rrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->param; struct zd1201 *zd = netdev_priv(dev); int err; @@ -1666,8 +1693,9 @@ static int zd1201_set_maxassoc(struct net_device *dev, } static int zd1201_get_maxassoc(struct net_device *dev, - struct iw_request_info *info, struct iw_param *rrq, char *extra) + struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { + struct iw_param *rrq = &wrqu->param; struct zd1201 *zd = netdev_priv(dev); short maxassoc; int err; @@ -1685,12 +1713,12 @@ static int zd1201_get_maxassoc(struct net_device *dev, } static const iw_handler zd1201_private_handler[] = { - (iw_handler) zd1201_set_hostauth, /* ZD1201SIWHOSTAUTH */ - (iw_handler) zd1201_get_hostauth, /* ZD1201GIWHOSTAUTH */ - (iw_handler) zd1201_auth_sta, /* ZD1201SIWAUTHSTA */ - (iw_handler) NULL, /* nothing to get */ - (iw_handler) zd1201_set_maxassoc, /* ZD1201SIMAXASSOC */ - (iw_handler) zd1201_get_maxassoc, /* ZD1201GIMAXASSOC */ + zd1201_set_hostauth, /* ZD1201SIWHOSTAUTH */ + zd1201_get_hostauth, /* ZD1201GIWHOSTAUTH */ + zd1201_auth_sta, /* ZD1201SIWAUTHSTA */ + NULL, /* nothing to get */ + zd1201_set_maxassoc, /* ZD1201SIMAXASSOC */ + zd1201_get_maxassoc, /* ZD1201GIMAXASSOC */ }; static const struct iw_priv_args zd1201_private_args[] = { diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index f176adb1b..037a8febc 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -380,7 +380,7 @@ static inline void handle_regs_int(struct urb *urb) { struct zd_usb *usb = urb->context; struct zd_usb_interrupt *intr = &usb->intr; - int len; + unsigned int len; u16 int_num; ZD_ASSERT(in_interrupt()); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 74dc2bf71..e942c3b1d 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -160,7 +160,7 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, return vif->hash.mapping[skb_get_hash_raw(skb) % size]; } -static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 0cdcb2169..72354a5d2 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -550,7 +550,7 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) -static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c index 435861189..e0cbb1a83 
100644 --- a/drivers/ntb/test/ntb_pingpong.c +++ b/drivers/ntb/test/ntb_pingpong.c @@ -99,7 +99,7 @@ struct pp_ctx { unsigned long db_delay; struct dentry *debugfs_node_dir; struct dentry *debugfs_count; - atomic_t count; + atomic_unchecked_t count; }; static struct dentry *pp_debugfs_dir; @@ -177,7 +177,7 @@ static void pp_db_event(void *ctx, int vec) dev_dbg(&pp->ntb->dev, "Pong vec %d bits %#llx\n", vec, db_bits); - atomic_inc(&pp->count); + atomic_inc_unchecked(&pp->count); } spin_unlock_irqrestore(&pp->db_lock, irqflags); } @@ -194,7 +194,7 @@ static int pp_debugfs_setup(struct pp_ctx *pp) if (!pp->debugfs_node_dir) return -ENODEV; - pp->debugfs_count = debugfs_create_atomic_t("count", S_IRUSR | S_IWUSR, + pp->debugfs_count = debugfs_create_atomic_unchecked_t("count", S_IRUSR | S_IWUSR, pp->debugfs_node_dir, &pp->count); if (!pp->debugfs_count) @@ -238,7 +238,7 @@ static int pp_probe(struct ntb_client *client, pp->ntb = ntb; pp->db_bits = 0; - atomic_set(&pp->count, 0); + atomic_set_unchecked(&pp->count, 0); spin_lock_init(&pp->db_lock); setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp); pp->db_delay = msecs_to_jiffies(delay_ms); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5e52034ab..a57342ea5 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2038,7 +2038,7 @@ static int nvme_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct nvme_dev *dev = pci_get_drvdata(pdev); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index c89d5d231..bcd894691 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1295,7 +1295,9 @@ static int __init of_fdt_raw_init(void) pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n"); return 0; } - of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params); + pax_open_kernel(); + const_cast(of_fdt_raw_attr.size) = fdt_totalsize(initial_boot_params); + pax_close_kernel(); return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr); } late_initcall(of_fdt_raw_init); diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 82f7000a2..d6d044744 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -345,7 +345,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm) if (cookie == NO_COOKIE) offset = pc; if (cookie == INVALID_COOKIE) { - atomic_inc(&oprofile_stats.sample_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); offset = pc; } if (cookie != last_cookie) { @@ -389,14 +389,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) /* add userspace sample */ if (!mm) { - atomic_inc(&oprofile_stats.sample_lost_no_mm); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm); return 0; } cookie = lookup_dcookie(mm, s->eip, &offset); if (cookie == INVALID_COOKIE) { - atomic_inc(&oprofile_stats.sample_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); return 0; } @@ -554,7 +554,7 @@ void sync_buffer(int cpu) /* ignore backtraces if failed to add a sample */ if (state == sb_bt_start) { state = sb_bt_ignore; - atomic_inc(&oprofile_stats.bt_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping); } } release_mm(mm); diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index c0cc4e7ff..44d4e5485 100644 --- a/drivers/oprofile/event_buffer.c 
+++ b/drivers/oprofile/event_buffer.c @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value) } if (buffer_pos == buffer_size) { - atomic_inc(&oprofile_stats.event_lost_overflow); + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow); return; } diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index ed2c3ec07..deda85a09 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work) if (oprofile_ops.switch_events()) return; - atomic_inc(&oprofile_stats.multiplex_counter); + atomic_inc_unchecked(&oprofile_stats.multiplex_counter); start_switch_worker(); } diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c index 59659cea4..6c860a009 100644 --- a/drivers/oprofile/oprofile_stats.c +++ b/drivers/oprofile/oprofile_stats.c @@ -30,11 +30,11 @@ void oprofile_reset_stats(void) cpu_buf->sample_invalid_eip = 0; } - atomic_set(&oprofile_stats.sample_lost_no_mm, 0); - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); - atomic_set(&oprofile_stats.event_lost_overflow, 0); - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); - atomic_set(&oprofile_stats.multiplex_counter, 0); + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0); + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0); + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0); + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0); + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0); } diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h index 1fc622bd1..8c48fc31c 100644 --- a/drivers/oprofile/oprofile_stats.h +++ b/drivers/oprofile/oprofile_stats.h @@ -13,11 +13,11 @@ #include struct oprofile_stat_struct { - atomic_t sample_lost_no_mm; - atomic_t sample_lost_no_mapping; - atomic_t bt_lost_no_mapping; - atomic_t event_lost_overflow; - atomic_t multiplex_counter; + atomic_unchecked_t sample_lost_no_mm; + atomic_unchecked_t sample_lost_no_mapping; + atomic_unchecked_t bt_lost_no_mapping; + atomic_unchecked_t event_lost_overflow; + atomic_unchecked_t multiplex_counter; }; extern struct oprofile_stat_struct oprofile_stats; diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index 134398e02..266e72485 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root, static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset) { - atomic_t *val = file->private_data; - return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset); + atomic_unchecked_t *val = file->private_data; + return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset); } @@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = { int oprofilefs_create_ro_atomic(struct dentry *root, - char const *name, atomic_t *val) + char const *name, atomic_unchecked_t *val) { return __oprofilefs_create_file(root, name, &atomic_ro_fops, 0444, val); diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index 74ed3e459..3e74a1c40 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c @@ -65,7 +65,7 @@ static int do_active_device(struct ctl_table *table, int write, *ppos += len; - return copy_to_user(result, buffer, len) ? -EFAULT : 0; + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? 
-EFAULT : 0; } #ifdef CONFIG_PARPORT_1284 @@ -107,7 +107,7 @@ static int do_autoprobe(struct ctl_table *table, int write, *ppos += len; - return copy_to_user (result, buffer, len) ? -EFAULT : 0; + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0; } #endif /* IEEE1284.3 support. */ diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index 37e29b580..616d5208f 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -396,7 +396,7 @@ static void vmd_teardown_dma_ops(struct vmd_dev *vmd) #define ASSIGN_VMD_DMA_OPS(source, dest, fn) \ do { \ if (source->fn) \ - dest->fn = vmd_##fn; \ + const_cast(dest->fn) = vmd_##fn;\ } while (0) static void vmd_setup_dma_ops(struct vmd_dev *vmd) @@ -410,6 +410,8 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd) if (!source) return; + + pax_open_kernel(); ASSIGN_VMD_DMA_OPS(source, dest, alloc); ASSIGN_VMD_DMA_OPS(source, dest, free); ASSIGN_VMD_DMA_OPS(source, dest, mmap); @@ -427,6 +429,8 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd) #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); #endif + pax_close_kernel(); + add_dma_domain(domain); } #undef ASSIGN_VMD_DMA_OPS diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index f6221d739..80121aed5 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -465,7 +465,9 @@ static int __init ibm_acpiphp_init(void) goto init_cleanup; } - ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL); + pax_open_kernel(); + const_cast(ibm_apci_table_attr.size) = ibm_get_table_from_acpi(NULL); + pax_close_kernel(); retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr); return retval; diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c index 88a44a707..de358ce27 100644 --- a/drivers/pci/hotplug/cpcihp_generic.c +++ b/drivers/pci/hotplug/cpcihp_generic.c @@ -73,7 +73,6 @@ static u16 port; static unsigned int enum_bit; static u8 enum_mask; -static struct cpci_hp_controller_ops generic_hpc_ops; static struct cpci_hp_controller generic_hpc; static int __init validate_parameters(void) @@ -139,6 +138,10 @@ static int query_enum(void) return ((value & enum_mask) == enum_mask); } +static struct cpci_hp_controller_ops generic_hpc_ops = { + .query_enum = query_enum, +}; + static int __init cpcihp_generic_init(void) { int status; @@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void) pci_dev_put(dev); memset(&generic_hpc, 0, sizeof(struct cpci_hp_controller)); - generic_hpc_ops.query_enum = query_enum; generic_hpc.ops = &generic_hpc_ops; status = cpci_hp_register_controller(&generic_hpc); diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c index 5f49c3fd7..438f01901 100644 --- a/drivers/pci/hotplug/cpcihp_zt5550.c +++ b/drivers/pci/hotplug/cpcihp_zt5550.c @@ -59,7 +59,6 @@ /* local variables */ static bool debug; static bool poll; -static struct cpci_hp_controller_ops zt5550_hpc_ops; static struct cpci_hp_controller zt5550_hpc; /* Primary cPCI bus bridge device */ @@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void) return 0; } +static struct cpci_hp_controller_ops zt5550_hpc_ops = { + .query_enum = zt5550_hc_query_enum, +}; + static int zt5550_hc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int status; @@ -215,16 +218,17 @@ static int zt5550_hc_init_one(struct pci_dev *pdev, const struct pci_device_id * dbg("returned from zt5550_hc_config"); 
memset(&zt5550_hpc, 0, sizeof(struct cpci_hp_controller)); - zt5550_hpc_ops.query_enum = zt5550_hc_query_enum; zt5550_hpc.ops = &zt5550_hpc_ops; if (!poll) { zt5550_hpc.irq = hc_dev->irq; zt5550_hpc.irq_flags = IRQF_SHARED; zt5550_hpc.dev_id = hc_dev; - zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq; - zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq; - zt5550_hpc_ops.check_irq = zt5550_hc_check_irq; + pax_open_kernel(); + const_cast(zt5550_hpc_ops.enable_irq) = zt5550_hc_enable_irq; + const_cast(zt5550_hpc_ops.disable_irq) = zt5550_hc_disable_irq; + const_cast(zt5550_hpc_ops.check_irq) = zt5550_hc_check_irq; + pax_close_kernel(); } else { info("using ENUM# polling mode"); } diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c index c25fc9061..b05477470 100644 --- a/drivers/pci/hotplug/cpqphp_nvram.c +++ b/drivers/pci/hotplug/cpqphp_nvram.c @@ -425,8 +425,10 @@ static u32 store_HRT(void __iomem *rom_start) void compaq_nvram_init(void __iomem *rom_start) { +#ifndef CONFIG_PAX_KERNEXEC if (rom_start) compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); +#endif dbg("int15 entry = %p\n", compaq_int15_entry_point); diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index fea0b8b33..0d3e89006 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -434,8 +434,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, return -EINVAL; } - slot->ops->owner = owner; - slot->ops->mod_name = mod_name; + pax_open_kernel(); + const_cast(slot->ops->owner) = owner; + const_cast(slot->ops->mod_name) = mod_name; + pax_close_kernel(); mutex_lock(&pci_hp_mutex); /* diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 7d32fa33d..62d9b1d20 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -86,7 +86,7 @@ static int init_slot(struct controller *ctrl) struct slot *slot = ctrl->slot; struct hotplug_slot *hotplug = NULL; struct hotplug_slot_info *info = NULL; - struct hotplug_slot_ops *ops = NULL; + hotplug_slot_ops_no_const *ops = NULL; char name[SLOT_NAME_SIZE]; int retval = -ENOMEM; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 3455f752d..889b24431 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -477,8 +477,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev) { struct attribute **msi_attrs; struct attribute *msi_attr; - struct device_attribute *msi_dev_attr; - struct attribute_group *msi_irq_group; + device_attribute_no_const *msi_dev_attr; + attribute_group_no_const *msi_irq_group; const struct attribute_group **msi_irq_groups; struct msi_desc *entry; int ret = -ENOMEM; @@ -540,7 +540,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev) count = 0; msi_attr = msi_attrs[count]; while (msi_attr) { - msi_dev_attr = container_of(msi_attr, struct device_attribute, attr); + msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr); kfree(msi_attr->name); kfree(msi_dev_attr); ++count; @@ -1418,12 +1418,14 @@ static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info) if (ops == NULL) { info->ops = &pci_msi_domain_ops_default; } else { + pax_open_kernel(); if (ops->set_desc == NULL) - ops->set_desc = pci_msi_domain_set_desc; + const_cast(ops->set_desc) = pci_msi_domain_set_desc; if (ops->msi_check == NULL) - ops->msi_check = pci_msi_domain_check_cap; + const_cast(ops->msi_check) = pci_msi_domain_check_cap; if (ops->handle_error ==
NULL) - ops->handle_error = pci_msi_domain_handle_error; + const_cast(ops->handle_error) = pci_msi_domain_handle_error; + pax_close_kernel(); } } @@ -1432,12 +1434,14 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) struct irq_chip *chip = info->chip; BUG_ON(!chip); + pax_open_kernel(); if (!chip->irq_write_msi_msg) - chip->irq_write_msi_msg = pci_msi_domain_write_msg; + const_cast(chip->irq_write_msi_msg) = pci_msi_domain_write_msg; if (!chip->irq_mask) - chip->irq_mask = pci_msi_mask_irq; + const_cast(chip->irq_mask) = pci_msi_mask_irq; if (!chip->irq_unmask) - chip->irq_unmask = pci_msi_unmask_irq; + const_cast(chip->irq_unmask) = pci_msi_unmask_irq; + pax_close_kernel(); } /** diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index bcd10c795..c7c18bcee 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1141,7 +1141,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) { /* allocate attribute structure, piggyback attribute name */ int name_len = write_combine ? 13 : 10; - struct bin_attribute *res_attr; + bin_attribute_no_const *res_attr; char *res_attr_name; int retval; @@ -1321,7 +1321,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor static int pci_create_capabilities_sysfs(struct pci_dev *dev) { int retval; - struct bin_attribute *attr; + bin_attribute_no_const *attr; /* If the device has VPD, try to expose it in sysfs. */ if (dev->vpd) { @@ -1368,7 +1368,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev) { int retval; int rom_size; - struct bin_attribute *attr; + bin_attribute_no_const *attr; if (!sysfs_initialized) return -EACCES; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 451856210..fd0ac76e6 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -116,7 +116,7 @@ struct pci_vpd_ops { struct pci_vpd { const struct pci_vpd_ops *ops; - struct bin_attribute *attr; /* descriptor for sysfs VPD entry */ + bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */ struct mutex lock; unsigned int len; u16 flag; @@ -317,7 +317,7 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) #endif /* CONFIG_PCI_IOV */ -unsigned long pci_cardbus_resource_alignment(struct resource *); +unsigned long pci_cardbus_resource_alignment(const struct resource *); static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, struct resource *res) diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index b0916b126..5ba622426 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -27,9 +27,9 @@ #define MODULE_PARAM_PREFIX "pcie_aspm." 
/* Note: those are not register definitions */ -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ -#define ASPM_STATE_L1 (4) /* L1 state */ +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */ +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */ +#define ASPM_STATE_L1 (4U) /* L1 state */ #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) @@ -782,7 +782,7 @@ void pci_disable_link_state(struct pci_dev *pdev, int state) } EXPORT_SYMBOL(pci_disable_link_state); -static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) +static int pcie_aspm_set_policy(const char *val, const struct kernel_param *kp) { int i; struct pcie_link_state *link; @@ -809,7 +809,7 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) return 0; } -static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp) +static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) { int i, cnt = 0; for (i = 0; i < ARRAY_SIZE(policy_str); i++) diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 79327cc14..28fde3f60 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -360,7 +360,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = { +static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = { /* * Boxes that should not use MSI for PCIe PME signaling. */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 300770cdc..552fc7ee7 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -180,7 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, u16 orig_cmd; struct pci_bus_region region, inverted_region; - mask = type ? PCI_ROM_ADDRESS_MASK : ~0; + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0; /* No printks while decoding is disabled! 
*/ if (!dev->mmio_always_on) { diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 2408abe4e..455d4d4cf 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -437,7 +437,16 @@ static const struct file_operations proc_bus_pci_dev_operations = { static int __init pci_proc_init(void) { struct pci_dev *dev = NULL; + +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL); +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); +#endif +#else proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); +#endif proc_create("devices", 0, proc_bus_pci_dir, &proc_bus_pci_dev_operations); proc_initialized = 1; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index f30ca75b5..4bae5cb8f 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -406,8 +406,12 @@ static void __assign_resources_sorted(struct list_head *head, /* Update res in head list with add_size in realloc_head list */ list_for_each_entry_safe(dev_res, tmp_res, head, list) { - dev_res->res->end += get_res_add_size(realloc_head, - dev_res->res); + resource_size_t add_size = get_res_add_size(realloc_head, dev_res->res); + + if (dev_res->res->start == 0 && dev_res->res->end == RESOURCE_SIZE_MAX) + dev_res->res->end = add_size - 1; + else + dev_res->res->end += get_res_add_size(realloc_head, dev_res->res); /* * There are two kinds of additional resources in the list: @@ -1120,7 +1124,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, return 0; } -unsigned long pci_cardbus_resource_alignment(struct resource *res) +unsigned long pci_cardbus_resource_alignment(const struct resource *res) { if (res->flags & IORESOURCE_IO) return pci_cardbus_io_size; diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index d318ca055..40fba0143 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1097,7 +1097,7 @@ static int nmk_gpio_probe(struct platform_device *dev) struct device_node *np = dev->dev.of_node; struct nmk_gpio_chip *nmk_chip; struct gpio_chip *chip; - struct irq_chip *irqchip; + irq_chip_no_const *irqchip; int latent_irq; bool supports_sleepmode; int irq; diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 9f0904185..5713a0ec7 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -23,6 +23,7 @@ #include /* Since we request GPIOs from ourself */ #include +#include #include "pinctrl-at91.h" #include "core.h" @@ -1600,7 +1601,9 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, at91_gpio->pioc_hwirq = irqd_to_hwirq(d); /* Setup proper .irq_set_type function */ - gpio_irqchip.irq_set_type = at91_gpio->ops->irq_type; + pax_open_kernel(); + const_cast(gpio_irqchip.irq_set_type) = at91_gpio->ops->irq_type; + pax_close_kernel(); /* Disable irqs of this PIO controller */ writel_relaxed(~0, at91_gpio->regbase + PIO_IDR); diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index e8a44a9bc..d8599736a 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -518,7 +518,7 @@ static struct chromeos_laptop cr48 = { .callback = chromeos_laptop_dmi_matched, \ .driver_data = (void *)&board_ -static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = { +static const 
struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = { { .ident = "Samsung Series 5 550", .matches = { diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index 308a853ac..b0693fdec 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c @@ -14,7 +14,7 @@ #include #include -static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = { +static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = { { /* * Today all Chromebooks/boxes ship with Google_* as version and diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index f9a245465..275966484 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -300,7 +300,7 @@ static int cros_ec_lpc_remove(struct platform_device *pdev) return 0; } -static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = { +static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { { /* * Today all Chromebooks/boxes ship with Google_* as version and diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index 005629447..8f8c2d5e8 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -209,7 +209,7 @@ struct wmax_led_args { } __packed; static struct platform_device *platform_device; -static struct device_attribute *zone_dev_attrs; +static device_attribute_no_const *zone_dev_attrs; static struct attribute **zone_attrs; static struct platform_zone *zone_data; @@ -219,7 +219,7 @@ static struct platform_driver platform_driver = { } }; -static struct attribute_group zone_attribute_group = { +static attribute_group_no_const zone_attribute_group = { .name = "rgb_zones", }; diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index a66be1373..124be136d 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -482,7 +482,7 @@ static int gmux_set_power_state(enum vga_switcheroo_client_id id, return gmux_set_discrete_state(apple_gmux_data, state); } -static int gmux_get_client_id(struct pci_dev *pdev) +static enum vga_switcheroo_client_id gmux_get_client_id(struct pci_dev *pdev) { /* * Early Macbook Pros with switchable graphics use nvidia diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index ce6ca31a2..3e69fbe47 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -1872,6 +1872,10 @@ static int show_dsts(struct seq_file *m, void *data) int err; u32 retval = -1; +#ifdef CONFIG_GRKERNSEC_KMEM + return -EPERM; +#endif + err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval); if (err < 0) @@ -1888,6 +1892,10 @@ static int show_devs(struct seq_file *m, void *data) int err; u32 retval = -1; +#ifdef CONFIG_GRKERNSEC_KMEM + return -EPERM; +#endif + err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param, &retval); @@ -1912,6 +1920,10 @@ static int show_call(struct seq_file *m, void *data) union acpi_object *obj; acpi_status status; +#ifdef CONFIG_GRKERNSEC_KMEM + return -EPERM; +#endif + status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, asus->debug.method_id, &input, &output); diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index e1c2b6d4b..8f25439ed 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -805,7 +805,7 @@ static int dmi_check_cb_extra(const 
struct dmi_system_id *id) return 1; } -static struct dmi_system_id __initdata compal_dmi_table[] = { +static const struct dmi_system_id __initconst compal_dmi_table[] = { { .ident = "FL90/IFL90", .matches = { diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c index 458e6c948..089aee733 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id) "ThinkPad T42p", so the order of the entries matters. If your ThinkPad is not recognized, please update to latest BIOS. This is especially the case for some R52 ThinkPads. */ -static struct dmi_system_id __initdata hdaps_whitelist[] = { +static const struct dmi_system_id __initconst hdaps_whitelist[] = { HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"), diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index c62e5e11c..854b4188d 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) { } -static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = { +static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = { { \ .matches = { \ DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \ diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c index 6aa33c4a8..cfb54257e 100644 --- a/drivers/platform/x86/intel_oaktrail.c +++ b/drivers/platform/x86/intel_oaktrail.c @@ -299,7 +299,7 @@ static int dmi_check_cb(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id __initdata oaktrail_dmi_table[] = { +static const struct dmi_system_id __initconst oaktrail_dmi_table[] = { { .ident = "OakTrail platform", .matches = { diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 423177046..cbf93a6fc 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -605,7 +605,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi) return 1; } -static struct dmi_system_id __initdata msi_dmi_table[] = { +static const struct dmi_system_id __initconst msi_dmi_table[] = { { .ident = "MSI S270", .matches = { @@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev) if (!quirks->ec_read_only) { /* allow userland write sysfs file */ - dev_attr_bluetooth.store = store_bluetooth; - dev_attr_wlan.store = store_wlan; - dev_attr_threeg.store = store_threeg; - dev_attr_bluetooth.attr.mode |= S_IWUSR; - dev_attr_wlan.attr.mode |= S_IWUSR; - dev_attr_threeg.attr.mode |= S_IWUSR; + pax_open_kernel(); + const_cast(dev_attr_bluetooth.store) = store_bluetooth; + const_cast(dev_attr_wlan.store) = store_wlan; + const_cast(dev_attr_threeg.store) = store_threeg; + const_cast(dev_attr_bluetooth.attr.mode) |= S_IWUSR; + const_cast(dev_attr_wlan.attr.mode) |= S_IWUSR; + const_cast(dev_attr_threeg.attr.mode) |= S_IWUSR; + pax_close_kernel(); } /* disable hardware control by fn key */ diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index 978e6d640..1f0b37dec 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -184,7 +184,7 @@ static const struct backlight_ops msi_backlight_ops = { static void msi_wmi_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - static struct key_entry *key; + struct key_entry *key; 
union acpi_object *obj; acpi_status status; diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 8c146e2b6..356c62ea6 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata samsung_dmi_table[] = { +static const struct dmi_system_id __initconst samsung_dmi_table[] = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c index e6aac725a..e11ff24fc 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c @@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id) return 1; } -static struct dmi_system_id __initdata samsungq10_dmi_table[] = { +static const struct dmi_system_id __initconst samsungq10_dmi_table[] = { { .ident = "Samsung Q10", .matches = { diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index c890a4958..9545052cf 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -2556,7 +2556,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd) } /* High speed charging function */ -static struct device_attribute *hsc_handle; +static device_attribute_no_const *hsc_handle; static ssize_t sony_nc_highspeed_charging_store(struct device *dev, struct device_attribute *attr, @@ -2630,7 +2630,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd) } /* low battery function */ -static struct device_attribute *lowbatt_handle; +static device_attribute_no_const *lowbatt_handle; static ssize_t sony_nc_lowbatt_store(struct device *dev, struct device_attribute *attr, @@ -2696,7 +2696,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd) } /* fan speed function */ -static struct device_attribute *fan_handle, *hsf_handle; +static device_attribute_no_const *fan_handle, *hsf_handle; static ssize_t sony_nc_hsfan_store(struct device *dev, struct device_attribute *attr, @@ -2803,7 +2803,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd) } /* USB charge function */ -static struct device_attribute *uc_handle; +static device_attribute_no_const *uc_handle; static ssize_t sony_nc_usb_charge_store(struct device *dev, struct device_attribute *attr, @@ -2877,7 +2877,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd) } /* Panel ID function */ -static struct device_attribute *panel_handle; +static device_attribute_no_const *panel_handle; static ssize_t sony_nc_panelid_show(struct device *dev, struct device_attribute *attr, char *buffer) @@ -2924,7 +2924,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd) } /* smart connect function */ -static struct device_attribute *sc_handle; +static device_attribute_no_const *sc_handle; static ssize_t sony_nc_smart_conn_store(struct device *dev, struct device_attribute *attr, @@ -4880,7 +4880,7 @@ static struct acpi_driver sony_pic_driver = { .drv.pm = &sony_pic_pm, }; -static struct dmi_system_id __initdata sonypi_dmi_table[] = { +static const struct dmi_system_id __initconst sonypi_dmi_table[] = { { .ident = "Sony Vaio", .matches = { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index b65ce7519..d92001eea 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ 
-2462,10 +2462,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, && !tp_features.bright_unkfw) TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); } +} #undef TPACPI_COMPARE_KEY #undef TPACPI_MAY_SEND_KEY -} /* * Polling driver @@ -4203,7 +4203,7 @@ static int bluetooth_get_status(void) TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF; } -static int bluetooth_set_status(enum tpacpi_rfkill_state state) +static int bluetooth_set_status(const enum tpacpi_rfkill_state state) { int status; @@ -4391,7 +4391,7 @@ static int wan_get_status(void) TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF; } -static int wan_set_status(enum tpacpi_rfkill_state state) +static int wan_set_status(const enum tpacpi_rfkill_state state) { int status; @@ -4577,7 +4577,7 @@ static int uwb_get_status(void) TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF; } -static int uwb_set_status(enum tpacpi_rfkill_state state) +static int uwb_set_status(const enum tpacpi_rfkill_state state) { int status; @@ -9526,7 +9526,7 @@ static struct ibm_init_struct ibms_init[] __initdata = { }, }; -static int __init set_ibm_param(const char *val, struct kernel_param *kp) +static int __init set_ibm_param(const char *val, const struct kernel_param *kp) { unsigned int i; struct ibm_struct *ibm; diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index 2df07ee8f..92dca69e5 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c @@ -64,7 +64,7 @@ static void toshiba_wmi_notify(u32 value, void *context) kfree(response.pointer); } -static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { +static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = { { .ident = "Toshiba laptop", .matches = { diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h index 3151fd164..12c5b207d 100644 --- a/drivers/pnp/base.h +++ b/drivers/pnp/base.h @@ -163,7 +163,7 @@ struct pnp_resource *pnp_add_resource(struct pnp_dev *dev, struct resource *res); struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, int flags); -struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, +struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, resource_size_t dma, int flags); struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev, resource_size_t start, diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index 438d4c72c..ca8a2fba1 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -59,7 +59,7 @@ do { \ set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ } while(0) -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); /* @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, cpu = get_cpu(); save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; + + pax_open_kernel(); get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; + pax_close_kernel(); /* On some boxes IRQ's during PnP BIOS calls are deadly. */ spin_lock_irqsave(&pnp_bios_lock, flags); @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, :"memory"); spin_unlock_irqrestore(&pnp_bios_lock, flags); + pax_open_kernel(); get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; + pax_close_kernel(); + put_cpu(); /* If we get here and this is set then the PnP BIOS faulted on us. 
*/ @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base) return status; } -void pnpbios_calls_init(union pnp_bios_install_struct *header) +void __init pnpbios_calls_init(union pnp_bios_install_struct *header) { int i; @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) pnp_bios_callpoint.offset = header->fields.pm16offset; pnp_bios_callpoint.segment = PNP_CS16; + pax_open_kernel(); + for_each_possible_cpu(i) { struct desc_struct *gdt = get_cpu_gdt_table(i); if (!gdt) @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS], (unsigned long)__va(header->fields.pm16dseg)); } + + pax_close_kernel(); } diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index c38a5b973..6b3284cff 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id pnpbios_dmi_table[] __initdata = { +static const struct dmi_system_id pnpbios_dmi_table[] __initconst = { { /* PnPBIOS GPF on boot */ .callback = exploding_pnp_bios, .ident = "Higraded P14H", diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index f980ff716..77121c4a8 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c @@ -543,7 +543,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, return pnp_res; } -struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, +struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, resource_size_t dma, int flags) { struct pnp_resource *pnp_res; @@ -551,7 +551,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, pnp_res = pnp_new_resource(dev); if (!pnp_res) { - dev_err(&dev->dev, "can't add resource for DMA %d\n", dma); + dev_err(&dev->dev, "can't add resource for DMA %lld\n", dma); return NULL; } diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index 1b5d45058..b6042f8bc 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -206,7 +207,9 @@ static int __init at91_reset_probe(struct platform_device *pdev) } match = of_match_node(at91_reset_of_match, pdev->dev.of_node); - at91_restart_nb.notifier_call = match->data; + pax_open_kernel(); + const_cast(at91_restart_nb.notifier_call) = match->data; + pax_close_kernel(); sclk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(sclk)) diff --git a/drivers/power/supply/pda_power.c b/drivers/power/supply/pda_power.c index dfe1ee89f..70ba23508 100644 --- a/drivers/power/supply/pda_power.c +++ b/drivers/power/supply/pda_power.c @@ -38,7 +38,11 @@ static struct power_supply *pda_psy_ac, *pda_psy_usb; #if IS_ENABLED(CONFIG_USB_PHY) static struct usb_phy *transceiver; -static struct notifier_block otg_nb; +static int otg_handle_notification(struct notifier_block *nb, + unsigned long event, void *unused); +static struct notifier_block otg_nb = { + .notifier_call = otg_handle_notification, +}; #endif static struct regulator *ac_draw; @@ -373,7 +377,6 @@ static int pda_power_probe(struct platform_device *pdev) #if IS_ENABLED(CONFIG_USB_PHY) if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) { - otg_nb.notifier_call = otg_handle_notification; ret = usb_register_notifier(transceiver, &otg_nb); if (ret) { dev_err(dev, "failure to register otg notifier\n"); diff --git 
a/drivers/power/supply/power_supply.h b/drivers/power/supply/power_supply.h index cc439fd89..8fa30df6b 100644 --- a/drivers/power/supply/power_supply.h +++ b/drivers/power/supply/power_supply.h @@ -16,12 +16,12 @@ struct power_supply; #ifdef CONFIG_SYSFS -extern void power_supply_init_attrs(struct device_type *dev_type); +extern void power_supply_init_attrs(void); extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env); #else -static inline void power_supply_init_attrs(struct device_type *dev_type) {} +static inline void power_supply_init_attrs(void) {} #define power_supply_uevent NULL #endif /* CONFIG_SYSFS */ diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index a74d8ca38..c98d74524 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class); ATOMIC_NOTIFIER_HEAD(power_supply_notifier); EXPORT_SYMBOL_GPL(power_supply_notifier); -static struct device_type power_supply_dev_type; +extern const struct attribute_group *power_supply_attr_groups[]; +static struct device_type power_supply_dev_type = { + .groups = power_supply_attr_groups, +}; #define POWER_SUPPLY_DEFERRED_REGISTER_TIME msecs_to_jiffies(10) @@ -969,7 +972,7 @@ static int __init power_supply_class_init(void) return PTR_ERR(power_supply_class); power_supply_class->dev_uevent = power_supply_uevent; - power_supply_init_attrs(&power_supply_dev_type); + power_supply_init_attrs(); return 0; } diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index bcde8d134..0406331d7 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -239,17 +239,15 @@ static struct attribute_group power_supply_attr_group = { .is_visible = power_supply_attr_is_visible, }; -static const struct attribute_group *power_supply_attr_groups[] = { +const struct attribute_group *power_supply_attr_groups[] = { &power_supply_attr_group, NULL, }; -void power_supply_init_attrs(struct device_type *dev_type) +void power_supply_init_attrs(void) { int i; - dev_type->groups = power_supply_attr_groups; - for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) __power_supply_attrs[i] = &power_supply_attrs[i].attr; } diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 14bde0db8..9391277e5 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -154,8 +154,77 @@ struct powercap_constraint_attr { struct device_attribute name_attr; }; +static ssize_t show_constraint_name(struct device *dev, + struct device_attribute *dev_attr, + char *buf); + static struct powercap_constraint_attr - constraint_attrs[MAX_CONSTRAINTS_PER_ZONE]; + constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = { + [0 ... 
MAX_CONSTRAINTS_PER_ZONE - 1] = { + .power_limit_attr = { + .attr = { + .name = NULL, + .mode = S_IWUSR | S_IRUGO + }, + .show = show_constraint_power_limit_uw, + .store = store_constraint_power_limit_uw + }, + + .time_window_attr = { + .attr = { + .name = NULL, + .mode = S_IWUSR | S_IRUGO + }, + .show = show_constraint_time_window_us, + .store = store_constraint_time_window_us + }, + + .max_power_attr = { + .attr = { + .name = NULL, + .mode = S_IRUGO + }, + .show = show_constraint_max_power_uw, + .store = NULL + }, + + .min_power_attr = { + .attr = { + .name = NULL, + .mode = S_IRUGO + }, + .show = show_constraint_min_power_uw, + .store = NULL + }, + + .max_time_window_attr = { + .attr = { + .name = NULL, + .mode = S_IRUGO + }, + .show = show_constraint_max_time_window_us, + .store = NULL + }, + + .min_time_window_attr = { + .attr = { + .name = NULL, + .mode = S_IRUGO + }, + .show = show_constraint_min_time_window_us, + .store = NULL + }, + + .name_attr = { + .attr = { + .name = NULL, + .mode = S_IRUGO + }, + .show = show_constraint_name, + .store = NULL + } + } +}; /* A list of powercap control_types */ static LIST_HEAD(powercap_cntrl_list); @@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev, } static int create_constraint_attribute(int id, const char *name, - int mode, - struct device_attribute *dev_attr, - ssize_t (*show)(struct device *, - struct device_attribute *, char *), - ssize_t (*store)(struct device *, - struct device_attribute *, - const char *, size_t) - ) + struct device_attribute *dev_attr) { + name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name); - dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s", - id, name); - if (!dev_attr->attr.name) + if (!name) return -ENOMEM; - dev_attr->attr.mode = mode; - dev_attr->show = show; - dev_attr->store = store; + + pax_open_kernel(); + const_cast(dev_attr->attr.name) = name; + pax_close_kernel(); return 0; } @@ -236,49 +298,31 @@ static int seed_constraint_attributes(void) for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) { ret = create_constraint_attribute(i, "power_limit_uw", - S_IWUSR | S_IRUGO, - &constraint_attrs[i].power_limit_attr, - show_constraint_power_limit_uw, - store_constraint_power_limit_uw); + &constraint_attrs[i].power_limit_attr); if (ret) goto err_alloc; ret = create_constraint_attribute(i, "time_window_us", - S_IWUSR | S_IRUGO, - &constraint_attrs[i].time_window_attr, - show_constraint_time_window_us, - store_constraint_time_window_us); + &constraint_attrs[i].time_window_attr); if (ret) goto err_alloc; - ret = create_constraint_attribute(i, "name", S_IRUGO, - &constraint_attrs[i].name_attr, - show_constraint_name, - NULL); + ret = create_constraint_attribute(i, "name", + &constraint_attrs[i].name_attr); if (ret) goto err_alloc; - ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO, - &constraint_attrs[i].max_power_attr, - show_constraint_max_power_uw, - NULL); + ret = create_constraint_attribute(i, "max_power_uw", + &constraint_attrs[i].max_power_attr); if (ret) goto err_alloc; - ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO, - &constraint_attrs[i].min_power_attr, - show_constraint_min_power_uw, - NULL); + ret = create_constraint_attribute(i, "min_power_uw", + &constraint_attrs[i].min_power_attr); if (ret) goto err_alloc; ret = create_constraint_attribute(i, "max_time_window_us", - S_IRUGO, - &constraint_attrs[i].max_time_window_attr, - show_constraint_max_time_window_us, - NULL); + &constraint_attrs[i].max_time_window_attr); if (ret) goto err_alloc; 
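/*
 * Illustrative sketch, not part of the applied patch: the powercap hunk above
 * replaces run-time assignment of .show/.store/.mode with a compile-time
 * initializer, using GCC's range designator so every array slot starts from
 * the same static template and only the kasprintf()'d name still has to be
 * patched in later. The stand-alone program below models just the
 * range-designator idiom with an invented struct; it is plain C (plus the
 * GCC/Clang "[a ... b]" extension) and runs in userspace.
 */
#include <stdio.h>

struct attr_tmpl {
	const char *name;	/* filled in at run time, like attr.name above */
	unsigned int mode;
	int (*show)(void);
};

static int show_limit(void) { return 42; }

#define NSLOTS 4
static struct attr_tmpl slots[NSLOTS] = {
	[0 ... NSLOTS - 1] = {	/* every slot gets the same template */
		.name = NULL,
		.mode = 0644,
		.show = show_limit,
	},
};

int main(void)
{
	/* only the per-slot name would still need run-time work */
	for (int i = 0; i < NSLOTS; i++)
		printf("slot %d mode %o show %d\n", i, slots[i].mode, slots[i].show());
	return 0;
}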
ret = create_constraint_attribute(i, "min_time_window_us", - S_IRUGO, - &constraint_attrs[i].min_time_window_attr, - show_constraint_min_time_window_us, - NULL); + &constraint_attrs[i].min_time_window_attr); if (ret) goto err_alloc; @@ -378,10 +422,12 @@ static void create_power_zone_common_attributes( power_zone->zone_dev_attrs[count++] = &dev_attr_max_energy_range_uj.attr; if (power_zone->ops->get_energy_uj) { + pax_open_kernel(); if (power_zone->ops->reset_energy_uj) - dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO; + const_cast(dev_attr_energy_uj.attr.mode) = S_IWUSR | S_IRUGO; else - dev_attr_energy_uj.attr.mode = S_IRUGO; + const_cast(dev_attr_energy_uj.attr.mode) = S_IRUGO; + pax_close_kernel(); power_zone->zone_dev_attrs[count++] = &dev_attr_energy_uj.attr; } diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 9c5d41421..c7900ce54 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -51,7 +51,7 @@ struct ptp_clock { struct mutex pincfg_mux; /* protect concurrent info->pin_config access */ wait_queue_head_t tsev_wq; int defunct; /* tells readers to go away when clock is being removed */ - struct device_attribute *pin_dev_attr; + device_attribute_no_const *pin_dev_attr; struct attribute **pin_attr; struct attribute_group pin_attr_group; }; diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 302e626fe..12579afe5 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp) goto no_pin_attr; for (i = 0; i < n_pins; i++) { - struct device_attribute *da = &ptp->pin_dev_attr[i]; + device_attribute_no_const *da = &ptp->pin_dev_attr[i]; sysfs_attr_init(&da->attr); da->attr.name = info->pin_config[i].name; da->attr.mode = 0644; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 5c1519b22..eb73d916d 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3916,7 +3916,7 @@ regulator_register(const struct regulator_desc *regulator_desc, const struct regulation_constraints *constraints = NULL; const struct regulator_init_data *init_data; struct regulator_config *config = NULL; - static atomic_t regulator_no = ATOMIC_INIT(-1); + static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1); struct regulator_dev *rdev; struct device *dev; int ret, i; @@ -4009,7 +4009,7 @@ regulator_register(const struct regulator_desc *regulator_desc, rdev->dev.class = ®ulator_class; rdev->dev.parent = dev; dev_set_name(&rdev->dev, "regulator.%lu", - (unsigned long) atomic_inc_return(®ulator_no)); + (unsigned long) atomic_inc_return_unchecked(®ulator_no)); /* set regulator constraints */ if (init_data) diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c index b87f62dd4..34f1cdf4a 100644 --- a/drivers/regulator/max8660.c +++ b/drivers/regulator/max8660.c @@ -423,8 +423,10 @@ static int max8660_probe(struct i2c_client *client, max8660->shadow_regs[MAX8660_OVER1] = 5; } else { /* Otherwise devices can be toggled via software */ - max8660_dcdc_ops.enable = max8660_dcdc_enable; - max8660_dcdc_ops.disable = max8660_dcdc_disable; + pax_open_kernel(); + const_cast(max8660_dcdc_ops.enable) = max8660_dcdc_enable; + const_cast(max8660_dcdc_ops.disable) = max8660_dcdc_disable; + pax_close_kernel(); } /* diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index e0c747aa9..c6eb788c2 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c @@ -751,9 +751,11 @@ static 
int max8973_probe(struct i2c_client *client, if (!pdata->enable_ext_control) { max->desc.enable_reg = MAX8973_VOUT; max->desc.enable_mask = MAX8973_VOUT_ENABLE; - max->ops.enable = regulator_enable_regmap; - max->ops.disable = regulator_disable_regmap; - max->ops.is_enabled = regulator_is_enabled_regmap; + pax_open_kernel(); + const_cast(max->ops.enable) = regulator_enable_regmap; + const_cast(max->ops.disable) = regulator_disable_regmap; + const_cast(max->ops.is_enabled) = regulator_is_enabled_regmap; + pax_close_kernel(); break; } @@ -781,9 +783,11 @@ static int max8973_probe(struct i2c_client *client, max->desc.enable_reg = MAX8973_VOUT; max->desc.enable_mask = MAX8973_VOUT_ENABLE; - max->ops.enable = regulator_enable_regmap; - max->ops.disable = regulator_disable_regmap; - max->ops.is_enabled = regulator_is_enabled_regmap; + pax_open_kernel(); + const_cast(max->ops.enable) = regulator_enable_regmap; + const_cast(max->ops.disable) = regulator_disable_regmap; + const_cast(max->ops.is_enabled) = regulator_is_enabled_regmap; + pax_close_kernel(); max->ops.set_current_limit = max8973_set_current_limit; max->ops.get_current_limit = max8973_get_current_limit; break; diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c index 0d17c9206..ce5897e2f 100644 --- a/drivers/regulator/mc13892-regulator.c +++ b/drivers/regulator/mc13892-regulator.c @@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev) mc13xxx_unlock(mc13892); /* update mc13892_vcam ops */ - memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops, + pax_open_kernel(); + memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops, sizeof(struct regulator_ops)); - mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode, - mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode, + const_cast(mc13892_vcam_ops.set_mode) = mc13892_vcam_set_mode, + const_cast(mc13892_vcam_ops.get_mode) = mc13892_vcam_get_mode, + pax_close_kernel(); mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops; mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators, diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 08552270b..f2fe5af32 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -323,9 +323,10 @@ void rproc_free_vring(struct rproc_vring *rvring) * * Returns 0 on success, or an appropriate error code otherwise */ -static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, +static int rproc_handle_vdev(struct rproc *rproc, void *_rsc, int offset, int avail) { + struct fw_rsc_vdev *rsc = _rsc; struct device *dev = &rproc->dev; struct rproc_vdev *rvdev; int i, ret; @@ -400,9 +401,10 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, * * Returns 0 on success, or an appropriate error code otherwise */ -static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, +static int rproc_handle_trace(struct rproc *rproc, void *_rsc, int offset, int avail) { + struct fw_rsc_trace *rsc = _rsc; struct rproc_mem_entry *trace; struct device *dev = &rproc->dev; void *ptr; @@ -480,9 +482,10 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, * and not allow firmwares to request access to physical addresses that * are outside those ranges. 
*/ -static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, +static int rproc_handle_devmem(struct rproc *rproc, void *_rsc, int offset, int avail) { + struct fw_rsc_devmem *rsc = _rsc; struct rproc_mem_entry *mapping; struct device *dev = &rproc->dev; int ret; @@ -552,9 +555,10 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, * pressure is important; it may have a substantial impact on performance. */ static int rproc_handle_carveout(struct rproc *rproc, - struct fw_rsc_carveout *rsc, + void *_rsc, int offset, int avail) { + struct fw_rsc_carveout *rsc = _rsc; struct rproc_mem_entry *carveout, *mapping; struct device *dev = &rproc->dev; dma_addr_t dma; @@ -673,9 +677,11 @@ static int rproc_handle_carveout(struct rproc *rproc, return ret; } -static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc, +static int rproc_count_vrings(struct rproc *rproc, void *_rsc, int offset, int avail) { + struct fw_rsc_vdev *rsc = _rsc; + /* Summarize the number of notification IDs */ rproc->max_notifyid += rsc->num_of_vrings; @@ -687,14 +693,14 @@ static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc, * enum fw_resource_type. */ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = { - [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, - [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, - [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, - [RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings, + [RSC_CARVEOUT] = rproc_handle_carveout, + [RSC_DEVMEM] = rproc_handle_devmem, + [RSC_TRACE] = rproc_handle_trace, + [RSC_VDEV] = rproc_count_vrings, }; static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = { - [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev, + [RSC_VDEV] = rproc_handle_vdev, }; /* handle firmware resource entries before booting the remote processor */ diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c index 9a3f2a6f5..c19b00a13 100644 --- a/drivers/rtc/rtc-armada38x.c +++ b/drivers/rtc/rtc-armada38x.c @@ -18,6 +18,7 @@ #include #include #include +#include #define RTC_STATUS 0x0 #define RTC_STATUS_ALARM1 BIT(0) @@ -246,8 +247,10 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev) * If there is no interrupt available then we can't * use the alarm */ - armada38x_rtc_ops.set_alarm = NULL; - armada38x_rtc_ops.alarm_irq_enable = NULL; + pax_open_kernel(); + const_cast(armada38x_rtc_ops.set_alarm) = NULL; + const_cast(armada38x_rtc_ops.alarm_irq_enable) = NULL; + pax_close_kernel(); } platform_set_drvdata(pdev, rtc); if (rtc->irq != -1) diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7030d7cd3..eafd7ef37 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -736,7 +736,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) } /* export at least the first block of NVRAM */ - nvram.size = address_space - NVRAM_OFFSET; + pax_open_kernel(); + const_cast(nvram.size) = address_space - NVRAM_OFFSET; + pax_close_kernel(); retval = sysfs_create_bin_file(&dev->kobj, &nvram); if (retval < 0) { dev_dbg(dev, "can't create nvram file? 
%d\n", retval); diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index a6d9434ad..dc26b717b 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "rtc-core.h" static dev_t rtc_devt; @@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file, if (copy_from_user(&tm, uarg, sizeof(tm))) return -EFAULT; + gr_log_timechange(); + return rtc_set_time(rtc, &tm); case RTC_PIE_ON: diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4e31036ee..4cfe2afca 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -111,7 +111,7 @@ struct ds1307 { u8 offset; /* register's offset */ u8 regs[11]; u16 nvram_offset; - struct bin_attribute *nvram; + bin_attribute_no_const *nvram; enum ds_type type; unsigned long flags; #define HAS_NVRAM 0 /* bit 0 == sysfs file active */ diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 58698d21c..8560ebf9e 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -798,9 +798,11 @@ static int m41t80_probe(struct i2c_client *client, dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n"); client->irq = 0; } else { - m41t80_rtc_ops.read_alarm = m41t80_read_alarm; - m41t80_rtc_ops.set_alarm = m41t80_set_alarm; - m41t80_rtc_ops.alarm_irq_enable = m41t80_alarm_irq_enable; + pax_open_kernel(); + const_cast(m41t80_rtc_ops.read_alarm) = m41t80_read_alarm; + const_cast(m41t80_rtc_ops.set_alarm) = m41t80_set_alarm; + const_cast(m41t80_rtc_ops.alarm_irq_enable) = m41t80_alarm_irq_enable; + pax_close_kernel(); /* Enable the wakealarm */ device_init_wakeup(&client->dev, true); } diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c index d99a705be..99654e778 100644 --- a/drivers/rtc/rtc-m48t59.c +++ b/drivers/rtc/rtc-m48t59.c @@ -485,7 +485,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev) if (IS_ERR(m48t59->rtc)) return PTR_ERR(m48t59->rtc); - m48t59_nvram_attr.size = pdata->offset; + pax_open_kernel(); + const_cast(m48t59_nvram_attr.size) = pdata->offset; + pax_close_kernel(); ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr); if (ret) diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c index 1f9f7b4bf..6f8788371 100644 --- a/drivers/rtc/rtc-rv3029c2.c +++ b/drivers/rtc/rtc-rv3029c2.c @@ -832,9 +832,11 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq, dev_warn(dev, "unable to request IRQ, alarms disabled\n"); rv3029->irq = 0; } else { - rv3029_rtc_ops.read_alarm = rv3029_read_alarm; - rv3029_rtc_ops.set_alarm = rv3029_set_alarm; - rv3029_rtc_ops.alarm_irq_enable = rv3029_alarm_irq_enable; + pax_open_kernel(); + const_cast(rv3029_rtc_ops.read_alarm) = rv3029_read_alarm; + const_cast(rv3029_rtc_ops.set_alarm) = rv3029_set_alarm; + const_cast(rv3029_rtc_ops.alarm_irq_enable) = rv3029_alarm_irq_enable; + pax_close_kernel(); } } diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index f9277e536..a1296a314 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -536,6 +536,15 @@ static int rx8900_trickle_charger_init(struct rv8803_data *rv8803) flags); } +static struct rtc_class_ops rv8803_rtc_alarm_ops = { + .read_time = rv8803_get_time, + .set_time = rv8803_set_time, + .ioctl = rv8803_ioctl, + .read_alarm = rv8803_get_alarm, + .set_alarm = rv8803_set_alarm, + .alarm_irq_enable = rv8803_alarm_irq_enable, +}; + static int rv8803_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -580,15 +589,11 @@ 
static int rv8803_probe(struct i2c_client *client, if (err) { dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n"); client->irq = 0; - } else { - rv8803_rtc_ops.read_alarm = rv8803_get_alarm; - rv8803_rtc_ops.set_alarm = rv8803_set_alarm; - rv8803_rtc_ops.alarm_irq_enable = rv8803_alarm_irq_enable; } } rv8803->rtc = devm_rtc_device_register(&client->dev, client->name, - &rv8803_rtc_ops, THIS_MODULE); + client->irq > 0 ? &rv8803_rtc_alarm_ops : &rv8803_rtc_ops, THIS_MODULE); if (IS_ERR(rv8803->rtc)) { dev_err(&client->dev, "unable to register the class device\n"); return PTR_ERR(rv8803->rtc); diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c index 7163b91bb..d7a2c3113 100644 --- a/drivers/rtc/rtc-rx8010.c +++ b/drivers/rtc/rtc-rx8010.c @@ -483,9 +483,11 @@ static int rx8010_probe(struct i2c_client *client, dev_err(&client->dev, "unable to request IRQ\n"); client->irq = 0; } else { - rx8010_rtc_ops.read_alarm = rx8010_read_alarm; - rx8010_rtc_ops.set_alarm = rx8010_set_alarm; - rx8010_rtc_ops.alarm_irq_enable = rx8010_alarm_irq_enable; + pax_open_kernel(); + const_cast(rx8010_rtc_ops.read_alarm) = rx8010_read_alarm; + const_cast(rx8010_rtc_ops.set_alarm) = rx8010_set_alarm; + const_cast(rx8010_rtc_ops.alarm_irq_enable) = rx8010_alarm_irq_enable; + pax_close_kernel(); } } diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c index 3a2da4c89..1d1d4b112 100644 --- a/drivers/rtc/rtc-test.c +++ b/drivers/rtc/rtc-test.c @@ -112,8 +112,10 @@ static int test_probe(struct platform_device *plat_dev) struct rtc_device *rtc; if (test_mmss64) { - test_rtc_ops.set_mmss64 = test_rtc_set_mmss64; - test_rtc_ops.set_mmss = NULL; + pax_open_kernel(); + const_cast(test_rtc_ops.set_mmss64) = test_rtc_set_mmss64; + const_cast(test_rtc_ops.set_mmss) = NULL; + pax_close_kernel(); } rtc = devm_rtc_device_register(&plat_dev->dev, "test", diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 272cb6cd1..43eaab995 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -1035,7 +1035,7 @@ tty3270_open(struct tty_struct *tty, struct file *filp) struct tty3270 *tp = tty->driver_data; struct tty_port *port = &tp->port; - port->count++; + atomic_inc(&port->count); tty_port_tty_set(port, tty); return 0; } diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 6678d1fd8..0293b70d9 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -770,6 +770,11 @@ static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd) return 0; } +static void aac_probe_container_scsi_done(struct scsi_cmnd * scsicmd) +{ + scsicmd->device = NULL; +} + int aac_probe_container(struct aac_dev *dev, int cid) { struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL); @@ -782,7 +787,7 @@ int aac_probe_container(struct aac_dev *dev, int cid) return -ENOMEM; } scsicmd->list.next = NULL; - scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1; + scsicmd->scsi_done = aac_probe_container_scsi_done; scsicmd->device = scsidev; scsidev->sdev_state = 0; diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index d47b527b2..f2c4a8933 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h @@ -1046,7 +1046,7 @@ typedef enum { typedef uint8_t ahd_mode_state; -typedef void ahd_callback_t (void *); +typedef void ahd_linux_callback_t (u_long); struct ahd_completion { diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c 
b/drivers/scsi/aic7xxx/aic79xx_core.c index 109e2c99e..7d3c9b542 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -207,7 +207,7 @@ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid); static void ahd_reset_current_bus(struct ahd_softc *ahd); -static ahd_callback_t ahd_stat_timer; +static ahd_linux_callback_t ahd_stat_timer; #ifdef AHD_DUMP_SEQ static void ahd_dumpseq(struct ahd_softc *ahd); #endif @@ -7041,10 +7041,9 @@ static const char *termstat_strings[] = { /***************************** Timer Facilities *******************************/ #define ahd_timer_init init_timer #define ahd_timer_stop del_timer_sync -typedef void ahd_linux_callback_t (u_long); static void -ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg) +ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_linux_callback_t *func, void *arg) { struct ahd_softc *ahd; @@ -7052,7 +7051,7 @@ ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg) del_timer(timer); timer->data = (u_long)arg; timer->expires = jiffies + (usec * HZ)/1000000; - timer->function = (ahd_linux_callback_t*)func; + timer->function = func; add_timer(timer); } @@ -8878,9 +8877,9 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) /**************************** Statistics Processing ***************************/ static void -ahd_stat_timer(void *arg) +ahd_stat_timer(unsigned long arg) { - struct ahd_softc *ahd = arg; + struct ahd_softc *ahd = (struct ahd_softc *)arg; u_long s; int enint_coal; diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index d9239c2d4..dc556c3e2 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5547,7 +5547,7 @@ static void beiscsi_recover_port(struct work_struct *work) } static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct beiscsi_hba *phba = NULL; diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 0e119d838..1bf8a4946 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -225,8 +225,10 @@ struct bfa_faa_args_s { bfa_boolean_t busy; }; +enum iocfc_event; + struct bfa_iocfc_s { - bfa_fsm_t fsm; + void (*fsm)(struct bfa_iocfc_s *, enum iocfc_event); struct bfa_s *bfa; struct bfa_iocfc_cfg_s cfg; u32 req_cq_pi[BFI_IOC_MAX_CQS]; diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 7209afad8..2450c1255 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1919,15 +1919,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) struct list_head *qe; struct list_head *qen; struct bfa_cb_qe_s *hcb_qe; - bfa_cb_cbfn_status_t cbfn; list_for_each_safe(qe, qen, comp_q) { hcb_qe = (struct bfa_cb_qe_s *) qe; if (hcb_qe->pre_rmv) { /* qe is invalid after return, dequeue before cbfn() */ list_del(qe); - cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn); - cbfn(hcb_qe->cbarg, hcb_qe->fw_status); + hcb_qe->cbfn(hcb_qe->cbarg, hcb_qe->fw_status); } else hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); } diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h index df6760ca0..3b22f4da7 100644 --- a/drivers/scsi/bfa/bfa_cs.h +++ b/drivers/scsi/bfa/bfa_cs.h @@ -184,8 +184,6 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) * @ BFA state machine interfaces */ -typedef void (*bfa_sm_t)(void *sm, 
int event); - /* * oc - object class eg. bfa_ioc * st - state, eg. reset @@ -195,20 +193,75 @@ typedef void (*bfa_sm_t)(void *sm, int event); #define bfa_sm_state_decl(oc, st, otype, etype) \ static void oc ## _sm_ ## st(otype * fsm, etype event) -#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) +#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (_state)) #define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) #define bfa_sm_get_state(_sm) ((_sm)->sm) -#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) +#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (_state)) /* * For converting from state machine function to state encoding. */ -struct bfa_sm_table_s { - bfa_sm_t sm; /* state machine function */ +struct bfa_iocpf_s; +enum iocpf_event; +typedef void (*bfa_fsm_iocpf_t)(struct bfa_iocpf_s *, enum iocpf_event); + +struct iocpf_sm_table_s { + bfa_fsm_iocpf_t sm; /* state machine function */ + int state; /* state machine encoding */ + char *name; /* state name for display */ +}; + +struct bfa_ioc_s; +enum ioc_event; +typedef void (*bfa_fsm_ioc_t)(struct bfa_ioc_s *, enum ioc_event); + +struct ioc_sm_table_s { + bfa_fsm_ioc_t sm; /* state machine function */ int state; /* state machine encoding */ char *name; /* state name for display */ }; -#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) + +struct bfa_fcs_rport_s; +enum rport_event; +typedef void(*bfa_fcs_rport_t)(struct bfa_fcs_rport_s *, enum rport_event); + +struct rport_sm_table_s { + bfa_fcs_rport_t sm; /* state machine function */ + int state; /* state machine encoding */ + char *name; /* state name for display */ +}; + +struct bfa_fcs_vport_s; +enum bfa_fcs_vport_event; +typedef void(*bfa_fcs_vport_t)(struct bfa_fcs_vport_s *, enum bfa_fcs_vport_event); + +struct vport_sm_table_s { + bfa_fcs_vport_t sm; /* state machine function */ + int state; /* state machine encoding */ + char *name; /* state name for display */ +}; + +struct bfa_fcs_itnim_s; +enum bfa_fcs_itnim_event; +typedef void(*bfa_fcs_itnim_t)(struct bfa_fcs_itnim_s *, enum bfa_fcs_itnim_event); + +struct itnim_sm_table_s { + bfa_fcs_itnim_t sm; /* state machine function */ + int state; /* state machine encoding */ + char *name; /* state name for display */ +}; + +struct bfa_fcport_s; +enum bfa_fcport_sm_event; +typedef void(*bfa_fcport_t)(struct bfa_fcport_s *, enum bfa_fcport_sm_event); + +struct fcport_sm_table_s { + bfa_fcport_t sm; /* state machine function */ + int state; /* state machine encoding */ + char *name; /* state name for display */ +}; + +#define BFA_SM(_sm) (_sm) /* * State machine with entry actions. 
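/*
 * Illustrative sketch, not part of the applied patch: the bfa_cs.h hunks
 * above and below drop the old "cast everything through bfa_sm_t / void *"
 * scheme in favour of one correctly typed function-pointer type per state
 * machine, plus a matching <object>_sm_to_state() lookup helper. The
 * stand-alone program below models that pattern with invented names; the
 * lookup loop mirrors the shape of the helpers added in this hunk.
 */
#include <stdio.h>

struct toy_fsm;
enum toy_event { TOY_START, TOY_STOP };
typedef void (*toy_fsm_fn)(struct toy_fsm *, enum toy_event);

struct toy_fsm {
	toy_fsm_fn sm;		/* current state == current handler */
};

struct toy_sm_table {
	toy_fsm_fn sm;		/* state machine function */
	int state;		/* state machine encoding */
	const char *name;	/* state name for display */
};

static void toy_sm_stopped(struct toy_fsm *f, enum toy_event e);
static void toy_sm_running(struct toy_fsm *f, enum toy_event e);

static void toy_sm_stopped(struct toy_fsm *f, enum toy_event e)
{
	if (e == TOY_START)
		f->sm = toy_sm_running;
}

static void toy_sm_running(struct toy_fsm *f, enum toy_event e)
{
	if (e == TOY_STOP)
		f->sm = toy_sm_stopped;
}

static const struct toy_sm_table toy_table[] = {
	{ toy_sm_stopped, 0, "stopped" },
	{ toy_sm_running, 1, "running" },
	{ NULL, -1, "unknown" },
};

static int toy_sm_to_state(const struct toy_sm_table *smt, toy_fsm_fn sm)
{
	int i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;
}

int main(void)
{
	struct toy_fsm f = { .sm = toy_sm_stopped };

	f.sm(&f, TOY_START);	/* dispatch through the typed pointer, no casts */
	printf("state = %d\n", toy_sm_to_state(toy_table, f.sm));
	return 0;
}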
@@ -226,17 +279,66 @@ typedef void (*bfa_fsm_t)(void *fsm, int event); static void oc ## _sm_ ## st ## _entry(otype * fsm) #define bfa_fsm_set_state(_fsm, _state) do { \ - (_fsm)->fsm = (bfa_fsm_t)(_state); \ + (_fsm)->fsm = (_state); \ _state ## _entry(_fsm); \ } while (0) #define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event))) #define bfa_fsm_get_state(_fsm) ((_fsm)->fsm) -#define bfa_fsm_cmp_state(_fsm, _state) \ - ((_fsm)->fsm == (bfa_fsm_t)(_state)) +#define bfa_fsm_cmp_state(_fsm, _state) ((_fsm)->fsm == (_state)) + +static inline int +iocpf_sm_to_state(struct iocpf_sm_table_s *smt, bfa_fsm_iocpf_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} + +static inline int +ioc_sm_to_state(struct ioc_sm_table_s *smt, bfa_fsm_ioc_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} + +static inline int +rport_sm_to_state(struct rport_sm_table_s *smt, bfa_fcs_rport_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} + +static inline int +vport_sm_to_state(struct vport_sm_table_s *smt, bfa_fcs_vport_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} + +static inline int +itnim_sm_to_state(struct itnim_sm_table_s *smt, bfa_fcs_itnim_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} static inline int -bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm) +fcport_sm_to_state(struct fcport_sm_table_s *smt, bfa_fcport_t sm) { int i = 0; diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index e93921dec..ee6b4c0fb 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -37,7 +37,7 @@ struct bfa_iotag_s { struct bfa_itn_s { bfa_isr_func_t isr; -}; +} __no_const; void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); @@ -165,9 +165,11 @@ struct bfa_fcp_mod_s { /* * BFA IO (initiator mode) */ +enum bfa_ioim_event; + struct bfa_ioim_s { struct list_head qe; /* queue elememt */ - bfa_sm_t sm; /* BFA ioim state machine */ + void (*sm)(struct bfa_ioim_s *, enum bfa_ioim_event);/* BFA ioim state machine */ struct bfa_s *bfa; /* BFA module */ struct bfa_fcpim_s *fcpim; /* parent fcpim module */ struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ @@ -197,9 +199,11 @@ struct bfa_ioim_sp_s { /* * BFA Task management command (initiator mode) */ +enum bfa_tskim_event; + struct bfa_tskim_s { struct list_head qe; - bfa_sm_t sm; + void (*sm)(struct bfa_tskim_s *, enum bfa_tskim_event); struct bfa_s *bfa; /* BFA module */ struct bfa_fcpim_s *fcpim; /* parent fcpim module */ struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ @@ -219,9 +223,11 @@ struct bfa_tskim_s { /* * BFA i-t-n (initiator mode) */ +enum bfa_itnim_event; + struct bfa_itnim_s { struct list_head qe; /* queue element */ - bfa_sm_t sm; /* i-t-n im BFA state machine */ + void (*sm)(struct bfa_itnim_s *, enum bfa_itnim_event);/* i-t-n im BFA state machine */ struct bfa_s *bfa; /* bfa instance */ struct bfa_rport_s *rport; /* bfa rport */ void *ditn; /* driver i-t-n structure */ diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 1e7e139d7..c2031dd6a 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -39,10 +39,21 @@ struct bfa_fcs_mod_s { #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } static struct bfa_fcs_mod_s fcs_modules[] = 
{ - { bfa_fcs_port_attach, NULL, NULL }, - { bfa_fcs_uf_attach, NULL, NULL }, - { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, - bfa_fcs_fabric_modexit }, + { + .attach = bfa_fcs_port_attach, + .modinit = NULL, + .modexit = NULL + }, + { + .attach = bfa_fcs_uf_attach, + .modinit = NULL, + .modexit = NULL + }, + { + .attach = bfa_fcs_fabric_attach, + .modinit = bfa_fcs_fabric_modinit, + .modexit = bfa_fcs_fabric_modexit + }, }; /* diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 0f797a55d..73b170a3c 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -67,8 +67,10 @@ struct bfa_fcs_s; #define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0) #define BFA_FCS_MAX_RPORT_LOGINS 1024 +enum vport_ns_event; + struct bfa_fcs_lport_ns_s { - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_lport_ns_s *, enum vport_ns_event);/* state machine */ struct bfa_timer_s timer; struct bfa_fcs_lport_s *port; /* parent port */ struct bfa_fcxp_s *fcxp; @@ -77,18 +79,20 @@ struct bfa_fcs_lport_ns_s { u8 num_rsnn_nn_retries; }; +enum port_scn_event; struct bfa_fcs_lport_scn_s { - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_lport_scn_s *, enum port_scn_event);/* state machine */ struct bfa_timer_s timer; struct bfa_fcs_lport_s *port; /* parent port */ struct bfa_fcxp_s *fcxp; struct bfa_fcxp_wqe_s fcxp_wqe; }; +enum port_fdmi_event; struct bfa_fcs_lport_fdmi_s { - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_lport_fdmi_s *, enum port_fdmi_event);/* state machine */ struct bfa_timer_s timer; struct bfa_fcs_lport_ms_s *ms; /* parent ms */ struct bfa_fcxp_s *fcxp; @@ -97,9 +101,10 @@ struct bfa_fcs_lport_fdmi_s { u8 rsvd[3]; }; +enum port_ms_event; struct bfa_fcs_lport_ms_s { - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_lport_ms_s *, enum port_ms_event);/* state machine */ struct bfa_timer_s timer; struct bfa_fcs_lport_s *port; /* parent port */ struct bfa_fcxp_s *fcxp; @@ -139,10 +144,11 @@ union bfa_fcs_lport_topo_u { struct bfa_fcs_lport_n2n_s pn2n; }; +enum bfa_fcs_lport_event; struct bfa_fcs_lport_s { struct list_head qe; /* used by port/vport */ - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_lport_s *, enum bfa_fcs_lport_event); /* state machine */ struct bfa_fcs_fabric_s *fabric; /* parent fabric */ struct bfa_lport_cfg_s port_cfg; /* port configuration */ struct bfa_timer_s link_timer; /* timer for link offline */ @@ -179,10 +185,11 @@ enum bfa_fcs_fabric_type { BFA_FCS_FABRIC_LOOP = 3, }; +enum bfa_fcs_fabric_event; struct bfa_fcs_fabric_s { struct list_head qe; /* queue element */ - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_fabric_s *, enum bfa_fcs_fabric_event); /* state machine */ struct bfa_fcs_s *fcs; /* FCS instance */ struct bfa_fcs_lport_s bport; /* base logical port */ enum bfa_fcs_fabric_type fab_type; /* fabric type */ @@ -355,9 +362,11 @@ void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, struct fchs_s *rx_frame, u32 len); void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port); +enum bfa_fcs_vport_event; + struct bfa_fcs_vport_s { struct list_head qe; /* queue elem */ - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_vport_s *, enum bfa_fcs_vport_event);/* state machine */ bfa_fcs_lport_t lport; /* logical port */ struct bfa_timer_s timer; struct bfad_vport_s *vport_drv; /* Driver private */ @@ -409,8 +418,10 @@ struct bfa_fcs_tin_s; struct bfa_fcs_iprp_s; /* Rport Features (RPF) */ +enum rpf_event; + struct 
bfa_fcs_rpf_s { - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_rpf_s *, enum rpf_event); /* state machine */ struct bfa_fcs_rport_s *rport; /* parent rport */ struct bfa_timer_s timer; /* general purpose timer */ struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */ @@ -425,6 +436,8 @@ struct bfa_fcs_rpf_s { */ }; +enum rport_event; + struct bfa_fcs_rport_s { struct list_head qe; /* used by port/vport */ struct bfa_fcs_lport_s *port; /* parent FCS port */ @@ -441,7 +454,7 @@ struct bfa_fcs_rport_s { wwn_t pwwn; /* port wwn of rport */ wwn_t nwwn; /* node wwn of rport */ struct bfa_rport_symname_s psym_name; /* port symbolic name */ - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_rport_s *, enum rport_event); /* state machine */ struct bfa_timer_s timer; /* general purpose timer */ struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */ struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */ @@ -502,9 +515,10 @@ void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport); * forward declarations */ struct bfad_itnim_s; +enum bfa_fcs_itnim_event; struct bfa_fcs_itnim_s { - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_fcs_itnim_s *, enum bfa_fcs_itnim_event);/* state machine */ struct bfa_fcs_rport_s *rport; /* parent remote rport */ struct bfad_itnim_s *itnim_drv; /* driver peer instance */ struct bfa_fcs_s *fcs; /* fcs instance */ diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c index 2e3b19e7e..7a9b7299d 100644 --- a/drivers/scsi/bfa/bfa_fcs_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c @@ -60,7 +60,7 @@ static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); -static struct bfa_sm_table_s itnim_sm_table[] = { +static struct itnim_sm_table_s itnim_sm_table[] = { {BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE}, {BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND}, {BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT}, @@ -673,7 +673,7 @@ bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); - switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) { + switch (itnim_sm_to_state(itnim_sm_table, itnim->sm)) { case BFA_ITNIM_ONLINE: case BFA_ITNIM_INITIATIOR: return BFA_STATUS_OK; @@ -773,7 +773,7 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; - attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); + attr->state = itnim_sm_to_state(itnim_sm_table, itnim->sm); attr->retry = itnim->seq_rec; attr->rec_support = itnim->rec_support; attr->conf_comp = itnim->conf_comp; diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 4ddda72f6..ccf40a3c7 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -90,15 +90,26 @@ static struct { void (*offline) (struct bfa_fcs_lport_s *port); } __port_action[] = { { - bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online, - bfa_fcs_lport_unknown_offline}, { - bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, - bfa_fcs_lport_fab_offline}, { - bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, - bfa_fcs_lport_n2n_offline}, { - bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online, - bfa_fcs_lport_loop_offline}, - }; + .init = bfa_fcs_lport_unknown_init, + .online = bfa_fcs_lport_unknown_online, + .offline = bfa_fcs_lport_unknown_offline + }, + { + .init = 
bfa_fcs_lport_fab_init, + .online = bfa_fcs_lport_fab_online, + .offline = bfa_fcs_lport_fab_offline + }, + { + .init = bfa_fcs_lport_n2n_init, + .online = bfa_fcs_lport_n2n_online, + .offline = bfa_fcs_lport_n2n_offline + }, + { + .init = bfa_fcs_lport_loop_init, + .online = bfa_fcs_lport_loop_online, + .offline = bfa_fcs_lport_loop_offline + }, +}; /* * fcs_port_sm FCS logical port state machine @@ -6040,7 +6051,7 @@ static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport, static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event); -static struct bfa_sm_table_s vport_sm_table[] = { +static struct vport_sm_table_s vport_sm_table[] = { {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT}, {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED}, {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE}, @@ -6871,7 +6882,7 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, memset(attr, 0, sizeof(struct bfa_vport_attr_s)); bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr); - attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); + attr->vport_state = vport_sm_to_state(vport_sm_table, vport->sm); } diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index de50349a3..6d676bea3 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -144,7 +144,7 @@ static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport, static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport, enum rport_event event); -static struct bfa_sm_table_s rport_sm_table[] = { +static struct rport_sm_table_s rport_sm_table[] = { {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT}, {BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI}, {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE}, @@ -2980,7 +2980,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport) { - return bfa_sm_to_state(rport_sm_table, rport->sm); + return rport_sm_to_state(rport_sm_table, rport->sm); } diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index a1ada4a31..6ed9ba271 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -148,7 +148,7 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event); -static struct bfa_sm_table_s ioc_sm_table[] = { +static struct ioc_sm_table_s ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, @@ -236,7 +236,7 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event); -static struct bfa_sm_table_s iocpf_sm_table[] = { +static struct iocpf_sm_table_s iocpf_sm_table[] = { {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, @@ -2830,12 +2830,12 @@ enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc) { enum bfa_iocpf_state iocpf_st; - enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); + enum bfa_ioc_state ioc_st = ioc_sm_to_state(ioc_sm_table, ioc->fsm); if (ioc_st == BFA_IOC_ENABLING || ioc_st == BFA_IOC_FAIL || ioc_st == 
BFA_IOC_INITFAIL) { - iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); switch (iocpf_st) { case BFA_IOCPF_SEMWAIT: diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 713745da4..78b9671eb 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -259,7 +259,7 @@ struct bfa_ioc_cbfn_s { bfa_ioc_disable_cbfn_t disable_cbfn; bfa_ioc_hbfail_cbfn_t hbfail_cbfn; bfa_ioc_reset_cbfn_t reset_cbfn; -}; +} __no_const; /* * IOC event notification mechanism. @@ -286,16 +286,20 @@ struct bfa_ioc_notify_s { (__notify)->cbarg = (__cbarg); \ } while (0) +enum iocpf_event; + struct bfa_iocpf_s { - bfa_fsm_t fsm; + void (*fsm)(struct bfa_iocpf_s *, enum iocpf_event); struct bfa_ioc_s *ioc; bfa_boolean_t fw_mismatch_notified; bfa_boolean_t auto_recover; u32 poll_time; }; +enum ioc_event; + struct bfa_ioc_s { - bfa_fsm_t fsm; + void (*fsm)(struct bfa_ioc_s *, enum ioc_event); struct bfa_s *bfa; struct bfa_pcidev_s pcidev; struct bfa_timer_mod_s *timer_mod; @@ -353,7 +357,7 @@ struct bfa_ioc_hwif_s { void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc); -}; +} __no_const; /* * Queue element to wait for room in request queue. FIFO order is @@ -779,8 +783,10 @@ struct bfa_dconf_s { }; #pragma pack() +enum bfa_dconf_event; + struct bfa_dconf_mod_s { - bfa_sm_t sm; + void (*sm)(struct bfa_dconf_mod_s *, enum bfa_dconf_event); u8 instance; bfa_boolean_t read_data_valid; bfa_boolean_t min_cfg; diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 53135f21f..640621b4c 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -79,12 +79,12 @@ enum { \ extern struct bfa_module_s hal_mod_ ## __mod; \ struct bfa_module_s hal_mod_ ## __mod = { \ - bfa_ ## __mod ## _meminfo, \ - bfa_ ## __mod ## _attach, \ - bfa_ ## __mod ## _detach, \ - bfa_ ## __mod ## _start, \ - bfa_ ## __mod ## _stop, \ - bfa_ ## __mod ## _iocdisable, \ + .meminfo = bfa_ ## __mod ## _meminfo, \ + .attach = bfa_ ## __mod ## _attach, \ + .detach = bfa_ ## __mod ## _detach, \ + .start = bfa_ ## __mod ## _start, \ + .stop = bfa_ ## __mod ## _stop, \ + .iocdisable = bfa_ ## __mod ## _iocdisable, \ } #define BFA_CACHELINE_SZ (256) diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 12de29217..ec9f0ab6b 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -225,7 +225,7 @@ static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); -static struct bfa_sm_table_s hal_port_sm_table[] = { +static struct fcport_sm_table_s hal_port_sm_table[] = { {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT}, {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT}, {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING}, @@ -3642,7 +3642,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) fcport->event_arg.i2hmsg = i2hmsg; bfa_trc(bfa, msg->mhdr.msg_id); - bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm)); + bfa_trc(bfa, fcport_sm_to_state(hal_port_sm_table, fcport->sm)); switch (msg->mhdr.msg_id) { case BFI_FCPORT_I2H_ENABLE_RSP: @@ -4077,7 +4077,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); - 
attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm); + attr->port_state = fcport_sm_to_state(hal_port_sm_table, fcport->sm); attr->fec_state = fcport->fec_state; @@ -4159,7 +4159,7 @@ bfa_fcport_is_disabled(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return bfa_sm_to_state(hal_port_sm_table, fcport->sm) == + return fcport_sm_to_state(hal_port_sm_table, fcport->sm) == BFA_PORT_ST_DISABLED; } @@ -4169,7 +4169,7 @@ bfa_fcport_is_dport(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == + return (fcport_sm_to_state(hal_port_sm_table, fcport->sm) == BFA_PORT_ST_DPORT); } @@ -4178,7 +4178,7 @@ bfa_fcport_is_ddport(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == + return (fcport_sm_to_state(hal_port_sm_table, fcport->sm) == BFA_PORT_ST_DDPORT); } diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index ea2278bc7..6f51a7300 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -160,6 +160,8 @@ struct bfa_fcxp_rsp_info_s { u32 rsp_maxlen; /* max response length expected */ }; +typedef void (*bfa_sm_t)(void *sm, int event); + struct bfa_fcxp_s { struct list_head qe; /* fcxp queue element */ bfa_sm_t sm; /* state machine */ @@ -295,9 +297,11 @@ struct bfa_rport_info_s { /* * BFA rport data structure */ +enum bfa_rport_event; + struct bfa_rport_s { struct list_head qe; /* queue element */ - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfa_rport_s *, enum bfa_rport_event);/* state machine */ struct bfa_s *bfa; /* backpointer to BFA */ void *rport_drv; /* fcs/driver rport object */ u16 fw_handle; /* firmware rport handle */ @@ -388,10 +392,12 @@ void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw); /* * LPS - bfa lport login/logout service interface */ +enum bfa_lps_event; + struct bfa_lps_s { struct list_head qe; /* queue element */ struct bfa_s *bfa; /* parent bfa instance */ - bfa_sm_t sm; /* finite state machine */ + void (*sm)(struct bfa_lps_s *, enum bfa_lps_event);/* finite state machine */ u8 bfa_tag; /* lport tag */ u8 fw_tag; /* lport fw tag */ u8 reqq; /* lport request queue */ @@ -450,9 +456,11 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); /* * Link notification data structure */ +enum bfa_fcport_ln_sm_event; + struct bfa_fcport_ln_s { struct bfa_fcport_s *fcport; - bfa_sm_t sm; + void (*sm)(struct bfa_fcport_ln_s *, enum bfa_fcport_ln_sm_event); struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */ enum bfa_port_linkstate ln_event; /* ln event for callback */ }; @@ -466,7 +474,7 @@ struct bfa_fcport_trunk_s { */ struct bfa_fcport_s { struct bfa_s *bfa; /* parent BFA instance */ - bfa_sm_t sm; /* port state machine */ + void (*sm)(struct bfa_fcport_s *, enum bfa_fcport_sm_event); /* port state machine */ wwn_t nwwn; /* node wwn of physical port */ wwn_t pwwn; /* port wwn of physical oprt */ enum bfa_port_speed speed_sup; @@ -714,9 +722,11 @@ struct bfa_fcdiag_lb_s { u32 status; }; +enum bfa_dport_sm_event; + struct bfa_dport_s { struct bfa_s *bfa; /* Back pointer to BFA */ - bfa_sm_t sm; /* finite state machine */ + void (*sm)(struct bfa_dport_s *, enum bfa_dport_sm_event);/* finite state machine */ struct bfa_reqq_wait_s reqq_wait; bfa_cb_diag_t cbfn; void *cbarg; diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 0e76e8669..b7d303c3d 100644 --- a/drivers/scsi/bfa/bfad.c +++ 
b/drivers/scsi/bfa/bfad.c @@ -406,6 +406,16 @@ bfad_hcb_comp(void *arg, bfa_status_t status) complete(&fcomp->comp); } +void +bfad_stats_comp(void *arg, bfa_boolean_t _status) +{ + struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg; + bfa_status_t status = (bfa_status_t)_status; + + fcomp->status = status; + complete(&fcomp->comp); +} + /* * bfa_init callback */ @@ -1440,7 +1450,7 @@ bfad_pci_remove(struct pci_dev *pdev) * PCI Error Recovery entry, error detected. */ static pci_ers_result_t -bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +bfad_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct bfad_s *bfad = pci_get_drvdata(pdev); unsigned long flags; diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index d1ad0208d..661c0f95a 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -2145,7 +2145,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd) struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + bfa_pending_q_init(&cb_qe, bfad_stats_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); @@ -2169,7 +2169,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd) struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); + bfa_pending_q_init(&cb_qe, bfad_stats_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); @@ -2453,7 +2453,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + bfa_pending_q_init(&cb_qe, bfad_stats_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); @@ -2484,7 +2484,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + bfa_pending_q_init(&cb_qe, bfad_stats_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index f9e862093..807a9830f 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -187,8 +187,10 @@ union bfad_tmp_buf { /* * BFAD (PCI function) data structure */ +enum bfad_sm_event; + struct bfad_s { - bfa_sm_t sm; /* state machine */ + void (*sm)(struct bfad_s *, enum bfad_sm_event); /* state machine */ struct list_head list_entry; struct bfa_s bfa; struct bfa_fcs_s bfa_fcs; @@ -309,6 +311,7 @@ void bfad_fcs_stop(struct bfad_s *bfad); void bfad_remove_intr(struct bfad_s *bfad); void bfad_hal_mem_release(struct bfad_s *bfad); void bfad_hcb_comp(void *arg, bfa_status_t status); +void bfad_stats_comp(void *arg, bfa_boolean_t _status); int bfad_setup_intr(struct bfad_s *bfad); void bfad_remove_intr(struct bfad_s *bfad); diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h index c38017b4a..3268e62fd 100644 --- a/drivers/scsi/csiostor/csio_defs.h +++ b/drivers/scsi/csiostor/csio_defs.h @@ -73,7 +73,8 @@ csio_list_deleted(struct list_head *list) #define csio_list_prev(elem) (((struct list_head *)(elem))->prev) /* State machine */ -typedef void (*csio_sm_state_t)(void *, uint32_t); 
+struct csio_sm; +typedef void (*csio_sm_state_t)(struct csio_sm *, uint32_t); struct csio_sm { struct list_head sm_list; @@ -81,9 +82,9 @@ struct csio_sm { }; static inline void -csio_set_state(void *smp, void *state) +csio_set_state(struct csio_sm *smp, csio_sm_state_t state) { - ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state; + smp->sm_state = state; } static inline void @@ -93,21 +94,21 @@ csio_init_state(struct csio_sm *smp, void *state) } static inline void -csio_post_event(void *smp, uint32_t evt) +csio_post_event(struct csio_sm *smp, uint32_t evt) { - ((struct csio_sm *)smp)->sm_state(smp, evt); + smp->sm_state(smp, evt); } static inline csio_sm_state_t -csio_get_state(void *smp) +csio_get_state(struct csio_sm *smp) { - return ((struct csio_sm *)smp)->sm_state; + return smp->sm_state; } static inline bool -csio_match_state(void *smp, void *state) +csio_match_state(struct csio_sm *smp, csio_sm_state_t state) { - return (csio_get_state(smp) == (csio_sm_state_t)state); + return (csio_get_state(smp) == state); } #define CSIO_ASSERT(cond) BUG_ON(!(cond)) diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 14343bfd1..efe85070c 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -89,15 +89,15 @@ static void csio_mgmtm_cleanup(struct csio_mgmtm *); static void csio_hw_mbm_cleanup(struct csio_hw *); /* State machine forward declarations */ -static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev); -static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev); -static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev); -static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev); -static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev); -static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev); -static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev); -static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev); -static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_uninit(struct csio_sm *, uint32_t); +static void csio_hws_configuring(struct csio_sm *, uint32_t); +static void csio_hws_initializing(struct csio_sm *, uint32_t); +static void csio_hws_ready(struct csio_sm *, uint32_t); +static void csio_hws_quiescing(struct csio_sm *, uint32_t); +static void csio_hws_quiesced(struct csio_sm *, uint32_t); +static void csio_hws_resetting(struct csio_sm *, uint32_t); +static void csio_hws_removing(struct csio_sm *, uint32_t); +static void csio_hws_pcierr(struct csio_sm *, uint32_t); static void csio_hw_initialize(struct csio_hw *hw); static void csio_evtq_stop(struct csio_hw *hw); @@ -105,12 +105,12 @@ static void csio_evtq_start(struct csio_hw *hw); int csio_is_hw_ready(struct csio_hw *hw) { - return csio_match_state(hw, csio_hws_ready); + return csio_match_state(&hw->sm, csio_hws_ready); } int csio_is_hw_removing(struct csio_hw *hw) { - return csio_match_state(hw, csio_hws_removing); + return csio_match_state(&hw->sm, csio_hws_removing); } @@ -2326,8 +2326,11 @@ csio_hw_fatal_err(struct csio_hw *hw) * */ static void -csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) +csio_hws_uninit(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + hw->prev_evt = hw->cur_evt; hw->cur_evt = evt; CSIO_INC_STATS(hw, n_evt_sm[evt]); @@ -2351,8 +2354,11 @@ csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) * */ static void -csio_hws_configuring(struct 
csio_hw *hw, enum csio_hw_ev evt) +csio_hws_configuring(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + hw->prev_evt = hw->cur_evt; hw->cur_evt = evt; CSIO_INC_STATS(hw, n_evt_sm[evt]); @@ -2389,8 +2395,11 @@ csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) * */ static void -csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) +csio_hws_initializing(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + hw->prev_evt = hw->cur_evt; hw->cur_evt = evt; CSIO_INC_STATS(hw, n_evt_sm[evt]); @@ -2427,8 +2436,11 @@ csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) * */ static void -csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) +csio_hws_ready(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + /* Remember the event */ hw->evtflag = evt; @@ -2476,8 +2488,11 @@ csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) * */ static void -csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) +csio_hws_quiescing(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + hw->prev_evt = hw->cur_evt; hw->cur_evt = evt; CSIO_INC_STATS(hw, n_evt_sm[evt]); @@ -2536,8 +2551,11 @@ csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) * */ static void -csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) +csio_hws_quiesced(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + hw->prev_evt = hw->cur_evt; hw->cur_evt = evt; CSIO_INC_STATS(hw, n_evt_sm[evt]); @@ -2561,8 +2579,11 @@ csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) * */ static void -csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) +csio_hws_resetting(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + hw->prev_evt = hw->cur_evt; hw->cur_evt = evt; CSIO_INC_STATS(hw, n_evt_sm[evt]); @@ -2587,8 +2608,11 @@ csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) * */ static void -csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) +csio_hws_removing(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + hw->prev_evt = hw->cur_evt; hw->cur_evt = evt; CSIO_INC_STATS(hw, n_evt_sm[evt]); @@ -2622,8 +2646,11 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) * */ static void -csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) +csio_hws_pcierr(struct csio_sm *_hw, uint32_t _evt) { + struct csio_hw *hw = container_of(_hw, struct csio_hw, sm); + enum csio_hw_ev evt = _evt; + hw->prev_evt = hw->cur_evt; hw->cur_evt = evt; CSIO_INC_STATS(hw, n_evt_sm[evt]); diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index e46a333c6..1c3bf68ef 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -1053,7 +1053,7 @@ static void csio_remove_one(struct pci_dev *pdev) * */ static pci_ers_result_t -csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +csio_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct csio_hw *hw = pci_get_drvdata(pdev); diff --git a/drivers/scsi/csiostor/csio_lnode.c 
b/drivers/scsi/csiostor/csio_lnode.c index c00b2ff72..da72dbc43 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -55,10 +55,10 @@ int csio_fdmi_enable = 1; #define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1) /* Lnode SM declarations */ -static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev); -static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev); -static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev); -static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev); +static void csio_lns_uninit(struct csio_sm *, uint32_t); +static void csio_lns_online(struct csio_sm *, uint32_t); +static void csio_lns_ready(struct csio_sm *, uint32_t); +static void csio_lns_offline(struct csio_sm *, uint32_t); static int csio_ln_mgmt_submit_req(struct csio_ioreq *, void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), @@ -1077,7 +1077,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, int csio_is_lnode_ready(struct csio_lnode *ln) { - return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)); + return (csio_get_state(&ln->sm) == csio_lns_ready); } /*****************************************************************************/ @@ -1093,8 +1093,10 @@ csio_is_lnode_ready(struct csio_lnode *ln) * Return - none. */ static void -csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt) +csio_lns_uninit(struct csio_sm *_ln, uint32_t _evt) { + struct csio_lnode *ln = container_of(_ln, struct csio_lnode, sm); + enum csio_ln_ev evt = _evt; struct csio_hw *hw = csio_lnode_to_hw(ln); struct csio_lnode *rln = hw->rln; int rv; @@ -1146,8 +1148,10 @@ csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt) * Return - none. */ static void -csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) +csio_lns_online(struct csio_sm *_ln, uint32_t _evt) { + struct csio_lnode *ln = container_of(_ln, struct csio_lnode, sm); + enum csio_ln_ev evt = _evt; struct csio_hw *hw = csio_lnode_to_hw(ln); CSIO_INC_STATS(ln, n_evt_sm[evt]); @@ -1198,8 +1202,10 @@ csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) * Return - none. */ static void -csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt) +csio_lns_ready(struct csio_sm *_ln, uint32_t _evt) { + struct csio_lnode *ln = container_of(_ln, struct csio_lnode, sm); + enum csio_ln_ev evt = _evt; struct csio_hw *hw = csio_lnode_to_hw(ln); CSIO_INC_STATS(ln, n_evt_sm[evt]); @@ -1272,8 +1278,10 @@ csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt) * Return - none. 
*/ static void -csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt) +csio_lns_offline(struct csio_sm *_ln, uint32_t _evt) { + struct csio_lnode *ln = container_of(_ln, struct csio_lnode, sm); + enum csio_ln_ev evt = _evt; struct csio_hw *hw = csio_lnode_to_hw(ln); struct csio_lnode *rln = hw->rln; int rv; @@ -1349,15 +1357,15 @@ csio_free_fcfinfo(struct kref *kref) void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) { - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { + if (csio_get_state(&ln->sm) == csio_lns_uninit) { strcpy(str, "UNINIT"); return; } - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { + if (csio_get_state(&ln->sm) == csio_lns_ready) { strcpy(str, "READY"); return; } - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { + if (csio_get_state(&ln->sm) == csio_lns_offline) { strcpy(str, "OFFLINE"); return; } diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c index e9c3b045f..4ba3a598d 100644 --- a/drivers/scsi/csiostor/csio_rnode.c +++ b/drivers/scsi/csiostor/csio_rnode.c @@ -46,10 +46,10 @@ static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *); static void csio_rnode_exit(struct csio_rnode *); /* Static machine forward declarations */ -static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev); -static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev); -static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev); -static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev); +static void csio_rns_uninit(struct csio_sm *, uint32_t); +static void csio_rns_ready(struct csio_sm *, uint32_t); +static void csio_rns_offline(struct csio_sm *, uint32_t); +static void csio_rns_disappeared(struct csio_sm *, uint32_t); /* RNF event mapping */ static enum csio_rn_ev fwevt_to_rnevt[] = { @@ -88,13 +88,13 @@ static enum csio_rn_ev fwevt_to_rnevt[] = { int csio_is_rnode_ready(struct csio_rnode *rn) { - return csio_match_state(rn, csio_rns_ready); + return csio_match_state(&rn->sm, csio_rns_ready); } static int csio_is_rnode_uninit(struct csio_rnode *rn) { - return csio_match_state(rn, csio_rns_uninit); + return csio_match_state(&rn->sm, csio_rns_uninit); } static int @@ -601,8 +601,10 @@ __csio_unreg_rnode(struct csio_rnode *rn) * */ static void -csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt) +csio_rns_uninit(struct csio_sm *_rn, uint32_t _evt) { + struct csio_rnode *rn = container_of(_rn, struct csio_rnode, sm); + enum csio_rn_ev evt = _evt; struct csio_lnode *ln = csio_rnode_to_lnode(rn); int ret = 0; @@ -641,8 +643,10 @@ csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt) * */ static void -csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt) +csio_rns_ready(struct csio_sm *_rn, uint32_t _evt) { + struct csio_rnode *rn = container_of(_rn, struct csio_rnode, sm); + enum csio_rn_ev evt = _evt; struct csio_lnode *ln = csio_rnode_to_lnode(rn); int ret = 0; @@ -726,8 +730,10 @@ csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt) * */ static void -csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt) +csio_rns_offline(struct csio_sm *_rn, uint32_t _evt) { + struct csio_rnode *rn = container_of(_rn, struct csio_rnode, sm); + enum csio_rn_ev evt = _evt; struct csio_lnode *ln = csio_rnode_to_lnode(rn); int ret = 0; @@ -785,8 +791,10 @@ csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt) * */ static void -csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt) +csio_rns_disappeared(struct csio_sm 
*_rn, uint32_t _evt) { + struct csio_rnode *rn = container_of(_rn, struct csio_rnode, sm); + enum csio_rn_ev evt = _evt; struct csio_lnode *ln = csio_rnode_to_lnode(rn); int ret = 0; diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 89a52b941..0262e39fb 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -65,12 +65,12 @@ static int csio_ddp_descs = 128; static int csio_do_abrt_cls(struct csio_hw *, struct csio_ioreq *, bool); -static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev); -static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev); -static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev); -static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev); -static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev); -static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_uninit(struct csio_sm *, uint32_t); +static void csio_scsis_io_active(struct csio_sm *, uint32_t); +static void csio_scsis_tm_active(struct csio_sm *, uint32_t); +static void csio_scsis_aborting(struct csio_sm *, uint32_t); +static void csio_scsis_closing(struct csio_sm *, uint32_t); +static void csio_scsis_shost_cmpl_await(struct csio_sm *, uint32_t); /* * csio_scsi_match_io - Match an ioreq with the given SCSI level data. @@ -700,8 +700,10 @@ csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort) /* START: SCSI SM */ /*****************************************************************************/ static void -csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt) +csio_scsis_uninit(struct csio_sm *_req, uint32_t _evt) { + struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm); + enum csio_scsi_ev evt = _evt; struct csio_hw *hw = req->lnode->hwp; struct csio_scsim *scsim = csio_hw_to_scsim(hw); @@ -770,8 +772,10 @@ csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt) } static void -csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt) +csio_scsis_io_active(struct csio_sm *_req, uint32_t _evt) { + struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm); + enum csio_scsi_ev evt = _evt; struct csio_hw *hw = req->lnode->hwp; struct csio_scsim *scm = csio_hw_to_scsim(hw); struct csio_rnode *rn; @@ -842,8 +846,10 @@ csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt) } static void -csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt) +csio_scsis_tm_active(struct csio_sm *_req, uint32_t _evt) { + struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm); + enum csio_scsi_ev evt = _evt; struct csio_hw *hw = req->lnode->hwp; struct csio_scsim *scm = csio_hw_to_scsim(hw); @@ -885,8 +891,10 @@ csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt) } static void -csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt) +csio_scsis_aborting(struct csio_sm *_req, uint32_t _evt) { + struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm); + enum csio_scsi_ev evt = _evt; struct csio_hw *hw = req->lnode->hwp; struct csio_scsim *scm = csio_hw_to_scsim(hw); @@ -982,8 +990,10 @@ csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt) } static void -csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt) +csio_scsis_closing(struct csio_sm *_req, uint32_t _evt) { + struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm); + enum csio_scsi_ev evt = _evt; struct csio_hw *hw = 
req->lnode->hwp; struct csio_scsim *scm = csio_hw_to_scsim(hw); @@ -1046,8 +1056,11 @@ csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt) } static void -csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt) +csio_scsis_shost_cmpl_await(struct csio_sm *_req, uint32_t _evt) { + struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm); + enum csio_scsi_ev evt = _evt; + switch (evt) { case CSIO_SCSIE_ABORT: case CSIO_SCSIE_CLOSE: diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index d6e53aee2..6432a50b2 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -237,7 +237,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a) flags |= IRQF_SHARED; esas2r_log(ESAS2R_LOG_INFO, - "esas2r_claim_interrupts irq=%d (%p, %s, %x)", + "esas2r_claim_interrupts irq=%d (%p, %s, %lx)", a->pcid->irq, a, a->name, flags); if (request_irq(a->pcid->irq, diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index 3e8483410..34976f9a1 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -1301,7 +1301,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL); if (ioctl == NULL) { esas2r_log(ESAS2R_LOG_WARN, - "ioctl_handler kzalloc failed for %d bytes", + "ioctl_handler kzalloc failed for %lu bytes", sizeof(struct atto_express_ioctl)); return -ENOMEM; } diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h index 7b6397bb5..75b9d23cd 100644 --- a/drivers/scsi/esas2r/esas2r_log.h +++ b/drivers/scsi/esas2r/esas2r_log.h @@ -61,8 +61,8 @@ enum { #endif }; -int esas2r_log(const long level, const char *format, ...); -int esas2r_log_dev(const long level, +__printf(2, 3) int esas2r_log(const long level, const char *format, ...); +__printf(3, 4) int esas2r_log_dev(const long level, const struct device *dev, const char *format, ...); diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 5092c821d..072d07b45 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -198,7 +198,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj, GFP_KERNEL); if (a->local_atto_ioctl == NULL) { esas2r_log(ESAS2R_LOG_WARN, - "write_hw kzalloc failed for %d bytes", + "write_hw kzalloc failed for %lu bytes", sizeof(struct atto_ioctl)); return -ENOMEM; } @@ -1186,7 +1186,7 @@ static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset) } else { esas2r_log(ESAS2R_LOG_CRIT, "unable to allocate a request for a " - "device reset (%d:%d)!", + "device reset (%d:%llu)!", cmd->device->id, cmd->device->lun); } diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 0675fd128..bbebe908d 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -33,8 +33,8 @@ */ #include "libfcoe.h" -static atomic_t ctlr_num; -static atomic_t fcf_num; +static atomic_unchecked_t ctlr_num; +static atomic_unchecked_t fcf_num; /* * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs @@ -724,7 +724,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, if (!ctlr) goto out; - ctlr->id = atomic_inc_return(&ctlr_num) - 1; + ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1; ctlr->f = f; ctlr->mode = FIP_CONN_TYPE_FABRIC; INIT_LIST_HEAD(&ctlr->fcfs); @@ -941,7 +941,7 @@ struct fcoe_fcf_device 
*fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, fcf->dev.parent = &ctlr->dev; fcf->dev.bus = &fcoe_bus_type; fcf->dev.type = &fcoe_fcf_device_type; - fcf->id = atomic_inc_return(&fcf_num) - 1; + fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1; fcf->state = FCOE_FCF_STATE_UNKNOWN; fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo; @@ -977,8 +977,8 @@ int __init fcoe_sysfs_setup(void) { int error; - atomic_set(&ctlr_num, 0); - atomic_set(&fcf_num, 0); + atomic_set_unchecked(&ctlr_num, 0); + atomic_set_unchecked(&fcf_num, 0); error = bus_register(&fcoe_bus_type); if (error) diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 375c536cb..618843b2e 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -32,13 +32,13 @@ MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs"); MODULE_LICENSE("GPL v2"); -static int fcoe_transport_create(const char *, struct kernel_param *); -static int fcoe_transport_destroy(const char *, struct kernel_param *); +static int fcoe_transport_create(const char *, const struct kernel_param *); +static int fcoe_transport_destroy(const char *, const struct kernel_param *); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); -static int fcoe_transport_enable(const char *, struct kernel_param *); -static int fcoe_transport_disable(const char *, struct kernel_param *); +static int fcoe_transport_enable(const char *, const struct kernel_param *); +static int fcoe_transport_disable(const char *, const struct kernel_param *); static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr); @@ -865,7 +865,7 @@ EXPORT_SYMBOL(fcoe_ctlr_destroy_store); * * Returns: 0 for success */ -static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) +static int fcoe_transport_create(const char *buffer, const struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; @@ -930,7 +930,7 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) * * Returns: 0 for success */ -static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) +static int fcoe_transport_destroy(const char *buffer, const struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; @@ -974,7 +974,7 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) * * Returns: 0 for success */ -static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) +static int fcoe_transport_disable(const char *buffer, const struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; @@ -1008,7 +1008,7 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) * * Returns: 0 for success */ -static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) +static int fcoe_transport_enable(const char *buffer, const struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a1d6ab76a..42c2b5ed1 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -944,10 +944,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q) struct reply_queue_buffer *rq = &h->reply_queue[q]; if (h->transMethod & CFGTBL_Trans_io_accel1) - 
return h->access.command_completed(h, q); + return h->access->command_completed(h, q); if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) - return h->access.command_completed(h, q); + return h->access->command_completed(h, q); if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { a = rq->head[rq->current_entry]; @@ -1129,7 +1129,7 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h, break; default: set_performant_mode(h, c, reply_queue); - h->access.submit_command(h, c); + h->access->submit_command(h, c); } } @@ -7111,17 +7111,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) { - return h->access.command_completed(h, q); + return h->access->command_completed(h, q); } static inline bool interrupt_pending(struct ctlr_info *h) { - return h->access.intr_pending(h); + return h->access->intr_pending(h); } static inline long interrupt_not_for_us(struct ctlr_info *h) { - return (h->access.intr_pending(h) == 0) || + return (h->access->intr_pending(h) == 0) || (h->interrupts_enabled == 0); } @@ -8049,7 +8049,7 @@ static int hpsa_pci_init(struct ctlr_info *h) if (prod_index < 0) return prod_index; h->product_name = products[prod_index].product_name; - h->access = *(products[prod_index].access); + h->access = products[prod_index].access; h->needs_abort_tags_swizzled = ctlr_needs_abort_tags_swizzled(h->board_id); @@ -8448,7 +8448,7 @@ static void controller_lockup_detected(struct ctlr_info *h) unsigned long flags; u32 lockup_detected; - h->access.set_intr_mask(h, HPSA_INTR_OFF); + h->access->set_intr_mask(h, HPSA_INTR_OFF); spin_lock_irqsave(&h->lock, flags); lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); if (!lockup_detected) { @@ -8786,7 +8786,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* make sure the board interrupts are off */ - h->access.set_intr_mask(h, HPSA_INTR_OFF); + h->access->set_intr_mask(h, HPSA_INTR_OFF); rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); if (rc) @@ -8839,7 +8839,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * fake ones to scoop up any residual completions. 
*/ spin_lock_irqsave(&h->lock, flags); - h->access.set_intr_mask(h, HPSA_INTR_OFF); + h->access->set_intr_mask(h, HPSA_INTR_OFF); spin_unlock_irqrestore(&h->lock, flags); hpsa_free_irqs(h); rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, @@ -8869,9 +8869,9 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, "Waiting for stale completions to drain.\n"); - h->access.set_intr_mask(h, HPSA_INTR_ON); + h->access->set_intr_mask(h, HPSA_INTR_ON); msleep(10000); - h->access.set_intr_mask(h, HPSA_INTR_OFF); + h->access->set_intr_mask(h, HPSA_INTR_OFF); rc = controller_reset_failed(h->cfgtable); if (rc) @@ -8898,7 +8898,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Turn the interrupts on so we can service requests */ - h->access.set_intr_mask(h, HPSA_INTR_ON); + h->access->set_intr_mask(h, HPSA_INTR_ON); hpsa_hba_inquiry(h); @@ -8924,7 +8924,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ hpsa_free_performant_mode(h); - h->access.set_intr_mask(h, HPSA_INTR_OFF); + h->access->set_intr_mask(h, HPSA_INTR_OFF); clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ hpsa_free_sg_chain_blocks(h); clean5: /* cmd, irq, shost, pci, lu, aer/h */ @@ -9059,7 +9059,7 @@ static void hpsa_shutdown(struct pci_dev *pdev) * To write all data in the battery backed cache to disks */ hpsa_flush_cache(h); - h->access.set_intr_mask(h, HPSA_INTR_OFF); + h->access->set_intr_mask(h, HPSA_INTR_OFF); hpsa_free_irqs(h); /* init_one 4 */ hpsa_disable_interrupt_mode(h); /* pci_init 2 */ } @@ -9201,7 +9201,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) CFGTBL_Trans_enable_directed_msix | (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)); - struct access_method access = SA5_performant_access; + struct access_method *access = &SA5_performant_access; /* This is a bit complicated. There are 8 registers on * the controller which we write to to tell it 8 different @@ -9243,7 +9243,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) * perform the superfluous readl() after each command submission. */ if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) - access = SA5_performant_access_no_read; + access = &SA5_performant_access_no_read; /* Controller spec: zero out this buffer. 
*/ for (i = 0; i < h->nreply_queues; i++) @@ -9273,12 +9273,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) * enable outbound interrupt coalescing in accelerator mode; */ if (trans_support & CFGTBL_Trans_io_accel1) { - access = SA5_ioaccel_mode1_access; + access = &SA5_ioaccel_mode1_access; writel(10, &h->cfgtable->HostWrite.CoalIntDelay); writel(4, &h->cfgtable->HostWrite.CoalIntCount); } else { if (trans_support & CFGTBL_Trans_io_accel2) { - access = SA5_ioaccel_mode2_access; + access = &SA5_ioaccel_mode2_access; writel(10, &h->cfgtable->HostWrite.CoalIntDelay); writel(4, &h->cfgtable->HostWrite.CoalIntCount); } diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 9ea162de8..40c2e1bda 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -180,7 +180,7 @@ struct ctlr_info { unsigned int msix_vector; unsigned int msi_vector; int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ - struct access_method access; + struct access_method *access; /* queue and queue Info */ unsigned int Qdepth; @@ -580,38 +580,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) } static struct access_method SA5_access = { - SA5_submit_command, - SA5_intr_mask, - SA5_intr_pending, - SA5_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_intr_mask, + .intr_pending = SA5_intr_pending, + .command_completed = SA5_completed, }; static struct access_method SA5_ioaccel_mode1_access = { - SA5_submit_command, - SA5_performant_intr_mask, - SA5_ioaccel_mode1_intr_pending, - SA5_ioaccel_mode1_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_ioaccel_mode1_intr_pending, + .command_completed = SA5_ioaccel_mode1_completed, }; static struct access_method SA5_ioaccel_mode2_access = { - SA5_submit_command_ioaccel2, - SA5_performant_intr_mask, - SA5_performant_intr_pending, - SA5_performant_completed, + .submit_command = SA5_submit_command_ioaccel2, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; static struct access_method SA5_performant_access = { - SA5_submit_command, - SA5_performant_intr_mask, - SA5_performant_intr_pending, - SA5_performant_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; static struct access_method SA5_performant_access_no_read = { - SA5_submit_command_no_read, - SA5_performant_intr_mask, - SA5_performant_intr_pending, - SA5_performant_completed, + .submit_command = SA5_submit_command_no_read, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; struct board_type { diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index a83f705ed..b40c5e665 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -1082,7 +1082,6 @@ static const char *hptiop_info(struct Scsi_Host *host) static int hptiop_reset_hba(struct hptiop_hba *hba) { if (atomic_xchg(&hba->resetting, 1) == 0) { - atomic_inc(&hba->reset_count); hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET); } @@ -1340,7 +1339,6 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) hba->iopintf_v2 = 0; atomic_set(&hba->resetting, 0); - atomic_set(&hba->reset_count, 0); init_waitqueue_head(&hba->reset_wq); 
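
The hpsa.h and bfa_modules.h hunks above convert positional struct initializers into C99 designated initializers, so each callback stays bound to a named field even if the ops structure is later reordered or grows new members. Below is a minimal, self-contained sketch of the same idiom; the structure and function names are illustrative and not taken from either driver.

	#include <stdio.h>

	/* A driver-style ops table: every member is a function pointer. */
	struct access_method {
		void (*submit_command)(int tag);
		void (*set_intr_mask)(int mask);
	};

	static void demo_submit(int tag)     { printf("submit %d\n", tag); }
	static void demo_intr_mask(int mask) { printf("mask %d\n", mask); }

	/*
	 * Designated initializers name the field being set, unlike the
	 * positional form, which silently misbinds callbacks if the
	 * structure layout changes.
	 */
	static struct access_method demo_access = {
		.submit_command = demo_submit,
		.set_intr_mask  = demo_intr_mask,
	};

	int main(void)
	{
		demo_access.submit_command(1);
		demo_access.set_intr_mask(0);
		return 0;
	}
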
init_waitqueue_head(&hba->ioctl_wq); diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h index 4d1c51153..d5744cb20 100644 --- a/drivers/scsi/hptiop.h +++ b/drivers/scsi/hptiop.h @@ -330,7 +330,6 @@ struct hptiop_hba { void *dma_coherent[HPTIOP_MAX_REQUESTS]; dma_addr_t dma_coherent_handle[HPTIOP_MAX_REQUESTS]; - atomic_t reset_count; atomic_t resetting; wait_queue_head_t reset_wq; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 532474109..0d62bad7f 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -948,7 +948,7 @@ static void ipr_send_command(struct ipr_cmnd *ipr_cmd) **/ static void ipr_do_req(struct ipr_cmnd *ipr_cmd, void (*done) (struct ipr_cmnd *), - void (*timeout_func) (struct ipr_cmnd *), u32 timeout) + void (*timeout_func) (unsigned long), u32 timeout) { list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); @@ -956,7 +956,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd, ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + timeout; - ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func; + ipr_cmd->timer.function = timeout_func; add_timer(&ipr_cmd->timer); @@ -1038,7 +1038,7 @@ static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr, * none **/ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, - void (*timeout_func) (struct ipr_cmnd *ipr_cmd), + void (*timeout_func) (unsigned long ipr_cmd), u32 timeout) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; @@ -1058,7 +1058,7 @@ static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) if (ioa_cfg->hrrq_num == 1) hrrq = 0; else { - hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); + hrrq = atomic_add_return_unchecked(1, &ioa_cfg->hrrq_index); hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; } return hrrq; @@ -2622,8 +2622,9 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_timeout(struct ipr_cmnd *ipr_cmd) +static void ipr_timeout(unsigned long _ipr_cmd) { + struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)_ipr_cmd; unsigned long lock_flags = 0; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; @@ -2654,8 +2655,9 @@ static void ipr_timeout(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd) +static void ipr_oper_timeout(unsigned long _ipr_cmd) { + struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)_ipr_cmd; unsigned long lock_flags = 0; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; @@ -5348,8 +5350,9 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) +static void ipr_abort_timeout(unsigned long _ipr_cmd) { + struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)_ipr_cmd; struct ipr_cmnd *reset_cmd; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_cmd_pkt *cmd_pkt; @@ -8125,8 +8128,9 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd) +static void ipr_reset_timer_done(unsigned long _ipr_cmd) { + struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)_ipr_cmd; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; unsigned long lock_flags = 0; @@ -8164,7 +8168,7 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + timeout; - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done; + ipr_cmd->timer.function = ipr_reset_timer_done; 
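
The ipr hunks above retype the timeout handlers to take the unsigned long argument that timer_list.function receives in this kernel generation, recovering the command with a cast inside the handler rather than casting the function pointer at registration time (calling through a mismatched function-pointer type is undefined behaviour and defeats control-flow-integrity checks). A hedged standalone sketch of that pattern follows; the names are illustrative, and it assumes unsigned long is pointer-sized, as it is on the targets this patch cares about.

	#include <stdio.h>

	struct my_cmd {
		int tag;
		/* the real driver also embeds the timer, queue links, etc. */
	};

	/*
	 * The handler itself takes unsigned long and casts the argument
	 * back to the payload type, so the pointer installed in the timer
	 * matches the type the timer core actually calls through.
	 */
	static void cmd_timeout(unsigned long data)
	{
		struct my_cmd *cmd = (struct my_cmd *)data;

		printf("timeout on command %d\n", cmd->tag);
	}

	int main(void)
	{
		struct my_cmd cmd = { .tag = 7 };
		void (*fn)(unsigned long) = cmd_timeout; /* no function-pointer cast needed */

		fn((unsigned long)&cmd);
		return 0;
	}
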
add_timer(&ipr_cmd->timer); } @@ -8194,9 +8198,9 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) ioa_cfg->identify_hrrq_index = 0; if (ioa_cfg->hrrq_num == 1) - atomic_set(&ioa_cfg->hrrq_index, 0); + atomic_set_unchecked(&ioa_cfg->hrrq_index, 0); else - atomic_set(&ioa_cfg->hrrq_index, 1); + atomic_set_unchecked(&ioa_cfg->hrrq_index, 1); /* Zero out config table */ memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); @@ -8250,7 +8254,7 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + stage_time * HZ; - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; + ipr_cmd->timer.function = ipr_oper_timeout; ipr_cmd->done = ipr_reset_ioa_job; add_timer(&ipr_cmd->timer); @@ -8322,7 +8326,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; + ipr_cmd->timer.function = ipr_oper_timeout; ipr_cmd->done = ipr_reset_ioa_job; add_timer(&ipr_cmd->timer); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); @@ -9310,7 +9314,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev) * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT */ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { switch (state) { case pci_channel_io_frozen: diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 8995053d0..454107df6 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1543,7 +1543,7 @@ struct ipr_ioa_cfg { struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM]; u32 hrrq_num; - atomic_t hrrq_index; + atomic_unchecked_t hrrq_index; u16 identify_hrrq_index; struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES]; diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 16ca31ad5..f5adf48f4 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -101,12 +101,12 @@ struct fc_exch_mgr { u16 pool_max_index; struct { - atomic_t no_free_exch; - atomic_t no_free_exch_xid; - atomic_t xid_not_found; - atomic_t xid_busy; - atomic_t seq_not_found; - atomic_t non_bls_resp; + atomic_unchecked_t no_free_exch; + atomic_unchecked_t no_free_exch_xid; + atomic_unchecked_t xid_not_found; + atomic_unchecked_t xid_busy; + atomic_unchecked_t seq_not_found; + atomic_unchecked_t non_bls_resp; } stats; }; @@ -809,7 +809,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, /* allocate memory for exchange */ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); if (!ep) { - atomic_inc(&mp->stats.no_free_exch); + atomic_inc_unchecked(&mp->stats.no_free_exch); goto out; } memset(ep, 0, sizeof(*ep)); @@ -872,7 +872,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, return ep; err: spin_unlock_bh(&pool->lock); - atomic_inc(&mp->stats.no_free_exch_xid); + atomic_inc_unchecked(&mp->stats.no_free_exch_xid); mempool_free(ep, mp->ep_pool); return NULL; } @@ -1029,7 +1029,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, xid = ntohs(fh->fh_ox_id); /* we originated exch */ ep = fc_exch_find(mp, xid); if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); reject = FC_RJT_OX_ID; goto out; } @@ -1059,7 +1059,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, ep = fc_exch_find(mp, 
xid); if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { if (ep) { - atomic_inc(&mp->stats.xid_busy); + atomic_inc_unchecked(&mp->stats.xid_busy); reject = FC_RJT_RX_ID; goto rel; } @@ -1070,7 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, } xid = ep->xid; /* get our XID */ } else if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); reject = FC_RJT_RX_ID; /* XID not found */ goto out; } @@ -1088,7 +1088,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, } else { sp = &ep->seq; if (sp->id != fh->fh_seq_id) { - atomic_inc(&mp->stats.seq_not_found); + atomic_inc_unchecked(&mp->stats.seq_not_found); if (f_ctl & FC_FC_END_SEQ) { /* * Update sequence_id based on incoming last @@ -1539,22 +1539,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto out; } if (ep->esb_stat & ESB_ST_COMPLETE) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto rel; } if (ep->rxid == FC_XID_UNKNOWN) ep->rxid = ntohs(fh->fh_rx_id); if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto rel; } if (ep->did != ntoh24(fh->fh_s_id) && ep->did != FC_FID_FLOGI) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto rel; } sof = fr_sof(fp); @@ -1563,7 +1563,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) sp->ssb_stat |= SSB_ST_RESP; sp->id = fh->fh_seq_id; } else if (sp->id != fh->fh_seq_id) { - atomic_inc(&mp->stats.seq_not_found); + atomic_inc_unchecked(&mp->stats.seq_not_found); goto rel; } @@ -1626,9 +1626,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ if (!sp) - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); else - atomic_inc(&mp->stats.non_bls_resp); + atomic_inc_unchecked(&mp->stats.non_bls_resp); fc_frame_free(fp); } @@ -2268,13 +2268,13 @@ void fc_exch_update_stats(struct fc_lport *lport) list_for_each_entry(ema, &lport->ema_list, ema_list) { mp = ema->mp; - st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch); + st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch); st->fc_no_free_exch_xid += - atomic_read(&mp->stats.no_free_exch_xid); - st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found); - st->fc_xid_busy += atomic_read(&mp->stats.xid_busy); - st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found); - st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp); + atomic_read_unchecked(&mp->stats.no_free_exch_xid); + st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found); + st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy); + st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found); + st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp); } } EXPORT_SYMBOL(fc_exch_update_stats); diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 763f012fd..641a55ac6 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -532,7 +532,7 @@ static struct ata_port_operations sas_sata_ops = { .postreset = ata_std_postreset, .error_handler = 
ata_std_error_handler, .post_internal_cmd = sas_ata_post_internal, - .qc_defer = ata_std_qc_defer, + .qc_defer = ata_std_qc_defer, .qc_prep = ata_noop_qc_prep, .qc_issue = sas_ata_qc_issue, .qc_fill_rtf = sas_ata_qc_fill_rtf, diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index b48485946..1ea4db4bb 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -430,7 +430,7 @@ struct lpfc_vport { struct dentry *debug_nodelist; struct dentry *vport_debugfs_root; struct lpfc_debugfs_trc *disc_trc; - atomic_t disc_trc_cnt; + atomic_unchecked_t disc_trc_cnt; #endif uint8_t stat_data_enabled; uint8_t stat_data_blocked; @@ -898,8 +898,8 @@ struct lpfc_hba { struct timer_list fabric_block_timer; unsigned long bit_flags; #define FABRIC_COMANDS_BLOCKED 0 - atomic_t num_rsrc_err; - atomic_t num_cmd_success; + atomic_unchecked_t num_rsrc_err; + atomic_unchecked_t num_cmd_success; unsigned long last_rsrc_error_time; unsigned long last_ramp_down_time; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -934,7 +934,7 @@ struct lpfc_hba { struct dentry *debug_slow_ring_trc; struct lpfc_debugfs_trc *slow_ring_trc; - atomic_t slow_ring_trc_cnt; + atomic_unchecked_t slow_ring_trc_cnt; /* iDiag debugfs sub-directory */ struct dentry *idiag_root; struct dentry *idiag_pci_cfg; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a63542bac..80692eedf 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, #include -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); static unsigned long lpfc_debugfs_start_time = 0L; /* iDiag */ @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) lpfc_debugfs_enable = 0; len = 0; - index = (atomic_read(&vport->disc_trc_cnt) + 1) & + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) & (lpfc_debugfs_max_disc_trc - 1); for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { dtp = vport->disc_trc + i; @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) lpfc_debugfs_enable = 0; len = 0; - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) & + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) & (lpfc_debugfs_max_slow_ring_trc - 1); for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) { dtp = phba->slow_ring_trc + i; @@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, !vport || !vport->disc_trc) return; - index = atomic_inc_return(&vport->disc_trc_cnt) & + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) & (lpfc_debugfs_max_disc_trc - 1); dtp = vport->disc_trc + index; dtp->fmt = fmt; dtp->data1 = data1; dtp->data2 = data2; dtp->data3 = data3; - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); dtp->jif = jiffies; #endif return; @@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, !phba || !phba->slow_ring_trc) return; - index = atomic_inc_return(&phba->slow_ring_trc_cnt) & + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) & (lpfc_debugfs_max_slow_ring_trc - 1); dtp = phba->slow_ring_trc + index; dtp->fmt = fmt; dtp->data1 = data1; dtp->data2 = data2; dtp->data3 = data3; - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); + dtp->seq_cnt = 
atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); dtp->jif = jiffies; #endif return; @@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) "slow_ring buffer\n"); goto debug_failed; } - atomic_set(&phba->slow_ring_trc_cnt, 0); + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0); memset(phba->slow_ring_trc, 0, (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_slow_ring_trc)); @@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) "buffer\n"); goto debug_failed; } - atomic_set(&vport->disc_trc_cnt, 0); + atomic_set_unchecked(&vport->disc_trc_cnt, 0); snprintf(name, sizeof(name), "discovery_trace"); vport->debug_disc_trc = diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b100a22b3..7bc84eaec 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -11127,7 +11127,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev) * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ static pci_ers_result_t -lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +lpfc_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -11434,8 +11434,10 @@ lpfc_init(void) printk(KERN_ERR "Could not register lpfcmgmt device, " "misc_register returned with status %d", error); - lpfc_transport_functions.vport_create = lpfc_vport_create; - lpfc_transport_functions.vport_delete = lpfc_vport_delete; + pax_open_kernel(); + const_cast(lpfc_transport_functions.vport_create) = lpfc_vport_create; + const_cast(lpfc_transport_functions.vport_delete) = lpfc_vport_delete; + pax_close_kernel(); lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); if (lpfc_transport_template == NULL) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index d197aa176..c1178a684 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba) unsigned long expires; spin_lock_irqsave(&phba->hbalock, flags); - atomic_inc(&phba->num_rsrc_err); + atomic_inc_unchecked(&phba->num_rsrc_err); phba->last_rsrc_error_time = jiffies; expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL; @@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) unsigned long num_rsrc_err, num_cmd_success; int i; - num_rsrc_err = atomic_read(&phba->num_rsrc_err); - num_cmd_success = atomic_read(&phba->num_cmd_success); + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err); + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success); /* * The error and success command counters are global per @@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) } } lpfc_destroy_vport_work_array(phba, vports); - atomic_set(&phba->num_rsrc_err, 0); - atomic_set(&phba->num_cmd_success, 0); + atomic_set_unchecked(&phba->num_rsrc_err, 0); + atomic_set_unchecked(&phba->num_cmd_success, 0); } /** diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 3aaea713b..6cd098b0e 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2048,7 +2048,7 @@ struct megasas_instance { s8 init_id; u16 max_num_sge; - u16 max_fw_cmds; + u16 max_fw_cmds __intentional_overflow(-1); u16 max_mfi_cmds; u16 max_scsi_cmds; u16 ldio_threshold; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c 
b/drivers/scsi/mpt3sas/mpt3sas_base.c index a1a5ceb42..8e83e346e 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -105,7 +105,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc); * */ static int -_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) +_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp) { int ret = param_set_int(val, kp); struct MPT3SAS_ADAPTER *ioc; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8a7941b81..eb919a5d9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -281,7 +281,7 @@ struct _scsi_io_transfer { * Note: The logging levels are defined in mpt3sas_debug.h. */ static int -_scsih_set_debug_level(const char *val, struct kernel_param *kp) +_scsih_set_debug_level(const char *val, const struct kernel_param *kp) { int ret = param_set_int(val, kp); struct MPT3SAS_ADAPTER *ioc; @@ -8955,7 +8955,7 @@ scsih_resume(struct pci_dev *pdev) * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT */ static pci_ers_result_t -scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +scsih_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 68a5c347f..c88d66b60 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) res->scsi_dev = scsi_dev; scsi_dev->hostdata = res; res->change_detected = 0; - atomic_set(&res->read_failures, 0); - atomic_set(&res->write_failures, 0); + atomic_set_unchecked(&res->read_failures, 0); + atomic_set_unchecked(&res->write_failures, 0); rc = 0; } spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); @@ -557,8 +557,9 @@ static void pmcraid_reset_type(struct pmcraid_instance *pinstance) static void pmcraid_ioa_reset(struct pmcraid_cmd *); -static void pmcraid_bist_done(struct pmcraid_cmd *cmd) +static void pmcraid_bist_done(unsigned long _cmd) { + struct pmcraid_cmd *cmd = (struct pmcraid_cmd *)_cmd; struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long lock_flags; int rc; @@ -573,8 +574,7 @@ static void pmcraid_bist_done(struct pmcraid_cmd *cmd) cmd->timer.expires = jiffies + cmd->time_left; cmd->time_left = 0; cmd->timer.data = (unsigned long)cmd; - cmd->timer.function = - (void (*)(unsigned long))pmcraid_bist_done; + cmd->timer.function = pmcraid_bist_done; add_timer(&cmd->timer); } else { cmd->time_left = 0; @@ -607,7 +607,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd) cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); - cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done; + cmd->timer.function = pmcraid_bist_done; add_timer(&cmd->timer); } @@ -617,8 +617,9 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd) * Return value * None */ -static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd) +static void pmcraid_reset_alert_done(unsigned long _cmd) { + struct pmcraid_cmd *cmd = (struct pmcraid_cmd *)_cmd; struct pmcraid_instance *pinstance = cmd->drv_inst; u32 status = ioread32(pinstance->ioa_status); unsigned long lock_flags; @@ -639,8 +640,7 @@ static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd) cmd->time_left -= 
PMCRAID_CHECK_FOR_RESET_TIMEOUT; cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; - cmd->timer.function = - (void (*)(unsigned long))pmcraid_reset_alert_done; + cmd->timer.function = pmcraid_reset_alert_done; add_timer(&cmd->timer); } } @@ -678,8 +678,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd) cmd->time_left = PMCRAID_RESET_TIMEOUT; cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; - cmd->timer.function = - (void (*)(unsigned long))pmcraid_reset_alert_done; + cmd->timer.function = pmcraid_reset_alert_done; add_timer(&cmd->timer); iowrite32(DOORBELL_IOA_RESET_ALERT, @@ -704,8 +703,9 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd) * Return value: * None */ -static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd) +static void pmcraid_timeout_handler(unsigned long _cmd) { + struct pmcraid_cmd *cmd = (struct pmcraid_cmd *)_cmd; struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long lock_flags; @@ -920,7 +920,7 @@ static void pmcraid_send_cmd( struct pmcraid_cmd *cmd, void (*cmd_done) (struct pmcraid_cmd *), unsigned long timeout, - void (*timeout_func) (struct pmcraid_cmd *) + void (*timeout_func) (unsigned long) ) { /* initialize done function */ @@ -930,7 +930,7 @@ static void pmcraid_send_cmd( /* setup timeout handler */ cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + timeout; - cmd->timer.function = (void (*)(unsigned long))timeout_func; + cmd->timer.function = timeout_func; add_timer(&cmd->timer); } @@ -1968,7 +1968,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd) cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT); - cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler; + cmd->timer.function = pmcraid_timeout_handler; if (!timer_pending(&cmd->timer)) add_timer(&cmd->timer); @@ -2641,9 +2641,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) /* If this was a SCSI read/write command keep count of errors */ if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) - atomic_inc(&res->read_failures); + atomic_inc_unchecked(&res->read_failures); else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) - atomic_inc(&res->write_failures); + atomic_inc_unchecked(&res->write_failures); if (!RES_IS_GSCSI(res->cfg_entry) && masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { @@ -3469,7 +3469,7 @@ static int pmcraid_queuecommand_lck( * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses * hrrq_id assigned here in queuecommand */ - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) % pinstance->num_hrrq; cmd->cmd_done = pmcraid_io_done; @@ -3783,7 +3783,7 @@ static long pmcraid_ioctl_passthrough( * block of scsi_cmd which is re-used (e.g. 
cancel/abort), which uses * hrrq_id assigned here in queuecommand */ - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) % pinstance->num_hrrq; if (request_size) { @@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp) pinstance = container_of(workp, struct pmcraid_instance, worker_q); /* add resources only after host is added into system */ - if (!atomic_read(&pinstance->expose_resources)) + if (!atomic_read_unchecked(&pinstance->expose_resources)) return; fw_version = be16_to_cpu(pinstance->inq_data->fw_version); @@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host, init_waitqueue_head(&pinstance->reset_wait_q); atomic_set(&pinstance->outstanding_cmds, 0); - atomic_set(&pinstance->last_message_id, 0); - atomic_set(&pinstance->expose_resources, 0); + atomic_set_unchecked(&pinstance->last_message_id, 0); + atomic_set_unchecked(&pinstance->expose_resources, 0); INIT_LIST_HEAD(&pinstance->free_res_q); INIT_LIST_HEAD(&pinstance->used_res_q); @@ -5949,7 +5949,7 @@ static int pmcraid_probe(struct pci_dev *pdev, /* Schedule worker thread to handle CCN and take care of adding and * removing devices to OS */ - atomic_set(&pinstance->expose_resources, 1); + atomic_set_unchecked(&pinstance->expose_resources, 1); schedule_work(&pinstance->worker_q); return rc; diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index e1d150f3f..6c6df444a 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -748,7 +748,7 @@ struct pmcraid_instance { struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS]; /* Message id as filled in last fired IOARCB, used to identify HRRQ */ - atomic_t last_message_id; + atomic_unchecked_t last_message_id; /* configuration table */ struct pmcraid_config_table *cfg_table; @@ -777,7 +777,7 @@ struct pmcraid_instance { atomic_t outstanding_cmds; /* should add/delete resources to mid-layer now ?*/ - atomic_t expose_resources; + atomic_unchecked_t expose_resources; @@ -813,8 +813,8 @@ struct pmcraid_resource_entry { struct pmcraid_config_table_entry_ext cfg_entry_ext; }; struct scsi_device *scsi_dev; /* Link scsi_device structure */ - atomic_t read_failures; /* count of failed READ commands */ - atomic_t write_failures; /* count of failed WRITE commands */ + atomic_unchecked_t read_failures; /* count of failed READ commands */ + atomic_unchecked_t write_failures; /* count of failed WRITE commands */ /* To indicate add/delete/modify during CCN */ u8 change_detected; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index fe7469c90..91e0c0b45 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2186,7 +2186,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable) return 0; } -struct fc_function_template qla2xxx_transport_functions = { +fc_function_template_no_const qla2xxx_transport_functions = { .show_host_node_name = 1, .show_host_port_name = 1, @@ -2234,7 +2234,7 @@ struct fc_function_template qla2xxx_transport_functions = { .bsg_timeout = qla24xx_bsg_timeout, }; -struct fc_function_template qla2xxx_transport_vport_functions = { +fc_function_template_no_const qla2xxx_transport_vport_functions = { .show_host_node_name = 1, .show_host_port_name = 1, diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 6ca00813c..fbb9efd40 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ 
b/drivers/scsi/qla2xxx/qla_gbl.h @@ -178,8 +178,8 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *); */ extern struct scsi_host_template qla2xxx_driver_template; extern struct scsi_transport_template *qla2xxx_transport_vport_template; -extern void qla2x00_timer(scsi_qla_host_t *); -extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long); +extern void qla2x00_timer(unsigned long); +extern void qla2x00_start_timer(scsi_qla_host_t *, void (*)(unsigned long), unsigned long); extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *); extern int qla24xx_disable_vp (scsi_qla_host_t *); extern int qla24xx_enable_vp (scsi_qla_host_t *); @@ -583,8 +583,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t); struct device_attribute; extern struct device_attribute *qla2x00_host_attrs[]; struct fc_function_template; -extern struct fc_function_template qla2xxx_transport_functions; -extern struct fc_function_template qla2xxx_transport_vport_functions; +extern fc_function_template_no_const qla2xxx_transport_functions; +extern fc_function_template_no_const qla2xxx_transport_vport_functions; extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool); extern void qla2x00_init_host_attr(scsi_qla_host_t *); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 745572b62..1c13c5c6b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -301,12 +301,12 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; */ __inline__ void -qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval) +qla2x00_start_timer(scsi_qla_host_t *vha, void (*func)(unsigned long), unsigned long interval) { init_timer(&vha->timer); vha->timer.expires = jiffies + interval * HZ; vha->timer.data = (unsigned long)vha; - vha->timer.function = (void (*)(unsigned long))func; + vha->timer.function = func; add_timer(&vha->timer); vha->timer_active = 1; } @@ -1529,8 +1529,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha) !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { /* Ok, a 64bit DMA mask is applicable. 
*/ ha->flags.enable_64bit_addressing = 1; - ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; - ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; + pax_open_kernel(); + const_cast(ha->isp_ops->calc_req_entries) = qla2x00_calc_iocbs_64; + const_cast(ha->isp_ops->build_iocbs) = qla2x00_build_scsi_iocbs_64; + pax_close_kernel(); return; } } @@ -5410,8 +5412,9 @@ qla2x00_rst_aen(scsi_qla_host_t *vha) * Context: Interrupt ***************************************************************************/ void -qla2x00_timer(scsi_qla_host_t *vha) +qla2x00_timer(unsigned long _vha) { + scsi_qla_host_t *vha = (scsi_qla_host_t *)_vha; unsigned long cpu_flags = 0; int start_dpc = 0; int index; @@ -5673,7 +5676,7 @@ qla2x00_release_firmware(void) } static pci_ers_result_t -qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +qla2xxx_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { scsi_qla_host_t *vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = vha->hw; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index bff9689f5..8caa18774 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -678,7 +678,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) loop_id = le16_to_cpu(n->u.isp24.nport_handle); if (loop_id == 0xFFFF) { /* Global event */ - atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); + atomic_inc_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); spin_lock_irqsave(&ha->tgt.sess_lock, flags); qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); @@ -845,8 +845,9 @@ static void qlt_undelete_sess(struct qla_tgt_sess *sess) sess->deleted = 0; } -static void qlt_del_sess_work_fn(struct delayed_work *work) +static void qlt_del_sess_work_fn(struct work_struct *_work) { + struct delayed_work *work = container_of(_work, struct delayed_work, work); struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_del_work); struct scsi_qla_host *vha = tgt->vha; @@ -5825,7 +5826,7 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, retry: global_resets = - atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); + atomic_read_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); rc = qla24xx_get_loop_id(vha, s_id, &loop_id); if (rc != 0) { @@ -5864,12 +5865,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, } if (global_resets != - atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { + atomic_read_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, "qla_target(%d): global reset during session discovery " "(counter was %d, new %d), retrying", vha->vp_idx, global_resets, - atomic_read(&vha->vha_tgt. + atomic_read_unchecked(&vha->vha_tgt. 
qla_tgt->tgt_global_resets_count)); goto retry; } @@ -6080,8 +6081,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) init_waitqueue_head(&tgt->waitQ); INIT_LIST_HEAD(&tgt->sess_list); INIT_LIST_HEAD(&tgt->del_sess_list); - INIT_DELAYED_WORK(&tgt->sess_del_work, - (void (*)(struct work_struct *))qlt_del_sess_work_fn); + INIT_DELAYED_WORK(&tgt->sess_del_work, qlt_del_sess_work_fn); spin_lock_init(&tgt->sess_work_lock); INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); INIT_LIST_HEAD(&tgt->sess_works_list); @@ -6089,7 +6089,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) INIT_LIST_HEAD(&tgt->srr_ctio_list); INIT_LIST_HEAD(&tgt->srr_imm_list); INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); - atomic_set(&tgt->tgt_global_resets_count, 0); + atomic_set_unchecked(&tgt->tgt_global_resets_count, 0); base_vha->vha_tgt.qla_tgt = tgt; diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index f26c5f60e..e88e9c5a4 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -876,7 +876,7 @@ struct qla_tgt { struct list_head srr_imm_list; struct work_struct srr_work; - atomic_t tgt_global_resets_count; + atomic_unchecked_t tgt_global_resets_count; struct list_head tgt_list_entry; }; diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index a7cfc270b..151f483fc 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -306,7 +306,7 @@ struct ddb_entry { * (4000 only) */ atomic_t relogin_timer; /* Max Time to wait for * relogin to complete */ - atomic_t relogin_retry_count; /* Num of times relogin has been + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been * retried */ uint32_t default_time2wait; /* Default Min time between * relogins (+aens) */ diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 01c3610a6..f287da931 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -3956,7 +3956,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, * Timer routines */ -static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func, +static void qla4xxx_start_timer(struct scsi_qla_host *ha, void (*func)(unsigned long), unsigned long interval) { DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", @@ -3964,7 +3964,7 @@ static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func, init_timer(&ha->timer); ha->timer.expires = jiffies + interval * HZ; ha->timer.data = (unsigned long)ha; - ha->timer.function = (void (*)(unsigned long))func; + ha->timer.function = func; add_timer(&ha->timer); ha->timer_active = 1; } @@ -4490,12 +4490,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) */ if (!iscsi_is_session_online(cls_sess)) { /* Reset retry relogin timer */ - atomic_inc(&ddb_entry->relogin_retry_count); + atomic_inc_unchecked(&ddb_entry->relogin_retry_count); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: index[%d] relogin timed out-retrying" " relogin (%d), retry (%d)\n", __func__, ddb_entry->fw_ddb_index, - atomic_read(&ddb_entry->relogin_retry_count), + atomic_read_unchecked(&ddb_entry->relogin_retry_count), ddb_entry->default_time2wait + 4)); set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); atomic_set(&ddb_entry->retry_relogin_timer, @@ -4508,8 +4508,9 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) * qla4xxx_timer - checks every second for work to do. 
* @ha: Pointer to host adapter structure. **/ -static void qla4xxx_timer(struct scsi_qla_host *ha) +static void qla4xxx_timer(unsigned long _ha) { + struct scsi_qla_host *ha = (struct scsi_qla_host *)_ha; int start_dpc = 0; uint16_t w; @@ -6603,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); - atomic_set(&ddb_entry->relogin_retry_count, 0); + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); ddb_entry->default_relogin_timeout = (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? @@ -9557,7 +9558,7 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) * RECOVERED - driver's pci_resume() */ static pci_ers_result_t -qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +qla4xxx_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct scsi_qla_host *ha = pci_get_drvdata(pdev); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 1deb6adc4..3057db539 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -591,7 +591,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) good_bytes = scsi_bufflen(cmd); if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { - int old_good_bytes = good_bytes; + unsigned int old_good_bytes = good_bytes; drv = scsi_cmd_to_driver(cmd); if (drv->done) good_bytes = drv->done(cmd); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index cf04a364f..54dd630bd 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -290,10 +290,10 @@ struct sdebug_queue { atomic_t blocked; /* to temporarily stop more being queued */ }; -static atomic_t sdebug_cmnd_count; /* number of incoming commands */ -static atomic_t sdebug_completions; /* count of deferred completions */ -static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */ -static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */ +static atomic_unchecked_t sdebug_cmnd_count; /* number of incoming commands */ +static atomic_unchecked_t sdebug_completions; /* count of deferred completions */ +static atomic_unchecked_t sdebug_miss_cpus; /* submission + completion cpus differ */ +static atomic_unchecked_t sdebug_a_tsf; /* 'almost task set full' counter */ struct opcode_info_t { u8 num_attached; /* 0 if this is it (i.e. 
a leaf); use 0xff */ @@ -3493,9 +3493,9 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) qc_idx = sd_dp->qc_idx; sqp = sdebug_q_arr + sd_dp->sqa_idx; if (sdebug_statistics) { - atomic_inc(&sdebug_completions); + atomic_inc_unchecked(&sdebug_completions); if (raw_smp_processor_id() != sd_dp->issuing_cpu) - atomic_inc(&sdebug_miss_cpus); + atomic_inc_unchecked(&sdebug_miss_cpus); } if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) { pr_err("wild qc_idx=%d\n", qc_idx); @@ -3967,23 +3967,23 @@ static void tweak_cmnd_count(void) if (modulo < 2) return; block_unblock_all_queues(true); - count = atomic_read(&sdebug_cmnd_count); - atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo); + count = atomic_read_unchecked(&sdebug_cmnd_count); + atomic_set_unchecked(&sdebug_cmnd_count, (count / modulo) * modulo); block_unblock_all_queues(false); } static void clear_queue_stats(void) { - atomic_set(&sdebug_cmnd_count, 0); - atomic_set(&sdebug_completions, 0); - atomic_set(&sdebug_miss_cpus, 0); - atomic_set(&sdebug_a_tsf, 0); + atomic_set_unchecked(&sdebug_cmnd_count, 0); + atomic_set_unchecked(&sdebug_completions, 0); + atomic_set_unchecked(&sdebug_miss_cpus, 0); + atomic_set_unchecked(&sdebug_a_tsf, 0); } static void setup_inject(struct sdebug_queue *sqp, struct sdebug_queued_cmd *sqcp) { - if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) + if ((atomic_read_unchecked(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) return; sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts); sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts); @@ -4040,9 +4040,9 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, (SDEBUG_OPT_RARE_TSF & sdebug_opts) && (scsi_result == 0))) { if ((num_in_q == (qdepth - 1)) && - (atomic_inc_return(&sdebug_a_tsf) >= + (atomic_inc_return_unchecked(&sdebug_a_tsf) >= abs(sdebug_every_nth))) { - atomic_set(&sdebug_a_tsf, 0); + atomic_set_unchecked(&sdebug_a_tsf, 0); inject = 1; scsi_result = device_qfull_result; } @@ -4297,10 +4297,10 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) TICK_NSEC / 1000, "statistics", sdebug_statistics, sdebug_mq_active); seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n", - atomic_read(&sdebug_cmnd_count), - atomic_read(&sdebug_completions), - "miss_cpus", atomic_read(&sdebug_miss_cpus), - atomic_read(&sdebug_a_tsf)); + atomic_read_unchecked(&sdebug_cmnd_count), + atomic_read_unchecked(&sdebug_completions), + "miss_cpus", atomic_read_unchecked(&sdebug_miss_cpus), + atomic_read_unchecked(&sdebug_a_tsf)); seq_printf(m, "submit_queues=%d\n", submit_queues); for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { @@ -5253,7 +5253,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) static bool fake_timeout(struct scsi_cmnd *scp) { - if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) { + if (0 == (atomic_read_unchecked(&sdebug_cmnd_count) % abs(sdebug_every_nth))) { if (sdebug_every_nth < -1) sdebug_every_nth = -1; if (SDEBUG_OPT_TIMEOUT & sdebug_opts) @@ -5284,7 +5284,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, scsi_set_resid(scp, 0); if (sdebug_statistics) - atomic_inc(&sdebug_cmnd_count); + atomic_inc_unchecked(&sdebug_cmnd_count); if (unlikely(sdebug_verbose && !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) { char b[120]; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 2cca9cffc..cbe4c6d72 100644 --- a/drivers/scsi/scsi_lib.c +++ 
b/drivers/scsi/scsi_lib.c @@ -1513,7 +1513,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) shost = sdev->host; scsi_init_cmd_errh(cmd); cmd->result = DID_NO_CONNECT << 16; - atomic_inc(&cmd->device->iorequest_cnt); + atomic_inc_unchecked(&cmd->device->iorequest_cnt); /* * SCSI request completion path will do scsi_device_unbusy(), @@ -1536,9 +1536,9 @@ static void scsi_softirq_done(struct request *rq) INIT_LIST_HEAD(&cmd->eh_entry); - atomic_inc(&cmd->device->iodone_cnt); + atomic_inc_unchecked(&cmd->device->iodone_cnt); if (cmd->result) - atomic_inc(&cmd->device->ioerr_cnt); + atomic_inc_unchecked(&cmd->device->ioerr_cnt); disposition = scsi_decide_disposition(cmd); if (disposition != SUCCESS && @@ -1579,7 +1579,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) struct Scsi_Host *host = cmd->device->host; int rtn = 0; - atomic_inc(&cmd->device->iorequest_cnt); + atomic_inc_unchecked(&cmd->device->iorequest_cnt); /* check if the device is still usable */ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 82dfe07b1..2b3eeaec6 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -848,7 +848,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ - unsigned long long count = atomic_read(&sdev->field); \ + unsigned long long count = atomic_read_unchecked(&sdev->field); \ return snprintf(buf, 20, "0x%llx\n", count); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 0f3a38695..1616cee48 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -502,7 +502,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class, * Netlink Infrastructure */ -static atomic_t fc_event_seq; +static atomic_unchecked_t fc_event_seq; /** * fc_get_event_number - Obtain the next sequential FC event number @@ -515,7 +515,7 @@ static atomic_t fc_event_seq; u32 fc_get_event_number(void) { - return atomic_add_return(1, &fc_event_seq); + return atomic_add_return_unchecked(1, &fc_event_seq); } EXPORT_SYMBOL(fc_get_event_number); @@ -659,7 +659,7 @@ static __init int fc_transport_init(void) { int error; - atomic_set(&fc_event_seq, 0); + atomic_set_unchecked(&fc_event_seq, 0); error = transport_class_register(&fc_host_class); if (error) @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val) char *cp; *val = simple_strtoul(buf, &cp, 0); - if ((*cp && (*cp != '\n')) || (*val < 0)) + if (*cp && (*cp != '\n')) return -EINVAL; /* * Check for overflow; dev_loss_tmo is u32 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 42bca619f..ceceb5d22 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -79,7 +79,7 @@ struct iscsi_internal { struct transport_container session_cont; }; -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_eh_timer_workq; static DEFINE_IDA(iscsi_sess_ida); @@ -2073,7 +2073,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) int id = 0; int err; - session->sid = atomic_add_return(1, &iscsi_session_nr); + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr); if 
(target_id == ISCSI_MAX_TARGET) { id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL); @@ -4523,7 +4523,7 @@ static __init int iscsi_transport_init(void) printk(KERN_INFO "Loading iSCSI transport class v%s.\n", ISCSI_TRANSPORT_VERSION); - atomic_set(&iscsi_session_nr, 0); + atomic_set_unchecked(&iscsi_session_nr, 0); err = class_register(&iscsi_transport_class); if (err) diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 319868f3f..a00cda54c 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -758,7 +758,7 @@ spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, static enum spi_compare_returns spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, enum spi_compare_returns - (*compare_fn)(struct scsi_device *, u8 *, u8 *, int)) + (*compare_fn)(struct scsi_device *, u8 *, u8 *, const int)) { struct spi_internal *i = to_spi_internal(sdev->host->transportt); struct scsi_target *starget = sdev->sdev_target; diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index e3cd3ece4..97ab64368 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -35,7 +35,7 @@ #include "scsi_priv.h" struct srp_host_attrs { - atomic_t next_port_id; + atomic_unchecked_t next_port_id; }; #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) @@ -105,7 +105,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev, struct Scsi_Host *shost = dev_to_shost(dev); struct srp_host_attrs *srp_host = to_srp_host_attrs(shost); - atomic_set(&srp_host->next_port_id, 0); + atomic_set_unchecked(&srp_host->next_port_id, 0); return 0; } @@ -226,7 +226,7 @@ static ssize_t show_reconnect_delay(struct device *dev, static ssize_t store_reconnect_delay(struct device *dev, struct device_attribute *attr, - const char *buf, const size_t count) + const char *buf, size_t count) { struct srp_rport *rport = transport_class_to_srp_rport(dev); int res, delay; @@ -752,7 +752,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, rport_fast_io_fail_timedout); INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id); dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); transport_setup_device(&rport->dev); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 51e56296f..caef5f73a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -113,7 +113,7 @@ static int sd_resume(struct device *); static void sd_rescan(struct device *); static int sd_init_command(struct scsi_cmnd *SCpnt); static void sd_uninit_command(struct scsi_cmnd *SCpnt); -static int sd_done(struct scsi_cmnd *); +static unsigned int sd_done(struct scsi_cmnd *); static int sd_eh_action(struct scsi_cmnd *, int); static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); static void scsi_disk_release(struct device *cdev); @@ -1768,7 +1768,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) * * Note: potentially run from within an ISR. Must not block. **/ -static int sd_done(struct scsi_cmnd *SCpnt) +static unsigned int sd_done(struct scsi_cmnd *SCpnt) { int result = SCpnt->result; unsigned int good_bytes = result ? 
0 : scsi_bufflen(SCpnt); @@ -3081,7 +3081,7 @@ static int sd_probe(struct device *dev) sdkp->disk = gd; sdkp->index = index; atomic_set(&sdkp->openers, 0); - atomic_set(&sdkp->device->ioerr_cnt, 0); + atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0); if (!sdp->request_queue->rq_timeout) { if (sdp->type != TYPE_MOD) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index dbe5b4b95..1242bc326 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) sdp->disk->disk_name, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), NULL, - (char *)arg); + (char __user *)arg); case BLKTRACESTART: return blk_trace_startstop(sdp->device->request_queue, 1); case BLKTRACESTOP: diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 07b6444d3..b4d84e18d 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -956,7 +956,7 @@ struct pqi_ctrl_info { struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS]; struct work_struct event_work; - atomic_t num_interrupts; + atomic_unchecked_t num_interrupts; int previous_num_interrupts; unsigned int num_heartbeats_requested; struct timer_list heartbeat_timer; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index a535b2661..d62e7f11b 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2727,7 +2727,7 @@ static void pqi_heartbeat_timer_handler(unsigned long data) int num_interrupts; struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data; - num_interrupts = atomic_read(&ctrl_info->num_interrupts); + num_interrupts = atomic_read_unchecked(&ctrl_info->num_interrupts); if (num_interrupts == ctrl_info->previous_num_interrupts) { ctrl_info->num_heartbeats_requested++; @@ -2750,7 +2750,7 @@ static void pqi_heartbeat_timer_handler(unsigned long data) static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) { ctrl_info->previous_num_interrupts = - atomic_read(&ctrl_info->num_interrupts); + atomic_read_unchecked(&ctrl_info->num_interrupts); init_timer(&ctrl_info->heartbeat_timer); ctrl_info->heartbeat_timer.expires = @@ -2877,7 +2877,7 @@ static irqreturn_t pqi_irq_handler(int irq, void *data) num_responses_handled += pqi_process_event_intr(ctrl_info); if (num_responses_handled) - atomic_inc(&ctrl_info->num_interrupts); + atomic_inc_unchecked(&ctrl_info->num_interrupts); pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); @@ -5535,7 +5535,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) spin_lock_init(&ctrl_info->scsi_device_list_lock); INIT_WORK(&ctrl_info->event_work, pqi_event_worker); - atomic_set(&ctrl_info->num_interrupts, 0); + atomic_set_unchecked(&ctrl_info->num_interrupts, 0); INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index bed2bbd6b..3abf75cbe 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -80,7 +80,7 @@ static DEFINE_MUTEX(sr_mutex); static int sr_probe(struct device *); static int sr_remove(struct device *); static int sr_init_command(struct scsi_cmnd *SCpnt); -static int sr_done(struct scsi_cmnd *); +static unsigned int sr_done(struct scsi_cmnd *); static int sr_runtime_suspend(struct device *dev); static const struct dev_pm_ops sr_pm_ops = { @@ -315,13 +315,13 @@ static unsigned int 
sr_check_events(struct cdrom_device_info *cdi, * It will be notified on the end of a SCSI read / write, and will take one * of several actions based on success or failure. */ -static int sr_done(struct scsi_cmnd *SCpnt) +static unsigned int sr_done(struct scsi_cmnd *SCpnt) { int result = SCpnt->result; - int this_count = scsi_bufflen(SCpnt); - int good_bytes = (result == 0 ? this_count : 0); - int block_sectors = 0; - long error_sector; + unsigned int this_count = scsi_bufflen(SCpnt); + unsigned int good_bytes = (result == 0 ? this_count : 0); + unsigned int block_sectors = 0; + sector_t error_sector; struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk); #ifdef DEBUG @@ -354,9 +354,12 @@ static int sr_done(struct scsi_cmnd *SCpnt) if (cd->device->sector_size == 2048) error_sector <<= 2; error_sector &= ~(block_sectors - 1); - good_bytes = (error_sector - - blk_rq_pos(SCpnt->request)) << 9; - if (good_bytes < 0 || good_bytes >= this_count) + if (error_sector >= blk_rq_pos(SCpnt->request)) { + good_bytes = (error_sector - + blk_rq_pos(SCpnt->request)) << 9; + if (good_bytes >= this_count) + good_bytes = 0; + } else good_bytes = 0; /* * The SCSI specification allows for the value diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index de2c1bfe2..60b856356 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -72,7 +72,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj, return i; } -static struct bin_attribute fuse_bin_attr = { +static bin_attribute_no_const fuse_bin_attr = { .attr = { .name = "fuse", .mode = S_IRUGO, }, .read = fuse_read, }; diff --git a/drivers/spi/spi-bcm-qspi.h b/drivers/spi/spi-bcm-qspi.h index 7abfc75a3..1a20bae5e 100644 --- a/drivers/spi/spi-bcm-qspi.h +++ b/drivers/spi/spi-bcm-qspi.h @@ -67,7 +67,7 @@ struct bcm_qspi_soc_intc { void (*bcm_qspi_int_set)(struct bcm_qspi_soc_intc *soc_intc, int type, bool en); u32 (*bcm_qspi_get_int_status)(struct bcm_qspi_soc_intc *soc_intc); -}; +} __no_const; /* Read controller register*/ static inline u32 bcm_qspi_readl(bool be, void __iomem *addr) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 838783c3f..d657daea9 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2995,7 +2995,7 @@ int spi_bus_unlock(struct spi_master *master) EXPORT_SYMBOL_GPL(spi_bus_unlock); /* portable code must never pass more than 32 bytes */ -#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) +#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES) static u8 *buf; diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 587f68aa4..1491e4fbe 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -649,7 +649,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, { struct fb_info *info; struct fbtft_par *par; - struct fb_ops *fbops = NULL; + fb_ops_no_const *fbops = NULL; struct fb_deferred_io *fbdefio = NULL; u8 *vmem = NULL; void *txbuf = NULL; diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 89c4b5b76..521d7e095 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -93,7 +93,7 @@ struct fbtft_ops { int (*set_var)(struct fbtft_par *par); int (*set_gamma)(struct fbtft_par *par, unsigned long *curves); -}; +} __no_const; /** * struct fbtft_display - Describes the display properties diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index e72dfa969..15c1dd14c 100644 --- 
a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -410,7 +410,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb) return nic_type; } -static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t gdm_lte_tx(struct sk_buff *skb, struct net_device *dev) { struct nic *nic = netdev_priv(dev); u32 nic_type; diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c index ae396638f..a2882736e 100644 --- a/drivers/staging/gdm724x/gdm_tty.c +++ b/drivers/staging/gdm724x/gdm_tty.c @@ -44,7 +44,7 @@ #define gdm_tty_send_control(n, r, v, d, l) (\ n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l)) -#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count) +#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count)) static struct tty_driver *gdm_driver[TTY_MAX_COUNT]; static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR]; diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c index 557075147..7985bc2de 100644 --- a/drivers/staging/greybus/connection.c +++ b/drivers/staging/greybus/connection.c @@ -187,7 +187,7 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id, connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL; connection->state = GB_CONNECTION_STATE_DISABLED; - atomic_set(&connection->op_cycle, 0); + atomic_set_unchecked(&connection->op_cycle, 0); mutex_init(&connection->mutex); spin_lock_init(&connection->lock); INIT_LIST_HEAD(&connection->operations); diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h index 4d9f4c641..0f7a9a083 100644 --- a/drivers/staging/greybus/connection.h +++ b/drivers/staging/greybus/connection.h @@ -55,7 +55,7 @@ struct gb_connection { char name[16]; struct workqueue_struct *wq; - atomic_t op_cycle; + atomic_unchecked_t op_cycle; void *private; diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c index 250caa00d..8d321a8f7 100644 --- a/drivers/staging/greybus/gpio.c +++ b/drivers/staging/greybus/gpio.c @@ -638,7 +638,7 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev, struct gb_connection *connection; struct gb_gpio_controller *ggc; struct gpio_chip *gpio; - struct irq_chip *irqc; + irq_chip_no_const *irqc; int ret; ggc = kzalloc(sizeof(*ggc), GFP_KERNEL); diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 8dffd8a7e..ef96a2919 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -30,7 +30,7 @@ struct gb_channel { u32 mode; char *mode_name; struct attribute **attrs; - struct attribute_group *attr_group; + attribute_group_no_const *attr_group; const struct attribute_group **attr_groups; struct led_classdev *led; #if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH) diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c index 0123109a1..51846d3d7 100644 --- a/drivers/staging/greybus/operation.c +++ b/drivers/staging/greybus/operation.c @@ -720,7 +720,7 @@ int gb_operation_request_send(struct gb_operation *operation, if (gb_operation_is_unidirectional(operation)) { operation->id = 0; } else { - cycle = (unsigned int)atomic_inc_return(&connection->op_cycle); + cycle = (unsigned int)atomic_inc_return_unchecked(&connection->op_cycle); operation->id = (u16)(cycle % U16_MAX + 1); } diff --git a/drivers/staging/i4l/icn/icn.c b/drivers/staging/i4l/icn/icn.c index 514bfc2c5..76e653f71 100644 --- a/drivers/staging/i4l/icn/icn.c 
+++ b/drivers/staging/i4l/icn/icn.c @@ -1039,7 +1039,7 @@ icn_writecmd(const u_char __user *ubuf, const u_char *kbuf, int len, if (count > len) count = len; if (user) { - if (copy_from_user(msg, ubuf, count)) + if (count > sizeof msg || copy_from_user(msg, ubuf, count)) return -EFAULT; } else memcpy(msg, kbuf, count); diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index b460dda7e..d68d53844 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -547,8 +547,8 @@ static int ad7280_attr_init(struct ad7280_state *st) { int dev, ch, cnt; - st->iio_attr = kcalloc(2, sizeof(*st->iio_attr) * - (st->slave_num + 1) * AD7280A_CELLS_PER_DEV, + st->iio_attr = kcalloc(sizeof(*st->iio_attr) * + (st->slave_num + 1) * AD7280A_CELLS_PER_DEV, 2, GFP_KERNEL); if (!st->iio_attr) return -ENOMEM; diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index b2b4fa4c3..3120edc08 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -176,9 +176,10 @@ int ks_wlan_setup_parameter(struct ks_wlan_private *priv, /*------------------------------------------------------------------*/ /* Wireless Handler : get protocol name */ static int ks_wlan_get_name(struct net_device *dev, - struct iw_request_info *info, char *cwrq, + struct iw_request_info *info, union iwreq_data *_cwrq, char *extra) { + char *cwrq = _cwrq->name; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -202,9 +203,10 @@ static int ks_wlan_get_name(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : set frequency */ static int ks_wlan_set_freq(struct net_device *dev, - struct iw_request_info *info, struct iw_freq *fwrq, + struct iw_request_info *info, union iwreq_data *_fwrq, char *extra) { + struct iw_freq *fwrq = &_fwrq->freq; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int rc = -EINPROGRESS; /* Call commit handler */ @@ -250,9 +252,10 @@ static int ks_wlan_set_freq(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : get frequency */ static int ks_wlan_get_freq(struct net_device *dev, - struct iw_request_info *info, struct iw_freq *fwrq, + struct iw_request_info *info, union iwreq_data *_fwrq, char *extra) { + struct iw_freq *fwrq = &_fwrq->freq; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int f; @@ -275,8 +278,9 @@ static int ks_wlan_get_freq(struct net_device *dev, /* Wireless Handler : set ESSID */ static int ks_wlan_set_essid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->essid; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); size_t len; @@ -335,8 +339,9 @@ static int ks_wlan_set_essid(struct net_device *dev, /* Wireless Handler : get ESSID */ static int ks_wlan_get_essid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->essid; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -378,8 +383,9 @@ static int ks_wlan_get_essid(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : set AP address */ static int ks_wlan_set_wap(struct net_device *dev, 
struct iw_request_info *info, - struct sockaddr *ap_addr, char *extra) + union iwreq_data *_ap_addr, char *extra) { + struct sockaddr *ap_addr = &_ap_addr->ap_addr; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -416,8 +422,9 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info, /*------------------------------------------------------------------*/ /* Wireless Handler : get AP address */ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) + union iwreq_data *_awrq, char *extra) { + struct sockaddr *awrq = &_awrq->ap_addr; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -439,9 +446,10 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info, /*------------------------------------------------------------------*/ /* Wireless Handler : set Nickname */ static int ks_wlan_set_nick(struct net_device *dev, - struct iw_request_info *info, struct iw_point *dwrq, + struct iw_request_info *info, union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -463,9 +471,10 @@ static int ks_wlan_set_nick(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : get Nickname */ static int ks_wlan_get_nick(struct net_device *dev, - struct iw_request_info *info, struct iw_point *dwrq, + struct iw_request_info *info, union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -483,9 +492,10 @@ static int ks_wlan_get_nick(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : set Bit-Rate */ static int ks_wlan_set_rate(struct net_device *dev, - struct iw_request_info *info, struct iw_param *vwrq, + struct iw_request_info *info, union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq = &_vwrq->bitrate; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int i = 0; @@ -718,9 +728,10 @@ static int ks_wlan_set_rate(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : get Bit-Rate */ static int ks_wlan_get_rate(struct net_device *dev, - struct iw_request_info *info, struct iw_param *vwrq, + struct iw_request_info *info, union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq = &_vwrq->bitrate; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -746,8 +757,9 @@ static int ks_wlan_get_rate(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : set RTS threshold */ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq = &_vwrq->rts; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int rthr = vwrq->value; @@ -770,8 +782,9 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info, /*------------------------------------------------------------------*/ /* Wireless Handler : get RTS threshold */ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct 
iw_param *vwrq = &_vwrq->rts; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -789,9 +802,10 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info, /*------------------------------------------------------------------*/ /* Wireless Handler : set Fragmentation threshold */ static int ks_wlan_set_frag(struct net_device *dev, - struct iw_request_info *info, struct iw_param *vwrq, + struct iw_request_info *info, union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq =&_vwrq->frag; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int fthr = vwrq->value; @@ -815,9 +829,10 @@ static int ks_wlan_set_frag(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : get Fragmentation threshold */ static int ks_wlan_get_frag(struct net_device *dev, - struct iw_request_info *info, struct iw_param *vwrq, + struct iw_request_info *info, union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq =&_vwrq->frag; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -835,9 +850,10 @@ static int ks_wlan_get_frag(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : set Mode of Operation */ static int ks_wlan_set_mode(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -871,9 +887,10 @@ static int ks_wlan_set_mode(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : get Mode of Operation */ static int ks_wlan_get_mode(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -901,8 +918,9 @@ static int ks_wlan_get_mode(struct net_device *dev, /* Wireless Handler : set Encryption Key */ static int ks_wlan_set_encode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->encoding; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -1019,8 +1037,9 @@ static int ks_wlan_set_encode(struct net_device *dev, /* Wireless Handler : get Encryption Key */ static int ks_wlan_get_encode(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->encoding; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); char zeros[16]; @@ -1075,7 +1094,7 @@ static int ks_wlan_get_encode(struct net_device *dev, /* Wireless Handler : set Tx-Power */ static int ks_wlan_set_txpow(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *vwrq, char *extra) { return -EOPNOTSUPP; /* Not Support */ } @@ -1084,8 +1103,10 @@ static int ks_wlan_set_txpow(struct net_device *dev, /* Wireless Handler : get Tx-Power */ static int ks_wlan_get_txpow(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq = &_vwrq->txpower; + if (priv->sleep_mode == 
SLP_SLEEP) { return -EPERM; } @@ -1102,7 +1123,7 @@ static int ks_wlan_get_txpow(struct net_device *dev, /* Wireless Handler : set Retry limits */ static int ks_wlan_set_retry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *vwrq, char *extra) { return -EOPNOTSUPP; /* Not Support */ } @@ -1111,8 +1132,10 @@ static int ks_wlan_set_retry(struct net_device *dev, /* Wireless Handler : get Retry limits */ static int ks_wlan_get_retry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq =&_vwrq->retry; + if (priv->sleep_mode == SLP_SLEEP) { return -EPERM; } @@ -1130,8 +1153,9 @@ static int ks_wlan_get_retry(struct net_device *dev, /* Wireless Handler : get range info */ static int ks_wlan_get_range(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); struct iw_range *range = (struct iw_range *)extra; @@ -1261,8 +1285,9 @@ static int ks_wlan_get_range(struct net_device *dev, /* Wireless Handler : set Power Management */ static int ks_wlan_set_power(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq =&_vwrq->power; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); short enabled; @@ -1296,8 +1321,9 @@ static int ks_wlan_set_power(struct net_device *dev, /* Wireless Handler : get Power Management */ static int ks_wlan_get_power(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq =&_vwrq->power; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -1317,8 +1343,9 @@ static int ks_wlan_get_power(struct net_device *dev, /* Wireless Handler : get wirless statistics */ static int ks_wlan_get_iwstats(struct net_device *dev, struct iw_request_info *info, - struct iw_quality *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct iw_quality *vwrq = &_vwrq->qual; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -1338,7 +1365,7 @@ static int ks_wlan_get_iwstats(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : set Sensitivity */ static int ks_wlan_set_sens(struct net_device *dev, - struct iw_request_info *info, struct iw_param *vwrq, + struct iw_request_info *info, union iwreq_data *vwrq, char *extra) { return -EOPNOTSUPP; /* Not Support */ @@ -1347,9 +1374,11 @@ static int ks_wlan_set_sens(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : get Sensitivity */ static int ks_wlan_get_sens(struct net_device *dev, - struct iw_request_info *info, struct iw_param *vwrq, + struct iw_request_info *info, union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq = &_vwrq->sens; + /* Not Support */ vwrq->value = 0; vwrq->disabled = (vwrq->value == 0); @@ -1363,8 +1392,9 @@ static int ks_wlan_get_sens(struct net_device *dev, /* Note : this is deprecated in favor of IWSCAN */ static int ks_wlan_get_aplist(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char 
*extra) { + struct iw_point *dwrq = &_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); struct sockaddr *address = (struct sockaddr *)extra; @@ -1591,9 +1621,10 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : Read Scan Results */ static int ks_wlan_get_scan(struct net_device *dev, - struct iw_request_info *info, struct iw_point *dwrq, + struct iw_request_info *info, union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int i; @@ -1650,7 +1681,7 @@ static int ks_wlan_get_scan(struct net_device *dev, /*------------------------------------------------------------------*/ /* Commit handler : called after a bunch of SET operations */ static int ks_wlan_config_commit(struct net_device *dev, - struct iw_request_info *info, void *zwrq, + struct iw_request_info *info, union iwreq_data *zwrq, char *extra) { struct ks_wlan_private *priv = @@ -1668,8 +1699,9 @@ static int ks_wlan_config_commit(struct net_device *dev, /* Wireless handler : set association ie params */ static int ks_wlan_set_genie(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq =&_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -1687,8 +1719,9 @@ static int ks_wlan_set_genie(struct net_device *dev, /* Wireless handler : set authentication mode params */ static int ks_wlan_set_auth_mode(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq = &_vwrq->param; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int index = (vwrq->flags & IW_AUTH_INDEX); @@ -1827,8 +1860,9 @@ static int ks_wlan_set_auth_mode(struct net_device *dev, /* Wireless handler : get authentication mode params */ static int ks_wlan_get_auth_mode(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) + union iwreq_data *_vwrq, char *extra) { + struct iw_param *vwrq = &_vwrq->param; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int index = (vwrq->flags & IW_AUTH_INDEX); @@ -1873,8 +1907,9 @@ static int ks_wlan_get_auth_mode(struct net_device *dev, /* Wireless Handler : set encoding token & mode (WPA)*/ static int ks_wlan_set_encode_ext(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->encoding; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); struct iw_encode_ext *enc; @@ -1981,8 +2016,9 @@ static int ks_wlan_set_encode_ext(struct net_device *dev, /* Wireless Handler : get encoding token & mode (WPA)*/ static int ks_wlan_get_encode_ext(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->encoding; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2004,8 +2040,9 @@ static int ks_wlan_get_encode_ext(struct net_device *dev, /* Wireless Handler : PMKSA cache operation (WPA2) */ static int ks_wlan_set_pmksa(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union 
iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); struct iw_pmksa *pmksa; @@ -2163,9 +2200,10 @@ static int ks_wlan_set_stop_request(struct net_device *dev, /* Wireless Handler : set MLME */ #include static int ks_wlan_set_mlme(struct net_device *dev, - struct iw_request_info *info, struct iw_point *dwrq, + struct iw_request_info *info, union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *)extra; @@ -2194,8 +2232,9 @@ static int ks_wlan_set_mlme(struct net_device *dev, /* Private handler : get firemware version */ static int ks_wlan_get_firmware_version(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->data; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); strcpy(extra, &(priv->firmware_version[0])); @@ -2265,9 +2304,10 @@ static int ks_wlan_get_connect(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set preamble */ static int ks_wlan_set_preamble(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2290,9 +2330,10 @@ static int ks_wlan_set_preamble(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get preamble */ static int ks_wlan_get_preamble(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2307,9 +2348,10 @@ static int ks_wlan_get_preamble(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set power save mode */ static int ks_wlan_set_powermgt(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2340,9 +2382,10 @@ static int ks_wlan_set_powermgt(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get power save made */ static int ks_wlan_get_powermgt(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2357,9 +2400,10 @@ static int ks_wlan_get_powermgt(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set scan type */ static int ks_wlan_set_scan_type(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2380,9 +2424,10 @@ static int ks_wlan_set_scan_type(struct net_device *dev, 
/*------------------------------------------------------------------*/ /* Private handler : get scan type */ static int ks_wlan_get_scan_type(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2531,9 +2576,10 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set beacon lost count */ static int ks_wlan_set_beacon_lost(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2556,9 +2602,10 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get beacon lost count */ static int ks_wlan_get_beacon_lost(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2573,9 +2620,10 @@ static int ks_wlan_get_beacon_lost(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set phy type */ static int ks_wlan_set_phy_type(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2599,9 +2647,10 @@ static int ks_wlan_set_phy_type(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get phy type */ static int ks_wlan_get_phy_type(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2616,9 +2665,10 @@ static int ks_wlan_get_phy_type(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set cts mode */ static int ks_wlan_set_cts_mode(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2644,9 +2694,10 @@ static int ks_wlan_set_cts_mode(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get cts mode */ static int ks_wlan_get_cts_mode(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2662,8 +2713,9 @@ static int ks_wlan_get_cts_mode(struct net_device *dev, /* Private handler : set sleep mode */ static int ks_wlan_set_sleep_mode(struct net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = 
(struct ks_wlan_private *)netdev_priv(dev); @@ -2692,8 +2744,9 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev, /* Private handler : get sleep mode */ static int ks_wlan_get_sleep_mode(struct net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2747,9 +2800,10 @@ static int ks_wlan_get_phy_information_timer(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set WPS enable */ static int ks_wlan_set_wps_enable(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); DPRINTK(2, "\n"); @@ -2771,9 +2825,10 @@ static int ks_wlan_set_wps_enable(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get WPS enable */ static int ks_wlan_get_wps_enable(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); DPRINTK(2, "\n"); @@ -2792,8 +2847,9 @@ static int ks_wlan_get_wps_enable(struct net_device *dev, /* Private handler : set WPS probe req */ static int ks_wlan_set_wps_probe_req(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *_dwrq, char *extra) { + struct iw_point *dwrq = &_dwrq->data; uint8_t *p = extra; unsigned char len; struct ks_wlan_private *priv = @@ -2850,9 +2906,10 @@ static int ks_wlan_get_wps_probe_req(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set tx gain control value */ static int ks_wlan_set_tx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2877,9 +2934,10 @@ static int ks_wlan_set_tx_gain(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get tx gain control value */ static int ks_wlan_get_tx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2895,9 +2953,10 @@ static int ks_wlan_get_tx_gain(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set rx gain control value */ static int ks_wlan_set_rx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2922,9 +2981,10 @@ static int ks_wlan_set_rx_gain(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get rx gain control value */ static int ks_wlan_get_rx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 * 
uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2963,9 +3023,10 @@ static int ks_wlan_set_region(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get eeprom checksum result */ static int ks_wlan_get_eeprom_cksum(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -3090,8 +3151,9 @@ static void print_hif_event(struct net_device *dev, int event) /*------------------------------------------------------------------*/ /* Private handler : get host command history */ static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + union iwreq_data *_uwrq, char *extra) { + __u32 *uwrq = &_uwrq->mode; int i, event; struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -3162,119 +3224,119 @@ static const struct iw_priv_args ks_wlan_private_args[] = { }; static const iw_handler ks_wlan_handler[] = { - (iw_handler) ks_wlan_config_commit, /* SIOCSIWCOMMIT */ - (iw_handler) ks_wlan_get_name, /* SIOCGIWNAME */ - (iw_handler) NULL, /* SIOCSIWNWID */ - (iw_handler) NULL, /* SIOCGIWNWID */ - (iw_handler) ks_wlan_set_freq, /* SIOCSIWFREQ */ - (iw_handler) ks_wlan_get_freq, /* SIOCGIWFREQ */ - (iw_handler) ks_wlan_set_mode, /* SIOCSIWMODE */ - (iw_handler) ks_wlan_get_mode, /* SIOCGIWMODE */ + ks_wlan_config_commit, /* SIOCSIWCOMMIT */ + ks_wlan_get_name, /* SIOCGIWNAME */ + NULL, /* SIOCSIWNWID */ + NULL, /* SIOCGIWNWID */ + ks_wlan_set_freq, /* SIOCSIWFREQ */ + ks_wlan_get_freq, /* SIOCGIWFREQ */ + ks_wlan_set_mode, /* SIOCSIWMODE */ + ks_wlan_get_mode, /* SIOCGIWMODE */ #ifndef KSC_OPNOTSUPP - (iw_handler) ks_wlan_set_sens, /* SIOCSIWSENS */ - (iw_handler) ks_wlan_get_sens, /* SIOCGIWSENS */ + ks_wlan_set_sens, /* SIOCSIWSENS */ + ks_wlan_get_sens, /* SIOCGIWSENS */ #else /* KSC_OPNOTSUPP */ - (iw_handler) NULL, /* SIOCSIWSENS */ - (iw_handler) NULL, /* SIOCGIWSENS */ + NULL, /* SIOCSIWSENS */ + NULL, /* SIOCGIWSENS */ #endif /* KSC_OPNOTSUPP */ - (iw_handler) NULL, /* SIOCSIWRANGE */ - (iw_handler) ks_wlan_get_range, /* SIOCGIWRANGE */ - (iw_handler) NULL, /* SIOCSIWPRIV */ - (iw_handler) NULL, /* SIOCGIWPRIV */ - (iw_handler) NULL, /* SIOCSIWSTATS */ - (iw_handler) ks_wlan_get_iwstats, /* SIOCGIWSTATS */ - (iw_handler) NULL, /* SIOCSIWSPY */ - (iw_handler) NULL, /* SIOCGIWSPY */ - (iw_handler) NULL, /* SIOCSIWTHRSPY */ - (iw_handler) NULL, /* SIOCGIWTHRSPY */ - (iw_handler) ks_wlan_set_wap, /* SIOCSIWAP */ - (iw_handler) ks_wlan_get_wap, /* SIOCGIWAP */ -// (iw_handler) NULL, /* SIOCSIWMLME */ - (iw_handler) ks_wlan_set_mlme, /* SIOCSIWMLME */ - (iw_handler) ks_wlan_get_aplist, /* SIOCGIWAPLIST */ - (iw_handler) ks_wlan_set_scan, /* SIOCSIWSCAN */ - (iw_handler) ks_wlan_get_scan, /* SIOCGIWSCAN */ - (iw_handler) ks_wlan_set_essid, /* SIOCSIWESSID */ - (iw_handler) ks_wlan_get_essid, /* SIOCGIWESSID */ - (iw_handler) ks_wlan_set_nick, /* SIOCSIWNICKN */ - (iw_handler) ks_wlan_get_nick, /* SIOCGIWNICKN */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) ks_wlan_set_rate, /* SIOCSIWRATE */ - (iw_handler) ks_wlan_get_rate, /* SIOCGIWRATE */ - (iw_handler) ks_wlan_set_rts, /* SIOCSIWRTS */ - 
(iw_handler) ks_wlan_get_rts, /* SIOCGIWRTS */ - (iw_handler) ks_wlan_set_frag, /* SIOCSIWFRAG */ - (iw_handler) ks_wlan_get_frag, /* SIOCGIWFRAG */ + NULL, /* SIOCSIWRANGE */ + ks_wlan_get_range, /* SIOCGIWRANGE */ + NULL, /* SIOCSIWPRIV */ + NULL, /* SIOCGIWPRIV */ + NULL, /* SIOCSIWSTATS */ + ks_wlan_get_iwstats, /* SIOCGIWSTATS */ + NULL, /* SIOCSIWSPY */ + NULL, /* SIOCGIWSPY */ + NULL, /* SIOCSIWTHRSPY */ + NULL, /* SIOCGIWTHRSPY */ + ks_wlan_set_wap, /* SIOCSIWAP */ + ks_wlan_get_wap, /* SIOCGIWAP */ +// NULL, /* SIOCSIWMLME */ + ks_wlan_set_mlme, /* SIOCSIWMLME */ + ks_wlan_get_aplist, /* SIOCGIWAPLIST */ + ks_wlan_set_scan, /* SIOCSIWSCAN */ + ks_wlan_get_scan, /* SIOCGIWSCAN */ + ks_wlan_set_essid, /* SIOCSIWESSID */ + ks_wlan_get_essid, /* SIOCGIWESSID */ + ks_wlan_set_nick, /* SIOCSIWNICKN */ + ks_wlan_get_nick, /* SIOCGIWNICKN */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + ks_wlan_set_rate, /* SIOCSIWRATE */ + ks_wlan_get_rate, /* SIOCGIWRATE */ + ks_wlan_set_rts, /* SIOCSIWRTS */ + ks_wlan_get_rts, /* SIOCGIWRTS */ + ks_wlan_set_frag, /* SIOCSIWFRAG */ + ks_wlan_get_frag, /* SIOCGIWFRAG */ #ifndef KSC_OPNOTSUPP - (iw_handler) ks_wlan_set_txpow, /* SIOCSIWTXPOW */ - (iw_handler) ks_wlan_get_txpow, /* SIOCGIWTXPOW */ - (iw_handler) ks_wlan_set_retry, /* SIOCSIWRETRY */ - (iw_handler) ks_wlan_get_retry, /* SIOCGIWRETRY */ + ks_wlan_set_txpow, /* SIOCSIWTXPOW */ + ks_wlan_get_txpow, /* SIOCGIWTXPOW */ + ks_wlan_set_retry, /* SIOCSIWRETRY */ + ks_wlan_get_retry, /* SIOCGIWRETRY */ #else /* KSC_OPNOTSUPP */ - (iw_handler) NULL, /* SIOCSIWTXPOW */ - (iw_handler) NULL, /* SIOCGIWTXPOW */ - (iw_handler) NULL, /* SIOCSIWRETRY */ - (iw_handler) NULL, /* SIOCGIWRETRY */ + NULL, /* SIOCSIWTXPOW */ + NULL, /* SIOCGIWTXPOW */ + NULL, /* SIOCSIWRETRY */ + NULL, /* SIOCGIWRETRY */ #endif /* KSC_OPNOTSUPP */ - (iw_handler) ks_wlan_set_encode, /* SIOCSIWENCODE */ - (iw_handler) ks_wlan_get_encode, /* SIOCGIWENCODE */ - (iw_handler) ks_wlan_set_power, /* SIOCSIWPOWER */ - (iw_handler) ks_wlan_get_power, /* SIOCGIWPOWER */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ -// (iw_handler) NULL, /* SIOCSIWGENIE */ - (iw_handler) ks_wlan_set_genie, /* SIOCSIWGENIE */ - (iw_handler) NULL, /* SIOCGIWGENIE */ - (iw_handler) ks_wlan_set_auth_mode, /* SIOCSIWAUTH */ - (iw_handler) ks_wlan_get_auth_mode, /* SIOCGIWAUTH */ - (iw_handler) ks_wlan_set_encode_ext, /* SIOCSIWENCODEEXT */ - (iw_handler) ks_wlan_get_encode_ext, /* SIOCGIWENCODEEXT */ - (iw_handler) ks_wlan_set_pmksa, /* SIOCSIWPMKSA */ - (iw_handler) NULL, /* -- hole -- */ + ks_wlan_set_encode, /* SIOCSIWENCODE */ + ks_wlan_get_encode, /* SIOCGIWENCODE */ + ks_wlan_set_power, /* SIOCSIWPOWER */ + ks_wlan_get_power, /* SIOCGIWPOWER */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ +// NULL, /* SIOCSIWGENIE */ + ks_wlan_set_genie, /* SIOCSIWGENIE */ + NULL, /* SIOCGIWGENIE */ + ks_wlan_set_auth_mode, /* SIOCSIWAUTH */ + ks_wlan_get_auth_mode, /* SIOCGIWAUTH */ + ks_wlan_set_encode_ext, /* SIOCSIWENCODEEXT */ + ks_wlan_get_encode_ext, /* SIOCGIWENCODEEXT */ + ks_wlan_set_pmksa, /* SIOCSIWPMKSA */ + NULL, /* -- hole -- */ }; /* private_handler */ static const iw_handler ks_wlan_private_handler[] = { - (iw_handler) NULL, /* 0 */ - (iw_handler) NULL, /* 1, used to be: KS_WLAN_GET_DRIVER_VERSION */ - (iw_handler) NULL, /* 2 */ - (iw_handler) ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */ + NULL, /* 0 */ + NULL, /* 1, used to be: KS_WLAN_GET_DRIVER_VERSION */ + NULL, /* 2 */ + 
ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */ #ifdef WPS - (iw_handler) ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */ - (iw_handler) ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */ - (iw_handler) ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */ + ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */ + ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */ + ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */ #else - (iw_handler) NULL, /* 4 */ - (iw_handler) NULL, /* 5 */ - (iw_handler) NULL, /* 6 */ + NULL, /* 4 */ + NULL, /* 5 */ + NULL, /* 6 */ #endif /* WPS */ - (iw_handler) ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */ - (iw_handler) ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */ - (iw_handler) ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */ - (iw_handler) ks_wlan_set_powermgt, /* 10 KS_WLAN_SET_POWER_SAVE */ - (iw_handler) ks_wlan_get_powermgt, /* 11 KS_WLAN_GET_POWER_SAVE */ - (iw_handler) ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */ - (iw_handler) ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */ - (iw_handler) ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */ - (iw_handler) ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */ - (iw_handler) ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */ - (iw_handler) NULL, /* 17 */ - (iw_handler) ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BECAN_LOST */ - (iw_handler) ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BECAN_LOST */ - (iw_handler) ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */ - (iw_handler) ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */ - (iw_handler) ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */ - (iw_handler) ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */ - (iw_handler) ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */ - (iw_handler) ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */ - (iw_handler) NULL, /* 26 */ - (iw_handler) NULL, /* 27 */ - (iw_handler) ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */ - (iw_handler) ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */ - (iw_handler) NULL, /* 30 */ - (iw_handler) NULL, /* 31 */ + ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */ + ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */ + ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */ + ks_wlan_set_powermgt, /* 10 KS_WLAN_SET_POWER_SAVE */ + ks_wlan_get_powermgt, /* 11 KS_WLAN_GET_POWER_SAVE */ + ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */ + ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */ + ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */ + ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */ + ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */ + NULL, /* 17 */ + ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BECAN_LOST */ + ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BECAN_LOST */ + ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */ + ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */ + ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */ + ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */ + ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */ + ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */ + NULL, /* 26 */ + NULL, /* 27 */ + ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */ + ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */ + NULL, /* 30 */ + NULL, /* 31 */ }; static const struct iw_handler_def ks_wlan_handler_def = { @@ -3282,8 +3344,8 @@ static const struct iw_handler_def ks_wlan_handler_def = { .num_private = sizeof(ks_wlan_private_handler) / sizeof(iw_handler), .num_private_args = sizeof(ks_wlan_private_args) / 
sizeof(struct iw_priv_args), - .standard = (iw_handler *) ks_wlan_handler, - .private = (iw_handler *) ks_wlan_private_handler, + .standard = ks_wlan_handler, + .private = ks_wlan_private_handler, .private_args = (struct iw_priv_args *)ks_wlan_private_args, .get_wireless_stats = ks_get_wireless_stats, }; @@ -3352,7 +3414,7 @@ void ks_wlan_tx_timeout(struct net_device *dev) } static -int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ks_wlan_private *priv = netdev_priv(dev); int rc = 0; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index e6ca0cf52..93cadeb95 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -303,10 +303,8 @@ struct ksock_conn { struct ksock_route *ksnc_route; /* owning route */ struct list_head ksnc_list; /* stash on peer's conn list */ struct socket *ksnc_sock; /* actual socket */ - void *ksnc_saved_data_ready; /* socket's original - * data_ready() callback */ - void *ksnc_saved_write_space; /* socket's original - * write_space() callback */ + void (*ksnc_saved_data_ready)(struct sock *sk); /* socket's original data_ready() callback */ + void (*ksnc_saved_write_space)(struct sock *sk); /* socket's original write_space() callback */ atomic_t ksnc_conn_refcount;/* conn refcount */ atomic_t ksnc_sock_refcount;/* sock refcount */ struct ksock_sched *ksnc_scheduler; /* who schedules this connection diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c index b20c5d394..a22fa96b9 100644 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c @@ -324,7 +324,7 @@ brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) CERROR("BRW RPC to %s failed with %d\n", libcfs_id2str(rpc->crpc_dest), rpc->crpc_status); if (!tsi->tsi_stopping) /* rpc could have been aborted */ - atomic_inc(&sn->sn_brw_errors); + atomic_inc_unchecked(&sn->sn_brw_errors); return; } @@ -338,7 +338,7 @@ brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) libcfs_id2str(rpc->crpc_dest), reply->brw_status); if (reply->brw_status) { - atomic_inc(&sn->sn_brw_errors); + atomic_inc_unchecked(&sn->sn_brw_errors); rpc->crpc_status = -(int)reply->brw_status; return; } @@ -349,7 +349,7 @@ brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) { CERROR("Bulk data from %s is corrupted!\n", libcfs_id2str(rpc->crpc_dest)); - atomic_inc(&sn->sn_brw_errors); + atomic_inc_unchecked(&sn->sn_brw_errors); rpc->crpc_status = -EBADMSG; } } @@ -484,14 +484,11 @@ brw_server_handle(struct srpc_server_rpc *rpc) return 0; } -struct sfw_test_client_ops brw_test_client; - -void brw_init_test_client(void) -{ - brw_test_client.tso_init = brw_client_init; - brw_test_client.tso_fini = brw_client_fini; - brw_test_client.tso_prep_rpc = brw_client_prep_rpc; - brw_test_client.tso_done_rpc = brw_client_done_rpc; +struct sfw_test_client_ops brw_test_client = { + .tso_init = brw_client_init, + .tso_fini = brw_client_fini, + .tso_prep_rpc = brw_client_prep_rpc, + .tso_done_rpc = brw_client_done_rpc, }; struct srpc_service brw_test_service; diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c index abbd6287b..cfdf11238 
100644 --- a/drivers/staging/lustre/lnet/selftest/framework.c +++ b/drivers/staging/lustre/lnet/selftest/framework.c @@ -262,8 +262,8 @@ sfw_init_session(struct sfw_session *sn, lst_sid_t sid, INIT_LIST_HEAD(&sn->sn_list); INIT_LIST_HEAD(&sn->sn_batches); atomic_set(&sn->sn_refcount, 1); /* +1 for caller */ - atomic_set(&sn->sn_brw_errors, 0); - atomic_set(&sn->sn_ping_errors, 0); + atomic_set_unchecked(&sn->sn_brw_errors, 0); + atomic_set_unchecked(&sn->sn_ping_errors, 0); strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name)); sn->sn_timer_active = 0; @@ -383,8 +383,8 @@ sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply) * with 32 bits to send, this is ~49 days */ cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started); - cnt->brw_errors = atomic_read(&sn->sn_brw_errors); - cnt->ping_errors = atomic_read(&sn->sn_ping_errors); + cnt->brw_errors = atomic_read_unchecked(&sn->sn_brw_errors); + cnt->ping_errors = atomic_read_unchecked(&sn->sn_ping_errors); cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies); cnt->active_batches = 0; @@ -1655,12 +1655,10 @@ sfw_startup(void) INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs); INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions); - brw_init_test_client(); brw_init_test_service(); rc = sfw_register_test(&brw_test_service, &brw_test_client); LASSERT(!rc); - ping_init_test_client(); ping_init_test_service(); rc = sfw_register_test(&ping_test_service, &ping_test_client); LASSERT(!rc); diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c index 9331ca4e3..23511dbe4 100644 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c @@ -74,7 +74,7 @@ ping_client_fini(struct sfw_test_instance *tsi) LASSERT(sn); LASSERT(tsi->tsi_is_client); - errors = atomic_read(&sn->sn_ping_errors); + errors = atomic_read_unchecked(&sn->sn_ping_errors); if (errors) CWARN("%d pings have failed.\n", errors); else @@ -126,7 +126,7 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) if (rpc->crpc_status) { if (!tsi->tsi_stopping) /* rpc could have been aborted */ - atomic_inc(&sn->sn_ping_errors); + atomic_inc_unchecked(&sn->sn_ping_errors); CERROR("Unable to ping %s (%d): %d\n", libcfs_id2str(rpc->crpc_dest), reqst->pnr_seq, rpc->crpc_status); @@ -141,7 +141,7 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) if (reply->pnr_magic != LST_PING_TEST_MAGIC) { rpc->crpc_status = -EBADMSG; - atomic_inc(&sn->sn_ping_errors); + atomic_inc_unchecked(&sn->sn_ping_errors); CERROR("Bad magic %u from %s, %u expected.\n", reply->pnr_magic, libcfs_id2str(rpc->crpc_dest), LST_PING_TEST_MAGIC); @@ -150,7 +150,7 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) if (reply->pnr_seq != reqst->pnr_seq) { rpc->crpc_status = -EBADMSG; - atomic_inc(&sn->sn_ping_errors); + atomic_inc_unchecked(&sn->sn_ping_errors); CERROR("Bad seq %u from %s, %u expected.\n", reply->pnr_seq, libcfs_id2str(rpc->crpc_dest), reqst->pnr_seq); @@ -206,15 +206,12 @@ ping_server_handle(struct srpc_server_rpc *rpc) return 0; } -struct sfw_test_client_ops ping_test_client; - -void ping_init_test_client(void) -{ - ping_test_client.tso_init = ping_client_init; - ping_test_client.tso_fini = ping_client_fini; - ping_test_client.tso_prep_rpc = ping_client_prep_rpc; - ping_test_client.tso_done_rpc = ping_client_done_rpc; -} +struct sfw_test_client_ops ping_test_client = { + .tso_init = ping_client_init, + .tso_fini = 
ping_client_fini, + .tso_prep_rpc = ping_client_prep_rpc, + .tso_done_rpc = ping_client_done_rpc, +}; struct srpc_service ping_test_service; diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h index d033ac03d..528a10289 100644 --- a/drivers/staging/lustre/lnet/selftest/selftest.h +++ b/drivers/staging/lustre/lnet/selftest/selftest.h @@ -328,8 +328,8 @@ struct sfw_session { struct list_head sn_batches; /* list of batches */ char sn_name[LST_NAME_SIZE]; atomic_t sn_refcount; - atomic_t sn_brw_errors; - atomic_t sn_ping_errors; + atomic_unchecked_t sn_brw_errors; + atomic_unchecked_t sn_ping_errors; unsigned long sn_started; }; @@ -607,13 +607,11 @@ srpc_wait_service_shutdown(struct srpc_service *sv) } extern struct sfw_test_client_ops brw_test_client; -void brw_init_test_client(void); extern struct srpc_service brw_test_service; void brw_init_test_service(void); extern struct sfw_test_client_ops ping_test_client; -void ping_init_test_client(void); extern struct srpc_service ping_test_service; void ping_init_test_service(void); diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index 72eaee95c..914396cd7 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -785,7 +785,7 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid) return fid_seq(fid); } -void lustre_swab_ost_id(struct ost_id *oid); +void lustre_swab_ost_id(void *oid); /** * Get inode generation from a igif. @@ -847,8 +847,8 @@ static inline bool fid_is_sane(const struct lu_fid *fid) fid_seq_is_rsvd(fid_seq(fid))); } -void lustre_swab_lu_fid(struct lu_fid *fid); -void lustre_swab_lu_seq_range(struct lu_seq_range *range); +void lustre_swab_lu_fid(void *fid); +void lustre_swab_lu_seq_range(void *range); static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) { @@ -1144,7 +1144,7 @@ struct ptlrpc_body_v2 { __u64 pb_padding[4]; }; -void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); +void lustre_swab_ptlrpc_body(void *pb); /* message body offset for lustre_msg_v2 */ /* ptlrpc body offset in all request/reply messages */ @@ -1380,7 +1380,7 @@ struct obd_connect_data { * reserve the flag for future use. */ -void lustre_swab_connect(struct obd_connect_data *ocd); +void lustre_swab_connect(void *ocd); /* * Supported checksum algorithms. Up to 32 checksum types are supported. 
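
The swab prototype changes in the hunks above all follow one pattern: a routine that is stored in a generic table of void (*)(void *) callbacks takes a void * argument directly and recovers the typed pointer with a single local assignment at the top of the function body. A minimal stand-alone sketch of that shape, using hypothetical names rather than the Lustre API, is:

    #include <stdint.h>

    /* Generic swabber signature used by the callback table. */
    typedef void (*swab_fn_t)(void *);

    struct wire_hdr {
        uint32_t magic;
        uint32_t version;
    };

    static uint32_t swab32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
               ((v << 8) & 0x00ff0000u) | (v << 24);
    }

    /* The typed pointer is recovered once, inside the function,
     * instead of casting the function pointer at every table entry. */
    static void swab_wire_hdr(void *_h)
    {
        struct wire_hdr *h = _h;

        h->magic = swab32(h->magic);
        h->version = swab32(h->version);
    }

    static const swab_fn_t swabbers[] = {
        swab_wire_hdr,  /* no (void (*)(void *)) cast needed */
    };

With that shape the swabber's address already has the table's type, which is what lets the layout.c hunk further down drop the (void (*)(void *))(swabber) and (void (*)(void *))(dumper) casts from the field initializers.
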
@@ -1742,10 +1742,10 @@ struct hsm_state_set { __u64 hss_clearmask; }; -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_state_set(struct hsm_state_set *hss); +void lustre_swab_hsm_user_state(void *hus); +void lustre_swab_hsm_state_set(void *hss); -void lustre_swab_obd_statfs(struct obd_statfs *os); +void lustre_swab_obd_statfs(void *os); /* ost_body.data values for OST_BRW */ @@ -1792,7 +1792,7 @@ struct obd_ioobj { #define ioobj_max_brw_set(ioo, num) \ do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0) -void lustre_swab_obd_ioobj(struct obd_ioobj *ioo); +void lustre_swab_obd_ioobj(void *ioo); /* multiple of 8 bytes => can array */ struct niobuf_remote { @@ -1801,7 +1801,7 @@ struct niobuf_remote { __u32 rnb_flags; }; -void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); +void lustre_swab_niobuf_remote(void *nbr); /* lock value block communicated between the filter and llite */ @@ -1866,7 +1866,7 @@ struct obd_quotactl { struct obd_dqblk qc_dqblk; }; -void lustre_swab_obd_quotactl(struct obd_quotactl *q); +void lustre_swab_obd_quotactl(void *q); #define Q_COPY(out, in, member) (out)->member = (in)->member @@ -1972,7 +1972,7 @@ enum mdt_reint_cmd { REINT_MAX }; -void lustre_swab_generic_32s(__u32 *val); +void lustre_swab_generic_32s(void *val); /* the disposition of the intent outlines what was executed */ #define DISP_IT_EXECD 0x00000001 @@ -2132,7 +2132,7 @@ struct mdt_body { __u64 mbo_padding_10; }; /* 216 */ -void lustre_swab_mdt_body(struct mdt_body *b); +void lustre_swab_mdt_body(void *b); struct mdt_ioepoch { struct lustre_handle handle; @@ -2141,7 +2141,7 @@ struct mdt_ioepoch { __u32 padding; }; -void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b); +void lustre_swab_mdt_ioepoch(void *b); /* permissions for md_perm.mp_perm */ enum { @@ -2456,7 +2456,7 @@ struct mdt_rec_reint { __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */ }; -void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); +void lustre_swab_mdt_rec_reint(void *rr); /* lmv structures */ struct lmv_desc { @@ -2759,13 +2759,13 @@ union ldlm_gl_desc { struct ldlm_gl_lquota_desc lquota_desc; }; -void lustre_swab_gl_desc(union ldlm_gl_desc *); +void lustre_swab_gl_desc(void *); struct ldlm_intent { __u64 opc; }; -void lustre_swab_ldlm_intent(struct ldlm_intent *i); +void lustre_swab_ldlm_intent(void *i); struct ldlm_resource_desc { enum ldlm_type lr_type; @@ -2790,7 +2790,7 @@ struct ldlm_request { struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES]; }; -void lustre_swab_ldlm_request(struct ldlm_request *rq); +void lustre_swab_ldlm_request(void *rq); /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available. * Otherwise, 2 are available. 
@@ -2813,7 +2813,7 @@ struct ldlm_reply { __u64 lock_policy_res2; }; -void lustre_swab_ldlm_reply(struct ldlm_reply *r); +void lustre_swab_ldlm_reply(void *r); #define ldlm_flags_to_wire(flags) ((__u32)(flags)) #define ldlm_flags_from_wire(flags) ((__u64)(flags)) @@ -2858,7 +2858,7 @@ struct mgs_target_info { char mti_params[MTI_PARAM_MAXLEN]; }; -void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo); +void lustre_swab_mgs_target_info(void *oinfo); struct mgs_nidtbl_entry { __u64 mne_version; /* table version of this entry */ @@ -2885,14 +2885,14 @@ struct mgs_config_body { __u32 mcb_units; /* # of units for bulk transfer */ }; -void lustre_swab_mgs_config_body(struct mgs_config_body *body); +void lustre_swab_mgs_config_body(void *body); struct mgs_config_res { __u64 mcr_offset; /* index of last config log */ __u64 mcr_size; /* size of the log */ }; -void lustre_swab_mgs_config_res(struct mgs_config_res *body); +void lustre_swab_mgs_config_res(void *body); /* Config marker flags (in config log) */ #define CM_START 0x01 @@ -3338,9 +3338,9 @@ struct ll_fiemap_info_key { struct ll_user_fiemap fiemap; }; -void lustre_swab_ost_body(struct ost_body *b); -void lustre_swab_ost_last_id(__u64 *id); -void lustre_swab_fiemap(struct ll_user_fiemap *fiemap); +void lustre_swab_ost_body(void *b); +void lustre_swab_ost_last_id(void *id); +void lustre_swab_fiemap(void *fiemap); void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum); void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); @@ -3349,19 +3349,19 @@ void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, void lustre_swab_lov_mds_md(struct lov_mds_md *lmm); /* llog_swab.c */ -void lustre_swab_llogd_body(struct llogd_body *d); -void lustre_swab_llog_hdr(struct llog_log_hdr *h); -void lustre_swab_llogd_conn_body(struct llogd_conn_body *d); +void lustre_swab_llogd_body(void *d); +void lustre_swab_llog_hdr(void *h); +void lustre_swab_llogd_conn_body(void *d); void lustre_swab_llog_rec(struct llog_rec_hdr *rec); struct lustre_cfg; void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg); /* Functions for dumping PTLRPC fields */ -void dump_rniobuf(struct niobuf_remote *rnb); -void dump_ioo(struct obd_ioobj *nb); -void dump_ost_body(struct ost_body *ob); -void dump_rcs(__u32 *rc); +void dump_rniobuf(void *rnb); +void dump_ioo(void *nb); +void dump_ost_body(void *ob); +void dump_rcs(void *rc); /* security opcodes */ enum sec_cmd { @@ -3394,7 +3394,7 @@ struct lustre_capa { __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ } __packed; -void lustre_swab_lustre_capa(struct lustre_capa *c); +void lustre_swab_lustre_capa(void *c); /** lustre_capa::lc_opc */ enum { @@ -3486,7 +3486,7 @@ struct layout_intent { __u64 li_end; }; -void lustre_swab_layout_intent(struct layout_intent *li); +void lustre_swab_layout_intent(void *li); /** * On the wire version of hsm_progress structure. 
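
The complementary pattern appears where a callback is only ever stored and invoked with one concrete type: there the member itself becomes a properly typed function pointer, as in the ksnc_saved_data_ready/ksnc_saved_write_space fields earlier in this patch and the ei_cb_bl/ei_cb_cp/ei_cb_gl fields a little further down. A small sketch of the idea, again with hypothetical names, not the kernel API:

    struct sock;    /* opaque here; only pointers are passed around */

    /* Saved callbacks keep their real prototype instead of void *,
     * so saving and restoring them needs no casts and any mismatch
     * is caught at compile time. */
    struct saved_sock_callbacks {
        void (*saved_data_ready)(struct sock *sk);
        void (*saved_write_space)(struct sock *sk);
    };

    static void demo_data_ready(struct sock *sk)
    {
        (void)sk;   /* nothing to do in the sketch */
    }

    static void save_callbacks(struct saved_sock_callbacks *cb)
    {
        cb->saved_data_ready = demo_data_ready;     /* type-checked */
        cb->saved_write_space = demo_data_ready;
    }

Keeping the declared type on the member is what the ldlm_enqueue_info hunk below does for the ei_cb_* pointers, removing the void * storage and the casts it forced at assignment time.
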
@@ -3506,12 +3506,10 @@ struct hsm_progress_kernel { __u64 hpk_padding2; } __packed; -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_current_action(struct hsm_current_action *action); -void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk); -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_user_item(struct hsm_user_item *hui); -void lustre_swab_hsm_request(struct hsm_request *hr); +void lustre_swab_hsm_current_action(void *action); +void lustre_swab_hsm_progress_kernel(void *hpk); +void lustre_swab_hsm_user_item(void *hui); +void lustre_swab_hsm_request(void *hr); /** layout swap request structure * fid1 and fid2 are in mdt_body @@ -3520,7 +3518,7 @@ struct mdc_swap_layouts { __u64 msl_flags; } __packed; -void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl); +void lustre_swab_swap_layouts(void *msl); struct close_data { struct lustre_handle cd_handle; @@ -3529,7 +3527,7 @@ struct close_data { __u64 cd_reserved[8]; }; -void lustre_swab_close_data(struct close_data *data); +void lustre_swab_close_data(void *data); #endif /** @} lustreidl */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h index d03534432..b81bfb644 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h @@ -969,9 +969,9 @@ struct ldlm_ast_work { struct ldlm_enqueue_info { __u32 ei_type; /** Type of the lock being enqueued. */ __u32 ei_mode; /** Mode of the lock being enqueued. */ - void *ei_cb_bl; /** blocking lock callback */ - void *ei_cb_cp; /** lock completion callback */ - void *ei_cb_gl; /** lock glimpse callback */ + ldlm_blocking_callback ei_cb_bl; /** blocking lock callback */ + ldlm_completion_callback ei_cb_cp; /** lock completion callback */ + ldlm_glimpse_callback ei_cb_gl; /** lock glimpse callback */ void *ei_cbdata; /** Data to be passed into callbacks. */ unsigned int ei_enq_slave:1; /* whether enqueue slave stripes */ }; @@ -1066,7 +1066,7 @@ struct ldlm_callback_suite { ldlm_completion_callback lcs_completion; ldlm_blocking_callback lcs_blocking; ldlm_glimpse_callback lcs_glimpse; -}; +} __no_const; /* ldlm_lockd.c */ int ldlm_get_ref(void); diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index e9aba99ee..53cd7eff6 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -2639,7 +2639,7 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, u32 n, u32 min_size); void *lustre_msg_buf(struct lustre_msg *m, u32 n, u32 minlen); u32 lustre_msg_buflen(struct lustre_msg *m, u32 n); u32 lustre_msg_bufcount(struct lustre_msg *m); -char *lustre_msg_string(struct lustre_msg *m, u32 n, u32 max_len); +void *lustre_msg_string(struct lustre_msg *m, u32 n, u32 max_len); __u32 lustre_msghdr_get_flags(struct lustre_msg *msg); void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags); __u32 lustre_msg_get_flags(struct lustre_msg *msg); diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index f6fc4dd05..afdee7ed2 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -1076,7 +1076,7 @@ struct md_ops { * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a * wrapper function in include/linux/obd_class.h. 
*/ -}; +} __no_const; struct lsm_operations { void (*lsm_free)(struct lov_stripe_md *); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c index 861f36f03..69d22b9c6 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c @@ -143,7 +143,7 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int added = (mode == LCK_NL); int overlaps = 0; int splitted = 0; - const struct ldlm_callback_suite null_cbs = { NULL }; + const struct ldlm_callback_suite null_cbs = { }; CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n", diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c index 35ba6f14d..75c9cdd89 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c @@ -1850,8 +1850,9 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure) static int replay_lock_interpret(const struct lu_env *env, struct ptlrpc_request *req, - struct ldlm_async_args *aa, int rc) + void *_aa, int rc) { + struct ldlm_async_args *aa = _aa; struct ldlm_lock *lock; struct ldlm_reply *reply; struct obd_export *exp; @@ -1978,7 +1979,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); aa = ptlrpc_req_async_args(req); aa->lock_handle = body->lock_handle[0]; - req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret; + req->rq_interpret_reply = replay_lock_interpret; ptlrpcd_add_req(req); return 0; diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 7f32a539d..234abb748 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -136,11 +136,12 @@ struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data, __u64 offset) { - struct md_callback cb_op; + static struct md_callback cb_op = { + .md_blocking_ast = ll_md_blocking_ast, + }; struct page *page; int rc; - cb_op.md_blocking_ast = ll_md_blocking_ast; rc = md_read_page(ll_i2mdexp(dir), op_data, &cb_op, offset, &page); if (rc) return ERR_PTR(rc); diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 4bc551279..08ff257d4 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -504,16 +504,16 @@ struct ll_sb_info { /* metadata stat-ahead */ unsigned int ll_sa_max; /* max statahead RPCs */ - atomic_t ll_sa_total; /* statahead thread started + atomic_unchecked_t ll_sa_total; /* statahead thread started * count */ - atomic_t ll_sa_wrong; /* statahead thread stopped for + atomic_unchecked_t ll_sa_wrong; /* statahead thread stopped for * low hit ratio */ atomic_t ll_sa_running; /* running statahead thread * count */ - atomic_t ll_agl_total; /* AGL thread started count */ + atomic_unchecked_t ll_agl_total; /* AGL thread started count */ dev_t ll_sdev_orig; /* save s_dev before assign for * clustered nfs diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index e5c62f4ce..9f82038da 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -113,10 +113,10 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb) 
/* metadata statahead is enabled by default */ sbi->ll_sa_max = LL_SA_RPC_DEF; - atomic_set(&sbi->ll_sa_total, 0); - atomic_set(&sbi->ll_sa_wrong, 0); + atomic_set_unchecked(&sbi->ll_sa_total, 0); + atomic_set_unchecked(&sbi->ll_sa_wrong, 0); atomic_set(&sbi->ll_sa_running, 0); - atomic_set(&sbi->ll_agl_total, 0); + atomic_set_unchecked(&sbi->ll_agl_total, 0); sbi->ll_flags |= LL_SBI_AGL_ENABLED; /* root squash */ diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c index 23fda9d98..2e43092ad 100644 --- a/drivers/staging/lustre/lustre/llite/lproc_llite.c +++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c @@ -686,9 +686,9 @@ static int ll_statahead_stats_seq_show(struct seq_file *m, void *v) "statahead total: %u\n" "statahead wrong: %u\n" "agl total: %u\n", - atomic_read(&sbi->ll_sa_total), - atomic_read(&sbi->ll_sa_wrong), - atomic_read(&sbi->ll_agl_total)); + atomic_read_unchecked(&sbi->ll_sa_total), + atomic_read_unchecked(&sbi->ll_sa_wrong), + atomic_read_unchecked(&sbi->ll_agl_total)); return 0; } diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c index 067751347..4c7ea0346 100644 --- a/drivers/staging/lustre/lustre/llite/statahead.c +++ b/drivers/staging/lustre/lustre/llite/statahead.c @@ -893,7 +893,7 @@ static int ll_agl_thread(void *arg) CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n", sai, parent); - atomic_inc(&sbi->ll_agl_total); + atomic_inc_unchecked(&sbi->ll_agl_total); spin_lock(&plli->lli_agl_lock); sai->sai_agl_valid = 1; if (thread_is_init(thread)) @@ -1010,7 +1010,7 @@ static int ll_statahead_thread(void *arg) if (sbi->ll_flags & LL_SBI_AGL_ENABLED) ll_start_agl(parent, sai); - atomic_inc(&sbi->ll_sa_total); + atomic_inc_unchecked(&sbi->ll_sa_total); spin_lock(&lli->lli_sa_lock); if (thread_is_init(sa_thread)) /* If someone else has changed the thread state @@ -1123,7 +1123,7 @@ static int ll_statahead_thread(void *arg) if (sa_low_hit(sai)) { rc = -EFAULT; - atomic_inc(&sbi->ll_sa_wrong); + atomic_inc_unchecked(&sbi->ll_sa_wrong); CDEBUG(D_READA, "Statahead for dir "DFID" hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n", PFID(&lli->lli_fid), sai->sai_hit, sai->sai_miss, sai->sai_sent, diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h index 07e5ede3e..628274436 100644 --- a/drivers/staging/lustre/lustre/lov/lov_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_internal.h @@ -107,9 +107,9 @@ struct lov_request_set { */ struct obd_device *set_obd; int set_count; - atomic_t set_completes; - atomic_t set_success; - atomic_t set_finish_checked; + atomic_unchecked_t set_completes; + atomic_unchecked_t set_success; + atomic_unchecked_t set_finish_checked; struct llog_cookie *set_cookies; int set_cookie_sent; struct list_head set_list; diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c index d10157985..def85ec03 100644 --- a/drivers/staging/lustre/lustre/lov/lov_io.c +++ b/drivers/staging/lustre/lustre/lov/lov_io.c @@ -821,12 +821,32 @@ static void lov_empty_io_fini(const struct lu_env *env, } static void lov_empty_impossible(const struct lu_env *env, - struct cl_io_slice *ios) + const struct cl_io_slice *ios) { LBUG(); } -#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible) +static int lov_empty_impossible2(const struct lu_env *env, + const struct cl_io_slice *ios) +{ + 
LBUG(); +} + +static int lov_empty_impossible3(const struct lu_env *env, + const struct cl_io_slice *slice, + enum cl_req_type crt, + struct cl_2queue *queue) +{ + LBUG(); +} + +static int lov_empty_impossible4(const struct lu_env *env, + const struct cl_io_slice *slice, + struct cl_page_list *queue, int from, int to, + cl_commit_cbt cb) +{ + LBUG(); +} /** * An io operation vector for files without stripes. @@ -836,32 +856,32 @@ static const struct cl_io_operations lov_empty_io_ops = { [CIT_READ] = { .cio_fini = lov_empty_io_fini, #if 0 - .cio_iter_init = LOV_EMPTY_IMPOSSIBLE, - .cio_lock = LOV_EMPTY_IMPOSSIBLE, - .cio_start = LOV_EMPTY_IMPOSSIBLE, - .cio_end = LOV_EMPTY_IMPOSSIBLE + .cio_iter_init = lov_empty_impossible2, + .cio_lock = lov_empty_impossible2, + .cio_start = lov_empty_impossible2, + .cio_end = lov_empty_impossible #endif }, [CIT_WRITE] = { .cio_fini = lov_empty_io_fini, - .cio_iter_init = LOV_EMPTY_IMPOSSIBLE, - .cio_lock = LOV_EMPTY_IMPOSSIBLE, - .cio_start = LOV_EMPTY_IMPOSSIBLE, - .cio_end = LOV_EMPTY_IMPOSSIBLE + .cio_iter_init = lov_empty_impossible2, + .cio_lock = lov_empty_impossible2, + .cio_start = lov_empty_impossible2, + .cio_end = lov_empty_impossible }, [CIT_SETATTR] = { .cio_fini = lov_empty_io_fini, - .cio_iter_init = LOV_EMPTY_IMPOSSIBLE, - .cio_lock = LOV_EMPTY_IMPOSSIBLE, - .cio_start = LOV_EMPTY_IMPOSSIBLE, - .cio_end = LOV_EMPTY_IMPOSSIBLE + .cio_iter_init = lov_empty_impossible2, + .cio_lock = lov_empty_impossible2, + .cio_start = lov_empty_impossible2, + .cio_end = lov_empty_impossible }, [CIT_FAULT] = { .cio_fini = lov_empty_io_fini, - .cio_iter_init = LOV_EMPTY_IMPOSSIBLE, - .cio_lock = LOV_EMPTY_IMPOSSIBLE, - .cio_start = LOV_EMPTY_IMPOSSIBLE, - .cio_end = LOV_EMPTY_IMPOSSIBLE + .cio_iter_init = lov_empty_impossible2, + .cio_lock = lov_empty_impossible2, + .cio_start = lov_empty_impossible2, + .cio_end = lov_empty_impossible }, [CIT_FSYNC] = { .cio_fini = lov_empty_io_fini @@ -870,8 +890,8 @@ static const struct cl_io_operations lov_empty_io_ops = { .cio_fini = lov_empty_io_fini } }, - .cio_submit = LOV_EMPTY_IMPOSSIBLE, - .cio_commit_async = LOV_EMPTY_IMPOSSIBLE + .cio_submit = lov_empty_impossible3, + .cio_commit_async = lov_empty_impossible4 }; int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c index b23016f7e..e808e5216 100644 --- a/drivers/staging/lustre/lustre/lov/lov_obd.c +++ b/drivers/staging/lustre/lustre/lov/lov_obd.c @@ -988,7 +988,7 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset, /* don't do attribute merge if this async op failed */ if (rc) - atomic_set(&lovset->set_completes, 0); + atomic_set_unchecked(&lovset->set_completes, 0); err = lov_fini_getattr_set(lovset); return rc ? rc : err; } @@ -1043,7 +1043,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, } out: if (rc) - atomic_set(&lovset->set_completes, 0); + atomic_set_unchecked(&lovset->set_completes, 0); err = lov_fini_getattr_set(lovset); return rc ? rc : err; } @@ -1055,7 +1055,7 @@ static int lov_setattr_interpret(struct ptlrpc_request_set *rqset, int err; if (rc) - atomic_set(&lovset->set_completes, 0); + atomic_set_unchecked(&lovset->set_completes, 0); err = lov_fini_setattr_set(lovset); return rc ? 
rc : err; } @@ -1117,7 +1117,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, int err; if (rc) - atomic_set(&set->set_completes, 0); + atomic_set_unchecked(&set->set_completes, 0); err = lov_fini_setattr_set(set); return rc ? rc : err; } @@ -1135,7 +1135,7 @@ int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc) int err; if (rc) - atomic_set(&lovset->set_completes, 0); + atomic_set_unchecked(&lovset->set_completes, 0); err = lov_fini_statfs_set(lovset); return rc ? rc : err; @@ -1168,7 +1168,7 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo, int err; if (rc) - atomic_set(&set->set_completes, 0); + atomic_set_unchecked(&set->set_completes, 0); err = lov_fini_statfs_set(set); return rc ? rc : err; } diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c index 09dcaf484..8d02a468b 100644 --- a/drivers/staging/lustre/lustre/lov/lov_request.c +++ b/drivers/staging/lustre/lustre/lov/lov_request.c @@ -41,9 +41,9 @@ static void lov_init_set(struct lov_request_set *set) { set->set_count = 0; - atomic_set(&set->set_completes, 0); - atomic_set(&set->set_success, 0); - atomic_set(&set->set_finish_checked, 0); + atomic_set_unchecked(&set->set_completes, 0); + atomic_set_unchecked(&set->set_success, 0); + atomic_set_unchecked(&set->set_finish_checked, 0); set->set_cookies = NULL; INIT_LIST_HEAD(&set->set_list); atomic_set(&set->set_refcount, 1); @@ -71,14 +71,14 @@ void lov_finish_set(struct lov_request_set *set) static int lov_set_finished(struct lov_request_set *set, int idempotent) { - int completes = atomic_read(&set->set_completes); + int completes = atomic_read_unchecked(&set->set_completes); CDEBUG(D_INFO, "check set %d/%d\n", completes, set->set_count); if (completes == set->set_count) { if (idempotent) return 1; - if (atomic_inc_return(&set->set_finish_checked) == 1) + if (atomic_inc_return_unchecked(&set->set_finish_checked) == 1) return 1; } return 0; @@ -90,9 +90,9 @@ static void lov_update_set(struct lov_request_set *set, req->rq_complete = 1; req->rq_rc = rc; - atomic_inc(&set->set_completes); + atomic_inc_unchecked(&set->set_completes); if (rc == 0) - atomic_inc(&set->set_success); + atomic_inc_unchecked(&set->set_success); wake_up(&set->set_waitq); } @@ -192,7 +192,7 @@ static int common_attr_done(struct lov_request_set *set) if (!set->set_oi->oi_oa) return 0; - if (!atomic_read(&set->set_success)) + if (!atomic_read_unchecked(&set->set_success)) return -EIO; tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); @@ -239,7 +239,7 @@ int lov_fini_getattr_set(struct lov_request_set *set) if (!set) return 0; LASSERT(set->set_exp); - if (atomic_read(&set->set_completes)) + if (atomic_read_unchecked(&set->set_completes)) rc = common_attr_done(set); lov_put_reqset(set); @@ -332,7 +332,7 @@ int lov_fini_setattr_set(struct lov_request_set *set) if (!set) return 0; LASSERT(set->set_exp); - if (atomic_read(&set->set_completes)) { + if (atomic_read_unchecked(&set->set_completes)) { rc = common_attr_done(set); /* FIXME update qos data here */ } @@ -493,9 +493,9 @@ int lov_fini_statfs_set(struct lov_request_set *set) if (!set) return 0; - if (atomic_read(&set->set_completes)) { + if (atomic_read_unchecked(&set->set_completes)) { rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs, - atomic_read(&set->set_success)); + atomic_read_unchecked(&set->set_success)); } lov_put_reqset(set); return rc; @@ -576,7 +576,7 @@ static int cb_statfs_update(void *cookie, int 
rc) lov = &lovobd->u.lov; osfs = set->set_oi->oi_osfs; lov_sfs = oinfo->oi_osfs; - success = atomic_read(&set->set_success); + success = atomic_read_unchecked(&set->set_success); /* XXX: the same is done in lov_update_common_set, however * lovset->set_exp is not initialized. */ @@ -604,7 +604,7 @@ static int cb_statfs_update(void *cookie, int rc) if (set->set_oi->oi_flags & OBD_STATFS_PTLRPCD && lov_set_finished(set, 0)) { lov_statfs_interpret(NULL, set, set->set_count != - atomic_read(&set->set_success)); + atomic_read_unchecked(&set->set_success)); } return 0; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index f56ea643f..6a146f45b 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -1219,9 +1219,9 @@ struct readpage_param { * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the * lu_dirpage for this integrated page will be adjusted. **/ -static int mdc_read_page_remote(void *data, struct page *page0) +static int mdc_read_page_remote(struct file *data, struct page *page0) { - struct readpage_param *rp = data; + struct readpage_param *rp = (struct readpage_param *)data; struct page **page_pool; struct page *page; struct lu_dirpage *dp; diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c index 8c4c1b3f1..630be4679 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c @@ -54,16 +54,20 @@ static void print_llogd_body(struct llogd_body *d) CDEBUG(D_OTHER, "\tlgd_cur_offset: %#llx\n", d->lgd_cur_offset); } -void lustre_swab_lu_fid(struct lu_fid *fid) +void lustre_swab_lu_fid(void *_fid) { + struct lu_fid *fid = _fid; + __swab64s(&fid->f_seq); __swab32s(&fid->f_oid); __swab32s(&fid->f_ver); } EXPORT_SYMBOL(lustre_swab_lu_fid); -void lustre_swab_ost_id(struct ost_id *oid) +void lustre_swab_ost_id(void *_oid) { + struct ost_id *oid = _oid; + if (fid_seq_is_mdt0(oid->oi.oi_seq)) { __swab64s(&oid->oi.oi_id); __swab64s(&oid->oi.oi_seq); @@ -80,8 +84,10 @@ static void lustre_swab_llog_id(struct llog_logid *log_id) __swab32s(&log_id->lgl_ogen); } -void lustre_swab_llogd_body(struct llogd_body *d) +void lustre_swab_llogd_body(void *_d) { + struct llogd_body *d = _d; + print_llogd_body(d); lustre_swab_llog_id(&d->lgd_logid); __swab32s(&d->lgd_ctxt_idx); @@ -94,8 +100,10 @@ void lustre_swab_llogd_body(struct llogd_body *d) } EXPORT_SYMBOL(lustre_swab_llogd_body); -void lustre_swab_llogd_conn_body(struct llogd_conn_body *d) +void lustre_swab_llogd_conn_body(void *_d) { + struct llogd_conn_body *d = _d; + __swab64s(&d->lgdc_gen.mnt_cnt); __swab64s(&d->lgdc_gen.conn_cnt); lustre_swab_llog_id(&d->lgdc_logid); @@ -110,8 +118,10 @@ static void lustre_swab_ll_fid(struct ll_fid *fid) __swab32s(&fid->f_type); } -void lustre_swab_lu_seq_range(struct lu_seq_range *range) +void lustre_swab_lu_seq_range(void *_range) { + struct lu_seq_range *range = _range; + __swab64s(&range->lsr_start); __swab64s(&range->lsr_end); __swab32s(&range->lsr_index); @@ -294,8 +304,10 @@ static void print_llog_hdr(struct llog_log_hdr *h) CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len); } -void lustre_swab_llog_hdr(struct llog_log_hdr *h) +void lustre_swab_llog_hdr(void *_h) { + struct llog_log_hdr *h = _h; + print_llog_hdr(h); lustre_swab_llog_rec(&h->llh_hdr); diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c 
b/drivers/staging/lustre/lustre/osc/osc_request.c index 749781f02..29b770576 100644 --- a/drivers/staging/lustre/lustre/osc/osc_request.c +++ b/drivers/staging/lustre/lustre/osc/osc_request.c @@ -179,8 +179,9 @@ static inline void osc_pack_req_body(struct ptlrpc_request *req, static int osc_getattr_interpret(const struct lu_env *env, struct ptlrpc_request *req, - struct osc_async_args *aa, int rc) + void *_aa, int rc) { + struct osc_async_args *aa = _aa; struct ost_body *body; if (rc != 0) @@ -225,7 +226,7 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo, osc_pack_req_body(req, oinfo); ptlrpc_request_set_replen(req); - req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret; + req->rq_interpret_reply = osc_getattr_interpret; CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); aa = ptlrpc_req_async_args(req); @@ -321,8 +322,9 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, static int osc_setattr_interpret(const struct lu_env *env, struct ptlrpc_request *req, - struct osc_setattr_args *sa, int rc) + void *_sa, int rc) { + struct osc_setattr_args *sa = _sa; struct ost_body *body; if (rc != 0) @@ -372,8 +374,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, /* Do not wait for response. */ ptlrpcd_add_req(req); } else { - req->rq_interpret_reply = - (ptlrpc_interpterer_t)osc_setattr_interpret; + req->rq_interpret_reply = osc_setattr_interpret; CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args)); sa = ptlrpc_req_async_args(req); @@ -495,7 +496,7 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, ptlrpc_request_set_replen(req); - req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret; + req->rq_interpret_reply = osc_setattr_interpret; CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args)); sa = ptlrpc_req_async_args(req); sa->sa_oa = oinfo->oi_oa; @@ -2091,8 +2092,9 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, static int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req, - struct osc_enqueue_args *aa, int rc) + void *_aa, int rc) { + struct osc_enqueue_args *aa = _aa; struct ldlm_lock *lock; struct lustre_handle *lockh = &aa->oa_lockh; enum ldlm_mode mode = aa->oa_mode; @@ -2283,8 +2285,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, aa->oa_flags = NULL; } - req->rq_interpret_reply = - (ptlrpc_interpterer_t)osc_enqueue_interpret; + req->rq_interpret_reply = osc_enqueue_interpret; if (rqset == PTLRPCD_SET) ptlrpcd_add_req(req); else @@ -2360,8 +2361,9 @@ int osc_cancel_base(struct lustre_handle *lockh, __u32 mode) static int osc_statfs_interpret(const struct lu_env *env, struct ptlrpc_request *req, - struct osc_async_args *aa, int rc) + void *_aa, int rc) { + struct osc_async_args *aa = _aa; struct obd_statfs *msfs; if (rc == -EBADR) @@ -2429,7 +2431,7 @@ static int osc_statfs_async(struct obd_export *exp, req->rq_no_delay = 1; } - req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret; + req->rq_interpret_reply = osc_statfs_interpret; CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); aa = ptlrpc_req_async_args(req); aa->aa_oi = oinfo; diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c index 839ef3e80..8912e22f2 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/layout.c +++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c @@ -778,8 +778,8 @@ struct req_capsule; .rmf_name = (name), \ .rmf_flags = (flags), \ .rmf_size = (size), \ 
- .rmf_swabber = (void (*)(void *))(swabber), \ - .rmf_dumper = (void (*)(void *))(dumper) \ + .rmf_swabber = (swabber), \ + .rmf_dumper = (dumper) \ } struct req_msg_field RMF_GENERIC_DATA = @@ -1871,8 +1871,7 @@ static void *__req_capsule_get(struct req_capsule *pill, msg = __req_msg(pill, loc); LASSERT(msg); - getter = (field->rmf_flags & RMF_F_STRING) ? - (typeof(getter))lustre_msg_string : lustre_msg_buf; + getter = (field->rmf_flags & RMF_F_STRING) ? lustre_msg_string : lustre_msg_buf; if (field->rmf_flags & RMF_F_STRUCT_ARRAY) { /* diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c index 871768511..8f066edfb 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c @@ -676,7 +676,7 @@ u32 lustre_msg_bufcount(struct lustre_msg *m) } } -char *lustre_msg_string(struct lustre_msg *m, u32 index, u32 max_len) +void *lustre_msg_string(struct lustre_msg *m, u32 index, u32 max_len) { /* max_len == 0 means the string should fill the buffer */ char *str; @@ -1435,8 +1435,10 @@ EXPORT_SYMBOL(do_set_info_async); /* byte flipping routines for all wire types declared in * lustre_idl.h implemented here. */ -void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) +void lustre_swab_ptlrpc_body(void *_b) { + struct ptlrpc_body *b = _b; + __swab32s(&b->pb_type); __swab32s(&b->pb_version); __swab32s(&b->pb_opc); @@ -1466,8 +1468,10 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) CLASSERT(offsetof(typeof(*b), pb_jobid) != 0); } -void lustre_swab_connect(struct obd_connect_data *ocd) +void lustre_swab_connect(void *_ocd) { + struct obd_connect_data *ocd = _ocd; + __swab64s(&ocd->ocd_connect_flags); __swab32s(&ocd->ocd_version); __swab32s(&ocd->ocd_grant); @@ -1541,8 +1545,10 @@ static void lustre_swab_obdo(struct obdo *o) CLASSERT(offsetof(typeof(*o), o_padding_6) != 0); } -void lustre_swab_obd_statfs(struct obd_statfs *os) +void lustre_swab_obd_statfs(void *_os) { + struct obd_statfs *os = _os; + __swab64s(&os->os_type); __swab64s(&os->os_blocks); __swab64s(&os->os_bfree); @@ -1565,37 +1571,49 @@ void lustre_swab_obd_statfs(struct obd_statfs *os) CLASSERT(offsetof(typeof(*os), os_spare9) != 0); } -void lustre_swab_obd_ioobj(struct obd_ioobj *ioo) +void lustre_swab_obd_ioobj(void *_ioo) { + struct obd_ioobj *ioo = _ioo; + lustre_swab_ost_id(&ioo->ioo_oid); __swab32s(&ioo->ioo_max_brw); __swab32s(&ioo->ioo_bufcnt); } -void lustre_swab_niobuf_remote(struct niobuf_remote *nbr) +void lustre_swab_niobuf_remote(void *_nbr) { + struct niobuf_remote *nbr = _nbr; + __swab64s(&nbr->rnb_offset); __swab32s(&nbr->rnb_len); __swab32s(&nbr->rnb_flags); } -void lustre_swab_ost_body(struct ost_body *b) +void lustre_swab_ost_body(void *_b) { + struct ost_body *b = _b; + lustre_swab_obdo(&b->oa); } -void lustre_swab_ost_last_id(u64 *id) +void lustre_swab_ost_last_id(void *_id) { + u64 *id = _id; + __swab64s(id); } -void lustre_swab_generic_32s(__u32 *val) +void lustre_swab_generic_32s(void *_val) { + __u32 *val = _val; + __swab32s(val); } -void lustre_swab_gl_desc(union ldlm_gl_desc *desc) +void lustre_swab_gl_desc(void *_desc) { + union ldlm_gl_desc *desc = _desc; + lustre_swab_lu_fid(&desc->lquota_desc.gl_id.qid_fid); __swab64s(&desc->lquota_desc.gl_flags); __swab64s(&desc->lquota_desc.gl_ver); @@ -1639,8 +1657,10 @@ void lustre_swab_lquota_lvb(struct lquota_lvb *lvb) } EXPORT_SYMBOL(lustre_swab_lquota_lvb); -void lustre_swab_mdt_body(struct mdt_body *b) +void lustre_swab_mdt_body(void 
*_b) { + struct mdt_body *b = _b; + lustre_swab_lu_fid(&b->mbo_fid1); lustre_swab_lu_fid(&b->mbo_fid2); /* handle is opaque */ @@ -1672,16 +1692,19 @@ void lustre_swab_mdt_body(struct mdt_body *b) CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0); } -void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b) +void lustre_swab_mdt_ioepoch(void *_b) { + struct mdt_ioepoch *b = _b; + /* handle is opaque */ __swab64s(&b->ioepoch); __swab32s(&b->flags); CLASSERT(offsetof(typeof(*b), padding) != 0); } -void lustre_swab_mgs_target_info(struct mgs_target_info *mti) +void lustre_swab_mgs_target_info(void *_mti) { + struct mgs_target_info *mti = _mti; int i; __swab32s(&mti->mti_lustre_ver); @@ -1718,15 +1741,19 @@ void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry) } EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry); -void lustre_swab_mgs_config_body(struct mgs_config_body *body) +void lustre_swab_mgs_config_body(void *_body) { + struct mgs_config_body *body = _body; + __swab64s(&body->mcb_offset); __swab32s(&body->mcb_units); __swab16s(&body->mcb_type); } -void lustre_swab_mgs_config_res(struct mgs_config_res *body) +void lustre_swab_mgs_config_res(void *_body) { + struct mgs_config_res *body = _body; + __swab64s(&body->mcr_offset); __swab64s(&body->mcr_size); } @@ -1753,8 +1780,10 @@ static void lustre_swab_obd_dqblk(struct obd_dqblk *b) CLASSERT(offsetof(typeof(*b), dqb_padding) != 0); } -void lustre_swab_obd_quotactl(struct obd_quotactl *q) +void lustre_swab_obd_quotactl(void *_q) { + struct obd_quotactl *q = _q; + __swab32s(&q->qc_cmd); __swab32s(&q->qc_type); __swab32s(&q->qc_id); @@ -1781,8 +1810,9 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent) __swab32s(&fm_extent->fe_device); } -void lustre_swab_fiemap(struct ll_user_fiemap *fiemap) +void lustre_swab_fiemap(void *_fiemap) { + struct ll_user_fiemap *fiemap = _fiemap; __u32 i; __swab64s(&fiemap->fm_start); @@ -1796,8 +1826,10 @@ void lustre_swab_fiemap(struct ll_user_fiemap *fiemap) lustre_swab_fiemap_extent(&fiemap->fm_extents[i]); } -void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr) +void lustre_swab_mdt_rec_reint (void *_rr) { + struct mdt_rec_reint *rr = _rr; + __swab32s(&rr->rr_opcode); __swab32s(&rr->rr_cap); __swab32s(&rr->rr_fsuid); @@ -1951,8 +1983,10 @@ static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d) __swab32s(&d->l_flock.lfw_pid); } -void lustre_swab_ldlm_intent(struct ldlm_intent *i) +void lustre_swab_ldlm_intent(void *_i) { + struct ldlm_intent *i = _i; + __swab64s(&i->opc); } @@ -1971,16 +2005,20 @@ static void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l) lustre_swab_ldlm_policy_data(&l->l_policy_data); } -void lustre_swab_ldlm_request(struct ldlm_request *rq) +void lustre_swab_ldlm_request(void *_rq) { + struct ldlm_request *rq = _rq; + __swab32s(&rq->lock_flags); lustre_swab_ldlm_lock_desc(&rq->lock_desc); __swab32s(&rq->lock_count); /* lock_handle[] opaque */ } -void lustre_swab_ldlm_reply(struct ldlm_reply *r) +void lustre_swab_ldlm_reply(void *_r) { + struct ldlm_reply *r = _r; + __swab32s(&r->lock_flags); CLASSERT(offsetof(typeof(*r), lock_padding) != 0); lustre_swab_ldlm_lock_desc(&r->lock_desc); @@ -1990,16 +2028,20 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r) } /* Dump functions */ -void dump_ioo(struct obd_ioobj *ioo) +void dump_ioo(void *_ioo) { + struct obd_ioobj *ioo = _ioo; + CDEBUG(D_RPCTRACE, "obd_ioobj: ioo_oid=" DOSTID ", ioo_max_brw=%#x, ioo_bufct=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw, ioo->ioo_bufcnt); } -void 
dump_rniobuf(struct niobuf_remote *nb) +void dump_rniobuf(void *_nb) { + struct niobuf_remote *nb = _nb; + CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n", nb->rnb_offset, nb->rnb_len, nb->rnb_flags); } @@ -2066,13 +2108,17 @@ static void dump_obdo(struct obdo *oa) CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n"); } -void dump_ost_body(struct ost_body *ob) +void dump_ost_body(void *_ob) { + struct ost_body *ob = _ob; + dump_obdo(&ob->oa); } -void dump_rcs(__u32 *rc) +void dump_rcs(void *_rc) { + __u32 *rc = _rc; + CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc); } @@ -2148,8 +2194,10 @@ void _debug_req(struct ptlrpc_request *req, } EXPORT_SYMBOL(_debug_req); -void lustre_swab_lustre_capa(struct lustre_capa *c) +void lustre_swab_lustre_capa(void *_c) { + struct lustre_capa *c = _c; + lustre_swab_lu_fid(&c->lc_fid); __swab64s(&c->lc_opc); __swab64s(&c->lc_uid); @@ -2160,14 +2208,18 @@ void lustre_swab_lustre_capa(struct lustre_capa *c) __swab32s(&c->lc_expiry); } -void lustre_swab_hsm_user_state(struct hsm_user_state *state) +void lustre_swab_hsm_user_state(void *_state) { + struct hsm_user_state *state = _state; + __swab32s(&state->hus_states); __swab32s(&state->hus_archive_id); } -void lustre_swab_hsm_state_set(struct hsm_state_set *hss) +void lustre_swab_hsm_state_set(void *_hss) { + struct hsm_state_set *hss = _hss; + __swab32s(&hss->hss_valid); __swab64s(&hss->hss_setmask); __swab64s(&hss->hss_clearmask); @@ -2181,29 +2233,37 @@ static void lustre_swab_hsm_extent(struct hsm_extent *extent) __swab64s(&extent->length); } -void lustre_swab_hsm_current_action(struct hsm_current_action *action) +void lustre_swab_hsm_current_action(void *_action) { + struct hsm_current_action *action = _action; + __swab32s(&action->hca_state); __swab32s(&action->hca_action); lustre_swab_hsm_extent(&action->hca_location); } -void lustre_swab_hsm_user_item(struct hsm_user_item *hui) +void lustre_swab_hsm_user_item(void *_hui) { + struct hsm_user_item *hui = _hui; + lustre_swab_lu_fid(&hui->hui_fid); lustre_swab_hsm_extent(&hui->hui_extent); } -void lustre_swab_layout_intent(struct layout_intent *li) +void lustre_swab_layout_intent(void *_li) { + struct layout_intent *li = _li; + __swab32s(&li->li_opc); __swab32s(&li->li_flags); __swab64s(&li->li_start); __swab64s(&li->li_end); } -void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk) +void lustre_swab_hsm_progress_kernel(void *_hpk) { + struct hsm_progress_kernel *hpk = _hpk; + lustre_swab_lu_fid(&hpk->hpk_fid); __swab64s(&hpk->hpk_cookie); __swab64s(&hpk->hpk_extent.offset); @@ -2212,8 +2272,10 @@ void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk) __swab16s(&hpk->hpk_errval); } -void lustre_swab_hsm_request(struct hsm_request *hr) +void lustre_swab_hsm_request(void *_hr) { + struct hsm_request *hr = _hr; + __swab32s(&hr->hr_action); __swab32s(&hr->hr_archive_id); __swab64s(&hr->hr_flags); @@ -2221,14 +2283,18 @@ void lustre_swab_hsm_request(struct hsm_request *hr) __swab32s(&hr->hr_data_len); } -void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl) +void lustre_swab_swap_layouts(void *_msl) { + struct mdc_swap_layouts *msl = _msl; + __swab64s(&msl->msl_flags); } EXPORT_SYMBOL(lustre_swab_swap_layouts); -void lustre_swab_close_data(struct close_data *cd) +void lustre_swab_close_data(void *_cd) { + struct close_data *cd = _cd; + lustre_swab_lu_fid(&cd->cd_fid); __swab64s(&cd->cd_data_version); } diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 
b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c index fb13df586..20345631c 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c @@ -3923,7 +3923,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter) _12M_RATE_, _24M_RATE_, 0xff, }; - atomic_set(&pmlmeext->event_seq, 0); + atomic_set_unchecked(&pmlmeext->event_seq, 0); pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */ pmlmeext->cur_channel = padapter->registrypriv.channel; @@ -4116,7 +4116,7 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext) static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame) { - u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + static const u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; u8 *pframe = precv_frame->rx_data; if (ptable->func) { @@ -4135,7 +4135,7 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame) #ifdef CONFIG_88EU_AP_MODE struct mlme_priv *pmlmepriv = &padapter->mlmepriv; #endif /* CONFIG_88EU_AP_MODE */ - u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + static const u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; u8 *pframe = precv_frame->rx_data; struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe)); @@ -4160,7 +4160,7 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame) index = GetFrameSubType(pframe) >> 4; - if (index > 13) { + if (index >= ARRAY_SIZE(mlme_sta_tbl)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Currently we do not support reserved sub-fr-type=%d\n", index)); return; } @@ -4250,7 +4250,7 @@ void report_survey_event(struct adapter *padapter, pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd); pc2h_evt_hdr->len = sizeof(struct survey_event); pc2h_evt_hdr->ID = _Survey_EVT_; - pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq); + pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq); psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header)); @@ -4300,7 +4300,7 @@ void report_surveydone_event(struct adapter *padapter) pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd); pc2h_evt_hdr->len = sizeof(struct surveydone_event); pc2h_evt_hdr->ID = _SurveyDone_EVT_; - pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq); + pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq); psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header)); psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt; @@ -4344,7 +4344,7 @@ void report_join_res(struct adapter *padapter, int res) pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd); pc2h_evt_hdr->len = sizeof(struct joinbss_event); pc2h_evt_hdr->ID = _JoinBss_EVT_; - pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq); + pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq); pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header)); memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex)); @@ -4395,7 +4395,7 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd); pc2h_evt_hdr->len = sizeof(struct stadel_event); pc2h_evt_hdr->ID = _DelSTA_EVT_; - pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq); + pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
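/*
 * Editor's aside, not part of the patch hunks above or below: the hunks in
 * this file move pmlmeext->event_seq from atomic_t to the PaX
 * atomic_unchecked_t API (the matching field change appears in
 * rtw_mlme_ext.h later in this patch). The intent is that event_seq is an
 * ordinary wrapping sequence counter, so it opts out of the
 * reference-count overflow detection that the patch applies to plain
 * atomic_t. As a minimal illustrative sketch, assuming the usual fallback
 * definitions used when that hardening is compiled out (the exact
 * definitions live in the arch atomic headers added elsewhere by the
 * patch), the _unchecked helpers behave like thin wrappers over the
 * regular atomic operations:
 *
 *	typedef struct { atomic_t a; } atomic_unchecked_t;	// illustrative only
 *
 *	static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
 *	{
 *		atomic_set(&v->a, i);			// same store as atomic_set()
 *	}
 *
 *	static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
 *	{
 *		return atomic_inc_return(&v->a);	// allowed to wrap; that is the point
 *	}
 *
 * With the overflow instrumentation enabled, only plain atomic_t gets the
 * saturation checks, so counters converted this way keep their original
 * wrap-around behaviour.
 */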
pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header)); ether_addr_copy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr); @@ -4448,7 +4448,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd); pc2h_evt_hdr->len = sizeof(struct stassoc_event); pc2h_evt_hdr->ID = _AddSTA_EVT_; - pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq); + pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq); padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header)); ether_addr_copy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr); diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c index d0495a16f..bac1045ea 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c @@ -31,7 +31,7 @@ int rtw_hal_init_recv_priv(struct adapter *padapter) struct recv_buf *precvbuf; tasklet_init(&precvpriv->recv_tasklet, - (void(*)(unsigned long))rtl8188eu_recv_tasklet, + rtl8188eu_recv_tasklet, (unsigned long)padapter); /* init recv_buf */ diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c index 85650b266..d4511d14e 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c @@ -26,7 +26,7 @@ s32 rtw_hal_init_xmit_priv(struct adapter *adapt) struct xmit_priv *pxmitpriv = &adapt->xmitpriv; tasklet_init(&pxmitpriv->xmit_tasklet, - (void(*)(unsigned long))rtl8188eu_xmit_tasklet, + rtl8188eu_xmit_tasklet, (unsigned long)adapt); return _SUCCESS; } diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h index 0976a761b..b155d7fb9 100644 --- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h +++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h @@ -182,17 +182,9 @@ void PHY_GetTxPowerLevel8188E(struct adapter *adapter, u32 *powerlevel); void PHY_ScanOperationBackup8188E(struct adapter *Adapter, u8 Operation); -/* Call after initialization */ -void ChkFwCmdIoDone(struct adapter *adapter); - /* BB/MAC/RF other monitor API */ void PHY_SetRFPathSwitch_8188E(struct adapter *adapter, bool main); -void PHY_SwitchEphyParameter(struct adapter *adapter); - -void PHY_EnableHostClkReq(struct adapter *adapter); - -bool SetAntennaConfig92C(struct adapter *adapter, u8 defaultant); /*--------------------------Exported Function prototype---------------------*/ diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h index fa032b0c1..e4adaa60e 100644 --- a/drivers/staging/rtl8188eu/include/hal_intf.h +++ b/drivers/staging/rtl8188eu/include/hal_intf.h @@ -173,7 +173,6 @@ void rtw_hal_sw_led_deinit(struct adapter *padapter); u32 rtw_hal_power_on(struct adapter *padapter); uint rtw_hal_init(struct adapter *padapter); uint rtw_hal_deinit(struct adapter *padapter); -void rtw_hal_stop(struct adapter *padapter); void rtw_hal_set_hwreg(struct adapter *padapter, u8 variable, u8 *val); void rtw_hal_get_hwreg(struct adapter *padapter, u8 variable, u8 *val); @@ -202,8 +201,6 @@ void rtw_hal_free_recv_priv(struct adapter *padapter); void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level); void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg, u8 level); -void rtw_hal_clone_data(struct adapter *dst_adapt, - struct adapter *src_adapt); void 
rtw_hal_bcn_related_reg_setting(struct adapter *padapter); diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h index 9e5fe1777..bdb77bb57 100644 --- a/drivers/staging/rtl8188eu/include/odm_precomp.h +++ b/drivers/staging/rtl8188eu/include/odm_precomp.h @@ -70,7 +70,7 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm); void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm); void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm); void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm); -void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext); +void odm_SwAntDivChkAntSwitchCallback(unsigned long FunctionContext); void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm); void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm); diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h index 80832a5f0..6468b0c46 100644 --- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h +++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h @@ -52,7 +52,7 @@ enum rx_packet_type { #define INTERRUPT_MSG_FORMAT_LEN 60 void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf); -void rtl8188eu_recv_tasklet(void *priv); +void rtl8188eu_recv_tasklet(unsigned long _priv); void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy); void rtl8188e_process_phy_info(struct adapter *padapter, struct recv_frame *prframe); diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h index 66205b782..c8f7fa0d8 100644 --- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h +++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h @@ -156,7 +156,7 @@ void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc, s32 rtl8188eu_init_xmit_priv(struct adapter *padapter); s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter); #define hal_xmit_handler rtl8188eu_xmit_buf_handler -void rtl8188eu_xmit_tasklet(void *priv); +void rtl8188eu_xmit_tasklet(unsigned long _priv); s32 rtl8188eu_xmitframe_complete(struct adapter *padapter, struct xmit_priv *pxmitpriv); diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h index 18a6530c9..545d2c221 100644 --- a/drivers/staging/rtl8188eu/include/rtw_cmd.h +++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h @@ -338,7 +338,6 @@ void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd); void rtw_setstaKey_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd); void rtw_setassocsta_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cm); -void rtw_getrttbl_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd); struct _cmd_callback { u32 cmd_code; diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h index 5dd73841d..337cc49f4 100644 --- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h +++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h @@ -116,10 +116,4 @@ struct eeprom_priv { u8 efuse_eeprom_data[HWSET_MAX_SIZE_512]; }; -void eeprom_write16(struct adapter *padapter, u16 reg, u16 data); -u16 eeprom_read16(struct adapter *padapter, u16 reg); -void read_eeprom_content(struct adapter *padapter); -void eeprom_read_sz(struct adapter *adapt, u16 reg, u8 *data, u32 sz); -void read_eeprom_content_by_attrib(struct adapter *padapter); - #endif /* __RTL871X_EEPROM_H__ */ diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h 
b/drivers/staging/rtl8188eu/include/rtw_ioctl.h index a6b1c854a..340229788 100644 --- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h +++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h @@ -88,13 +88,4 @@ static int oid_null_function(struct oid_par_priv *poid_par_priv) { extern struct iw_handler_def rtw_handlers_def; -int drv_query_info(struct net_device *miniportadaptercontext, NDIS_OID oid, - void *informationbuffer, u32 informationbufferlength, - u32 *byteswritten, u32 *bytesneeded); - -int drv_set_info(struct net_device *MiniportAdapterContext, - NDIS_OID oid, void *informationbuffer, - u32 informationbufferlength, u32 *bytesread, - u32 *bytesneeded); - #endif /* #ifndef __INC_CEINFO_ */ diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h index 1b1caaf58..fc3809415 100644 --- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h +++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h @@ -404,7 +404,7 @@ struct p2p_oper_class_map { struct mlme_ext_priv { struct adapter *padapter; u8 mlmeext_init; - atomic_t event_seq; + atomic_unchecked_t event_seq; u16 mgnt_seq; unsigned char cur_channel; @@ -550,8 +550,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *addr, void beacon_timing_control(struct adapter *padapter); u8 set_tx_beacon_cmd(struct adapter *padapter); -unsigned int setup_beacon_frame(struct adapter *padapter, - unsigned char *beacon_frame); void update_mgnt_tx_rate(struct adapter *padapter, u8 rate); void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattrib); @@ -597,12 +595,6 @@ struct cmd_hdl { u8 (*h2cfuns)(struct adapter *padapter, u8 *pbuf); }; -u8 read_macreg_hdl(struct adapter *padapter, u8 *pbuf); -u8 write_macreg_hdl(struct adapter *padapter, u8 *pbuf); -u8 read_bbreg_hdl(struct adapter *padapter, u8 *pbuf); -u8 write_bbreg_hdl(struct adapter *padapter, u8 *pbuf); -u8 read_rfreg_hdl(struct adapter *padapter, u8 *pbuf); -u8 write_rfreg_hdl(struct adapter *padapter, u8 *pbuf); u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf); u8 disconnect_hdl(struct adapter *padapter, u8 *pbuf); u8 createbss_hdl(struct adapter *padapter, u8 *pbuf); @@ -611,8 +603,6 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf); u8 setauth_hdl(struct adapter *padapter, u8 *pbuf); u8 setkey_hdl(struct adapter *padapter, u8 *pbuf); u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf); -u8 set_assocsta_hdl(struct adapter *padapter, u8 *pbuf); -u8 del_assocsta_hdl(struct adapter *padapter, u8 *pbuf); u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf); u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf); diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h index f96ca6af9..104d496f9 100644 --- a/drivers/staging/rtl8188eu/include/xmit_osdep.h +++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h @@ -35,7 +35,7 @@ struct sta_xmit_priv; struct xmit_frame; struct xmit_buf; -int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev); +netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev); void rtw_os_xmit_schedule(struct adapter *padapter); diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c index d0d591501..53e64222f 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c @@ -807,10 +807,10 @@ void usb_write_port_cancel(struct adapter *padapter) } } -void 
rtl8188eu_recv_tasklet(void *priv) +void rtl8188eu_recv_tasklet(unsigned long priv) { struct sk_buff *pskb; - struct adapter *adapt = priv; + struct adapter *adapt = (struct adapter *)priv; struct recv_priv *precvpriv = &adapt->recvpriv; while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) { @@ -826,10 +826,10 @@ void rtl8188eu_recv_tasklet(void *priv) } } -void rtl8188eu_xmit_tasklet(void *priv) +void rtl8188eu_xmit_tasklet(unsigned long priv) { int ret = false; - struct adapter *adapt = priv; + struct adapter *adapt = (struct adapter *)priv; struct xmit_priv *pxmitpriv = &adapt->xmitpriv; if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY)) diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c index 4b1b04e00..c5049c231 100644 --- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c @@ -208,7 +208,7 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb) } -int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev) +netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev) { struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 74fd8455b..846e69b51 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -84,7 +84,7 @@ static struct pci_driver rtl8192_pci_driver = { }; static short _rtl92e_is_tx_queue_empty(struct net_device *dev); -static void _rtl92e_watchdog_wq_cb(void *data); +static void _rtl92e_watchdog_wq_cb(struct work_struct *data); static void _rtl92e_watchdog_timer_cb(unsigned long data); static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rate); @@ -92,13 +92,13 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb); static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb); static short _rtl92e_pci_initdescring(struct net_device *dev); -static void _rtl92e_irq_tx_tasklet(struct r8192_priv *priv); -static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv); +static void _rtl92e_irq_tx_tasklet(unsigned long priv); +static void _rtl92e_irq_rx_tasklet(unsigned long priv); static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv); static int _rtl92e_up(struct net_device *dev, bool is_silent_reset); static int _rtl92e_try_up(struct net_device *dev); static int _rtl92e_down(struct net_device *dev, bool shutdownrf); -static void _rtl92e_restart(void *data); +static void _rtl92e_restart(struct work_struct *data); /**************************************************************************** -----------------------------IO STUFF------------------------- @@ -375,7 +375,7 @@ static struct rtllib_qos_parameters def_qos_parameters = { {0, 0, 0, 0} }; -static void _rtl92e_update_beacon(void *data) +static void _rtl92e_update_beacon(struct work_struct *data) { struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv, update_beacon_wq.work); @@ -391,7 +391,7 @@ static void _rtl92e_update_beacon(void *data) _rtl92e_update_cap(dev, net->capability); } -static void _rtl92e_qos_activate(void *data) +static void _rtl92e_qos_activate(struct work_struct *data) { struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv, 
qos_activate); @@ -527,8 +527,9 @@ static int _rtl92e_handle_assoc_response(struct net_device *dev, return 0; } -static void _rtl92e_prepare_beacon(struct r8192_priv *priv) +static void _rtl92e_prepare_beacon(unsigned long _priv) { + struct r8192_priv *priv = (struct r8192_priv *)_priv; struct net_device *dev = priv->rtllib->dev; struct sk_buff *pskb = NULL, *pnewskb = NULL; struct cb_desc *tcb_desc = NULL; @@ -1002,30 +1003,30 @@ static void _rtl92e_init_priv_task(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - INIT_WORK_RSL(&priv->reset_wq, (void *)_rtl92e_restart, dev); - INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)rtl92e_ips_leave_wq, + INIT_WORK_RSL(&priv->reset_wq, _rtl92e_restart, dev); + INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, rtl92e_ips_leave_wq, dev); INIT_DELAYED_WORK_RSL(&priv->watch_dog_wq, - (void *)_rtl92e_watchdog_wq_cb, dev); + _rtl92e_watchdog_wq_cb, dev); INIT_DELAYED_WORK_RSL(&priv->txpower_tracking_wq, - (void *)rtl92e_dm_txpower_tracking_wq, dev); + rtl92e_dm_txpower_tracking_wq, dev); INIT_DELAYED_WORK_RSL(&priv->rfpath_check_wq, - (void *)rtl92e_dm_rf_pathcheck_wq, dev); + rtl92e_dm_rf_pathcheck_wq, dev); INIT_DELAYED_WORK_RSL(&priv->update_beacon_wq, - (void *)_rtl92e_update_beacon, dev); - INIT_WORK_RSL(&priv->qos_activate, (void *)_rtl92e_qos_activate, dev); + _rtl92e_update_beacon, dev); + INIT_WORK_RSL(&priv->qos_activate, _rtl92e_qos_activate, dev); INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_wakeup_wq, - (void *) rtl92e_hw_wakeup_wq, dev); + rtl92e_hw_wakeup_wq, dev); INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq, - (void *) rtl92e_hw_sleep_wq, dev); + rtl92e_hw_sleep_wq, dev); tasklet_init(&priv->irq_rx_tasklet, - (void(*)(unsigned long))_rtl92e_irq_rx_tasklet, + _rtl92e_irq_rx_tasklet, (unsigned long)priv); tasklet_init(&priv->irq_tx_tasklet, - (void(*)(unsigned long))_rtl92e_irq_tx_tasklet, + _rtl92e_irq_tx_tasklet, (unsigned long)priv); tasklet_init(&priv->irq_prepare_beacon_tasklet, - (void(*)(unsigned long))_rtl92e_prepare_beacon, + _rtl92e_prepare_beacon, (unsigned long)priv); } @@ -1377,7 +1378,7 @@ static void _rtl92e_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum, } } -static void _rtl92e_watchdog_wq_cb(void *data) +static void _rtl92e_watchdog_wq_cb(struct work_struct *data) { struct r8192_priv *priv = container_of_dwork_rsl(data, struct r8192_priv, watch_dog_wq); @@ -2142,13 +2143,15 @@ static void _rtl92e_tx_resume(struct net_device *dev) } } -static void _rtl92e_irq_tx_tasklet(struct r8192_priv *priv) +static void _rtl92e_irq_tx_tasklet(unsigned long _priv) { + struct r8192_priv *priv = (struct r8192_priv *)_priv; _rtl92e_tx_resume(priv->rtllib->dev); } -static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv) +static void _rtl92e_irq_rx_tasklet(unsigned long _priv) { + struct r8192_priv *priv= (struct r8192_priv *)_priv; _rtl92e_rx_normal(priv->rtllib->dev); rtl92e_writel(priv->rtllib->dev, INTA_MASK, @@ -2236,7 +2239,7 @@ void rtl92e_commit(struct net_device *dev) _rtl92e_up(dev, false); } -static void _rtl92e_restart(void *data) +static void _rtl92e_restart(struct work_struct *data) { struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv, reset_wq); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h index babc0b3bc..2680a8b0c 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h @@ -586,12 +586,12 @@ void force_pci_posting(struct net_device *dev); void 
rtl92e_rx_enable(struct net_device *); void rtl92e_tx_enable(struct net_device *); -void rtl92e_hw_sleep_wq(void *data); +void rtl92e_hw_sleep_wq(struct work_struct *data); void rtl92e_commit(struct net_device *dev); void rtl92e_check_rfctrl_gpio_timer(unsigned long data); -void rtl92e_hw_wakeup_wq(void *data); +void rtl92e_hw_wakeup_wq(struct work_struct *data); void rtl92e_reset_desc_ring(struct net_device *dev); void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c index 9bc284812..17ccbf7dc 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c @@ -195,7 +195,7 @@ static void _rtl92e_dm_deinit_fsync(struct net_device *dev); static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev); static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev); static void _rtl92e_dm_check_fsync(struct net_device *dev); -static void _rtl92e_dm_check_rf_ctrl_gpio(void *data); +static void _rtl92e_dm_check_rf_ctrl_gpio(struct work_struct *data); static void _rtl92e_dm_fsync_timer_callback(unsigned long data); /*---------------------Define local function prototype-----------------------*/ @@ -229,7 +229,7 @@ void rtl92e_dm_init(struct net_device *dev) _rtl92e_dm_init_wa_broadcom_iot(dev); INIT_DELAYED_WORK_RSL(&priv->gpio_change_rf_wq, - (void *)_rtl92e_dm_check_rf_ctrl_gpio, dev); + _rtl92e_dm_check_rf_ctrl_gpio, dev); } void rtl92e_dm_deinit(struct net_device *dev) @@ -932,7 +932,7 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev) priv->txpower_count = 0; } -void rtl92e_dm_txpower_tracking_wq(void *data) +void rtl92e_dm_txpower_tracking_wq(struct work_struct *data) { struct r8192_priv *priv = container_of_dwork_rsl(data, struct r8192_priv, txpower_tracking_wq); @@ -1814,7 +1814,7 @@ static void _rtl92e_dm_init_wa_broadcom_iot(struct net_device *dev) pHTInfo->WAIotTH = WAIotTHVal; } -static void _rtl92e_dm_check_rf_ctrl_gpio(void *data) +static void _rtl92e_dm_check_rf_ctrl_gpio(struct work_struct *data) { struct r8192_priv *priv = container_of_dwork_rsl(data, struct r8192_priv, gpio_change_rf_wq); @@ -1868,7 +1868,7 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data) } } -void rtl92e_dm_rf_pathcheck_wq(void *data) +void rtl92e_dm_rf_pathcheck_wq(struct work_struct *data) { struct r8192_priv *priv = container_of_dwork_rsl(data, struct r8192_priv, diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h index 756a0dd00..d2de5e8ec 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h @@ -191,13 +191,13 @@ void rtl92e_dm_watchdog(struct net_device *dev); void rtl92e_init_adaptive_rate(struct net_device *dev); -void rtl92e_dm_txpower_tracking_wq(void *data); +void rtl92e_dm_txpower_tracking_wq(struct work_struct *data); void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14); void rtl92e_dm_restore_state(struct net_device *dev); void rtl92e_dm_backup_state(struct net_device *dev); void rtl92e_dm_init_edca_turbo(struct net_device *dev); -void rtl92e_dm_rf_pathcheck_wq(void *data); +void rtl92e_dm_rf_pathcheck_wq(struct work_struct *data); void rtl92e_dm_init_txpower_tracking(struct net_device *dev); #endif /*__R8192UDM_H__ */ diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c index aa4b015c3..d38df3818 100644 --- 
a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c @@ -44,7 +44,7 @@ static void _rtl92e_hw_sleep(struct net_device *dev) rtl92e_set_rf_state(dev, eRfSleep, RF_CHANGE_BY_PS); } -void rtl92e_hw_sleep_wq(void *data) +void rtl92e_hw_sleep_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_dwork_rsl(data, struct rtllib_device, hw_sleep_wq); @@ -72,7 +72,7 @@ void rtl92e_hw_wakeup(struct net_device *dev) rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_PS); } -void rtl92e_hw_wakeup_wq(void *data) +void rtl92e_hw_wakeup_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_dwork_rsl(data, struct rtllib_device, hw_wakeup_wq); @@ -172,7 +172,7 @@ void rtl92e_ips_leave(struct net_device *dev) } } -void rtl92e_ips_leave_wq(void *data) +void rtl92e_ips_leave_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_work_rsl(data, struct rtllib_device, ips_leave_wq); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h index a46f4cffc..8f46fdaca 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h @@ -24,6 +24,7 @@ #include struct net_device; +struct work_struct; #define RT_CHECK_FOR_HANG_PERIOD 2 @@ -31,7 +32,7 @@ void rtl92e_hw_wakeup(struct net_device *dev); void rtl92e_enter_sleep(struct net_device *dev, u64 time); void rtl92e_rtllib_ips_leave_wq(struct net_device *dev); void rtl92e_rtllib_ips_leave(struct net_device *dev); -void rtl92e_ips_leave_wq(void *data); +void rtl92e_ips_leave_wq(struct work_struct *data); void rtl92e_ips_enter(struct net_device *dev); void rtl92e_ips_leave(struct net_device *dev); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c index 7413a100c..93d1ec70e 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c @@ -1187,30 +1187,30 @@ static const struct iw_priv_args r8192_private_args[] = { }; static iw_handler r8192_private_handler[] = { - (iw_handler)_rtl92e_wx_set_debug, /*SIOCIWSECONDPRIV*/ - (iw_handler)_rtl92e_wx_set_scan_type, - (iw_handler)_rtl92e_wx_set_rawtx, - (iw_handler)_rtl92e_wx_force_reset, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)_rtl92e_wx_adapter_power_status, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)_rtl92e_wx_set_lps_awake_interval, - (iw_handler)_rtl92e_wx_set_force_lps, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)NULL, - (iw_handler)_rtl92e_wx_set_promisc_mode, - (iw_handler)_rtl92e_wx_get_promisc_mode, + _rtl92e_wx_set_debug, /*SIOCIWSECONDPRIV*/ + _rtl92e_wx_set_scan_type, + _rtl92e_wx_set_rawtx, + _rtl92e_wx_force_reset, + NULL, + NULL, + _rtl92e_wx_adapter_power_status, + NULL, + NULL, + NULL, + _rtl92e_wx_set_lps_awake_interval, + _rtl92e_wx_set_force_lps, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + _rtl92e_wx_set_promisc_mode, + _rtl92e_wx_get_promisc_mode, }; static struct iw_statistics *_rtl92e_get_wireless_stats(struct net_device *dev) diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index b895a537d..f4ff6063f 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -1993,7 +1993,7 @@ int rtllib_encrypt_fragment( struct sk_buff *frag, int hdr_len); -int 
rtllib_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev); void rtllib_txb_free(struct rtllib_txb *); /* rtllib_rx.c */ @@ -2107,7 +2107,7 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a, int rtllib_wx_get_freq(struct rtllib_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b); -void rtllib_wx_sync_scan_wq(void *data); +void rtllib_wx_sync_scan_wq(struct work_struct *data); int rtllib_wx_set_rawtx(struct rtllib_device *ieee, struct iw_request_info *info, diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index da74dc49b..0c2626562 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -575,7 +575,7 @@ static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh) wireless_send_event(ieee->dev, SIOCGIWSCAN, &wrqu, NULL); } -static void rtllib_softmac_scan_wq(void *data) +static void rtllib_softmac_scan_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_dwork_rsl(data, struct rtllib_device, softmac_scan_wq); @@ -1517,7 +1517,7 @@ static void rtllib_associate_step2(struct rtllib_device *ieee) } } -static void rtllib_associate_complete_wq(void *data) +static void rtllib_associate_complete_wq(struct work_struct *data) { struct rtllib_device *ieee = (struct rtllib_device *) container_of_work_rsl(data, @@ -1586,7 +1586,7 @@ static void rtllib_associate_complete(struct rtllib_device *ieee) schedule_work(&ieee->associate_complete_wq); } -static void rtllib_associate_procedure_wq(void *data) +static void rtllib_associate_procedure_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_dwork_rsl(data, struct rtllib_device, @@ -2058,8 +2058,9 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time) } -static inline void rtllib_sta_ps(struct rtllib_device *ieee) +static inline void rtllib_sta_ps(unsigned long _ieee) { + struct rtllib_device *ieee = (struct rtllib_device *)_ieee; u64 time; short sleep; unsigned long flags, flags2; @@ -2583,7 +2584,7 @@ static void rtllib_start_monitor_mode(struct rtllib_device *ieee) } } -static void rtllib_start_ibss_wq(void *data) +static void rtllib_start_ibss_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_dwork_rsl(data, struct rtllib_device, start_ibss_wq); @@ -2748,7 +2749,7 @@ static void rtllib_start_bss(struct rtllib_device *ieee) spin_unlock_irqrestore(&ieee->lock, flags); } -static void rtllib_link_change_wq(void *data) +static void rtllib_link_change_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_dwork_rsl(data, struct rtllib_device, link_change_wq); @@ -2774,7 +2775,7 @@ void rtllib_disassociate(struct rtllib_device *ieee) notify_wx_assoc_event(ieee); } -static void rtllib_associate_retry_wq(void *data) +static void rtllib_associate_retry_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_dwork_rsl(data, struct rtllib_device, associate_retry_wq); @@ -3027,19 +3028,18 @@ void rtllib_softmac_init(struct rtllib_device *ieee) (unsigned long) ieee); INIT_DELAYED_WORK_RSL(&ieee->link_change_wq, - (void *)rtllib_link_change_wq, ieee); + rtllib_link_change_wq, ieee); INIT_DELAYED_WORK_RSL(&ieee->start_ibss_wq, - (void *)rtllib_start_ibss_wq, ieee); + rtllib_start_ibss_wq, ieee); INIT_WORK_RSL(&ieee->associate_complete_wq, - (void *)rtllib_associate_complete_wq, ieee); + rtllib_associate_complete_wq, 
ieee); INIT_DELAYED_WORK_RSL(&ieee->associate_procedure_wq, - (void *)rtllib_associate_procedure_wq, ieee); + rtllib_associate_procedure_wq, ieee); INIT_DELAYED_WORK_RSL(&ieee->softmac_scan_wq, - (void *)rtllib_softmac_scan_wq, ieee); + rtllib_softmac_scan_wq, ieee); INIT_DELAYED_WORK_RSL(&ieee->associate_retry_wq, - (void *)rtllib_associate_retry_wq, ieee); - INIT_WORK_RSL(&ieee->wx_sync_scan_wq, (void *)rtllib_wx_sync_scan_wq, - ieee); + rtllib_associate_retry_wq, ieee); + INIT_WORK_RSL(&ieee->wx_sync_scan_wq, rtllib_wx_sync_scan_wq, ieee); mutex_init(&ieee->wx_mutex); mutex_init(&ieee->scan_mutex); @@ -3049,7 +3049,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee) spin_lock_init(&ieee->beacon_lock); tasklet_init(&ieee->ps_task, - (void(*)(unsigned long)) rtllib_sta_ps, + rtllib_sta_ps, (unsigned long)ieee); } diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c index 5f1412fc4..70d1cebd3 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c +++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c @@ -327,7 +327,7 @@ int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a, } EXPORT_SYMBOL(rtllib_wx_set_mode); -void rtllib_wx_sync_scan_wq(void *data) +void rtllib_wx_sync_scan_wq(struct work_struct *data) { struct rtllib_device *ieee = container_of_work_rsl(data, struct rtllib_device, wx_sync_scan_wq); diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c index 78a3ad5b2..3b1b31a2b 100644 --- a/drivers/staging/rtl8192e/rtllib_tx.c +++ b/drivers/staging/rtl8192e/rtllib_tx.c @@ -984,7 +984,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) } -int rtllib_xmit(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev) { memset(skb->cb, 0, sizeof(skb->cb)); return rtllib_xmit_inter(skb, dev); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h index 077ea13eb..abc53a0aa 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h @@ -2174,7 +2174,7 @@ int ieee80211_set_encryption(struct ieee80211_device *ieee); int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, struct sk_buff *frag, int hdr_len); -int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev); void ieee80211_txb_free(struct ieee80211_txb *); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c index d7d85b3f1..20140ab91 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c @@ -1769,9 +1769,9 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, } -static inline void ieee80211_sta_ps(struct ieee80211_device *ieee) +static inline void ieee80211_sta_ps(unsigned long _ieee) { - + struct ieee80211_device *ieee = (struct ieee80211_device *)_ieee; u32 th, tl; short sleep; @@ -2739,7 +2739,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee) spin_lock_init(&ieee->beacon_lock); tasklet_init(&ieee->ps_task, - (void(*)(unsigned long)) ieee80211_sta_ps, + ieee80211_sta_ps, (unsigned long)ieee); } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c index 1ab0aead2..41de55c8f 100644 --- 
a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c @@ -594,7 +594,7 @@ static void ieee80211_query_seqnum(struct ieee80211_device *ieee, } } -int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_device *ieee = netdev_priv(dev); struct ieee80211_txb *txb = NULL; diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 457eeb5f5..d6dd4ea7d 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -2330,7 +2330,7 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv) static void rtl819x_watchdog_wqcallback(struct work_struct *work); -static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv); +static void rtl8192_irq_rx_tasklet(unsigned long priv); /* init tasklet and wait_queue here. only 2.6 above kernel is considered */ #define DRV_NAME "wlan0" static void rtl8192_init_priv_task(struct net_device *dev) @@ -2353,7 +2353,7 @@ static void rtl8192_init_priv_task(struct net_device *dev) INIT_WORK(&priv->qos_activate, rtl8192_qos_activate); tasklet_init(&priv->irq_rx_tasklet, - (void(*)(unsigned long))rtl8192_irq_rx_tasklet, + rtl8192_irq_rx_tasklet, (unsigned long)priv); } @@ -4890,8 +4890,9 @@ static void rtl8192_rx_cmd(struct sk_buff *skb) } } -static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv) +static void rtl8192_irq_rx_tasklet(unsigned long _priv) { + struct r8192_priv *priv = (struct r8192_priv *)_priv; struct sk_buff *skb; struct rtl8192_rx_info *info; diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c index 66f0e0a35..395de9c75 100644 --- a/drivers/staging/rtl8712/rtl8712_recv.c +++ b/drivers/staging/rtl8712/rtl8712_recv.c @@ -45,7 +45,7 @@ static u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8}; /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; -static void recv_tasklet(void *priv); +static void recv_tasklet(unsigned long _priv); int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter) { @@ -79,7 +79,7 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter) } precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF; tasklet_init(&precvpriv->recv_tasklet, - (void(*)(unsigned long))recv_tasklet, + recv_tasklet, (unsigned long)padapter); skb_queue_head_init(&precvpriv->rx_skb_queue); @@ -1121,7 +1121,7 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb) return _SUCCESS; } -static void recv_tasklet(void *priv) +static void recv_tasklet(unsigned long priv) { struct sk_buff *pskb; struct _adapter *padapter = (struct _adapter *)priv; diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h index 26dd24cdd..2eb37c969 100644 --- a/drivers/staging/rtl8712/rtl871x_io.h +++ b/drivers/staging/rtl8712/rtl871x_io.h @@ -108,7 +108,7 @@ struct _io_ops { u8 *pmem); u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem); -}; +} __no_const; struct io_req { struct list_head list; diff --git a/drivers/staging/rtl8712/rtl871x_ioctl.h b/drivers/staging/rtl8712/rtl871x_ioctl.h index 08bcb3b41..24c20e0ff 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl.h +++ b/drivers/staging/rtl8712/rtl871x_ioctl.h @@ -77,18 +77,4 @@ uint oid_null_function(struct oid_par_priv *poid_par_priv); extern struct 
iw_handler_def r871x_handlers_def; -uint drv_query_info(struct net_device *MiniportAdapterContext, - uint Oid, - void *InformationBuffer, - u32 InformationBufferLength, - u32 *BytesWritten, - u32 *BytesNeeded); - -uint drv_set_info(struct net_device *MiniportAdapterContext, - uint Oid, - void *InformationBuffer, - u32 InformationBufferLength, - u32 *BytesRead, - u32 *BytesNeeded); - #endif diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c index be38364c8..87b461b2a 100644 --- a/drivers/staging/rtl8712/rtl871x_xmit.c +++ b/drivers/staging/rtl8712/rtl871x_xmit.c @@ -152,7 +152,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, alloc_hwxmits(padapter); init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); tasklet_init(&pxmitpriv->xmit_tasklet, - (void(*)(unsigned long))r8712_xmit_bh, + r8712_xmit_bh, (unsigned long)padapter); return _SUCCESS; } diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h index d899d0c6d..f27928e2e 100644 --- a/drivers/staging/rtl8712/rtl871x_xmit.h +++ b/drivers/staging/rtl8712/rtl871x_xmit.h @@ -295,7 +295,7 @@ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe); int r8712_xmit_enqueue(struct _adapter *padapter, struct xmit_frame *pxmitframe); int r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe); -void r8712_xmit_bh(void *priv); +void r8712_xmit_bh(unsigned long priv); void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe, struct xmit_buf *pxmitbuf); diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c index fc6bb0be2..3b073c67f 100644 --- a/drivers/staging/rtl8712/usb_ops_linux.c +++ b/drivers/staging/rtl8712/usb_ops_linux.c @@ -319,10 +319,10 @@ void r8712_usb_read_port_cancel(struct _adapter *padapter) } } -void r8712_xmit_bh(void *priv) +void r8712_xmit_bh(unsigned long priv) { int ret = false; - struct _adapter *padapter = priv; + struct _adapter *padapter = (struct _adapter *)priv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; if (padapter->bDriverStopped || diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c index 4ee4136b5..0b001b5be 100644 --- a/drivers/staging/rtl8712/xmit_linux.c +++ b/drivers/staging/rtl8712/xmit_linux.c @@ -159,7 +159,7 @@ void r8712_xmit_complete(struct _adapter *padapter, struct xmit_frame *pxframe) pxframe->pkt = NULL; } -int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev) +netdev_tx_t r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev) { struct xmit_frame *pxmitframe = NULL; struct _adapter *padapter = netdev_priv(pnetdev); diff --git a/drivers/staging/rtl8712/xmit_osdep.h b/drivers/staging/rtl8712/xmit_osdep.h index 8eba7ca0d..6c4ce81ab 100644 --- a/drivers/staging/rtl8712/xmit_osdep.h +++ b/drivers/staging/rtl8712/xmit_osdep.h @@ -46,7 +46,7 @@ struct sta_xmit_priv; struct xmit_frame; struct xmit_buf; -int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev); +netdev_tx_t r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev); void r8712_SetFilter(struct work_struct *work); int r8712_xmit_resource_alloc(struct _adapter *padapter, struct xmit_buf *pxmitbuf); diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 7d90e2501..07804243d 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -725,6 +725,7 @@ static struct fb_ops lynxfb_ops = { .fb_set_par = lynxfb_ops_set_par, .fb_setcolreg = lynxfb_ops_setcolreg, .fb_blank = 
lynxfb_ops_blank, + .fb_pan_display = lynxfb_ops_pan_display, .fb_fillrect = cfb_fillrect, .fb_imageblit = cfb_imageblit, .fb_copyarea = cfb_copyarea, @@ -770,7 +771,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) par->index = index; output->channel = &crtc->channel; sm750fb_set_drv(par); - lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display; /* * set current cursor variable and proc pointer, @@ -787,16 +787,20 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) memset_io(crtc->cursor.vstart, 0, crtc->cursor.size); if (!g_hwcursor) { - lynxfb_ops.fb_cursor = NULL; + pax_open_kernel(); + const_cast(lynxfb_ops.fb_cursor) = NULL; + pax_close_kernel(); hw_cursor_disable(&crtc->cursor); } /* set info->fbops, must be set before fb_find_mode */ if (!sm750_dev->accel_off) { /* use 2d acceleration */ - lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect; - lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea; - lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit; + pax_open_kernel(); + const_cast(lynxfb_ops.fb_fillrect) = lynxfb_ops_fillrect; + const_cast(lynxfb_ops.fb_copyarea) = lynxfb_ops_copyarea; + const_cast(lynxfb_ops.fb_imageblit) = lynxfb_ops_imageblit; + pax_close_kernel(); } info->fbops = &lynxfb_ops; diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c index 59871495e..15c44ea9c 100644 --- a/drivers/staging/unisys/visorbus/visorchipset.c +++ b/drivers/staging/unisys/visorbus/visorchipset.c @@ -2234,7 +2234,7 @@ static __init uint32_t visorutil_spar_detect(void) } } -static int init_unisys(void) +static __init int init_unisys(void) { int result; @@ -2249,7 +2249,7 @@ static int init_unisys(void) return 0; }; -static void exit_unisys(void) +static __exit void exit_unisys(void) { acpi_bus_unregister_driver(&unisys_acpi_driver); } diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 136700756..d7a35307b 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -758,7 +758,7 @@ static inline bool vnic_hit_low_watermark(struct visornic_devdata *devdata, * can be called again. * Returns NETDEV_TX_OK. 
*/ -static int +static netdev_tx_t visornic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct visornic_devdata *devdata; diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c index 7e69bc99d..d5376b298 100644 --- a/drivers/staging/vt6655/rxtx.c +++ b/drivers/staging/vt6655/rxtx.c @@ -1250,7 +1250,7 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer, mic_hdr->payload_len = cpu_to_be16(payload_len); ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2); - pn64 = atomic64_read(&tx_key->tx_pn); + pn64 = atomic64_read_unchecked(&tx_key->tx_pn); mic_hdr->ccmp_pn[5] = pn64; mic_hdr->ccmp_pn[4] = pn64 >> 8; mic_hdr->ccmp_pn[3] = pn64 >> 16; diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index aa59e7f14..094dd59cd 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -749,7 +749,7 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context, mic_hdr->payload_len = cpu_to_be16(payload_len); ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2); - pn64 = atomic64_read(&tx_key->tx_pn); + pn64 = atomic64_read_unchecked(&tx_key->tx_pn); mic_hdr->ccmp_pn[5] = pn64; mic_hdr->ccmp_pn[4] = pn64 >> 8; mic_hdr->ccmp_pn[3] = pn64 >> 16; diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h index ddfea29df..5305b3846 100644 --- a/drivers/staging/wilc1000/host_interface.h +++ b/drivers/staging/wilc1000/host_interface.h @@ -1,6 +1,7 @@ #ifndef HOST_INT_H #define HOST_INT_H +#include #include "coreconfigurator.h" #define IP_ALEN 4 diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index acd4a0e7c..52e779326 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c @@ -982,7 +982,7 @@ static void linux_wlan_tx_complete(void *priv, int status) kfree(pv_data); } -int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) +netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) { struct wilc_vif *vif; struct tx_complete_data *tx_data = NULL; diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c index f08cf6d9e..9ccd7a46e 100644 --- a/drivers/staging/wilc1000/wilc_spi.c +++ b/drivers/staging/wilc1000/wilc_spi.c @@ -19,6 +19,7 @@ #include #include +#include #include "wilc_wlan_if.h" #include "wilc_wlan.h" #include "wilc_wfi_netdevice.h" diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h index de6c4ddbf..ec133319b 100644 --- a/drivers/staging/wilc1000/wilc_wlan.h +++ b/drivers/staging/wilc1000/wilc_wlan.h @@ -295,7 +295,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc); void wilc_enable_tcp_ack_filter(bool value); int wilc_wlan_get_num_conn_ifcs(struct wilc *); -int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev); int wilc_mac_open(struct net_device *ndev); int wilc_mac_close(struct net_device *ndev); diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 825a63a7c..af108c32f 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -94,7 +94,7 @@ static int p80211knetdev_init(struct net_device *netdev); static int p80211knetdev_open(struct net_device *netdev); static int p80211knetdev_stop(struct net_device *netdev); -static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, +static netdev_tx_t p80211knetdev_hard_start_xmit(struct 
sk_buff *skb, struct net_device *netdev); static void p80211knetdev_set_multicast_list(struct net_device *dev); static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, @@ -317,7 +317,7 @@ static void p80211netdev_rx_bh(unsigned long arg) * Returns: * zero on success, non-zero on failure. ----------------------------------------------------------------*/ -static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, +static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { int result = 0; diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 6ca388eca..5ef4426d9 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -56,7 +56,7 @@ static const u32 sbp_unit_directory_template[] = { #define SESSION_MAINTENANCE_INTERVAL HZ -static atomic_t login_id = ATOMIC_INIT(0); +static atomic_unchecked_t login_id = ATOMIC_INIT(0); static void session_maintenance_work(struct work_struct *); static int sbp_run_transaction(struct fw_card *, int, int, int, int, @@ -422,7 +422,7 @@ static void sbp_management_request_login( login->login_lun = unpacked_lun; login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); - login->login_id = atomic_inc_return(&login_id); + login->login_id = atomic_inc_return_unchecked(&login_id); login->tgt_agt = sbp_target_agent_register(login); if (IS_ERR(login->tgt_agt)) { diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index 81631b110..b10aa5eb1 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -363,6 +363,15 @@ static struct thermal_cooling_device_ops devfreq_cooling_ops = { .set_cur_state = devfreq_cooling_set_cur_state, }; +static struct thermal_cooling_device_ops devfreq_cooling_power_ops = { + .get_max_state = devfreq_cooling_get_max_state, + .get_cur_state = devfreq_cooling_get_cur_state, + .set_cur_state = devfreq_cooling_set_cur_state, + .get_requested_power = devfreq_cooling_get_requested_power, + .state2power = devfreq_cooling_state2power, + .power2state = devfreq_cooling_power2state, +}; + /** * devfreq_cooling_gen_tables() - Generate power and freq tables. * @dfc: Pointer to devfreq cooling device. @@ -482,15 +491,9 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, dfc->devfreq = df; - if (dfc_power) { + if (dfc_power) dfc->power_ops = dfc_power; - devfreq_cooling_ops.get_requested_power = - devfreq_cooling_get_requested_power; - devfreq_cooling_ops.state2power = devfreq_cooling_state2power; - devfreq_cooling_ops.power2state = devfreq_cooling_power2state; - } - err = devfreq_cooling_gen_tables(dfc); if (err) goto free_dfc; @@ -502,7 +505,7 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id); cdev = thermal_of_cooling_device_register(np, dev_name, dfc, - &devfreq_cooling_ops); + dfc_power ? 
&devfreq_cooling_power_ops : &devfreq_cooling_ops); if (IS_ERR(cdev)) { err = PTR_ERR(cdev); dev_err(df->dev.parent, diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index 5836e5554..708bbd655 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) { - int3400_thermal_ops.get_mode = int3400_thermal_get_mode; - int3400_thermal_ops.set_mode = int3400_thermal_set_mode; + pax_open_kernel(); + const_cast(int3400_thermal_ops.get_mode) = int3400_thermal_get_mode; + const_cast(int3400_thermal_ops.set_mode) = int3400_thermal_set_mode; + pax_close_kernel(); } priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, priv, &int3400_thermal_ops, diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index d04ec3b9e..3fef6c19e 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "thermal_core.h" @@ -419,18 +420,20 @@ thermal_zone_of_add_sensor(struct device_node *zone, tz->ops = ops; tz->sensor_data = data; - tzd->ops->get_temp = of_thermal_get_temp; - tzd->ops->get_trend = of_thermal_get_trend; + pax_open_kernel(); + const_cast(tzd->ops->get_temp) = of_thermal_get_temp; + const_cast(tzd->ops->get_trend) = of_thermal_get_trend; /* * The thermal zone core will calculate the window if they have set the * optional set_trips pointer. */ if (ops->set_trips) - tzd->ops->set_trips = of_thermal_set_trips; + const_cast(tzd->ops->set_trips) = of_thermal_set_trips; if (ops->set_emul_temp) - tzd->ops->set_emul_temp = of_thermal_set_emul_temp; + const_cast(tzd->ops->set_emul_temp) = of_thermal_set_emul_temp; + pax_close_kernel(); mutex_unlock(&tzd->lock); @@ -557,9 +560,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev, return; mutex_lock(&tzd->lock); - tzd->ops->get_temp = NULL; - tzd->ops->get_trend = NULL; - tzd->ops->set_emul_temp = NULL; + pax_open_kernel(); + const_cast(tzd->ops->get_temp) = NULL; + const_cast(tzd->ops->get_trend) = NULL; + const_cast(tzd->ops->set_emul_temp) = NULL; + pax_close_kernel(); tz->ops = NULL; tz->sensor_data = NULL; diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 95f4c1bcd..98efc0d66 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c @@ -568,7 +568,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block pkg_temp_thermal_notifier __refdata = { +static struct notifier_block pkg_temp_thermal_notifier = { .notifier_call = pkg_temp_thermal_cpu_callback, }; diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c index 208f57349..32c03b707 100644 --- a/drivers/tty/amiserial.c +++ b/drivers/tty/amiserial.c @@ -1489,7 +1489,7 @@ static void rs_hangup(struct tty_struct *tty) rs_flush_buffer(tty); shutdown(tty, info); - info->tport.count = 0; + atomic_set(&info->tport.count, 0); tty_port_set_active(&info->tport, 0); info->tport.tty = NULL; wake_up_interruptible(&info->tport.open_wait); @@ -1507,7 +1507,7 @@ static int rs_open(struct tty_struct *tty, struct file * filp) struct tty_port *port = &info->tport; int retval; - port->count++; + atomic_inc(&port->count); port->tty = tty; tty->driver_data 
= info; tty->port = port; diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c index ce24182f8..a68ebf3d7 100644 --- a/drivers/tty/bfin_jtag_comm.c +++ b/drivers/tty/bfin_jtag_comm.c @@ -143,7 +143,7 @@ bfin_jc_open(struct tty_struct *tty, struct file *filp) unsigned long flags; spin_lock_irqsave(&port.lock, flags); - port.count++; + atomic_inc(&port.count); spin_unlock_irqrestore(&port.lock, flags); tty_port_tty_set(&port, tty); wake_up_process(bfin_jc_kthread); @@ -157,7 +157,7 @@ bfin_jc_close(struct tty_struct *tty, struct file *filp) bool last; spin_lock_irqsave(&port.lock, flags); - last = --port.count == 0; + last = atomic_dec_and_test(&port.count); spin_unlock_irqrestore(&port.lock, flags); if (last) tty_port_tty_set(&port, NULL); diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index c3e1604fe..decb00659 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -1566,12 +1566,12 @@ static int cy_open(struct tty_struct *tty, struct file *filp) #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line, - info->port.count); + atomic_read(&info->port.count)); #endif - info->port.count++; + atomic_inc(&info->port.count); #ifdef CY_DEBUG_COUNT printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n", - current->pid, info->port.count); + current->pid, atomic_read(&info->port.count)); #endif /* @@ -3947,7 +3947,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v) for (j = 0; j < cy_card[i].nports; j++) { info = &cy_card[i].ports[j]; - if (info->port.count) { + if (atomic_read(&info->port.count)) { /* XXX is the ldisc num worth this? */ struct tty_struct *tty; struct tty_ldisc *ld; diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index ce8648753..c64337636 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -143,7 +143,9 @@ static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = static void hvc_console_print(struct console *co, const char *b, unsigned count) { - char c[N_OUTBUF] __ALIGNED__; + char c_stack[N_OUTBUF] __ALIGNED__; + char *c_alloc = NULL; + char *c; unsigned i = 0, n = 0; int r, donecr = 0, index = co->index; @@ -155,8 +157,13 @@ static void hvc_console_print(struct console *co, const char *b, if (vtermnos[index] == -1) return; + if (slab_is_available()) + c = c_alloc = kmalloc(N_OUTBUF, GFP_ATOMIC); + else + c = c_stack; + while (count > 0 || i > 0) { - if (count > 0 && i < sizeof(c)) { + if (count > 0 && i < sizeof(c_stack)) { if (b[n] == '\n' && !donecr) { c[i++] = '\r'; donecr = 1; @@ -179,6 +186,8 @@ static void hvc_console_print(struct console *co, const char *b, } } } + + kfree(c_alloc); } static struct tty_driver *hvc_console_device(struct console *c, int *index) @@ -343,7 +352,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) spin_lock_irqsave(&hp->port.lock, flags); /* Check and then increment for fast path open. */ - if (hp->port.count++ > 0) { + if (atomic_inc_return(&hp->port.count) > 1) { spin_unlock_irqrestore(&hp->port.lock, flags); hvc_kick(); return 0; @@ -398,7 +407,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) spin_lock_irqsave(&hp->port.lock, flags); - if (--hp->port.count == 0) { + if (atomic_dec_return(&hp->port.count) == 0) { spin_unlock_irqrestore(&hp->port.lock, flags); /* We are done with the tty pointer now. 
*/ tty_port_tty_set(&hp->port, NULL); @@ -420,9 +429,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) */ tty_wait_until_sent(tty, HVC_CLOSE_WAIT); } else { - if (hp->port.count < 0) + if (atomic_read(&hp->port.count) < 0) printk(KERN_ERR "hvc_close %X: oops, count is %d\n", - hp->vtermno, hp->port.count); + hp->vtermno, atomic_read(&hp->port.count)); spin_unlock_irqrestore(&hp->port.lock, flags); } } @@ -452,12 +461,12 @@ static void hvc_hangup(struct tty_struct *tty) * open->hangup case this can be called after the final close so prevent * that from happening for now. */ - if (hp->port.count <= 0) { + if (atomic_read(&hp->port.count) <= 0) { spin_unlock_irqrestore(&hp->port.lock, flags); return; } - hp->port.count = 0; + atomic_set(&hp->port.count, 0); spin_unlock_irqrestore(&hp->port.lock, flags); tty_port_tty_set(&hp->port, NULL); @@ -505,7 +514,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count return -EPIPE; /* FIXME what's this (unprotected) check for? */ - if (hp->port.count <= 0) + if (atomic_read(&hp->port.count) <= 0) return -EIO; spin_lock_irqsave(&hp->lock, flags); diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c index 3c4d7c2b4..3410b8699 100644 --- a/drivers/tty/hvc/hvcs.c +++ b/drivers/tty/hvc/hvcs.c @@ -83,6 +83,7 @@ #include #include #include +#include /* * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00). @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut spin_lock_irqsave(&hvcsd->lock, flags); - if (hvcsd->port.count > 0) { + if (atomic_read(&hvcsd->port.count) > 0) { spin_unlock_irqrestore(&hvcsd->lock, flags); printk(KERN_INFO "HVCS: vterm state unchanged. " "The hvcs device node is still in use.\n"); @@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty) } } - hvcsd->port.count = 0; + atomic_set(&hvcsd->port.count, 0); hvcsd->port.tty = tty; tty->driver_data = hvcsd; @@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp) unsigned long flags; spin_lock_irqsave(&hvcsd->lock, flags); - hvcsd->port.count++; + atomic_inc(&hvcsd->port.count); hvcsd->todo_mask |= HVCS_SCHED_READ; spin_unlock_irqrestore(&hvcsd->lock, flags); @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) hvcsd = tty->driver_data; spin_lock_irqsave(&hvcsd->lock, flags); - if (--hvcsd->port.count == 0) { + if (atomic_dec_and_test(&hvcsd->port.count)) { vio_disable_interrupts(hvcsd->vdev); @@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) free_irq(irq, hvcsd); return; - } else if (hvcsd->port.count < 0) { + } else if (atomic_read(&hvcsd->port.count) < 0) { printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" " is missmanaged.\n", - hvcsd->vdev->unit_address, hvcsd->port.count); + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count)); } spin_unlock_irqrestore(&hvcsd->lock, flags); @@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty) spin_lock_irqsave(&hvcsd->lock, flags); /* Preserve this so that we know how many kref refs to put */ - temp_open_count = hvcsd->port.count; + temp_open_count = atomic_read(&hvcsd->port.count); /* * Don't kref put inside the spinlock because the destruction @@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty) tty->driver_data = NULL; hvcsd->port.tty = NULL; - hvcsd->port.count = 0; + atomic_set(&hvcsd->port.count, 0); /* This will drop any buffered 
data on the floor which is OK in a hangup * scenario. */ @@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty, * the middle of a write operation? This is a crummy place to do this * but we want to keep it all in the spinlock. */ - if (hvcsd->port.count <= 0) { + if (atomic_read(&hvcsd->port.count) <= 0) { spin_unlock_irqrestore(&hvcsd->lock, flags); return -ENODEV; } @@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty) { struct hvcs_struct *hvcsd = tty->driver_data; - if (!hvcsd || hvcsd->port.count <= 0) + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0) return 0; return HVCS_BUFF_LEN - hvcsd->chars_in_buffer; diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c index 96ce6bd1c..208f20acb 100644 --- a/drivers/tty/hvc/hvsi.c +++ b/drivers/tty/hvc/hvsi.c @@ -85,7 +85,7 @@ struct hvsi_struct { int n_outbuf; uint32_t vtermno; uint32_t virq; - atomic_t seqno; /* HVSI packet sequence number */ + atomic_unchecked_t seqno; /* HVSI packet sequence number */ uint16_t mctrl; uint8_t state; /* HVSI protocol state */ uint8_t flags; @@ -297,7 +297,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno) packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER; packet.hdr.len = sizeof(struct hvsi_query_response); - packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno)); + packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno)); packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER); packet.u.version = HVSI_VERSION; packet.query_seqno = cpu_to_be16(query_seqno+1); @@ -557,7 +557,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb) packet.hdr.type = VS_QUERY_PACKET_HEADER; packet.hdr.len = sizeof(struct hvsi_query); - packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno)); + packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno)); packet.verb = cpu_to_be16(verb); pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len); @@ -599,7 +599,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl) int wrote; packet.hdr.type = VS_CONTROL_PACKET_HEADER; - packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno)); + packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno)); packet.hdr.len = sizeof(struct hvsi_control); packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL); packet.mask = cpu_to_be32(HVSI_TSDTR); @@ -682,7 +682,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count) BUG_ON(count > HVSI_MAX_OUTGOING_DATA); packet.hdr.type = VS_DATA_PACKET_HEADER; - packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno)); + packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno)); packet.hdr.len = count + sizeof(struct hvsi_header); memcpy(&packet.data, buf, count); @@ -699,7 +699,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp) struct hvsi_control packet __ALIGNED__; packet.hdr.type = VS_CONTROL_PACKET_HEADER; - packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno)); + packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno)); packet.hdr.len = 6; packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL); @@ -727,7 +727,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp) tty_port_tty_set(&hp->port, tty); spin_lock_irqsave(&hp->lock, flags); - hp->port.count++; + atomic_inc(&hp->port.count); atomic_set(&hp->seqno, 0); h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE); spin_unlock_irqrestore(&hp->lock, flags); @@ -784,7 +784,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp) 
spin_lock_irqsave(&hp->lock, flags); - if (--hp->port.count == 0) { + if (atomic_dec_return(&hp->port.count) == 0) { tty_port_tty_set(&hp->port, NULL); hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */ @@ -817,9 +817,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp) spin_lock_irqsave(&hp->lock, flags); } - } else if (hp->port.count < 0) + } else if (atomic_read(&hp->port.count) < 0) printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n", - hp - hvsi_ports, hp->port.count); + hp - hvsi_ports, atomic_read(&hp->port.count)); spin_unlock_irqrestore(&hp->lock, flags); } @@ -834,7 +834,7 @@ static void hvsi_hangup(struct tty_struct *tty) tty_port_tty_set(&hp->port, NULL); spin_lock_irqsave(&hp->lock, flags); - hp->port.count = 0; + atomic_set(&hp->port.count, 0); hp->n_outbuf = 0; spin_unlock_irqrestore(&hp->lock, flags); } diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c index a270f0458..7c77b5d1e 100644 --- a/drivers/tty/hvc/hvsi_lib.c +++ b/drivers/tty/hvc/hvsi_lib.c @@ -8,7 +8,7 @@ static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet) { - packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno)); + packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno)); /* Assumes that always succeeds, works in practice */ return pv->put_chars(pv->termno, (char *)packet, packet->len); @@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv) /* Reset state */ pv->established = 0; - atomic_set(&pv->seqno, 0); + atomic_set_unchecked(&pv->seqno, 0); pr_devel("HVSI@%x: Handshaking started\n", pv->termno); diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c index 2685d59d2..a63936a0a 100644 --- a/drivers/tty/ipwireless/tty.c +++ b/drivers/tty/ipwireless/tty.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "tty.h" #include "network.h" @@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) return -ENODEV; mutex_lock(&tty->ipw_tty_mutex); - if (tty->port.count == 0) + if (atomic_read(&tty->port.count) == 0) tty->tx_bytes_queued = 0; - tty->port.count++; + atomic_inc(&tty->port.count); tty->port.tty = linux_tty; linux_tty->driver_data = tty; @@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) static void do_ipw_close(struct ipw_tty *tty) { - tty->port.count--; - - if (tty->port.count == 0) { + if (atomic_dec_return(&tty->port.count) == 0) { struct tty_struct *linux_tty = tty->port.tty; if (linux_tty != NULL) { @@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty) return; mutex_lock(&tty->ipw_tty_mutex); - if (tty->port.count == 0) { + if (atomic_read(&tty->port.count) == 0) { mutex_unlock(&tty->ipw_tty_mutex); return; } @@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, mutex_lock(&tty->ipw_tty_mutex); - if (!tty->port.count) { + if (!atomic_read(&tty->port.count)) { mutex_unlock(&tty->ipw_tty_mutex); return; } @@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty, return -ENODEV; mutex_lock(&tty->ipw_tty_mutex); - if (!tty->port.count) { + if (!atomic_read(&tty->port.count)) { mutex_unlock(&tty->ipw_tty_mutex); return -EINVAL; } @@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty) if (!tty) return -ENODEV; - if (!tty->port.count) + if (!atomic_read(&tty->port.count)) return -EINVAL; room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued; @@ -270,7 +269,7 @@ static int ipw_chars_in_buffer(struct 
tty_struct *linux_tty) if (!tty) return 0; - if (!tty->port.count) + if (!atomic_read(&tty->port.count)) return 0; return tty->tx_bytes_queued; @@ -351,7 +350,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty) if (!tty) return -ENODEV; - if (!tty->port.count) + if (!atomic_read(&tty->port.count)) return -EINVAL; return get_control_lines(tty); @@ -367,7 +366,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, if (!tty) return -ENODEV; - if (!tty->port.count) + if (!atomic_read(&tty->port.count)) return -EINVAL; return set_control_lines(tty, set, clear); @@ -381,7 +380,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, if (!tty) return -ENODEV; - if (!tty->port.count) + if (!atomic_read(&tty->port.count)) return -EINVAL; /* FIXME: Exactly how is the tty object locked here .. */ @@ -537,7 +536,7 @@ void ipwireless_tty_free(struct ipw_tty *tty) * are gone */ mutex_lock(&ttyj->ipw_tty_mutex); } - while (ttyj->port.count) + while (atomic_read(&ttyj->port.count)) do_ipw_close(ttyj); ipwireless_disassociate_network_ttys(network, ttyj->channel_idx); diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index 234123b0c..2e2b1f7ec 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c @@ -1258,7 +1258,7 @@ static void kgdbfdc_write_char(u8 chr) kgdbfdc_push_one(); } -static struct kgdb_io kgdbfdc_io_ops = { +static struct kgdb_io kgdbfdc_io_ops __read_only = { .name = "kgdbfdc", .read_char = kgdbfdc_read_char, .write_char = kgdbfdc_write_char, diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index feaa6edda..81bab86fd 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -1186,7 +1186,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp) } ch = &brd->ports[port % MAX_PORTS_PER_BOARD]; - ch->port.count++; + atomic_inc(&ch->port.count); tty->driver_data = ch; tty_port_tty_set(&ch->port, tty); mutex_lock(&ch->port.mutex); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 54cab59e2..3c05ac4ca 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) spin_lock_init(&dlci->lock); mutex_init(&dlci->mutex); dlci->fifo = &dlci->_fifo; - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) { + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) { kfree(dlci); return NULL; } @@ -2652,7 +2652,7 @@ static inline void muxnet_put(struct gsm_mux_net *mux_net) kref_put(&mux_net->ref, net_free); } -static int gsm_mux_net_start_xmit(struct sk_buff *skb, +static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb, struct net_device *net) { struct gsm_mux_net *mux_net = netdev_priv(net); @@ -2943,7 +2943,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp) struct gsm_dlci *dlci = tty->driver_data; struct tty_port *port = &dlci->port; - port->count++; + atomic_inc(&port->count); tty_port_tty_set(port, tty); dlci->modem_rx = 0; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index bdf0e6e89..ea92f7e59 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1478,7 +1478,7 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag) static void n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) + char *fp, size_t count) { struct n_tty_data *ldata = tty->disc_data; size_t n, head; @@ -1498,7 +1498,7 @@ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp, static void n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char 
*cp, - char *fp, int count) + char *fp, size_t count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; @@ -1515,7 +1515,7 @@ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp, static void n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) + char *fp, size_t count) { char flag = TTY_NORMAL; @@ -1529,7 +1529,7 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp, static void n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) + char *fp, size_t count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; @@ -1563,7 +1563,7 @@ n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp, static void n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) + char *fp, size_t count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; @@ -1588,7 +1588,7 @@ n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp, } static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) + char *fp, size_t count) { struct n_tty_data *ldata = tty->disc_data; bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty)); @@ -1666,10 +1666,10 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, */ static int n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count, int flow) + char *fp, size_t count, int flow) { struct n_tty_data *ldata = tty->disc_data; - int room, n, rcvd = 0, overflow; + size_t room, n, rcvd = 0, overflow; down_read(&tty->termios_rwsem); @@ -1692,15 +1692,16 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, room = N_TTY_BUF_SIZE - (ldata->read_head - tail); if (I_PARMRK(tty)) room = (room + 2) / 3; - room--; - if (room <= 0) { + if (room <= 1) { overflow = ldata->icanon && ldata->canon_head == tail; - if (overflow && room < 0) + if (overflow && room == 0) ldata->read_head--; room = overflow; ldata->no_room = flow && !room; - } else + } else { + room--; overflow = 0; + } n = min(count, room); if (!n) @@ -2465,7 +2466,8 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) { *ops = n_tty_ops; ops->owner = NULL; - ops->refcount = ops->flags = 0; + atomic_set(&ops->refcount, 0); + ops->flags = 0; } EXPORT_SYMBOL_GPL(n_tty_inherit_ops); diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index a23fa5ed1..eff0d0783 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -800,7 +800,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) return retval; } -static struct file_operations ptmx_fops __ro_after_init; +static file_operations_no_const ptmx_fops __read_only; static void __init unix98_pty_init(void) { @@ -856,8 +856,10 @@ static void __init unix98_pty_init(void) panic("Couldn't register Unix98 pts driver"); /* Now create the /dev/ptmx special device */ + pax_open_kernel(); tty_default_fops(&ptmx_fops); - ptmx_fops.open = ptmx_open; + const_cast(ptmx_fops.open) = ptmx_open; + pax_close_kernel(); cdev_init(&ptmx_cdev, &ptmx_fops); if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) || diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index b0cc47c77..daeb1832d 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -906,7 +906,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp) tty->driver_data = info; tty_port_tty_set(port, tty); - if (port->count++ == 0) { + if 
(atomic_inc_return(&port->count) == 1) { atomic_inc(&rp_num_ports_open); #ifdef ROCKET_DEBUG_OPEN @@ -915,7 +915,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp) #endif } #ifdef ROCKET_DEBUG_OPEN - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count); + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count)); #endif /* @@ -992,7 +992,7 @@ static void rp_close(struct tty_struct *tty, struct file *filp) return; #ifdef ROCKET_DEBUG_OPEN - printk(KERN_INFO "rp_close ttyR%d, count = %d\n", info->line, info->port.count); + printk(KERN_INFO "rp_close ttyR%d, count = %d\n", info->line, atomic_read(&info->port.count)); #endif if (tty_port_close_start(port, tty, filp) == 0) @@ -1500,7 +1500,7 @@ static void rp_hangup(struct tty_struct *tty) #endif rp_flush_buffer(tty); spin_lock_irqsave(&info->port.lock, flags); - if (info->port.count) + if (atomic_read(&info->port.count)) atomic_dec(&rp_num_ports_open); clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); spin_unlock_irqrestore(&info->port.lock, flags); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index e8819aa20..33d2176ca 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -488,9 +488,9 @@ static void univ8250_release_port(struct uart_port *port) static void univ8250_rsa_support(struct uart_ops *ops) { - ops->config_port = univ8250_config_port; - ops->request_port = univ8250_request_port; - ops->release_port = univ8250_release_port; + const_cast(ops->config_port) = univ8250_config_port; + const_cast(ops->request_port) = univ8250_request_port; + const_cast(ops->release_port) = univ8250_release_port; } #else @@ -533,8 +533,10 @@ static void __init serial8250_isa_init_ports(void) } /* chain base port ops to support Remote Supervisor Adapter */ - univ8250_port_ops = *base_ops; + pax_open_kernel(); + memcpy((void *)&univ8250_port_ops, base_ops, sizeof univ8250_port_ops); univ8250_rsa_support(&univ8250_port_ops); + pax_close_kernel(); if (share_irqs) irqflag = IRQF_SHARED; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index b98c1578f..33d1d98eb 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5569,7 +5569,7 @@ static struct pci_device_id serial_pci_tbl[] = { }; static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, - pci_channel_state_t state) + enum pci_channel_state state) { struct serial_private *priv = pci_get_drvdata(dev); diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index 315c84979..7a3188474 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -3255,7 +3255,7 @@ set_serial_info(struct e100_serial *info, goto check_and_exit; } - if (info->port.count > 1) + if (atomic_read(&info->port.count) > 1) return -EBUSY; /* @@ -3588,7 +3588,7 @@ rs_close(struct tty_struct *tty, struct file * filp) printk("[%d] rs_close ttyS%d, count = %d\n", current->pid, info->line, info->count); #endif - if ((tty->count == 1) && (info->port.count != 1)) { + if ((tty->count == 1) && (atomic_read(&info->port.count) != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always @@ -3598,15 +3598,15 @@ rs_close(struct tty_struct *tty, struct file * filp) */ printk(KERN_ERR "rs_close: bad serial port count; tty->count is 1, " - "info->count is %d\n", info->port.count); - info->port.count = 1; + "info->count is %d\n", atomic_read(&info->port.count)); + atomic_set(&info->port.count, 1); } - if (--info->port.count < 0) { + if (atomic_dec_return(&info->port.count) < 0) { printk(KERN_ERR "rs_close: bad serial port count for ttyS%d: %d\n", - info->line, info->port.count); - info->port.count = 0; + info->line, atomic_read(&info->port.count)); + atomic_set(&info->port.count, 0); } - if (info->port.count) { + if (atomic_read(&info->port.count)) { local_irq_restore(flags); return; } @@ -3731,7 +3731,7 @@ rs_hangup(struct tty_struct *tty) rs_flush_buffer(tty); shutdown(info); info->event = 0; - info->port.count = 0; + atomic_set(&info->port.count, 0); tty_port_set_active(&info->port, 0); info->port.tty = NULL; wake_up_interruptible(&info->port.open_wait); @@ -3774,10 +3774,10 @@ block_til_ready(struct tty_struct *tty, struct file * filp, add_wait_queue(&info->port.open_wait, &wait); #ifdef SERIAL_DEBUG_OPEN printk("block_til_ready before block: ttyS%d, count = %d\n", - info->line, info->port.count); + info->line, atomic_read(&info->port.count)); #endif local_irq_save(flags); - info->port.count--; + atomic_dec(&info->port.count); local_irq_restore(flags); info->port.blocked_open++; while (1) { @@ -3807,7 +3807,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp, } #ifdef SERIAL_DEBUG_OPEN printk("block_til_ready blocking: ttyS%d, count = %d\n", - info->line, info->port.count); + info->line, atomic_read(&info->port.count)); #endif tty_unlock(tty); schedule(); @@ -3816,11 +3816,11 @@ block_til_ready(struct tty_struct *tty, struct file * filp, set_current_state(TASK_RUNNING); remove_wait_queue(&info->port.open_wait, &wait); if (!tty_hung_up_p(filp)) - info->port.count++; + atomic_inc(&info->port.count); info->port.blocked_open--; #ifdef SERIAL_DEBUG_OPEN printk("block_til_ready after blocking: ttyS%d, count = %d\n", - info->line, info->port.count); + info->line, atomic_read(&info->port.count)); #endif if (retval) return retval; @@ -3858,10 +3858,10 @@ rs_open(struct tty_struct *tty, struct file * filp) #ifdef SERIAL_DEBUG_OPEN printk("[%d] rs_open %s, count = %d\n", current->pid, tty->name, - info->port.count); + atomic_read(&info->port.count)); #endif - info->port.count++; + atomic_inc(&info->port.count); tty->driver_data = info; info->port.tty = tty; @@ -3870,7 +3870,7 @@ rs_open(struct tty_struct *tty, struct file * filp) /* * If DMA is enabled try to allocate the irq's. 
*/ - if (info->port.count == 1) { + if (atomic_read(&info->port.count) == 1) { allocated_resources = 1; if (info->dma_in_enabled) { if (request_irq(info->dma_in_irq_nbr, diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c index e5c42fef6..f091b02ba 100644 --- a/drivers/tty/serial/ioc4_serial.c +++ b/drivers/tty/serial/ioc4_serial.c @@ -437,7 +437,7 @@ struct ioc4_soft { } is_intr_info[MAX_IOC4_INTR_ENTS]; /* Number of entries active in the above array */ - atomic_t is_num_intrs; + atomic_unchecked_t is_num_intrs; } is_intr_type[IOC4_NUM_INTR_TYPES]; /* is_ir_lock must be held while @@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type, BUG_ON(!((type == IOC4_SIO_INTR_TYPE) || (type == IOC4_OTHER_INTR_TYPE))); - i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1; + i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1; BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0))); /* Save off the lower level interrupt handler */ @@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg) soft = arg; for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) { - num_intrs = (int)atomic_read( + num_intrs = (int)atomic_read_unchecked( &soft->is_intr_type[intr_type].is_num_intrs); this_mir = this_ir = pending_intrs(soft, intr_type); diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c index a119f11bf..120444e0a 100644 --- a/drivers/tty/serial/jsm/jsm_driver.c +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -336,7 +336,7 @@ static struct pci_driver jsm_driver = { }; static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct jsm_board *brd = pci_get_drvdata(pdev); diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c index 117df1516..8f7486f5a 100644 --- a/drivers/tty/serial/kgdb_nmi.c +++ b/drivers/tty/serial/kgdb_nmi.c @@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options) * I/O utilities that messages sent to the console will automatically * be displayed on the dbg_io. */ - dbg_io_ops->is_console = true; + pax_open_kernel(); + const_cast(dbg_io_ops->is_console) = true; + pax_close_kernel(); return 0; } diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index a260cde74..c3951659d 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -24,8 +24,9 @@ #define MAX_CONFIG_LEN 40 static struct kgdb_io kgdboc_io_ops; +static struct kgdb_io kgdboc_io_ops_console; -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */ +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. 
*/ static int configured = -1; static char config[MAX_CONFIG_LEN]; @@ -151,6 +152,8 @@ static void cleanup_kgdboc(void) kgdboc_unregister_kbd(); if (configured == 1) kgdb_unregister_io_module(&kgdboc_io_ops); + else if (configured == 2) + kgdb_unregister_io_module(&kgdboc_io_ops_console); } static int configure_kgdboc(void) @@ -160,13 +163,13 @@ static int configure_kgdboc(void) int err; char *cptr = config; struct console *cons; + int is_console = 0; err = kgdboc_option_setup(config); if (err || !strlen(config) || isspace(config[0])) goto noconfig; err = -ENODEV; - kgdboc_io_ops.is_console = 0; kgdb_tty_driver = NULL; kgdboc_use_kms = 0; @@ -187,7 +190,7 @@ static int configure_kgdboc(void) int idx; if (cons->device && cons->device(cons, &idx) == p && idx == tty_line) { - kgdboc_io_ops.is_console = 1; + is_console = 1; break; } cons = cons->next; @@ -197,7 +200,13 @@ static int configure_kgdboc(void) kgdb_tty_line = tty_line; do_register: - err = kgdb_register_io_module(&kgdboc_io_ops); + if (is_console) { + err = kgdb_register_io_module(&kgdboc_io_ops_console); + configured = 2; + } else { + err = kgdb_register_io_module(&kgdboc_io_ops); + configured = 1; + } if (err) goto noconfig; @@ -205,8 +214,6 @@ static int configure_kgdboc(void) if (err) goto nmi_con_failed; - configured = 1; - return 0; nmi_con_failed: @@ -223,7 +230,7 @@ static int configure_kgdboc(void) static int __init init_kgdboc(void) { /* Already configured? */ - if (configured == 1) + if (configured >= 1) return 0; return configure_kgdboc(); @@ -245,7 +252,7 @@ static void kgdboc_put_char(u8 chr) kgdb_tty_line, chr); } -static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) +static int param_set_kgdboc_var(const char *kmessage, const struct kernel_param *kp) { int len = strlen(kmessage); @@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) if (config[len - 1] == '\n') config[len - 1] = '\0'; - if (configured == 1) + if (configured >= 1) cleanup_kgdboc(); /* Go and configure with the new params. 
*/ @@ -304,12 +311,21 @@ static void kgdboc_post_exp_handler(void) kgdboc_restore_input(); } -static struct kgdb_io kgdboc_io_ops = { +static struct kgdb_io kgdboc_io_ops __read_only = { + .name = "kgdboc", + .read_char = kgdboc_get_char, + .write_char = kgdboc_put_char, + .pre_exception = kgdboc_pre_exp_handler, + .post_exception = kgdboc_post_exp_handler, +}; + +static struct kgdb_io kgdboc_io_ops_console __read_only = { .name = "kgdboc", .read_char = kgdboc_get_char, .write_char = kgdboc_put_char, .pre_exception = kgdboc_pre_exp_handler, .post_exception = kgdboc_post_exp_handler, + .is_console = 1 }; #ifdef CONFIG_KGDB_SERIAL_CONSOLE diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 7312e7e01..0a0f8b6da 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1726,7 +1726,7 @@ static struct uart_driver msm_uart_driver = { .cons = MSM_CONSOLE, }; -static atomic_t msm_uart_next_id = ATOMIC_INIT(0); +static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0); static const struct of_device_id msm_uartdm_table[] = { { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 }, @@ -1750,7 +1750,7 @@ static int msm_serial_probe(struct platform_device *pdev) line = pdev->id; if (line < 0) - line = atomic_inc_return(&msm_uart_next_id) - 1; + line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1; if (unlikely(line < 0 || line >= UART_NR)) return -ENXIO; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index f44615fa4..12db64da9 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -976,11 +976,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port) ourport->tx_in_progress = 0; } +static int s3c64xx_serial_startup(struct uart_port *port); static int s3c24xx_serial_startup(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); int ret; + /* Startup sequence is different for s3c64xx and higher SoC's */ + if (s3c24xx_serial_has_interrupt_mask(port)) + return s3c64xx_serial_startup(port); + dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n", port, (unsigned long long)port->mapbase, port->membase); @@ -1687,10 +1692,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, /* setup info for port */ port->dev = &platdev->dev; - /* Startup sequence is different for s3c64xx and higher SoC's */ - if (s3c24xx_serial_has_interrupt_mask(port)) - s3c24xx_serial_ops.startup = s3c64xx_serial_startup; - port->uartclk = 1; if (cfg->uart_flags & UPF_CONS_FLOW) { diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index f2303f390..e67c47747 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1468,7 +1468,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) state = drv->state + tty->index; port = &state->port; spin_lock_irq(&port->lock); - --port->count; + atomic_dec(&port->count); spin_unlock_irq(&port->lock); return; } @@ -1589,7 +1589,7 @@ static void uart_hangup(struct tty_struct *tty) uart_flush_buffer(tty); uart_shutdown(tty, state); spin_lock_irqsave(&port->lock, flags); - port->count = 0; + atomic_set(&port->count, 0); spin_unlock_irqrestore(&port->lock, flags); tty_port_set_active(port, 0); tty_port_tty_set(port, NULL); diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c index c13e27ecb..335a5129e 100644 --- a/drivers/tty/synclink.c +++ b/drivers/tty/synclink.c @@ -3075,7 +3075,7 @@ static void mgsl_close(struct tty_struct *tty, struct 
file * filp) if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_close(%s) entry, count=%d\n", - __FILE__,__LINE__, info->device_name, info->port.count); + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count)); if (tty_port_close_start(&info->port, tty, filp) == 0) goto cleanup; @@ -3093,7 +3093,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp) cleanup: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__, - tty->driver->name, info->port.count); + tty->driver->name, atomic_read(&info->port.count)); } /* end of mgsl_close() */ @@ -3192,8 +3192,8 @@ static void mgsl_hangup(struct tty_struct *tty) mgsl_flush_buffer(tty); shutdown(info); - - info->port.count = 0; + + atomic_set(&info->port.count, 0); tty_port_set_active(&info->port, 0); info->port.tty = NULL; @@ -3281,10 +3281,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready before block on %s count=%d\n", - __FILE__,__LINE__, tty->driver->name, port->count ); + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); spin_lock_irqsave(&info->irq_spinlock, flags); - port->count--; + atomic_dec(&port->count); spin_unlock_irqrestore(&info->irq_spinlock, flags); port->blocked_open++; @@ -3311,7 +3311,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready blocking on %s count=%d\n", - __FILE__,__LINE__, tty->driver->name, port->count ); + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); tty_unlock(tty); schedule(); @@ -3323,12 +3323,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, /* FIXME: Racy on hangup during close wait */ if (!tty_hung_up_p(filp)) - port->count++; + atomic_inc(&port->count); port->blocked_open--; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready after blocking on %s count=%d\n", - __FILE__,__LINE__, tty->driver->name, port->count ); + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); if (!retval) tty_port_set_active(port, 1); @@ -3380,7 +3380,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp) if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_open(%s), old ref count = %d\n", - __FILE__,__LINE__,tty->driver->name, info->port.count); + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count)); info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 
1 : 0; @@ -3390,10 +3390,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp) spin_unlock_irqrestore(&info->netlock, flags); goto cleanup; } - info->port.count++; + atomic_inc(&info->port.count); spin_unlock_irqrestore(&info->netlock, flags); - if (info->port.count == 1) { + if (atomic_read(&info->port.count) == 1) { /* 1st open on this device, init hardware */ retval = startup(info); if (retval < 0) @@ -3417,8 +3417,8 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp) if (retval) { if (tty->count == 1) info->port.tty = NULL; /* tty layer will release tty struct */ - if(info->port.count) - info->port.count--; + if (atomic_read(&info->port.count)) + atomic_dec(&info->port.count); } return retval; @@ -7637,7 +7637,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, unsigned short new_crctype; /* return error if TTY interface open */ - if (info->port.count) + if (atomic_read(&info->port.count)) return -EBUSY; switch (encoding) @@ -7733,7 +7733,7 @@ static int hdlcdev_open(struct net_device *dev) /* arbitrate between network and tty opens */ spin_lock_irqsave(&info->netlock, flags); - if (info->port.count != 0 || info->netcount != 0) { + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) { printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); spin_unlock_irqrestore(&info->netlock, flags); return -EBUSY; @@ -7819,7 +7819,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); /* return error if TTY interface open */ - if (info->port.count) + if (atomic_read(&info->port.count)) return -EBUSY; if (cmd != SIOCWANDEV) diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 7aca2d467..45a71214e 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp) tty->driver_data = info; info->port.tty = tty; - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count)); + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count))); mutex_lock(&info->port.mutex); info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 
1 : 0; @@ -682,10 +682,10 @@ static int open(struct tty_struct *tty, struct file *filp) mutex_unlock(&info->port.mutex); goto cleanup; } - info->port.count++; + atomic_inc(&info->port.count); spin_unlock_irqrestore(&info->netlock, flags); - if (info->port.count == 1) { + if (atomic_read(&info->port.count) == 1) { /* 1st open on this device, init hardware */ retval = startup(info); if (retval < 0) { @@ -706,8 +706,8 @@ static int open(struct tty_struct *tty, struct file *filp) if (retval) { if (tty->count == 1) info->port.tty = NULL; /* tty layer will release tty struct */ - if(info->port.count) - info->port.count--; + if(atomic_read(&info->port.count)) + atomic_dec(&info->port.count); } DBGINFO(("%s open rc=%d\n", info->device_name, retval)); @@ -720,7 +720,7 @@ static void close(struct tty_struct *tty, struct file *filp) if (sanity_check(info, tty->name, "close")) return; - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count)); + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count))); if (tty_port_close_start(&info->port, tty, filp) == 0) goto cleanup; @@ -737,7 +737,7 @@ static void close(struct tty_struct *tty, struct file *filp) tty_port_close_end(&info->port, tty); info->port.tty = NULL; cleanup: - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count)); + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count))); } static void hangup(struct tty_struct *tty) @@ -755,7 +755,7 @@ static void hangup(struct tty_struct *tty) shutdown(info); spin_lock_irqsave(&info->port.lock, flags); - info->port.count = 0; + atomic_set(&info->port.count, 0); info->port.tty = NULL; spin_unlock_irqrestore(&info->port.lock, flags); tty_port_set_active(&info->port, 0); @@ -1435,7 +1435,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, unsigned short new_crctype; /* return error if TTY interface open */ - if (info->port.count) + if (atomic_read(&info->port.count)) return -EBUSY; DBGINFO(("%s hdlcdev_attach\n", info->device_name)); @@ -1531,7 +1531,7 @@ static int hdlcdev_open(struct net_device *dev) /* arbitrate between network and tty opens */ spin_lock_irqsave(&info->netlock, flags); - if (info->port.count != 0 || info->netcount != 0) { + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) { DBGINFO(("%s hdlc_open busy\n", dev->name)); spin_unlock_irqrestore(&info->netlock, flags); return -EBUSY; @@ -1616,7 +1616,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) DBGINFO(("%s hdlcdev_ioctl\n", dev->name)); /* return error if TTY interface open */ - if (info->port.count) + if (atomic_read(&info->port.count)) return -EBUSY; if (cmd != SIOCWANDEV) @@ -2403,7 +2403,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id) if (port == NULL) continue; spin_lock(&port->lock); - if ((port->port.count || port->netcount) && + if ((atomic_read(&port->port.count) || port->netcount) && port->pending_bh && !port->bh_running && !port->bh_requested) { DBGISR(("%s bh queued\n", port->device_name)); @@ -3282,7 +3282,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, add_wait_queue(&port->open_wait, &wait); spin_lock_irqsave(&info->lock, flags); - port->count--; + atomic_dec(&port->count); spin_unlock_irqrestore(&info->lock, flags); port->blocked_open++; @@ -3317,7 +3317,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, remove_wait_queue(&port->open_wait, &wait); if (!tty_hung_up_p(filp)) - 
port->count++; + atomic_inc(&port->count); port->blocked_open--; if (!retval) diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c index dec156586..bbf9fcc71 100644 --- a/drivers/tty/synclinkmp.c +++ b/drivers/tty/synclinkmp.c @@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp) if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s open(), old ref count = %d\n", - __FILE__,__LINE__,tty->driver->name, info->port.count); + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count)); info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; @@ -760,10 +760,10 @@ static int open(struct tty_struct *tty, struct file *filp) spin_unlock_irqrestore(&info->netlock, flags); goto cleanup; } - info->port.count++; + atomic_inc(&info->port.count); spin_unlock_irqrestore(&info->netlock, flags); - if (info->port.count == 1) { + if (atomic_read(&info->port.count) == 1) { /* 1st open on this device, init hardware */ retval = startup(info); if (retval < 0) @@ -787,8 +787,8 @@ static int open(struct tty_struct *tty, struct file *filp) if (retval) { if (tty->count == 1) info->port.tty = NULL; /* tty layer will release tty struct */ - if(info->port.count) - info->port.count--; + if(atomic_read(&info->port.count)) + atomic_dec(&info->port.count); } return retval; @@ -806,7 +806,7 @@ static void close(struct tty_struct *tty, struct file *filp) if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s close() entry, count=%d\n", - __FILE__,__LINE__, info->device_name, info->port.count); + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count)); if (tty_port_close_start(&info->port, tty, filp) == 0) goto cleanup; @@ -825,7 +825,7 @@ static void close(struct tty_struct *tty, struct file *filp) cleanup: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__, - tty->driver->name, info->port.count); + tty->driver->name, atomic_read(&info->port.count)); } /* Called by tty_hangup() when a hangup is signaled. @@ -848,7 +848,7 @@ static void hangup(struct tty_struct *tty) shutdown(info); spin_lock_irqsave(&info->port.lock, flags); - info->port.count = 0; + atomic_set(&info->port.count, 0); info->port.tty = NULL; spin_unlock_irqrestore(&info->port.lock, flags); tty_port_set_active(&info->port, 1); @@ -1551,7 +1551,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, unsigned short new_crctype; /* return error if TTY interface open */ - if (info->port.count) + if (atomic_read(&info->port.count)) return -EBUSY; switch (encoding) @@ -1647,7 +1647,7 @@ static int hdlcdev_open(struct net_device *dev) /* arbitrate between network and tty opens */ spin_lock_irqsave(&info->netlock, flags); - if (info->port.count != 0 || info->netcount != 0) { + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) { printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); spin_unlock_irqrestore(&info->netlock, flags); return -EBUSY; @@ -1733,7 +1733,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); /* return error if TTY interface open */ - if (info->port.count) + if (atomic_read(&info->port.count)) return -EBUSY; if (cmd != SIOCWANDEV) @@ -2610,7 +2610,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id) * do not request bottom half processing if the * device is not open in a normal mode. 
*/ - if ( port && (port->port.count || port->netcount) && + if ( port && (atomic_read(&port->port.count) || port->netcount) && port->pending_bh && !port->bh_running && !port->bh_requested ) { if ( debug_level >= DEBUG_LEVEL_ISR ) @@ -3300,10 +3300,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s block_til_ready() before block, count=%d\n", - __FILE__,__LINE__, tty->driver->name, port->count ); + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); spin_lock_irqsave(&info->lock, flags); - port->count--; + atomic_dec(&port->count); spin_unlock_irqrestore(&info->lock, flags); port->blocked_open++; @@ -3330,7 +3330,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s block_til_ready() count=%d\n", - __FILE__,__LINE__, tty->driver->name, port->count ); + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); tty_unlock(tty); schedule(); @@ -3340,12 +3340,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, set_current_state(TASK_RUNNING); remove_wait_queue(&port->open_wait, &wait); if (!tty_hung_up_p(filp)) - port->count++; + atomic_inc(&port->count); port->blocked_open--; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s block_til_ready() after, count=%d\n", - __FILE__,__LINE__, tty->driver->name, port->count ); + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); if (!retval) tty_port_set_active(port, 1); diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 701c085bb..de038f58a 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -1090,7 +1090,7 @@ EXPORT_SYMBOL(unregister_sysrq_key); static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - if (count) { + if (count && capable(CAP_SYS_ADMIN)) { char c; if (get_user(c, buf)) diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 734a635e7..0518bb71e 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -105,6 +105,8 @@ #include #include +#include + #undef TTY_DEBUG_HANGUP #ifdef TTY_DEBUG_HANGUP # define tty_debug_hangup(tty, f, args...) 
tty_debug(tty, f, ##args) @@ -2286,6 +2288,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p) char ch, mbz = 0; struct tty_ldisc *ld; + if (gr_handle_tiocsti(tty)) + return -EPERM; if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ch, p)) @@ -3560,7 +3564,7 @@ EXPORT_SYMBOL(tty_devnum); void tty_default_fops(struct file_operations *fops) { - *fops = tty_fops; + memcpy((void *)fops, &tty_fops, sizeof(tty_fops)); } /* diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 68947f6de..1f85fef28 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -68,7 +68,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc) raw_spin_lock_irqsave(&tty_ldiscs_lock, flags); tty_ldiscs[disc] = new_ldisc; new_ldisc->num = disc; - new_ldisc->refcount = 0; + atomic_set(&new_ldisc->refcount, 0); raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags); return ret; @@ -96,7 +96,7 @@ int tty_unregister_ldisc(int disc) return -EINVAL; raw_spin_lock_irqsave(&tty_ldiscs_lock, flags); - if (tty_ldiscs[disc]->refcount) + if (atomic_read(&tty_ldiscs[disc]->refcount)) ret = -EBUSY; else tty_ldiscs[disc] = NULL; @@ -117,7 +117,7 @@ static struct tty_ldisc_ops *get_ldops(int disc) if (ldops) { ret = ERR_PTR(-EAGAIN); if (try_module_get(ldops->owner)) { - ldops->refcount++; + atomic_inc(&ldops->refcount); ret = ldops; } } @@ -130,7 +130,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops) unsigned long flags; raw_spin_lock_irqsave(&tty_ldiscs_lock, flags); - ldops->refcount--; + atomic_dec(&ldops->refcount); module_put(ldops->owner); raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags); } diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index c3f9d93ba..f81070c99 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port) unsigned long flags; spin_lock_irqsave(&port->lock, flags); - port->count = 0; + atomic_set(&port->count, 0); tty = port->tty; if (tty) set_bit(TTY_IO_ERROR, &tty->flags); @@ -388,7 +388,7 @@ int tty_port_block_til_ready(struct tty_port *port, /* The port lock protects the port counts */ spin_lock_irqsave(&port->lock, flags); - port->count--; + atomic_dec(&port->count); port->blocked_open++; spin_unlock_irqrestore(&port->lock, flags); @@ -429,7 +429,7 @@ int tty_port_block_til_ready(struct tty_port *port, we must not mess that up further */ spin_lock_irqsave(&port->lock, flags); if (!tty_hung_up_p(filp)) - port->count++; + atomic_inc(&port->count); port->blocked_open--; spin_unlock_irqrestore(&port->lock, flags); if (retval == 0) @@ -462,18 +462,18 @@ int tty_port_close_start(struct tty_port *port, return 0; spin_lock_irqsave(&port->lock, flags); - if (tty->count == 1 && port->count != 1) { + if (tty->count == 1 && atomic_read(&port->count) != 1) { tty_warn(tty, "%s: tty->count = 1 port count = %d\n", __func__, - port->count); - port->count = 1; + atomic_read(&port->count)); + atomic_set(&port->count, 1); } - if (--port->count < 0) { + if (atomic_dec_return(&port->count) < 0) { tty_warn(tty, "%s: bad port count (%d)\n", __func__, - port->count); - port->count = 0; + atomic_read(&port->count)); + atomic_set(&port->count, 0); } - if (port->count) { + if (atomic_read(&port->count)) { spin_unlock_irqrestore(&port->lock, flags); return 0; } @@ -567,7 +567,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty, struct file *filp) { spin_lock_irq(&port->lock); - ++port->count; + atomic_inc(&port->count); 
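Note: the tty_port.c hunks here, like the per-driver hunks earlier in this section, all apply one transformation: the plain int reference count port->count becomes an atomic_t manipulated with atomic_inc()/atomic_dec_and_test()/atomic_inc_return(), so the open/close bookkeeping falls under the PaX REFCOUNT overflow checks. A minimal sketch of the resulting idiom, using a hypothetical example_port structure rather than the patched struct tty_port:

#include <linux/atomic.h>

struct example_port {
	atomic_t count;		/* was: int count */
};

/* nonzero on the first open, mirroring the old "port->count++ == 0" test */
static int example_open(struct example_port *p)
{
	return atomic_inc_return(&p->count) == 1;
}

/* nonzero on the last close, mirroring the old "--port->count == 0" test */
static int example_close(struct example_port *p)
{
	return atomic_dec_and_test(&p->count);
}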
spin_unlock_irq(&port->lock); tty_port_tty_set(port, tty); diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index ece10e6b7..1621e805b 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -630,6 +630,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag) kbd->kbdmode == VC_OFF) && value != KVAL(K_SAK)) return; /* SAK is allowed even in raw mode */ + +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + { + void *func = fn_handler[value]; + if (func == fn_show_state || func == fn_show_ptregs || + func == fn_show_mem) + return; + } +#endif + fn_handler[value](vc); } @@ -1858,9 +1868,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) return -EFAULT; - if (!capable(CAP_SYS_TTY_CONFIG)) - perm = 0; - switch (cmd) { case KDGKBENT: /* Ensure another thread doesn't free it under us */ @@ -1875,6 +1882,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, spin_unlock_irqrestore(&kbd_event_lock, flags); return put_user(val, &user_kbe->kb_value); case KDSKBENT: + if (!capable(CAP_SYS_TTY_CONFIG)) + perm = 0; + if (!perm) return -EPERM; if (!i && v == K_NOSUCHMAP) { @@ -1965,9 +1975,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) int i, j, k; int ret; - if (!capable(CAP_SYS_TTY_CONFIG)) - perm = 0; - kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); if (!kbs) { ret = -ENOMEM; @@ -2001,6 +2008,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) kfree(kbs); return ((p && *p) ? -EOVERFLOW : 0); case KDSKBSENT: + if (!capable(CAP_SYS_TTY_CONFIG)) + perm = 0; + if (!perm) { ret = -EPERM; goto reterr; diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index fba021f57..977a54ec0 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -25,6 +25,7 @@ #include #include #include +#include #define UIO_MAX_DEVICES (1U << MINORBITS) @@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event)); } static DEVICE_ATTR_RO(event); @@ -401,7 +402,7 @@ void uio_event_notify(struct uio_info *info) { struct uio_device *idev = info->uio_dev; - atomic_inc(&idev->event); + atomic_inc_unchecked(&idev->event); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_IN); } @@ -454,7 +455,7 @@ static int uio_open(struct inode *inode, struct file *filep) } listener->dev = idev; - listener->event_count = atomic_read(&idev->event); + listener->event_count = atomic_read_unchecked(&idev->event); filep->private_data = listener; if (idev->info->open) { @@ -505,7 +506,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait) return -EIO; poll_wait(filep, &idev->wait, wait); - if (listener->event_count != atomic_read(&idev->event)) + if (listener->event_count != atomic_read_unchecked(&idev->event)) return POLLIN | POLLRDNORM; return 0; } @@ -530,7 +531,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf, do { set_current_state(TASK_INTERRUPTIBLE); - event_count = atomic_read(&idev->event); + event_count = atomic_read_unchecked(&idev->event); if (event_count != listener->event_count) { __set_current_state(TASK_RUNNING); if (copy_to_user(buf, &event_count, count)) @@ -588,9 +589,13 @@ static 
ssize_t uio_write(struct file *filep, const char __user *buf, static int uio_find_mem_index(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; + unsigned long size; if (vma->vm_pgoff < MAX_UIO_MAPS) { - if (idev->info->mem[vma->vm_pgoff].size == 0) + size = idev->info->mem[vma->vm_pgoff].size; + if (size == 0) + return -1; + if (vma->vm_end - vma->vm_start > size) return -1; return (int)vma->vm_pgoff; } @@ -822,7 +827,7 @@ int __uio_register_device(struct module *owner, idev->owner = owner; idev->info = info; init_waitqueue_head(&idev->wait); - atomic_set(&idev->event, 0); + atomic_set_unchecked(&idev->event, 0); ret = uio_get_minor(idev); if (ret) diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 9d46c28bd..5b9abb06e 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -474,7 +474,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev, ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp); if (ret < 2) return -EINVAL; - if (index < 0 || index > 0x7f) + if (index > 0x7f) return -EINVAL; if (tmp < 0 || tmp > len - pos) return -EINVAL; diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 4dec9df87..36e0606a2 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char if (printk_ratelimit()) atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n", __func__, vpi, vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } @@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char if (length > ATM_MAX_AAL5_PDU) { atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n", __func__, length, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } @@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char if (sarb->len < pdu_length) { atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n", __func__, pdu_length, sarb->len, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", __func__, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char if (printk_ratelimit()) atm_err(instance, "%s: no memory for skb (length: %u)!\n", __func__, length); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto out; } @@ -415,7 +415,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); out: skb_trim(sarb, 0); } @@ -613,7 +613,7 @@ static void usbatm_tx_process(unsigned long data) struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc; usbatm_pop(vcc, skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); skb = skb_dequeue(&instance->sndqueue); } @@ -757,11 +757,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page if (!left--) return sprintf(page, "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n", - atomic_read(&atm_dev->stats.aal5.tx), - atomic_read(&atm_dev->stats.aal5.tx_err), - 
atomic_read(&atm_dev->stats.aal5.rx), - atomic_read(&atm_dev->stats.aal5.rx_err), - atomic_read(&atm_dev->stats.aal5.rx_drop)); + atomic_read_unchecked(&atm_dev->stats.aal5.tx), + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err), + atomic_read_unchecked(&atm_dev->stats.aal5.rx), + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err), + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop)); if (!left--) { if (instance->disconnected) diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index ef04b50e6..7582d9925 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -119,7 +119,7 @@ static const char format_endpt[] = * time it gets called. */ static struct device_connect_event { - atomic_t count; + atomic_unchecked_t count; wait_queue_head_t wait; } device_event = { .count = ATOMIC_INIT(1), @@ -157,7 +157,7 @@ static const struct class_info clas_info[] = { void usbfs_conn_disc_event(void) { - atomic_add(2, &device_event.count); + atomic_add_unchecked(2, &device_event.count); wake_up(&device_event.wait); } @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file, poll_wait(file, &device_event.wait, wait); - event_count = atomic_read(&device_event.count); + event_count = atomic_read_unchecked(&device_event.count); if (file->f_version != event_count) { file->f_version = event_count; return POLLIN | POLLRDNORM; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 4016dae74..b4d3ad453 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -290,7 +290,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, struct usb_dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; ssize_t ret = 0; - unsigned len; + size_t len; loff_t pos; int i; @@ -332,22 +332,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) { struct usb_config_descriptor *config = (struct usb_config_descriptor *)dev->rawdescriptors[i]; - unsigned int length = le16_to_cpu(config->wTotalLength); + size_t length = le16_to_cpu(config->wTotalLength); if (*ppos < pos + length) { /* The descriptor may claim to be longer than it * really is. Here is the actual allocated length. 
*/ - unsigned alloclen = + size_t alloclen = le16_to_cpu(dev->config[i].desc.wTotalLength); - len = length - (*ppos - pos); + len = length + pos - *ppos; if (len > nbytes) len = nbytes; /* Simply don't write (skip over) unallocated parts */ if (alloclen > (*ppos - pos)) { - alloclen -= (*ppos - pos); + alloclen = alloclen + pos - *ppos; if (copy_to_user(buf, dev->rawdescriptors[i] + (*ppos - pos), min(len, alloclen))) { @@ -1682,7 +1682,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb } } as->urb->dev = ps->dev; - as->urb->pipe = (uurb->type << 30) | + as->urb->pipe = ((unsigned int)uurb->type << 30) | __create_pipe(ps->dev, uurb->endpoint & 0xf) | (uurb->endpoint & USB_DIR_IN); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 479e223f9..ba82b7524 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1631,7 +1631,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) */ usb_get_urb(urb); atomic_inc(&urb->use_count); - atomic_inc(&urb->dev->urbnum); + atomic_inc_unchecked(&urb->dev->urbnum); usbmon_urb_submit(&hcd->self, urb); /* NOTE requirements on root-hub callers (usbfs and the hub @@ -1658,7 +1658,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) urb->hcpriv = NULL; INIT_LIST_HEAD(&urb->urb_list); atomic_dec(&urb->use_count); - atomic_dec(&urb->dev->urbnum); + atomic_dec_unchecked(&urb->dev->urbnum); if (atomic_read(&urb->reject)) wake_up(&usb_kill_urb_queue); usb_put_urb(urb); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index aef81a16e..cf6b2686f 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -4753,6 +4754,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, goto done; return; } + + if (gr_handle_new_usb()) + goto done; + if (hub_is_superspeed(hub->hdev)) unit_load = 150; else diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index c953a0f1c..54c64f446 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -259,7 +259,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr, struct usb_device *udev; udev = to_usb_device(dev); - return sprintf(buf, "%d\n", atomic_read(&udev->urbnum)); + return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum)); } static DEVICE_ATTR_RO(urbnum); diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 592151461..b33e5552a 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -455,7 +455,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, set_dev_node(&dev->dev, dev_to_node(bus->controller)); dev->state = USB_STATE_ATTACHED; dev->lpm_disable_count = 1; - atomic_set(&dev->urbnum, 0); + atomic_set_unchecked(&dev->urbnum, 0); INIT_LIST_HEAD(&dev->ep0.urb_list); dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index ea73afb02..aecbc2663 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len) #ifdef CONFIG_KGDB static struct kgdb_io kgdbdbgp_io_ops; -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops) +static struct kgdb_io kgdbdbgp_io_ops_console; +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console) #else #define dbgp_kgdb_mode (0) #endif @@ -1036,12 +1037,19 @@ static void kgdbdbgp_write_char(u8 chr) 
early_dbgp_write(NULL, &chr, 1); } -static struct kgdb_io kgdbdbgp_io_ops = { +static struct kgdb_io kgdbdbgp_io_ops __read_only = { .name = "kgdbdbgp", .read_char = kgdbdbgp_read_char, .write_char = kgdbdbgp_write_char, }; +static struct kgdb_io kgdbdbgp_io_ops_console __read_only = { + .name = "kgdbdbgp", + .read_char = kgdbdbgp_read_char, + .write_char = kgdbdbgp_write_char, + .is_console = 1 +}; + static int kgdbdbgp_wait_time; static int __init kgdbdbgp_parse_config(char *str) @@ -1057,8 +1065,10 @@ static int __init kgdbdbgp_parse_config(char *str) ptr++; kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10); } - kgdb_register_io_module(&kgdbdbgp_io_ops); - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1; + if (early_dbgp_console.index != -1) + kgdb_register_io_module(&kgdbdbgp_io_ops_console); + else + kgdb_register_io_module(&kgdbdbgp_io_ops); return 0; } diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index 0473d619d..5e9caa5aa 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -223,7 +223,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req) netif_wake_queue(dev); } -static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t pn_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct phonet_port *port = netdev_priv(dev); struct f_phonet *fp; diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index f2ac0cbc2..40382623c 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "u_uac1.h" diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index e0cd1e4c8..0a41c5533 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -752,9 +752,9 @@ static int gs_open(struct tty_struct *tty, struct file *file) spin_lock_irq(&port->port_lock); /* already open? Great. */ - if (port->port.count) { + if (atomic_read(&port->port.count)) { status = 0; - port->port.count++; + atomic_inc(&port->port.count); /* currently opening/closing? wait ... */ } else if (port->openclose) { @@ -813,7 +813,7 @@ static int gs_open(struct tty_struct *tty, struct file *file) tty->driver_data = port; port->port.tty = tty; - port->port.count = 1; + atomic_set(&port->port.count, 1); port->openclose = false; /* if connected, start the I/O stream */ @@ -855,11 +855,11 @@ static void gs_close(struct tty_struct *tty, struct file *file) spin_lock_irq(&port->port_lock); - if (port->port.count != 1) { - if (port->port.count == 0) + if (atomic_read(&port->port.count) != 1) { + if (atomic_read(&port->port.count) == 0) WARN_ON(1); else - --port->port.count; + atomic_dec(&port->port.count); goto exit; } @@ -869,7 +869,7 @@ static void gs_close(struct tty_struct *tty, struct file *file) * and sleep if necessary */ port->openclose = true; - port->port.count = 0; + atomic_set(&port->port.count, 0); gser = port->port_usb; if (gser && gser->disconnect) @@ -1324,7 +1324,7 @@ static int gs_closed(struct gs_port *port) int cond; spin_lock_irq(&port->port_lock); - cond = (port->port.count == 0) && !port->openclose; + cond = (atomic_read(&port->port.count) == 0) && !port->openclose; spin_unlock_irq(&port->port_lock); return cond; } @@ -1469,7 +1469,7 @@ int gserial_connect(struct gserial *gser, u8 port_num) /* if it's already open, start I/O ... 
and notify the serial * protocol about open/close status (connect/disconnect). */ - if (port->port.count) { + if (atomic_read(&port->port.count)) { pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); gs_start_io(port); if (gser->connect) @@ -1516,7 +1516,7 @@ void gserial_disconnect(struct gserial *gser) port->port_usb = NULL; gser->ioport = NULL; - if (port->port.count > 0 || port->openclose) { + if (atomic_read(&port->port.count) > 0 || port->openclose) { wake_up_interruptible(&port->drain_wait); if (port->port.tty) tty_hangup(port->port.tty); @@ -1529,7 +1529,7 @@ void gserial_disconnect(struct gserial *gser) /* finally, free any unused/unusable I/O buffers */ spin_lock_irqsave(&port->port_lock, flags); - if (port->port.count == 0 && !port->openclose) + if (atomic_read(&port->port.count) == 0 && !port->openclose) gs_buf_free(&port->port_write_buf); gs_free_requests(gser->out, &port->read_pool, NULL); gs_free_requests(gser->out, &port->read_queue, NULL); diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c index c78c84138..48fd281c0 100644 --- a/drivers/usb/gadget/function/u_uac1.c +++ b/drivers/usb/gadget/function/u_uac1.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "u_uac1.h" diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index a81d9ab86..59dcbc698 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -2458,7 +2458,7 @@ static int dummy_setup(struct usb_hcd *hcd) struct dummy *dum; dum = *((void **)dev_get_platdata(hcd->self.controller)); - hcd->self.sg_tablesize = ~0; + hcd->self.sg_tablesize = SG_ALL; if (usb_hcd_is_primary_hcd(hcd)) { dum->hs_hcd = hcd_to_dummy_hcd(hcd); dum->hs_hcd->dum = dum; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 063064801..a35d964cf 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -573,7 +573,7 @@ static int ehci_init(struct usb_hcd *hcd) /* Accept arbitrarily long scatter-gather lists */ if (!(hcd->driver->flags & HCD_LOCAL_MEM)) - hcd->self.sg_tablesize = ~0; + hcd->self.sg_tablesize = SG_ALL; /* Prepare for unlinking active QHs */ ehci->old_current = ~0; diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 74f62d68f..459983a4a 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -777,7 +777,7 @@ static struct urb *request_single_step_set_feature_urb( urb->transfer_flags = URB_DIR_IN; usb_get_urb(urb); atomic_inc(&urb->use_count); - atomic_inc(&urb->dev->urbnum); + atomic_inc_unchecked(&urb->dev->urbnum); urb->setup_dma = dma_map_single( hcd->self.controller, urb->setup_packet, @@ -844,7 +844,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) urb->status = -EINPROGRESS; usb_get_urb(urb); atomic_inc(&urb->use_count); - atomic_inc(&urb->dev->urbnum); + atomic_inc_unchecked(&urb->dev->urbnum); retval = submit_single_step_set_feature(hcd, urb, 0); if (!retval && !wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) { diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index eca3710d8..eca7127f6 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -44,9 +44,9 @@ static int qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf, - size_t len, int token, int maxpacket) + size_t len, u32 token, int maxpacket) { - int i, count; + u32 i, count; u64 addr = buf; /* one buffer entry per 4K ... 
first might be short or unaligned */ diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 66efa9a67..50b719d3c 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -5025,7 +5025,7 @@ static int hcd_fotg210_init(struct usb_hcd *hcd) /* Accept arbitrarily long scatter-gather lists */ if (!(hcd->driver->flags & HCD_LOCAL_MEM)) - hcd->self.sg_tablesize = ~0; + hcd->self.sg_tablesize = SG_ALL; return 0; } diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 1db0626c8..2e9f5ea5b 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c @@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; struct device *dev = &wa->usb_iface->dev; - u8 mas_le[UWB_NUM_MAS/8]; + u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL); + + if (mas_le == NULL) + return -ENOMEM; /* Set the stream index */ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), @@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, WUSB_REQ_SET_WUSB_MAS, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, - mas_le, 32, USB_CTRL_SET_TIMEOUT); + mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT); if (result < 0) dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result); out: + kfree(mas_le); + return result; } @@ -812,7 +817,7 @@ static int hwahc_probe(struct usb_interface *usb_iface, goto error_alloc; } usb_hcd->wireless = 1; - usb_hcd->self.sg_tablesize = ~0; + usb_hcd->self.sg_tablesize = SG_ALL; wusbhc = usb_hcd_to_wusbhc(usb_hcd); hwahc = container_of(wusbhc, struct hwahc, wusbhc); hwahc_init(hwahc); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 86612ac3f..d43250595 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -444,7 +444,7 @@ static int ohci_init (struct ohci_hcd *ohci) struct usb_hcd *hcd = ohci_to_hcd(ohci); /* Accept arbitrarily long scatter-gather lists */ - hcd->self.sg_tablesize = ~0; + hcd->self.sg_tablesize = SG_ALL; if (distrust_firmware) ohci->flags |= OHCI_QUIRK_HUB_POWER; diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index 672cea307..31a730dbe 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h @@ -125,7 +125,7 @@ struct r8a66597 { unsigned short interval_map; unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE]; unsigned char dma_map; - unsigned int max_root_hub; + unsigned char max_root_hub; struct list_head child_device; unsigned long child_connect_map[4]; diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 5d3d914ab..5534498f8 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -570,7 +570,7 @@ static int uhci_start(struct usb_hcd *hcd) hcd->uses_new_polling = 1; /* Accept arbitrarily long scatter-gather lists */ if (!(hcd->driver->flags & HCD_LOCAL_MEM)) - hcd->self.sg_tablesize = ~0; + hcd->self.sg_tablesize = SG_ALL; spin_lock_init(&uhci->lock); setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout, diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 954abfd50..fca40d7aa 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -32,7 +32,7 @@ #define SSIC_PORT_CFG2 0x880c #define SSIC_PORT_CFG2_OFFSET 0x30 #define PROG_DONE (1 << 30) -#define SSIC_PORT_UNUSED (1 << 31) +#define SSIC_PORT_UNUSED (1U << 
31) /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 521c1816a..1f06bca14 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1911,9 +1911,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, * unsigned). Play it safe and say we didn't transfer anything. */ if (urb->actual_length > urb->transfer_buffer_length) { - xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n", + xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, trans. len = %u\n", urb->transfer_buffer_length, - urb->actual_length); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); urb->actual_length = 0; if (td->urb->transfer_flags & URB_SHORT_NOT_OK) *status = -EREMOTEIO; @@ -1992,10 +1992,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, return finish_td(xhci, td, event_trb, event, ep, status, false); case COMP_STOP: /* Did we stop at data stage? */ - if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) { + if (td->urb->transfer_buffer_length >= EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))) + td->urb->actual_length = + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + else + td->urb->actual_length = + td->urb->transfer_buffer_length + 1; + } /* fall through */ case COMP_STOP_INVAL: return finish_td(xhci, td, event_trb, event, ep, status, false); @@ -2009,12 +2014,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, /* else fall through */ case COMP_STALL: /* Did we transfer part of the data (middle) phase? */ - if (event_trb != ep_ring->dequeue && - event_trb != td->last_trb) - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - else if (!td->urb_length_set) + if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) { + if (td->urb->transfer_buffer_length >= EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))) + td->urb->actual_length = + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + else + td->urb->actual_length = + td->urb->transfer_buffer_length + 1; + } else if (!td->urb_length_set) td->urb->actual_length = 0; return finish_td(xhci, td, event_trb, event, ep, status, false); @@ -2047,9 +2055,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, * the last TRB. */ td->urb_length_set = true; - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + if (td->urb->transfer_buffer_length >= EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))) + td->urb->actual_length = + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + else + BUG(); xhci_dbg(xhci, "Waiting for status " "stage event\n"); return 0; @@ -2244,11 +2255,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, /* Fast path - was this the last TRB in the TD for this URB? 
*/ } else if (event_trb == td->last_trb) { if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - if (td->urb->transfer_buffer_length < - td->urb->actual_length) { + if (td->urb->transfer_buffer_length < EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))) { xhci_warn(xhci, "HC gave bad length " "of %d bytes left\n", EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); @@ -2257,7 +2264,10 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, *status = -EREMOTEIO; else *status = 0; - } + } else + td->urb->actual_length = + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); /* Don't overwrite a previously set error code */ if (*status == -EINPROGRESS) { if (td->urb->transfer_flags & URB_SHORT_NOT_OK) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 34e23c7d7..28ac19479 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -4825,7 +4825,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) int retval; /* Accept arbitrarily long scatter-gather lists */ - hcd->self.sg_tablesize = ~0; + hcd->self.sg_tablesize = SG_ALL; /* support to build packet from discontinuous buffers */ hcd->self.no_sg_constraint = 1; diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index da5ff401a..e0f8d8823 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -84,7 +84,7 @@ struct appledisplay { struct mutex sysfslock; /* concurrent read and write */ }; -static atomic_t count_displays = ATOMIC_INIT(0); +static atomic_unchecked_t count_displays = ATOMIC_INIT(0); static void appledisplay_complete(struct urb *urb) { @@ -283,7 +283,7 @@ static int appledisplay_probe(struct usb_interface *iface, /* Register backlight device */ snprintf(bl_name, sizeof(bl_name), "appledisplay%d", - atomic_inc_return(&count_displays) - 1); + atomic_inc_return_unchecked(&count_displays) - 1); memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = 0xff; diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index 460cebf32..eb16bb4cf 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c @@ -1368,29 +1368,77 @@ static void sisusbdummycon_init(struct vc_data *vc, int init) vc_resize(vc, 80, 25); } -static int sisusbdummycon_dummy(void) +static void sisusb_con_deinit(struct vc_data *a) { - return 0; } -#define SISUSBCONDUMMY (void *)sisusbdummycon_dummy +static void sisusb_con_clear(struct vc_data *a, int b, int c, int d, int e) +{ +} + +static void sisusb_con_putc(struct vc_data *a, int b, int c, int d) +{ +} + +static void sisusb_con_putcs(struct vc_data *a, const unsigned short *b, int c, int d, int e) +{ +} + +static void sisusb_con_cursor(struct vc_data *a, int b) +{ +} + +static int sisusb_con_scroll(struct vc_data *a, int b, int c, int d, int e) +{ + return 0; +} + +static int sisusb_con_switch(struct vc_data *a) +{ + return 0; +} + +static int sisusb_con_blank(struct vc_data *a, int b, int c) +{ + return 0; +} + +static int sisusb_con_font_set(struct vc_data *a, struct console_font *b, unsigned c) +{ + return 0; +} + +static int sisusb_con_font_get(struct vc_data *a, struct console_font *b) +{ + return 0; +} + +static int sisusb_con_font_default(struct vc_data *a, struct console_font *b, char *c) +{ + return 0; +} + 
+static int sisusb_con_font_copy(struct vc_data *a, int b) +{ + return 0; +} static const struct consw sisusb_dummy_con = { .owner = THIS_MODULE, .con_startup = sisusbdummycon_startup, .con_init = sisusbdummycon_init, - .con_deinit = SISUSBCONDUMMY, - .con_clear = SISUSBCONDUMMY, - .con_putc = SISUSBCONDUMMY, - .con_putcs = SISUSBCONDUMMY, - .con_cursor = SISUSBCONDUMMY, - .con_scroll = SISUSBCONDUMMY, - .con_switch = SISUSBCONDUMMY, - .con_blank = SISUSBCONDUMMY, - .con_font_set = SISUSBCONDUMMY, - .con_font_get = SISUSBCONDUMMY, - .con_font_default = SISUSBCONDUMMY, - .con_font_copy = SISUSBCONDUMMY, + .con_deinit = sisusb_con_deinit, + .con_clear = sisusb_con_clear, + .con_putc = sisusb_con_putc, + .con_putcs = sisusb_con_putcs, + .con_cursor = sisusb_con_cursor, + .con_scroll = sisusb_con_scroll, + .con_switch = sisusb_con_switch, + .con_blank = sisusb_con_blank, + .con_font_set = sisusb_con_font_set, + .con_font_get = sisusb_con_font_get, + .con_font_default = sisusb_con_font_default, + .con_font_copy = sisusb_con_font_copy, }; int diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 8967715fe..4a3791bd1 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options) info->port = port; - ++port->port.count; + atomic_inc(&port->port.count); if (!tty_port_initialized(&port->port)) { if (serial->type->set_termios) { /* @@ -172,7 +172,7 @@ static int usb_console_setup(struct console *co, char *options) } /* Now that any required fake tty operations are completed restore * the tty port count */ - --port->port.count; + atomic_dec(&port->port.count); /* The console is special in terms of closing the device so * indicate this port is now acting as a system console. 
*/ port->port.console = 1; @@ -184,7 +184,7 @@ static int usb_console_setup(struct console *co, char *options) tty_port_tty_set(&port->port, NULL); tty_kref_put(tty); reset_open_count: - port->port.count = 0; + atomic_set(&port->port.count, 0); usb_autopm_put_interface(serial->interface); error_get_interface: usb_serial_put(serial); @@ -195,7 +195,7 @@ static int usb_console_setup(struct console *co, char *options) static void usb_console_write(struct console *co, const char *buf, unsigned count) { - static struct usbcons_info *info = &usbcons_info; + struct usbcons_info *info = &usbcons_info; struct usb_serial_port *port = info->port; struct usb_serial *serial; int retval = -ENODEV; diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 1a59f335b..35f4c565f 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -709,7 +709,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) if (need_auto_sense) { int temp_result; struct scsi_eh_save ses; - int sense_size = US_SENSE_SIZE; + unsigned int sense_size = US_SENSE_SIZE; struct scsi_sense_hdr sshdr; const u8 *scdd; u8 fm_ili; diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 2cba13a53..e6bee5d9d 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -944,7 +944,7 @@ static void usb_stor_scan_dwork(struct work_struct *work) clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags); } -static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf) +static unsigned short usb_stor_sg_tablesize(struct usb_interface *intf) { struct usb_device *usb_dev = interface_to_usbdev(intf); diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h index 8fae28b40..8b4bfecf0 100644 --- a/drivers/usb/storage/usb.h +++ b/drivers/usb/storage/usb.h @@ -64,7 +64,7 @@ struct us_unusual_dev { __u8 useProtocol; __u8 useTransport; int (*initFunction)(struct us_data *); -}; +} __do_const; /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. 
*/ diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h index 88b71c4e0..31cc1ca6d 100644 --- a/drivers/usb/usbip/vhci.h +++ b/drivers/usb/usbip/vhci.h @@ -96,7 +96,7 @@ struct vhci_hcd { unsigned resuming:1; unsigned long re_timeout; - atomic_t seqnum; + atomic_unchecked_t seqnum; /* * NOTE: @@ -108,7 +108,7 @@ struct vhci_hcd { extern int vhci_num_controllers; extern struct platform_device **vhci_pdevs; -extern struct attribute_group vhci_attr_group; +extern attribute_group_no_const vhci_attr_group; /* vhci_hcd.c */ void rh_port_connect(struct vhci_device *vdev, enum usb_device_speed speed); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 03eccf29a..dcf12263e 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -476,7 +476,7 @@ static void vhci_tx_urb(struct urb *urb) spin_lock_irqsave(&vdev->priv_lock, flags); - priv->seqnum = atomic_inc_return(&vhci->seqnum); + priv->seqnum = atomic_inc_return_unchecked(&vhci->seqnum); if (priv->seqnum == 0xffff) dev_info(&urb->dev->dev, "seqnum max\n"); @@ -730,7 +730,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) return -ENOMEM; } - unlink->seqnum = atomic_inc_return(&vhci->seqnum); + unlink->seqnum = atomic_inc_return_unchecked(&vhci->seqnum); if (unlink->seqnum == 0xffff) pr_info("seqnum max\n"); @@ -956,7 +956,7 @@ static int vhci_start(struct usb_hcd *hcd) vdev->rhport = rhport; } - atomic_set(&vhci->seqnum, 0); + atomic_set_unchecked(&vhci->seqnum, 0); spin_lock_init(&vhci->lock); hcd->power_budget = 0; /* no limit */ diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index fc2d319e2..1886be34c 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -82,7 +82,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, if (!urb) { pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); pr_info("max seqnum %d\n", - atomic_read(&vhci->seqnum)); + atomic_read_unchecked(&vhci->seqnum)); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return; } diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index c404017c1..7a4f9d4fd 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -68,7 +68,7 @@ static ssize_t status_show_vhci(int pdev_nr, char *out) if (vdev->ud.status == VDEV_ST_USED) { out += sprintf(out, "%03u %08x ", vdev->speed, vdev->devid); - out += sprintf(out, "%16p %s", + out += sprintf(out, "%16pK %s", vdev->ud.tcp_socket, dev_name(&vdev->udev->dev)); @@ -383,7 +383,7 @@ static void finish_status_attrs(void) kfree(status_attrs); } -struct attribute_group vhci_attr_group = { +attribute_group_no_const vhci_attr_group = { .attrs = NULL, }; diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c index e429b59f6..e0840c648 100644 --- a/drivers/usb/usbip/vudc_rx.c +++ b/drivers/usb/usbip/vudc_rx.c @@ -142,7 +142,7 @@ static int v_recv_cmd_submit(struct vudc *udc, urb_p->urb->status = -EINPROGRESS; /* FIXME: more pipe setup to please usbip_common */ - urb_p->urb->pipe &= ~(3 << 30); + urb_p->urb->pipe &= ~(3U << 30); switch (urb_p->ep->type) { case USB_ENDPOINT_XFER_BULK: urb_p->urb->pipe |= (PIPE_BULK << 30); diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h index edc726715..9f65ce299 100644 --- a/drivers/usb/wusbcore/wa-hc.h +++ b/drivers/usb/wusbcore/wa-hc.h @@ -240,7 +240,7 @@ struct wahc { spinlock_t xfer_list_lock; struct work_struct xfer_enqueue_work; struct work_struct xfer_error_work; - atomic_t 
xfer_id_count; + atomic_unchecked_t xfer_id_count; kernel_ulong_t quirks; }; @@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa) INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run); INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run); wa->dto_in_use = 0; - atomic_set(&wa->xfer_id_count, 1); + atomic_set_unchecked(&wa->xfer_id_count, 1); /* init the buf in URBs */ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) usb_init_urb(&(wa->buf_in_urbs[index])); diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 167fcc71f..7685175c6 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer) */ static void wa_xfer_id_init(struct wa_xfer *xfer) { - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count); } /* Return the xfer's ID. */ diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 031bc08d0..1bac856f8 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -1292,7 +1292,7 @@ static void vfio_pci_remove(struct pci_dev *pdev) } static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, - pci_channel_state_t state) + enum pci_channel_state state) { struct vfio_pci_device *vdev; struct vfio_device *device; diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 3bb02c60a..a01ff3878 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh, static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p) { __virtio16 v = 0; - int rc = get_user(v, (__force __virtio16 __user *)p); + int rc = get_user(v, (__force_user __virtio16 *)p); *val = vringh16_to_cpu(vrh, v); return rc; } @@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val) { __virtio16 v = cpu_to_vringh16(vrh, val); - return put_user(v, (__force __virtio16 __user *)p); + return put_user(v, (__force_user __virtio16 *)p); } static inline int copydesc_user(void *dst, const void *src, size_t len) { - return copy_from_user(dst, (__force void __user *)src, len) ? + return copy_from_user(dst, (void __force_user *)src, len) ? -EFAULT : 0; } @@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned int num) { - return copy_to_user((__force void __user *)dst, src, + return copy_to_user((void __force_user *)dst, src, sizeof(*dst) * num) ? -EFAULT : 0; } static inline int xfer_from_user(void *src, void *dst, size_t len) { - return copy_from_user(dst, (__force void __user *)src, len) ? + return copy_from_user(dst, (void __force_user *)src, len) ? -EFAULT : 0; } static inline int xfer_to_user(void *dst, void *src, size_t len) { - return copy_to_user((__force void __user *)dst, src, len) ? + return copy_to_user((void __force_user *)dst, src, len) ? -EFAULT : 0; } @@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features, vrh->last_used_idx = 0; vrh->vring.num = num; /* vring expects kernel addresses, but only used via accessors. 
*/ - vrh->vring.desc = (__force struct vring_desc *)desc; - vrh->vring.avail = (__force struct vring_avail *)avail; - vrh->vring.used = (__force struct vring_used *)used; + vrh->vring.desc = (__force_kernel struct vring_desc *)desc; + vrh->vring.avail = (__force_kernel struct vring_avail *)avail; + vrh->vring.used = (__force_kernel struct vring_used *)used; return 0; } EXPORT_SYMBOL(vringh_init_user); @@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh, static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val) { - ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val); + ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val); return 0; } diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c index 84a110a71..96312c3af 100644 --- a/drivers/video/backlight/kb3886_bl.c +++ b/drivers/video/backlight/kb3886_bl.c @@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo; static unsigned long kb3886bl_flags; #define KB3886BL_SUSPENDED 0x01 -static struct dmi_system_id kb3886bl_device_table[] __initdata = { +static const struct dmi_system_id kb3886bl_device_table[] __initconst = { { .ident = "Sahara Touch-iT", .matches = { diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c index 9269d5685..78d2a060e 100644 --- a/drivers/video/console/dummycon.c +++ b/drivers/video/console/dummycon.c @@ -41,12 +41,60 @@ static void dummycon_init(struct vc_data *vc, int init) vc_resize(vc, DUMMY_COLUMNS, DUMMY_ROWS); } -static int dummycon_dummy(void) +static void dummycon_deinit(struct vc_data *a) +{ +} + +static void dummycon_clear(struct vc_data *a, int b, int c, int d, int e) +{ +} + +static void dummycon_putc(struct vc_data *a, int b, int c, int d) +{ +} + +static void dummycon_putcs(struct vc_data *a, const unsigned short *b, int c, int d, int e) +{ +} + +static void dummycon_cursor(struct vc_data *a, int b) +{ +} + +static int dummycon_scroll(struct vc_data *a, int b, int c, int d, int e) +{ + return 0; +} + +static int dummycon_switch(struct vc_data *a) { return 0; } -#define DUMMY (void *)dummycon_dummy +static int dummycon_blank(struct vc_data *a, int b, int c) +{ + return 0; +} + +static int dummycon_font_set(struct vc_data *a, struct console_font *b, unsigned c) +{ + return 0; +} + +static int dummycon_font_get(struct vc_data *a, struct console_font *b) +{ + return 0; +} + +static int dummycon_font_default(struct vc_data *a, struct console_font *b , char *c) +{ + return 0; +} + +static int dummycon_font_copy(struct vc_data *a, int b) +{ + return 0; +} /* * The console `switch' structure for the dummy console @@ -58,17 +106,17 @@ const struct consw dummy_con = { .owner = THIS_MODULE, .con_startup = dummycon_startup, .con_init = dummycon_init, - .con_deinit = DUMMY, - .con_clear = DUMMY, - .con_putc = DUMMY, - .con_putcs = DUMMY, - .con_cursor = DUMMY, - .con_scroll = DUMMY, - .con_switch = DUMMY, - .con_blank = DUMMY, - .con_font_set = DUMMY, - .con_font_get = DUMMY, - .con_font_default = DUMMY, - .con_font_copy = DUMMY, + .con_deinit = dummycon_deinit, + .con_clear = dummycon_clear, + .con_putc = dummycon_putc, + .con_putcs = dummycon_putcs, + .con_cursor = dummycon_cursor, + .con_scroll = dummycon_scroll, + .con_switch = dummycon_switch, + .con_blank = dummycon_blank, + .con_font_set = dummycon_font_set, + .con_font_get = dummycon_font_get, + .con_font_default = dummycon_font_default, + .con_font_copy = dummycon_font_copy, }; EXPORT_SYMBOL_GPL(dummy_con); diff --git a/drivers/video/console/fbcon.c 
b/drivers/video/console/fbcon.c index b87f5cfda..6aad4f8c1 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -106,7 +106,7 @@ static int fbcon_softback_size = 32768; static unsigned long softback_buf, softback_curr; static unsigned long softback_in; static unsigned long softback_top, softback_end; -static int softback_lines; +static long softback_lines; /* console mappings */ static int first_fb_vc; static int last_fb_vc = MAX_NR_CONSOLES - 1; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 11576611a..453a37381 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1404,21 +1404,26 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, * The console `switch' structure for the VGA based console */ -static int vgacon_dummy(struct vc_data *c) +static void vgacon_clear(struct vc_data *vc, int a, int b, int c, int d) +{ +} + +static void vgacon_putc(struct vc_data *vc, int a, int b, int c) { - return 0; } -#define DUMMY (void *) vgacon_dummy +static void vgacon_putcs(struct vc_data *vc, const unsigned short *a, int b, int c, int d) +{ +} const struct consw vga_con = { .owner = THIS_MODULE, .con_startup = vgacon_startup, .con_init = vgacon_init, .con_deinit = vgacon_deinit, - .con_clear = DUMMY, - .con_putc = DUMMY, - .con_putcs = DUMMY, + .con_clear = vgacon_clear, + .con_putc = vgacon_putc, + .con_putcs = vgacon_putcs, .con_cursor = vgacon_cursor, .con_scroll = vgacon_scroll, .con_switch = vgacon_switch, diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 1928cb2b5..76330ec24 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf, return -ENOSPC; err = 0; - if ((count + p) > fbmemlength) { + if (count > (fbmemlength - p)) { count = fbmemlength - p; err = -ENOSPC; } diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index fa07242a7..23708c588 100644 --- a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c @@ -144,7 +144,7 @@ enum { }; /* Must match above enum */ -static char * const r128_family[] = { +static const char * const r128_family[] = { "AGP", "PCI", "PRO AGP", diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index 11026e726..2b7e1bb35 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -1335,10 +1335,14 @@ static int atyfb_set_par(struct fb_info *info) par->accel_flags = var->accel_flags; /* hack */ if (var->accel_flags) { - info->fbops->fb_sync = atyfb_sync; + pax_open_kernel(); + const_cast(info->fbops->fb_sync) = atyfb_sync; + pax_close_kernel(); info->flags &= ~FBINFO_HWACCEL_DISABLED; } else { - info->fbops->fb_sync = NULL; + pax_open_kernel(); + const_cast(info->fbops->fb_sync) = NULL; + pax_close_kernel(); info->flags |= FBINFO_HWACCEL_DISABLED; } diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c index 51f29d627..2c1533910 100644 --- a/drivers/video/fbdev/aty/mach64_ct.c +++ b/drivers/video/fbdev/aty/mach64_ct.c @@ -630,13 +630,14 @@ static void aty_resume_pll_ct(const struct fb_info *info, aty_st_pll_ct(EXT_VPLL_CNTL, pll->ct.ext_vpll_cntl, par); } -static int dummy(void) +static int aty_set_dac(const struct fb_info * info, + const union aty_pll * pll, u32 bpp, u32 accel) { return 0; } const struct aty_dac_ops aty_dac_ct = { - .set_dac = (void *) dummy, + 
.set_dac = aty_set_dac }; const struct aty_pll_ops aty_pll_ct = { diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c index 2fa0317ab..d687dab4e 100644 --- a/drivers/video/fbdev/aty/mach64_cursor.c +++ b/drivers/video/fbdev/aty/mach64_cursor.c @@ -8,6 +8,7 @@ #include "../core/fb_draw.h" #include +#include #ifdef __sparc__ #include @@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info) info->sprite.buf_align = 16; /* and 64 lines tall. */ info->sprite.flags = FB_PIXMAP_IO; - info->fbops->fb_cursor = atyfb_cursor; + pax_open_kernel(); + const_cast(info->fbops->fb_cursor) = atyfb_cursor; + pax_close_kernel(); return 0; } diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c index 10c988aef..f7d9299cf 100644 --- a/drivers/video/fbdev/aty/mach64_gx.c +++ b/drivers/video/fbdev/aty/mach64_gx.c @@ -894,17 +894,26 @@ static int aty_set_dac_unsupported(const struct fb_info *info, return 0; } -static int dummy(void) +static int aty_var_to_pll(const struct fb_info * info, u32 vclk_per, u32 bpp, union aty_pll * pll) { return 0; } +static u32 aty_pll_to_var(const struct fb_info * info, const union aty_pll * pll) +{ + return 0; +} + +static void aty_set_pll(const struct fb_info * info, const union aty_pll * pll) +{ +} + const struct aty_dac_ops aty_dac_unsupported = { .set_dac = aty_set_dac_unsupported, }; const struct aty_pll_ops aty_pll_unsupported = { - .var_to_pll = (void *) dummy, - .pll_to_var = (void *) dummy, - .set_pll = (void *) dummy, + .var_to_pll = aty_var_to_pll, + .pll_to_var = aty_pll_to_var, + .set_pll = aty_set_pll, }; diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index 74b5bcac8..5bddbeaaa 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -208,7 +208,9 @@ void fb_deferred_io_init(struct fb_info *info) BUG_ON(!fbdefio); mutex_init(&fbdefio->lock); - info->fbops->fb_mmap = fb_deferred_io_mmap; + pax_open_kernel(); + const_cast(info->fbops->fb_mmap) = fb_deferred_io_mmap; + pax_close_kernel(); INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work); INIT_LIST_HEAD(&fbdefio->pagelist); if (fbdefio->delay == 0) /* set a default of 1 s */ @@ -239,7 +241,9 @@ void fb_deferred_io_cleanup(struct fb_info *info) page->mapping = NULL; } - info->fbops->fb_mmap = NULL; + pax_open_kernel(); + const_cast(info->fbops->fb_mmap) = NULL; + pax_close_kernel(); mutex_destroy(&fbdefio->lock); } EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 76c1ad96f..6ec5e9488 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix, __u32 data; int err; - err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id)); + err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id)); data = (__u32) (unsigned long) fix->smem_start; err |= put_user(data, &fix32->smem_start); @@ -1435,10 +1435,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) return vm_iomap_memory(vma, start, len); } -static int -fb_open(struct inode *inode, struct file *file) -__acquires(&info->lock) -__releases(&info->lock) +static int fb_open(struct inode *inode, struct file *file) { int fbidx = iminor(inode); struct fb_info *info; @@ -1476,10 +1473,7 @@ __releases(&info->lock) return res; } -static int -fb_release(struct inode *inode, struct file *file) -__acquires(&info->lock) 
-__releases(&info->lock) +static int fb_release(struct inode *inode, struct file *file) { struct fb_info * const info = file->private_data; diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 2fd49b235..67e3d8613 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -240,7 +240,7 @@ static uint screen_fb_size; static inline int synthvid_send(struct hv_device *hdev, struct synthvid_msg *msg) { - static atomic64_t request_id = ATOMIC64_INIT(0); + static atomic64_unchecked_t request_id = ATOMIC64_INIT(0); int ret; msg->pipe_hdr.type = PIPE_MSG_DATA; @@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev, ret = vmbus_sendpacket(hdev->channel, msg, msg->vid_hdr.size + sizeof(struct pipe_msg_hdr), - atomic64_inc_return(&request_id), + atomic64_inc_return_unchecked(&request_id), VM_PKT_DATA_INBAND, 0); if (ret) diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c index 7672d2ea9..b56437f7b 100644 --- a/drivers/video/fbdev/i810/i810_accel.c +++ b/drivers/video/fbdev/i810/i810_accel.c @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space) } } printk("ringbuffer lockup!!!\n"); + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space); i810_report_error(mmio); par->dev_flags |= LOCKUP; info->pixmap.scan_align = 1; diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c index a01147fdf..5d896f879 100644 --- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c +++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c @@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo) #ifdef CONFIG_FB_MATROX_MYSTIQUE struct matrox_switch matrox_mystique = { - MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore, + .preinit = MGA1064_preinit, + .reset = MGA1064_reset, + .init = MGA1064_init, + .restore = MGA1064_restore, }; EXPORT_SYMBOL(matrox_mystique); #endif #ifdef CONFIG_FB_MATROX_G struct matrox_switch matrox_G100 = { - MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore, + .preinit = MGAG100_preinit, + .reset = MGAG100_reset, + .init = MGAG100_init, + .restore = MGAG100_restore, }; EXPORT_SYMBOL(matrox_G100); #endif diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c index 68fa037d8..0273351b2 100644 --- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c +++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c @@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo) } struct matrox_switch matrox_millennium = { - Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore + .preinit = Ti3026_preinit, + .reset = Ti3026_reset, + .init = Ti3026_init, + .restore = Ti3026_restore }; EXPORT_SYMBOL(matrox_millennium); #endif diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 11eb09439..622ee31bf 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -2176,7 +2176,7 @@ static struct pci_driver matroxfb_driver = { #define RS1056x480 14 /* 132 x 60 text */ #define RSNoxNo 15 /* 10-FF */ -static struct { int xres, yres, left, right, upper, lower, hslen, vslen, vfreq; } timmings[] __initdata = { +static struct { unsigned int xres, yres, left, right, upper, lower, hslen, vslen, vfreq; } timmings[] __initdata = { { 640, 400, 48, 16, 39, 8, 96, 2, 70 }, { 640, 480, 48, 16, 33, 10, 96, 2, 60 }, { 
 	800, 600, 144, 24, 28, 8, 112, 6, 60 },
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
index fe92eed6d..239e386ad 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
 	struct mb862xxfb_par *par = info->par;
 
 	if (info->var.bits_per_pixel == 32) {
-		info->fbops->fb_fillrect = cfb_fillrect;
-		info->fbops->fb_copyarea = cfb_copyarea;
-		info->fbops->fb_imageblit = cfb_imageblit;
+		pax_open_kernel();
+		const_cast(info->fbops->fb_fillrect) = cfb_fillrect;
+		const_cast(info->fbops->fb_copyarea) = cfb_copyarea;
+		const_cast(info->fbops->fb_imageblit) = cfb_imageblit;
+		pax_close_kernel();
 	} else {
 		outreg(disp, GC_L0EM, 3);
-		info->fbops->fb_fillrect = mb86290fb_fillrect;
-		info->fbops->fb_copyarea = mb86290fb_copyarea;
-		info->fbops->fb_imageblit = mb86290fb_imageblit;
+		pax_open_kernel();
+		const_cast(info->fbops->fb_fillrect) = mb86290fb_fillrect;
+		const_cast(info->fbops->fb_copyarea) = mb86290fb_copyarea;
+		const_cast(info->fbops->fb_imageblit) = mb86290fb_imageblit;
+		pax_close_kernel();
 	}
 	outreg(draw, GDC_REG_DRAW_BASE, 0);
 	outreg(draw, GDC_REG_MODE_MISC, 0x8000);
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index ce7dab729..89d65219c 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -660,19 +660,23 @@ static int nvidiafb_set_par(struct fb_info *info)
 	info->fix.line_length = (info->var.xres_virtual *
 				 info->var.bits_per_pixel) >> 3;
 	if (info->var.accel_flags) {
-		info->fbops->fb_imageblit = nvidiafb_imageblit;
-		info->fbops->fb_fillrect = nvidiafb_fillrect;
-		info->fbops->fb_copyarea = nvidiafb_copyarea;
-		info->fbops->fb_sync = nvidiafb_sync;
+		pax_open_kernel();
+		const_cast(info->fbops->fb_imageblit) = nvidiafb_imageblit;
+		const_cast(info->fbops->fb_fillrect) = nvidiafb_fillrect;
+		const_cast(info->fbops->fb_copyarea) = nvidiafb_copyarea;
+		const_cast(info->fbops->fb_sync) = nvidiafb_sync;
+		pax_close_kernel();
 		info->pixmap.scan_align = 4;
 		info->flags &= ~FBINFO_HWACCEL_DISABLED;
 		info->flags |= FBINFO_READS_FAST;
 		NVResetGraphics(info);
 	} else {
-		info->fbops->fb_imageblit = cfb_imageblit;
-		info->fbops->fb_fillrect = cfb_fillrect;
-		info->fbops->fb_copyarea = cfb_copyarea;
-		info->fbops->fb_sync = NULL;
+		pax_open_kernel();
+		const_cast(info->fbops->fb_imageblit) = cfb_imageblit;
+		const_cast(info->fbops->fb_fillrect) = cfb_fillrect;
+		const_cast(info->fbops->fb_copyarea) = cfb_copyarea;
+		const_cast(info->fbops->fb_sync) = NULL;
+		pax_close_kernel();
 		info->pixmap.scan_align = 1;
 		info->flags |= FBINFO_HWACCEL_DISABLED;
 		info->flags &= ~FBINFO_READS_FAST;
@@ -1164,8 +1168,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
 	info->pixmap.size = 8 * 1024;
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-	if (!hwcur)
-		info->fbops->fb_cursor = NULL;
+	if (!hwcur) {
+		pax_open_kernel();
+		const_cast(info->fbops->fb_cursor) = NULL;
+		pax_close_kernel();
+	}
 
 	info->var.accel_flags = (!noaccel);
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
index dd5468695..6ef7ef6bb 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
 	if (dssdev->name == NULL)
 		dssdev->name = dssdev->alias;
 
+	pax_open_kernel();
 	if (drv && drv->get_resolution == NULL)
-		drv->get_resolution = omapdss_default_get_resolution;
+		const_cast(drv->get_resolution) = omapdss_default_get_resolution;
 	if (drv && drv->get_recommended_bpp == NULL)
-		drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
+		const_cast(drv->get_recommended_bpp) = omapdss_default_get_recommended_bpp;
 	if (drv && drv->get_timings == NULL)
-		drv->get_timings = omapdss_default_get_timings;
+		const_cast(drv->get_timings) = omapdss_default_get_timings;
+	pax_close_kernel();
 
 	mutex_lock(&panel_list_mutex);
 	list_add_tail(&dssdev->panel_list, &panel_list);
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 5d6179ef0..f80a0f5d2 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -880,8 +880,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
 
 	switch(prod_id) {
 	case S1D13506_PROD_ID:	/* activate acceleration */
-		s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
-		s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
+		pax_open_kernel();
+		const_cast(s1d13xxxfb_fbops.fb_fillrect) = s1d13xxxfb_bitblt_solidfill;
+		const_cast(s1d13xxxfb_fbops.fb_copyarea) = s1d13xxxfb_bitblt_copyarea;
+		pax_close_kernel();
 		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
 			FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
 		break;
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 82c0a8caa..42499a1af 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
 }
 
 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
-	lcdc_sys_write_index,
-	lcdc_sys_write_data,
-	lcdc_sys_read_data,
+	.write_index = lcdc_sys_write_index,
+	.write_data = lcdc_sys_write_data,
+	.read_data = lcdc_sys_read_data,
 };
 
 static int sh_mobile_lcdc_sginit(struct fb_info *info,
diff --git a/drivers/video/fbdev/sis/sis_main.h b/drivers/video/fbdev/sis/sis_main.h
index 32e23c209..7b73082f9 100644
--- a/drivers/video/fbdev/sis/sis_main.h
+++ b/drivers/video/fbdev/sis/sis_main.h
@@ -763,7 +763,7 @@ extern void SiS_SetCH700x(struct SiS_Private *SiS_Pr, unsigned short reg, unsig
 extern unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg);
 extern void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val);
 extern void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg,
-		unsigned char myor, unsigned char myand);
+		unsigned char myor, unsigned short myand);
 extern void SiS_DDC2Delay(struct SiS_Private *SiS_Pr, unsigned int delaytime);
 extern void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo);
 extern unsigned short SiS_HandleDDC(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine,
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index ec2e7e353..0c7dc23f2 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
-		info->fbops->fb_mmap = ufx_ops_mmap;
+		pax_open_kernel();
+		const_cast(info->fbops->fb_mmap) = ufx_ops_mmap;
+		pax_close_kernel();
 	}
 
 	pr_debug("released /dev/fb%d user=%d count=%d",
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index e9c2f7ba3..87506f40f 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
 		dlfb_urb_completion(urb);
 
 error:
-	atomic_add(bytes_sent, &dev->bytes_sent);
-	atomic_add(bytes_identical, &dev->bytes_identical);
-	atomic_add(width*height*2, &dev->bytes_rendered);
+	atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
+	atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
+	atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
 	end_cycles = get_cycles();
-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
+	atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
 		    >> 10)), /* Kcycles */
 		   &dev->cpu_kcycles_used);
 
@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
 		dlfb_urb_completion(urb);
 
 error:
-	atomic_add(bytes_sent, &dev->bytes_sent);
-	atomic_add(bytes_identical, &dev->bytes_identical);
-	atomic_add(bytes_rendered, &dev->bytes_rendered);
+	atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
+	atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
+	atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
 	end_cycles = get_cycles();
-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
+	atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
 		    >> 10)), /* Kcycles */
 		   &dev->cpu_kcycles_used);
 }
@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
-		info->fbops->fb_mmap = dlfb_ops_mmap;
+		pax_open_kernel();
+		const_cast(info->fbops->fb_mmap) = dlfb_ops_mmap;
+		pax_close_kernel();
 	}
 
 	pr_warn("released /dev/fb%d user=%d count=%d\n",
@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 	return snprintf(buf, PAGE_SIZE, "%u\n",
-			atomic_read(&dev->bytes_rendered));
+			atomic_read_unchecked(&dev->bytes_rendered));
 }
 
 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 	return snprintf(buf, PAGE_SIZE, "%u\n",
-			atomic_read(&dev->bytes_identical));
+			atomic_read_unchecked(&dev->bytes_identical));
 }
 
 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 	return snprintf(buf, PAGE_SIZE, "%u\n",
-			atomic_read(&dev->bytes_sent));
+			atomic_read_unchecked(&dev->bytes_sent));
 }
 
 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 	return snprintf(buf, PAGE_SIZE, "%u\n",
-			atomic_read(&dev->cpu_kcycles_used));
+			atomic_read_unchecked(&dev->cpu_kcycles_used));
 }
 
 static ssize_t edid_show(
@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 
-	atomic_set(&dev->bytes_rendered, 0);
-	atomic_set(&dev->bytes_identical, 0);
-	atomic_set(&dev->bytes_sent, 0);
-	atomic_set(&dev->cpu_kcycles_used, 0);
+	atomic_set_unchecked(&dev->bytes_rendered, 0);
+	atomic_set_unchecked(&dev->bytes_identical, 0);
+	atomic_set_unchecked(&dev->bytes_sent, 0);
+	atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
 
 	return count;
 }
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 98af9e029..10894160b 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #include